Files
manual_slop/full_codebase_skeleton.txt

7232 lines
206 KiB
Plaintext

--- File: aggregate.py ---
# aggregate.py
from __future__ import annotations
"""
Note(Gemini):
This module orchestrates the construction of the final Markdown context string.
Instead of sending every file to the AI raw (which blows up tokens), this uses a pipeline:
1. Resolve paths (handles globs and absolute paths).
2. Build file items (raw content).
3. If 'summary_only' is true (which is the default behavior now), it pipes the files through
summarize.py to generate a compacted view.
This is essential for keeping prompt tokens low while giving the AI enough structural info
to use the MCP tools to fetch only what it needs.
"""
import tomllib
import re
import glob
from pathlib import Path, PureWindowsPath
from typing import Any
import summarize
import project_manager
from file_cache import ASTParser
def find_next_increment(output_dir: Path, namespace: str) -> int:
...
def is_absolute_with_drive(entry: str) -> bool:
...
def resolve_paths(base_dir: Path, entry: str) -> list[Path]:
...
def build_discussion_section(history: list[str]) -> str:
...
def build_files_section(base_dir: Path, files: list[str | dict[str, Any]]) -> str:
...
def build_screenshots_section(base_dir: Path, screenshots: list[str]) -> str:
...
def build_file_items(base_dir: Path, files: list[str | dict[str, Any]]) -> list[dict[str, Any]]:
"""
Return a list of dicts describing each file, for use by ai_client when it
wants to upload individual files rather than inline everything as markdown.
Each dict has:
path : Path (resolved absolute path)
entry : str (original config entry string)
content : str (file text, or error string)
error : bool
mtime : float (last modification time, for skip-if-unchanged optimization)
tier : int | None (optional tier for context management)
"""
...
def build_summary_section(base_dir: Path, files: list[str | dict[str, Any]]) -> str:
"""
Build a compact summary section using summarize.py — one short block per file.
Used as the initial <context> block instead of full file contents.
"""
...
def _build_files_section_from_items(file_items: list[dict[str, Any]]) -> str:
"""Build the files markdown section from pre-read file items (avoids double I/O)."""
...
def build_markdown_from_items(file_items: list[dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], history: list[str], summary_only: bool = False) -> str:
"""Build markdown from pre-read file items instead of re-reading from disk."""
...
def build_markdown_no_history(file_items: list[dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], summary_only: bool = False) -> str:
"""Build markdown with only files + screenshots (no history). Used for stable caching."""
...
def build_discussion_text(history: list[str]) -> str:
"""Build just the discussion history section text. Returns empty string if no history."""
...
def build_tier1_context(file_items: list[dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], history: list[str]) -> str:
"""
Tier 1 Context: Strategic/Orchestration.
Full content for core conductor files and files with tier=1, summaries for others.
"""
...
def build_tier2_context(file_items: list[dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], history: list[str]) -> str:
"""
Tier 2 Context: Architectural/Tech Lead.
Full content for all files (standard behavior).
"""
...
def build_tier3_context(file_items: list[dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], history: list[str], focus_files: list[str]) -> str:
"""
Tier 3 Context: Execution/Worker.
Full content for focus_files and files with tier=3, summaries/skeletons for others.
"""
...
def build_markdown(base_dir: Path, files: list[str | dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], history: list[str], summary_only: bool = False) -> str:
...
def run(config: dict[str, Any]) -> tuple[str, Path, list[dict[str, Any]]]:
...
def main() -> None:
# Load global config to find active project
...
if __name__ == "__main__":
main()
--- File: ai_client.py ---
# ai_client.py
from __future__ import annotations
"""
Note(Gemini):
Acts as the unified interface for multiple LLM providers (Anthropic, Gemini).
Abstracts away the differences in how they handle tool schemas, history, and caching.
For Anthropic: aggressively manages the ~200k token limit by manually culling
stale [FILES UPDATED] entries and dropping the oldest message pairs.
For Gemini: injects the initial context directly into system_instruction
during chat creation to avoid massive history bloat.
"""
# ai_client.py
import tomllib
import json
import sys
import time
import datetime
import hashlib
import difflib
import threading
import requests
from typing import Optional, Callable, Any
import os
import project_manager
import file_cache
import mcp_client
import anthropic
from gemini_cli_adapter import GeminiCliAdapter
from google import genai
from google.genai import types
from events import EventEmitter
_provider: str = "gemini"
_model: str = "gemini-2.5-flash-lite"
_temperature: float = 0.0
_max_tokens: int = 8192
_history_trunc_limit: int = 8000
# Global event emitter for API lifecycle events
events: EventEmitter = EventEmitter()
def set_model_params(temp: float, max_tok: int, trunc_limit: int = 8000) -> None:
...
def get_history_trunc_limit() -> int:
...
def set_history_trunc_limit(val: int) -> None:
...
_gemini_client: genai.Client | None = None
_gemini_chat: Any = None
_gemini_cache: Any = None
_gemini_cache_md_hash: int | None = None
_gemini_cache_created_at: float | None = None
# Gemini cache TTL in seconds. Caches are created with this TTL and
# proactively rebuilt at 90% of this value to avoid stale-reference errors.
_GEMINI_CACHE_TTL: int = 3600
_anthropic_client: anthropic.Anthropic | None = None
_anthropic_history: list[dict] = []
_anthropic_history_lock: threading.Lock = threading.Lock()
_deepseek_client: Any = None
_deepseek_history: list[dict] = []
_deepseek_history_lock: threading.Lock = threading.Lock()
_send_lock: threading.Lock = threading.Lock()
_gemini_cli_adapter: GeminiCliAdapter | None = None
# Injected by gui.py - called when AI wants to run a command.
# Signature: (script: str, base_dir: str) -> str | None
confirm_and_run_callback: Callable[[str, str], str | None] | None = None
# Injected by gui.py - called whenever a comms entry is appended.
# Signature: (entry: dict) -> None
comms_log_callback: Callable[[dict[str, Any]], None] | None = None
# Injected by gui.py - called whenever a tool call completes.
# Signature: (script: str, result: str) -> None
tool_log_callback: Callable[[str, str], None] | None = None
# Set by caller tiers before ai_client.send(); cleared in finally.
# Safe — ai_client.send() calls are serialized by the MMA engine executor.
current_tier: str | None = None
# Increased to allow thorough code exploration before forcing a summary
MAX_TOOL_ROUNDS: int = 10
# Maximum cumulative bytes of tool output allowed per send() call.
# Prevents unbounded memory growth during long tool-calling loops.
_MAX_TOOL_OUTPUT_BYTES: int = 500_000
# Maximum characters per text chunk sent to Anthropic.
# Kept well under the ~200k token API limit.
_ANTHROPIC_CHUNK_SIZE: int = 120_000
_SYSTEM_PROMPT: str = (
"You are a helpful coding assistant with access to a PowerShell tool and MCP tools (file access: read_file, list_directory, search_files, get_file_summary, web access: web_search, fetch_url). "
"When calling file/directory tools, always use the 'path' parameter for the target path. "
"When asked to create or edit files, prefer targeted edits over full rewrites. "
"Always explain what you are doing before invoking the tool.\n\n"
"When writing or rewriting large files (especially those containing quotes, backticks, or special characters), "
"avoid python -c with inline strings. Instead: (1) write a .py helper script to disk using a PS here-string "
"(@'...'@ for literal content), (2) run it with `python <script>`, (3) delete the helper. "
"For small targeted edits, use PowerShell's (Get-Content) / .Replace() / Set-Content or Add-Content directly.\n\n"
"When making function calls using tools that accept array or object parameters "
"ensure those are structured using JSON. For example:\n"
"When you need to verify a change, rely on the exit code and stdout/stderr from the tool \u2014 "
"the user's context files are automatically refreshed after every tool call, so you do NOT "
"need to re-read files that are already provided in the <context> block."
)
_custom_system_prompt: str = ""
def set_custom_system_prompt(prompt: str) -> None:
...
def _get_combined_system_prompt() -> str:
...
_comms_log: list[dict] = []
COMMS_CLAMP_CHARS: int = 300
def _append_comms(direction: str, kind: str, payload: dict[str, Any]) -> None:
...
def get_comms_log() -> list[dict]:
...
def clear_comms_log() -> None:
...
def _load_credentials() -> dict:
...
class ProviderError(Exception):
def __init__(self, kind: str, provider: str, original: Exception) -> None:
...
def ui_message(self) -> str:
...
def _classify_anthropic_error(exc: Exception) -> ProviderError:
...
def _classify_gemini_error(exc: Exception) -> ProviderError:
...
def _classify_deepseek_error(exc: Exception) -> ProviderError:
...
def set_provider(provider: str, model: str) -> None:
...
def get_provider() -> str:
...
def cleanup() -> None:
"""Called on application exit to prevent orphaned caches from billing."""
...
def reset_session() -> None:
...
def get_gemini_cache_stats() -> dict[str, Any]:
"""
Retrieves statistics about the Gemini caches, such as count and total size.
"""
...
def list_models(provider: str) -> list[str]:
...
def _list_gemini_cli_models() -> list[str]:
"""
List available Gemini models for the CLI.
Since the CLI doesn't have a direct 'list models' command yet,
we return a curated list of supported models based on CLI metadata.
"""
...
def _list_gemini_models(api_key: str) -> list[str]:
...
def _list_anthropic_models() -> list[str]:
...
def _list_deepseek_models(api_key: str) -> list[str]:
"""
List available DeepSeek models.
"""
...
TOOL_NAME: str = "run_powershell"
_agent_tools: dict = {}
def set_agent_tools(tools: dict[str, bool]) -> None:
...
def _build_anthropic_tools() -> list[dict]:
"""Build the full Anthropic tools list: run_powershell + MCP file tools."""
...
_ANTHROPIC_TOOLS: list[dict[str, Any]] = _build_anthropic_tools()
_CACHED_ANTHROPIC_TOOLS: list[dict[str, Any]] | None = None
def _get_anthropic_tools() -> list[dict]:
"""Return the Anthropic tools list, rebuilding only once per session."""
...
def _gemini_tool_declaration() -> types.Tool | None:
...
def _run_script(script: str, base_dir: str, qa_callback: Optional[Callable[[str], str]] = None) -> str:
...
def _truncate_tool_output(output: str) -> str:
"""Truncate tool output to _history_trunc_limit chars before sending to API."""
...
def _reread_file_items(file_items: list[dict]) -> tuple[list[dict], list[dict]]:
"""
Re-read file_items from disk, but only files whose mtime has changed.
Returns (all_items, changed_items) — all_items is the full refreshed list,
changed_items contains only the files that were actually modified since
the last read (used to build a minimal [FILES UPDATED] block).
"""
...
def _build_file_context_text(file_items: list[dict]) -> str:
"""
Build a compact text summary of all files from file_items, suitable for
injecting into a tool_result message so the AI sees current file contents.
"""
...
_DIFF_LINE_THRESHOLD: int = 200
def _build_file_diff_text(changed_items: list[dict]) -> str:
"""
Build text for changed files. Small files (<= _DIFF_LINE_THRESHOLD lines)
get full content; large files get a unified diff against old_content.
"""
...
def _content_block_to_dict(block: Any) -> dict[str, Any]:
"""
Convert an Anthropic SDK content block object to a plain dict.
This ensures history entries are always JSON-serialisable dicts,
not opaque SDK objects that may fail on re-serialisation.
"""
...
def _ensure_gemini_client() -> None:
...
def _get_gemini_history_list(chat: Any | None) -> list[Any]:
...
def _send_gemini(md_content: str, user_message: str, base_dir: str,
file_items: list[dict[str, Any]] | None = None,
discussion_history: str = "",
pre_tool_callback: Optional[Callable[[str], bool]] = None,
qa_callback: Optional[Callable[[str], str]] = None,
enable_tools: bool = True,
stream_callback: Optional[Callable[[str], None]] = None) -> str:
...
def _send_gemini_cli(md_content: str, user_message: str, base_dir: str,
file_items: list[dict[str, Any]] | None = None,
discussion_history: str = "",
pre_tool_callback: Optional[Callable[[str], bool]] = None,
qa_callback: Optional[Callable[[str], str]] = None,
stream_callback: Optional[Callable[[str], None]] = None) -> str:
...
_CHARS_PER_TOKEN: float = 3.5
# Maximum token budget for the entire prompt (system + tools + messages).
# Anthropic's limit is 200k. We leave headroom for the response + tool schemas.
_ANTHROPIC_MAX_PROMPT_TOKENS: int = 180_000
# Gemini models have a 1M context window but we cap well below to leave headroom.
# If the model reports input tokens exceeding this, we trim old history.
_GEMINI_MAX_INPUT_TOKENS: int = 900_000
# Marker prefix used to identify stale file-refresh injections in history
_FILE_REFRESH_MARKER: str = "[FILES UPDATED"
def _estimate_message_tokens(msg: dict) -> int:
"""
Rough token estimate for a single Anthropic message dict.
Caches the result on the dict as '_est_tokens' so repeated calls
(e.g., from _trim_anthropic_history) don't re-scan unchanged messages.
Call _invalidate_token_estimate() when a message's content is modified.
"""
...
def _invalidate_token_estimate(msg: dict[str, Any]) -> None:
"""Remove the cached token estimate so the next call recalculates."""
...
def _estimate_prompt_tokens(system_blocks: list[dict], history: list[dict]) -> int:
"""Estimate total prompt tokens: system + tools + all history messages."""
...
def _strip_stale_file_refreshes(history: list[dict[str, Any]]) -> None:
"""
Remove [FILES UPDATED ...] text blocks from all history turns EXCEPT
the very last user message. These are stale snapshots from previous
tool rounds that bloat the context without providing value.
"""
...
def _trim_anthropic_history(system_blocks: list[dict[str, Any]], history: list[dict[str, Any]]) -> int:
"""
Trim the Anthropic history to fit within the token budget.
Strategy:
1. Strip stale file-refresh injections from old turns.
2. If still over budget, drop oldest turn pairs (user + assistant).
Returns the number of messages dropped.
"""
...
def _ensure_anthropic_client() -> None:
...
def _chunk_text(text: str, chunk_size: int) -> list[str]:
...
def _build_chunked_context_blocks(md_content: str) -> list[dict]:
"""
Split md_content into <=_ANTHROPIC_CHUNK_SIZE char chunks.
cache_control:ephemeral is placed only on the LAST block so the whole
prefix is cached as one unit.
"""
...
def _strip_cache_controls(history: list[dict[str, Any]]) -> None:
"""
Remove cache_control from all content blocks in message history.
Anthropic allows max 4 cache_control blocks total across system + tools +
messages. We reserve those slots for the stable system/tools prefix and
the current turn's context block, so all older history entries must be clean.
"""
...
def _add_history_cache_breakpoint(history: list[dict[str, Any]]) -> None:
"""
Place cache_control:ephemeral on the last content block of the
second-to-last user message. This uses one of the 4 allowed Anthropic
cache breakpoints to cache the conversation prefix so the full history
isn't reprocessed on every request.
"""
...
def _repair_anthropic_history(history: list[dict[str, Any]]) -> None:
"""
If history ends with an assistant message that contains tool_use blocks
without a following user tool_result message, append a synthetic tool_result
message so the history is valid before the next request.
"""
...
def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_items: list[dict[str, Any]] | None = None, discussion_history: str = "", pre_tool_callback: Optional[Callable[[str], bool]] = None, qa_callback: Optional[Callable[[str], str]] = None, stream_callback: Optional[Callable[[str], None]] = None) -> str:
...
def _send_deepseek(md_content: str, user_message: str, base_dir: str,
file_items: list[dict[str, Any]] | None = None,
discussion_history: str = "",
stream: bool = False,
pre_tool_callback: Optional[Callable[[str], bool]] = None,
qa_callback: Optional[Callable[[str], str]] = None,
stream_callback: Optional[Callable[[str], None]] = None) -> str:
"""
Sends a message to the DeepSeek API, handling tool calls and history.
Supports streaming responses.
"""
...
def run_tier4_analysis(stderr: str) -> str:
"""
Stateless Tier 4 QA analysis of an error message.
Uses gemini-2.5-flash-lite to summarize the error and suggest a fix.
"""
...
def get_token_stats(md_content: str) -> dict[str, Any]:
"""
Returns token usage statistics for the given markdown content.
Uses the current provider's count_tokens if available, else estimates.
"""
...
def send(
md_content: str,
user_message: str,
base_dir: str = ".",
file_items: list[dict[str, Any]] | None = None,
discussion_history: str = "",
stream: bool = False,
pre_tool_callback: Optional[Callable[[str], bool]] = None,
qa_callback: Optional[Callable[[str], str]] = None,
enable_tools: bool = True,
stream_callback: Optional[Callable[[str], None]] = None,
) -> str:
"""
Sends a prompt with the full markdown context to the current AI provider.
Returns the final text response.
"""
...
def _add_bleed_derived(d: dict[str, Any], sys_tok: int = 0, tool_tok: int = 0) -> dict[str, Any]:
...
def get_history_bleed_stats(md_content: str | None = None) -> dict[str, Any]:
"""
Calculates how close the current conversation history is to the token limit.
If md_content is provided and no chat session exists, it estimates based on md_content.
"""
...
--- File: api_hook_client.py ---
from __future__ import annotations
import requests
import json
import time
from typing import Any
class ApiHookClient:
def __init__(self, base_url: str = "http://127.0.0.1:8999", max_retries: int = 5, retry_delay: float = 0.2) -> None:
...
def wait_for_server(self, timeout: float = 3) -> bool:
"""
Polls the /status endpoint until the server is ready or timeout is reached.
"""
...
def _make_request(self, method: str, endpoint: str, data: dict | None = None, timeout: float | None = None) -> dict | None:
...
def get_status(self) -> dict:
"""Checks the health of the hook server."""
...
def get_project(self) -> dict | None:
...
def post_project(self, project_data: dict) -> dict | None:
...
def get_session(self) -> dict | None:
...
def get_mma_status(self) -> dict | None:
"""Retrieves current MMA status (track, tickets, tier, etc.)"""
...
def push_event(self, event_type: str, payload: dict) -> dict | None:
"""Pushes an event to the GUI's AsyncEventQueue via the /api/gui endpoint."""
...
def get_performance(self) -> dict | None:
"""Retrieves UI performance metrics."""
...
def post_session(self, session_entries: list) -> dict | None:
...
def post_gui(self, gui_data: dict) -> dict | None:
...
def select_tab(self, tab_bar: str, tab: str) -> dict | None:
"""Tells the GUI to switch to a specific tab in a tab bar."""
...
def select_list_item(self, listbox: str, item_value: str) -> dict | None:
"""Tells the GUI to select an item in a listbox by its value."""
...
def set_value(self, item: str, value: Any) -> dict | None:
"""Sets the value of a GUI item."""
...
def get_value(self, item: str) -> Any:
"""Gets the value of a GUI item via its mapped field."""
...
def get_text_value(self, item_tag: str) -> str | None:
"""Wraps get_value and returns its string representation, or None."""
...
def get_node_status(self, node_tag: str) -> Any:
"""Wraps get_value for a DAG node or queries the diagnostic endpoint for its status."""
...
def click(self, item: str, *args: Any, **kwargs: Any) -> dict | None:
"""Simulates a click on a GUI button or item."""
...
def get_indicator_state(self, tag: str) -> dict:
"""Checks if an indicator is shown using the diagnostics endpoint."""
...
def get_events(self) -> list:
"""Fetches and clears the event queue from the server."""
...
def wait_for_event(self, event_type: str, timeout: float = 5) -> dict | None:
"""Polls for a specific event type."""
...
def wait_for_value(self, item: str, expected: Any, timeout: float = 5) -> bool:
"""Polls until get_value(item) == expected."""
...
def reset_session(self) -> dict | None:
"""Simulates clicking the 'Reset Session' button in the GUI."""
...
def request_confirmation(self, tool_name: str, args: dict) -> Any:
"""Asks the user for confirmation via the GUI (blocking call)."""
...
--- File: api_hooks.py ---
from __future__ import annotations
import json
import threading
import uuid
from http.server import ThreadingHTTPServer, BaseHTTPRequestHandler
from typing import Any
import logging
import session_logger
class HookServerInstance(ThreadingHTTPServer):
"""Custom HTTPServer that carries a reference to the main App instance."""
def __init__(self, server_address: tuple[str, int], RequestHandlerClass: type, app: Any) -> None:
...
class HookHandler(BaseHTTPRequestHandler):
"""Handles incoming HTTP requests for the API hooks."""
def do_GET(self) -> None:
...
--- File: conductor\tests\diag_subagent.py ---
import subprocess
import sys
def run_diag(role: str, prompt: str) -> str:
...
if __name__ == "__main__":
# Test 1: Simple read
print("TEST 1: read_file")
run_diag("tier3-worker", "Read the file 'pyproject.toml' and tell me the version of the project. ONLY the version string.")
print("\nTEST 2: run_shell_command")
run_diag("tier3-worker", "Use run_shell_command to execute 'echo HELLO_SUBAGENT' and return the output. ONLY the output.")
--- File: conductor\tests\test_infrastructure.py ---
import subprocess
def run_ps_script(role: str, prompt: str) -> subprocess.CompletedProcess:
"""Helper to run the run_subagent.ps1 script."""
...
def test_subagent_script_qa_live() -> None:
"""Verify that the QA role works and returns a compressed fix."""
...
def test_subagent_script_worker_live() -> None:
"""Verify that the Worker role works and returns code."""
...
def test_subagent_script_utility_live() -> None:
"""Verify that the Utility role works."""
...
def test_subagent_isolation_live() -> None:
"""Verify that the sub-agent is stateless and does not see the parent's conversation context."""
...
--- File: conductor\tests\test_mma_exec.py ---
import pytest
import os
from pathlib import Path
from unittest.mock import patch, MagicMock
from scripts.mma_exec import create_parser, get_role_documents, execute_agent, get_model_for_role, get_dependencies
def test_parser_role_choices() -> None:
"""Test that the parser accepts valid roles and the prompt argument."""
...
def test_parser_invalid_role() -> None:
"""Test that the parser rejects roles outside the specified choices."""
...
def test_parser_prompt_optional() -> None:
"""Test that the prompt argument is optional if role is provided (or handled in main)."""
...
def test_parser_help() -> None:
"""Test that the help flag works without raising errors (exits with 0)."""
...
def test_get_role_documents() -> None:
"""Test that get_role_documents returns the correct documentation paths for each tier."""
...
def test_get_model_for_role() -> None:
"""Test that get_model_for_role returns the correct model for each role."""
...
def test_execute_agent() -> None:
"""
Test that execute_agent calls subprocess.run with powershell and the correct gemini CLI arguments
including the model specified for the role.
"""
...
def test_get_dependencies(tmp_path: Path) -> None:
...
import re
def test_execute_agent_logging(tmp_path: Path) -> None:
...
def test_execute_agent_tier3_injection(tmp_path: Path) -> None:
...
--- File: conductor\tests\test_mma_skeleton.py ---
import pytest
from scripts.mma_exec import generate_skeleton
def test_generate_skeleton() -> None:
...
if __name__ == "__main__":
pytest.main([__file__])
--- File: conductor_tech_lead.py ---
import json
import ai_client
import mma_prompts
import re
def generate_tickets(track_brief: str, module_skeletons: str) -> list[dict]:
"""
Tier 2 (Tech Lead) call.
Breaks down a Track Brief and module skeletons into discrete Tier 3 Tickets.
"""
...
from dag_engine import TrackDAG
from models import Ticket
def topological_sort(tickets: list[dict]) -> list[dict]:
"""
Sorts a list of tickets based on their 'depends_on' field.
Raises ValueError if a circular dependency or missing internal dependency is detected.
"""
...
if __name__ == "__main__":
# Quick test if run directly
test_brief = "Implement a new feature."
test_skeletons = "class NewFeature: pass"
tickets = generate_tickets(test_brief, test_skeletons)
print(json.dumps(tickets, indent=2))
--- File: cost_tracker.py ---
import re
# Pricing per 1M tokens in USD
MODEL_PRICING = [
(r"gemini-2\.5-flash-lite", {"input_per_mtok": 0.075, "output_per_mtok": 0.30}),
(r"gemini-2\.5-flash", {"input_per_mtok": 0.15, "output_per_mtok": 0.60}),
(r"gemini-3-flash-preview", {"input_per_mtok": 0.15, "output_per_mtok": 0.60}),
(r"gemini-3\.1-pro-preview", {"input_per_mtok": 3.50, "output_per_mtok": 10.50}),
(r"claude-.*-sonnet", {"input_per_mtok": 3.0, "output_per_mtok": 15.0}),
(r"claude-.*-opus", {"input_per_mtok": 15.0, "output_per_mtok": 75.0}),
(r"deepseek-v3", {"input_per_mtok": 0.27, "output_per_mtok": 1.10}),
]
def estimate_cost(model: str, input_tokens: int, output_tokens: int) -> float:
"""
Estimate the cost of a model call based on input and output tokens.
Returns the total cost in USD.
"""
...
--- File: dag_engine.py ---
from typing import List
from models import Ticket
class TrackDAG:
"""
Manages a Directed Acyclic Graph of implementation tickets.
Provides methods for dependency resolution, cycle detection, and topological sorting.
"""
def __init__(self, tickets: List[Ticket]) -> None:
"""
Initializes the TrackDAG with a list of Ticket objects.
Args:
tickets: A list of Ticket instances defining the graph nodes and edges.
"""
...
def cascade_blocks(self) -> None:
"""
Transitively marks `todo` tickets as `blocked` if any dependency is `blocked`.
Runs until stable (handles multi-hop chains: A→B→C where A blocked cascades to B then C).
"""
...
def get_ready_tasks(self) -> List[Ticket]:
"""
Returns a list of tickets that are in 'todo' status and whose dependencies are all 'completed'.
Returns:
A list of Ticket objects ready for execution.
"""
...
def has_cycle(self) -> bool:
"""
Performs a Depth-First Search to detect cycles in the dependency graph.
Returns:
True if a cycle is detected, False otherwise.
"""
...
class ExecutionEngine:
def __init__(self, dag: TrackDAG, auto_queue: bool) -> None:
"""
Initializes the ExecutionEngine.
Args:
dag: The TrackDAG instance to manage.
auto_queue: If True, ready tasks will automatically move to 'in_progress'.
"""
...
def tick(self) -> List[Ticket]:
"""
Evaluates the DAG and returns a list of tasks that are currently 'ready' for execution.
If auto_queue is enabled, tasks without 'step_mode' will be marked as 'in_progress'.
Returns:
A list of ready Ticket objects.
"""
...
def approve_task(self, task_id: str) -> None:
"""
Manually transitions a task from 'todo' to 'in_progress' if its dependencies are met.
Args:
task_id: The ID of the task to approve.
"""
...
def update_task_status(self, task_id: str, status: str) -> None:
"""
Force-updates the status of a specific task.
Args:
task_id: The ID of the task.
status: The new status string (e.g., 'todo', 'in_progress', 'completed', 'blocked').
"""
...
--- File: events.py ---
"""
Decoupled event emission system for cross-module communication.
"""
import asyncio
from typing import Callable, Any, Dict, List, Tuple
class EventEmitter:
"""
Simple event emitter for decoupled communication between modules.
"""
def __init__(self) -> None:
"""Initializes the EventEmitter with an empty listener map."""
...
def on(self, event_name: str, callback: Callable) -> None:
"""
Registers a callback for a specific event.
Args:
event_name: The name of the event to listen for.
callback: The function to call when the event is emitted.
"""
...
def emit(self, event_name: str, *args: Any, **kwargs: Any) -> None:
"""
Emits an event, calling all registered callbacks.
Args:
event_name: The name of the event to emit.
*args: Positional arguments to pass to callbacks.
**kwargs: Keyword arguments to pass to callbacks.
"""
...
class AsyncEventQueue:
"""
Asynchronous event queue for decoupled communication using asyncio.Queue.
"""
def __init__(self) -> None:
"""Initializes the AsyncEventQueue with an internal asyncio.Queue."""
...
async def put(self, event_name: str, payload: Any = None) -> None:
"""
Puts an event into the queue.
Args:
event_name: The name of the event.
payload: Optional data associated with the event.
"""
...
async def get(self) -> Tuple[str, Any]:
"""
Gets an event from the queue.
Returns:
A tuple containing (event_name, payload).
"""
...
class UserRequestEvent:
"""
Payload for a user request event.
"""
def __init__(self, prompt: str, stable_md: str, file_items: List[Any], disc_text: str, base_dir: str) -> None:
...
def to_dict(self) -> Dict[str, Any]:
...
--- File: file_cache.py ---
# file_cache.py
"""
Stub — the Anthropic Files API path has been removed.
All context is now sent as inline chunked text via _send_anthropic_chunked.
This file is kept so that any stale imports do not break.
"""
from pathlib import Path
from typing import Optional
import tree_sitter
import tree_sitter_python
class ASTParser:
"""
Parser for extracting AST-based views of source code.
Currently supports Python.
"""
def __init__(self, language: str) -> None:
...
def parse(self, code: str) -> tree_sitter.Tree:
"""Parse the given code and return the tree-sitter Tree."""
...
def get_skeleton(self, code: str) -> str:
"""
Returns a skeleton of a Python file (preserving docstrings, stripping function bodies).
"""
...
--- File: gemini.py ---
# gemini.py
from __future__ import annotations
import tomllib
from typing import Any
from google import genai
_client: genai.Client | None = None
_chat: Any = None
def _load_key() -> str:
...
def _ensure_client() -> None:
...
def _ensure_chat() -> None:
...
def send(md_content: str, user_message: str) -> str:
...
def reset_session() -> None:
...
--- File: gemini_cli_adapter.py ---
import subprocess
import json
import os
import time
import session_logger
from typing import Optional, Callable, Any
class GeminiCliAdapter:
"""
Adapter for the Gemini CLI that parses streaming JSON output.
"""
def __init__(self, binary_path: str = "gemini"):
...
def send(self, message: str, safety_settings: list | None = None, system_instruction: str | None = None,
model: str | None = None, stream_callback: Optional[Callable[[str], None]] = None) -> dict[str, Any]:
"""
Sends a message to the Gemini CLI and processes the streaming JSON output.
Uses non-blocking line-by-line reading to allow stream_callback.
"""
...
def count_tokens(self, contents: list[str]) -> int:
"""
Provides a character-based token estimation for the Gemini CLI.
Uses 4 chars/token as a conservative average.
"""
...
--- File: gui_2.py ---
# gui_2.py
from __future__ import annotations
import tomli_w
import threading
import asyncio
import time
import math
import json
import sys
import os
import uuid
import requests
from pathlib import Path
from tkinter import filedialog, Tk
from typing import Optional, Callable, Any
import aggregate
import ai_client
import cost_tracker
from ai_client import ProviderError
import shell_runner
import session_logger
import project_manager
import theme_2 as theme
import tomllib
import events
import numpy as np
import api_hooks
import mcp_client
import orchestrator_pm
from performance_monitor import PerformanceMonitor
from log_registry import LogRegistry
from log_pruner import LogPruner
import conductor_tech_lead
import multi_agent_conductor
from models import Track, Ticket
from file_cache import ASTParser
from fastapi import FastAPI, Depends, HTTPException
from fastapi.security.api_key import APIKeyHeader
from pydantic import BaseModel
from imgui_bundle import imgui, hello_imgui, immapp
# Path to the GUI's persisted settings file (TOML).
CONFIG_PATH: Path = Path("config.toml")
# Provider keys selectable in the UI.  NOTE(review): presumably must match the
# provider names ai_client understands — confirm.
PROVIDERS: list[str] = ["gemini", "anthropic", "gemini_cli", "deepseek"]
# Character limit applied to comms entries; presumably display-only clamping.
COMMS_CLAMP_CHARS: int = 300
def load_config() -> dict[str, Any]:
...
def save_config(config: dict[str, Any]) -> None:
...
# Returns a Tk root, presumably hidden, so filedialog can be used without a
# visible main window.
def hide_tk_root() -> Tk:
...
# NOTE(review): callers below pass 0-255 channel values, so vec4 presumably
# normalizes to 0.0-1.0 — confirm.  Also, the constants below are annotated
# tuple[float, ...] while vec4 is declared to return imgui.ImVec4; one of the
# two annotations is wrong — verify which.
def vec4(r: float, g: float, b: float, a: float = 1.0) -> imgui.ImVec4: ...
# Color palette for the comms/log viewer: direction (OUT/IN), payload kind
# (request/response/tool_*), and syntax roles (labels, values, keys, numbers).
C_OUT: tuple[float, ...] = vec4(100, 200, 255)
C_IN: tuple[float, ...] = vec4(140, 255, 160)
C_REQ: tuple[float, ...] = vec4(255, 220, 100)
C_RES: tuple[float, ...] = vec4(180, 255, 180)
C_TC: tuple[float, ...] = vec4(255, 180, 80)
C_TR: tuple[float, ...] = vec4(180, 220, 255)
C_TRS: tuple[float, ...] = vec4(200, 180, 255)
C_LBL: tuple[float, ...] = vec4(180, 180, 180)
C_VAL: tuple[float, ...] = vec4(220, 220, 220)
C_KEY: tuple[float, ...] = vec4(140, 200, 255)
C_NUM: tuple[float, ...] = vec4(180, 255, 180)
C_SUB: tuple[float, ...] = vec4(220, 200, 120)
DIR_COLORS: dict[str, tuple[float, ...]] = {"OUT": C_OUT, "IN": C_IN}
KIND_COLORS: dict[str, tuple[float, ...]] = {"request": C_REQ, "response": C_RES, "tool_call": C_TC, "tool_result": C_TR, "tool_result_send": C_TRS}
# Keys whose values are large payloads; presumably clamped/elided in display.
HEAVY_KEYS: set[str] = {"message", "text", "script", "output", "content"}
DISC_ROLES: list[str] = ["User", "AI", "Vendor API", "System"]
# Tool names offered to agents; mirrors mcp_client's tool set.  The mutating
# tools are listed last and disabled by default (see in-list comment).
AGENT_TOOL_NAMES: list[str] = [
"run_powershell", "read_file", "list_directory", "search_files", "get_file_summary",
"web_search", "fetch_url", "py_get_skeleton", "py_get_code_outline", "get_file_slice",
"py_get_definition", "py_get_signature", "py_get_class_summary", "py_get_var_declaration",
"get_git_diff", "py_find_usages", "py_get_imports", "py_check_syntax", "py_get_hierarchy",
"py_get_docstring", "get_tree", "get_ui_performance",
# Mutating tools — disabled by default
"set_file_slice", "py_update_definition", "py_set_signature", "py_set_var_declaration",
]
# Presumably clamps an entry list to at most max_pairs pairs — confirm.
def truncate_entries(entries: list[dict[str, Any]], max_pairs: int) -> list[dict[str, Any]]:
...
def _parse_history_entries(history: list[str], roles: list[str] | None = None) -> list[dict[str, Any]]:
...
class ConfirmDialog:
# Approval dialog for running a shell script within base_dir.
def __init__(self, script: str, base_dir: str) -> None:
...
# Presumably blocks until the user decides; returns (approved, script).
# NOTE(review): confirm the tuple's second element against gui_2.py.
def wait(self) -> tuple[bool, str]:
...
class MMAApprovalDialog:
# Approval dialog for a multi-agent (MMA) ticket payload.
def __init__(self, ticket_id: str, payload: str) -> None:
...
def wait(self) -> tuple[bool, str]:
...
class MMASpawnApprovalDialog:
# Approval dialog shown before spawning an agent; takes the role prompt and
# context markdown (cf. confirm_spawn in multi_agent_conductor, which returns
# possibly-modified prompt/context).
def __init__(self, ticket_id: str, role: str, prompt: str, context_md: str) -> None:
...
def wait(self) -> dict[str, Any]:
...
class GenerateRequest(BaseModel):
# Prompt text forwarded to the AI provider.
prompt: str
# Presumably appends the exchange to session history when True — confirm in
# App.create_api.
auto_add_history: bool = True
# Optional sampling overrides; None presumably falls back to configured defaults.
temperature: float | None = None
max_tokens: int | None = None
class ConfirmRequest(BaseModel):
    """Request body approving or rejecting a pending script confirmation."""
    approved: bool
    # `str | None` matches the `X | None` style GenerateRequest already uses
    # (was `Optional[str]`; semantically identical).
    script: str | None = None
class App:
    """The main ImGui interface orchestrator for Manual Slop."""

    def __init__(self) -> None:
        # Initialize locks first to avoid initialization order issues
        ...

    def _prune_old_logs(self) -> None:
        """Asynchronously prunes old insignificant logs on startup."""
        ...

    # NOTE(review): the `current_model` property getter was lost in the
    # skeleton extraction (only its setter survived, and the preceding stub
    # body was corrupted to "......"); reconstructed here — verify the real
    # signature against gui_2.py.
    @property
    def current_model(self) -> str:
        ...

    @current_model.setter
    def current_model(self, value: str) -> None:
        ...

    def _init_ai_and_hooks(self) -> None:
        ...

    def create_api(self) -> FastAPI:
        """Creates and configures the FastAPI application for headless mode."""
        ...
--- File: log_pruner.py ---
import os
import shutil
from datetime import datetime, timedelta
from log_registry import LogRegistry
class LogPruner:
"""
Handles the automated deletion of old and insignificant session logs.
Ensures that only whitelisted or significant sessions (based on size/content)
are preserved long-term.
"""
def __init__(self, log_registry: LogRegistry, logs_dir: str) -> None:
"""
Initializes the LogPruner.
Args:
log_registry: An instance of LogRegistry to check session data.
logs_dir: The path to the directory containing session sub-directories.
"""
...
def prune(self) -> None:
"""
Prunes old and small session directories from the logs directory.
Deletes session directories that meet the following criteria:
1. The session start time is older than 24 hours (based on data from LogRegistry).
2. The session name is NOT in the whitelist provided by the LogRegistry.
3. The total size of all files within the session directory is less than 2KB (2048 bytes).
"""
...
--- File: log_registry.py ---
from __future__ import annotations
import tomli_w
import tomllib
from datetime import datetime
import os
class LogRegistry:
"""
Manages a persistent registry of session logs using a TOML file.
Tracks session paths, start times, whitelisting status, and metadata.
"""
def __init__(self, registry_path: str) -> None:
"""
Initializes the LogRegistry with a path to the registry file.
Args:
registry_path (str): The file path to the TOML registry.
"""
...
def load_registry(self) -> None:
"""
Loads the registry data from the TOML file into memory.
Handles date/time conversions from TOML-native formats to strings for consistency.
"""
...
def save_registry(self) -> None:
"""
Serializes and saves the current registry data to the TOML file.
Converts internal datetime objects to ISO format strings for compatibility.
"""
...
def register_session(self, session_id: str, path: str, start_time: datetime | str) -> None:
"""
Registers a new session in the registry.
Args:
session_id (str): Unique identifier for the session.
path (str): File path to the session's log directory.
start_time (datetime|str): The timestamp when the session started.
"""
...
def update_session_metadata(self, session_id: str, message_count: int, errors: int, size_kb: int, whitelisted: bool, reason: str) -> None:
"""
Updates metadata fields for an existing session.
Args:
session_id (str): Unique identifier for the session.
message_count (int): Total number of messages in the session.
errors (int): Number of errors identified in logs.
size_kb (int): Total size of the session logs in kilobytes.
whitelisted (bool): Whether the session should be protected from pruning.
reason (str): Explanation for the current whitelisting status.
"""
...
def is_session_whitelisted(self, session_id: str) -> bool:
"""
Checks if a specific session is marked as whitelisted.
Args:
session_id (str): Unique identifier for the session.
Returns:
bool: True if whitelisted, False otherwise.
"""
...
def update_auto_whitelist_status(self, session_id: str) -> None:
"""
Analyzes session logs and updates whitelisting status based on heuristics.
Sessions are automatically whitelisted if they contain error keywords,
have a high message count, or exceed a size threshold.
Args:
session_id (str): Unique identifier for the session to analyze.
"""
...
def get_old_non_whitelisted_sessions(self, cutoff_datetime: datetime) -> list[dict]:
"""
Retrieves a list of sessions that are older than a specific cutoff time
and are not marked as whitelisted.
Args:
cutoff_datetime (datetime): The threshold time for identifying old sessions.
Returns:
list: A list of dictionaries containing session details (id, path, start_time).
"""
...
--- File: mcp_client.py ---
# mcp_client.py
"""
Note(Gemini):
MCP-style file context tools for manual_slop.
Exposes read-only filesystem tools the AI can call to selectively fetch file
content on demand, instead of having everything inlined into the context block.
All access is restricted to paths that are either:
- Explicitly listed in the project's allowed_paths set, OR
- Contained within an allowed base_dir (must resolve to a subpath of it)
This is heavily inspired by Claude's own tooling limits. We enforce safety here
so the AI doesn't wander outside the project workspace.
"""
# Tools exposed by this module (access rules are in the docstring above):
# Tools exposed:
# read_file(path) - return full UTF-8 content of a file
# list_directory(path) - list entries in a directory (names + type)
# search_files(path, pattern) - glob pattern search within an allowed dir
# get_file_summary(path) - return the summarize.py heuristic summary
#
from __future__ import annotations
from pathlib import Path
from typing import Optional, Callable, Any
import os
import summarize
import outline_tool
import urllib.request
import urllib.parse
from html.parser import HTMLParser
import re as _re
# ------------------------------------------------------------------ mutating tools sentinel
# Tools that write or modify files. ai_client checks this set before dispatch
# and routes to pre_tool_callback (GUI approval) if the tool name is present.
# Names of MCP tools that write to disk.  ai_client consults this set before
# dispatch and routes any member through pre_tool_callback (GUI approval).
MUTATING_TOOLS: frozenset[str] = frozenset(
    (
        "set_file_slice",
        "py_update_definition",
        "py_set_signature",
        "py_set_var_declaration",
    )
)
# ------------------------------------------------------------------ state
# Set by configure() before the AI send loop starts.
# allowed_paths : set of resolved absolute Path objects (files or dirs)
# base_dirs : set of resolved absolute Path dirs that act as roots
# Resolved absolute paths (files or dirs) explicitly allowed for tool access.
_allowed_paths: set[Path] = set()
# Resolved directory roots; anything beneath one of these is allowed.
_base_dirs: set[Path] = set()
# Primary root.  NOTE(review): presumably used to resolve relative tool paths —
# confirm against _resolve_and_check.
_primary_base_dir: Path | None = None
# Injected by gui_2.py - returns a dict of performance metrics
perf_monitor_callback: Optional[Callable[[], dict[str, Any]]] = None
def configure(file_items: list[dict[str, Any]], extra_base_dirs: list[str] | None = None) -> None:
"""
Build the allowlist from aggregate file_items.
Called by ai_client before each send so the list reflects the current project.
file_items : list of dicts from aggregate.build_file_items()
extra_base_dirs : additional directory roots to allow traversal of
"""
...
def _is_allowed(path: Path) -> bool:
"""
Return True if `path` is within the allowlist.
A path is allowed if:
- it is explicitly in _allowed_paths, OR
- it is contained within (or equal to) one of the _base_dirs
All paths are resolved (follows symlinks) before comparison to prevent
symlink-based path traversal.
CRITICAL: Blacklisted files (history) are NEVER allowed.
"""
...
def _resolve_and_check(raw_path: str) -> tuple[Path | None, str]:
"""
Resolve raw_path and verify it passes the allowlist check.
Returns (resolved_path, error_string). error_string is empty on success.
"""
...
def read_file(path: str) -> str:
"""Return the UTF-8 content of a file, or an error string."""
...
def list_directory(path: str) -> str:
"""List entries in a directory. Returns a compact text table."""
...
def search_files(path: str, pattern: str) -> str:
"""
Search for files matching a glob pattern within path.
pattern examples: '*.py', '**/*.toml', 'src/**/*.rs'
"""
...
def get_file_summary(path: str) -> str:
"""
Return the heuristic summary for a file (same as the initial context block).
For .py files: imports, classes, methods, functions, constants.
For .toml: table keys. For .md: headings. Others: line count + preview.
"""
...
def py_get_skeleton(path: str) -> str:
"""
Returns a skeleton of a Python file (preserving docstrings, stripping function bodies).
"""
...
def py_get_code_outline(path: str) -> str:
"""
Returns a hierarchical outline of a code file (classes, functions, methods with line ranges).
"""
...
def get_file_slice(path: str, start_line: int, end_line: int) -> str:
"""Return a specific line range from a file."""
...
def set_file_slice(path: str, start_line: int, end_line: int, new_content: str) -> str:
"""Replace a specific line range in a file with new content."""
...
def _get_symbol_node(tree: Any, name: str) -> Any:
"""Helper to find an AST node by name (Class, Function, or Variable). Supports dot notation."""
... name: str) -> str:
"""Returns only the signature part of a function or method (def line until colon)."""
...
def py_set_signature(path: str, name: str, new_signature: str) -> str:
"""Surgically replace only the signature of a function/method."""
...
def py_get_class_summary(path: str, name: str) -> str:
"""Returns a summary of a class: its methods and their signatures."""
...
def py_get_var_declaration(path: str, name: str) -> str:
"""Get the assignment/declaration line(s) for a module-level or class-level variable."""
...
def py_set_var_declaration(path: str, name: str, new_declaration: str) -> str:
"""Surgically replace a variable assignment/declaration."""
...
def get_git_diff(path: str, base_rev: str = "HEAD", head_rev: str = "") -> str:
"""
Returns the git diff for a file or directory.
base_rev: The base revision (default: HEAD)
head_rev: The head revision (optional)
"""
...
def py_find_usages(path: str, name: str) -> str:
    """Finds exact string matches of a symbol in a given file or directory."""
    ...

# NOTE(review): the left-hand side of this set literal (its name and leading
# entries) was lost during skeleton extraction; the tail from
# "py_update_definition" onward is the surviving original text, and the prefix
# is reconstructed from the tool names declared in MCP_TOOL_SPECS.  Verify the
# variable name against the real mcp_client.py.
KNOWN_TOOLS: set[str] = {
    "read_file", "list_directory", "search_files", "get_file_summary",
    "web_search", "fetch_url", "get_ui_performance", "py_get_skeleton",
    "py_get_code_outline", "get_file_slice", "py_get_definition",
    "set_file_slice", "py_update_definition", "py_get_signature",
    "py_set_signature", "py_get_class_summary", "py_get_var_declaration",
    "py_set_var_declaration", "py_find_usages", "py_get_imports",
    "py_check_syntax", "py_get_hierarchy", "py_get_docstring", "get_tree",
}
def dispatch(tool_name: str, tool_input: dict[str, Any]) -> str:
"""
Dispatch an MCP tool call by name. Returns the result as a string.
"""
...
MCP_TOOL_SPECS: list[dict[str, Any]] = [
{
"name": "read_file",
"description": (
"Read the full UTF-8 content of a file within the allowed project paths. "
"Use get_file_summary first to decide whether you need the full content."
),
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Absolute or relative path to the file to read.",
}
},
"required": ["path"],
},
},
{
"name": "list_directory",
"description": (
"List files and subdirectories within an allowed directory. "
"Shows name, type (file/dir), and size. Use this to explore the project structure."
),
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Absolute path to the directory to list.",
}
},
"required": ["path"],
},
},
{
"name": "search_files",
"description": (
"Search for files matching a glob pattern within an allowed directory. "
"Supports recursive patterns like '**/*.py'. "
"Use this to find files by extension or name pattern."
),
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Absolute path to the directory to search within.",
},
"pattern": {
"type": "string",
"description": "Glob pattern, e.g. '*.py', '**/*.toml', 'src/**/*.rs'.",
},
},
"required": ["path", "pattern"],
},
},
{
"name": "get_file_summary",
"description": (
"Get a compact heuristic summary of a file without reading its full content. "
"For Python: imports, classes, methods, functions, constants. "
"For TOML: table keys. For Markdown: headings. Others: line count + preview. "
"Use this before read_file to decide if you need the full content."
),
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Absolute or relative path to the file to summarise.",
}
},
"required": ["path"],
},
},
{
"name": "py_get_skeleton",
"description": (
"Get a skeleton view of a Python file. "
"This returns all classes and function signatures with their docstrings, "
"but replaces function bodies with '...'. "
"Use this to understand module interfaces without reading the full implementation."
),
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the .py file.",
}
},
"required": ["path"],
},
},
{
"name": "py_get_code_outline",
"description": (
"Get a hierarchical outline of a code file. "
"This returns classes, functions, and methods with their line ranges and brief docstrings. "
"Use this to quickly map out a file's structure before reading specific sections."
),
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the code file (currently supports .py).",
}
},
"required": ["path"],
},
},
{
"name": "get_file_slice",
"description": "Read a specific line range from a file. Useful for reading parts of very large files.",
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the file."
},
"start_line": {
"type": "integer",
"description": "1-based start line number."
},
"end_line": {
"type": "integer",
"description": "1-based end line number (inclusive)."
}
},
"required": ["path", "start_line", "end_line"]
}
},
{
"name": "set_file_slice",
"description": "Replace a specific line range in a file with new content. Surgical edit tool.",
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the file."
},
"start_line": {
"type": "integer",
"description": "1-based start line number."
},
"end_line": {
"type": "integer",
"description": "1-based end line number (inclusive)."
},
"new_content": {
"type": "string",
"description": "New content to insert."
}
},
"required": ["path", "start_line", "end_line", "new_content"]
}
},
{
"name": "py_get_definition",
"description": (
"Get the full source code of a specific class, function, or method definition. "
"This is more efficient than reading the whole file if you know what you're looking for."
),
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the .py file.",
},
"name": {
"type": "string",
"description": "The name of the class or function to retrieve. Use 'ClassName.method_name' for methods.",
}
},
"required": ["path", "name"],
},
},
{
"name": "py_update_definition",
"description": "Surgically replace the definition of a class or function in a Python file using AST to find line ranges.",
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the .py file."
},
"name": {
"type": "string",
"description": "Name of class/function/method."
},
"new_content": {
"type": "string",
"description": "Complete new source for the definition."
}
},
"required": ["path", "name", "new_content"]
}
},
{
"name": "py_get_signature",
"description": "Get only the signature part of a Python function or method (from def until colon).",
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the .py file."
},
"name": {
"type": "string",
"description": "Name of the function/method (e.g. 'ClassName.method_name')."
}
},
"required": ["path", "name"]
}
},
{
"name": "py_set_signature",
"description": "Surgically replace only the signature of a Python function or method.",
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the .py file."
},
"name": {
"type": "string",
"description": "Name of the function/method."
},
"new_signature": {
"type": "string",
"description": "Complete new signature string (including def and trailing colon)."
}
},
"required": ["path", "name", "new_signature"]
}
},
{
"name": "py_get_class_summary",
"description": "Get a summary of a Python class, listing its docstring and all method signatures.",
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the .py file."
},
"name": {
"type": "string",
"description": "Name of the class."
}
},
"required": ["path", "name"]
}
},
{
"name": "py_get_var_declaration",
"description": "Get the assignment/declaration line for a variable.",
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the .py file."
},
"name": {
"type": "string",
"description": "Name of the variable."
}
},
"required": ["path", "name"]
}
},
{
"name": "py_set_var_declaration",
"description": "Surgically replace a variable assignment/declaration.",
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the .py file."
},
"name": {
"type": "string",
"description": "Name of the variable."
},
"new_declaration": {
"type": "string",
"description": "Complete new assignment/declaration string."
}
},
"required": ["path", "name", "new_declaration"]
}
},
{
"name": "get_git_diff",
"description": (
"Returns the git diff for a file or directory. "
"Use this to review changes efficiently without reading entire files."
),
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the file or directory.",
},
"base_rev": {
"type": "string",
"description": "Base revision (e.g. 'HEAD', 'HEAD~1', or a commit hash). Defaults to 'HEAD'.",
},
"head_rev": {
"type": "string",
"description": "Head revision (optional).",
}
},
"required": ["path"],
},
},
{
"name": "web_search",
"description": "Search the web using DuckDuckGo. Returns the top 5 search results with titles, URLs, and snippets. Chain this with fetch_url to read specific pages.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The search query."
}
},
"required": ["query"]
}
},
{
"name": "fetch_url",
"description": "Fetch the full text content of a URL (stripped of HTML tags). Use this after web_search to read relevant information from the web.",
"parameters": {
"type": "object",
"properties": {
"url": {
"type": "string",
"description": "The full URL to fetch."
}
},
"required": ["url"]
}
},
{
"name": "get_ui_performance",
"description": "Get a snapshot of the current UI performance metrics, including FPS, Frame Time (ms), CPU usage (%), and Input Lag (ms). Use this to diagnose UI slowness or verify that your changes haven't degraded the user experience.",
"parameters": {
"type": "object",
"properties": {}
}
},
{
"name": "py_find_usages",
"description": "Finds exact string matches of a symbol in a given file or directory.",
"parameters": {
"type": "object",
"properties": {
"path": { "type": "string", "description": "Path to file or directory to search." },
"name": { "type": "string", "description": "The symbol/string to search for." }
},
"required": ["path", "name"]
}
},
{
"name": "py_get_imports",
"description": "Parses a file's AST and returns a strict list of its dependencies.",
"parameters": {
"type": "object",
"properties": {
"path": { "type": "string", "description": "Path to the .py file." }
},
"required": ["path"]
}
},
{
"name": "py_check_syntax",
"description": "Runs a quick syntax check on a Python file.",
"parameters": {
"type": "object",
"properties": {
"path": { "type": "string", "description": "Path to the .py file." }
},
"required": ["path"]
}
},
{
"name": "py_get_hierarchy",
"description": "Scans the project to find subclasses of a given class.",
"parameters": {
"type": "object",
"properties": {
"path": { "type": "string", "description": "Directory path to search in." },
"class_name": { "type": "string", "description": "Name of the base class." }
},
"required": ["path", "class_name"]
}
},
{
"name": "py_get_docstring",
"description": "Extracts the docstring for a specific module, class, or function.",
"parameters": {
"type": "object",
"properties": {
"path": { "type": "string", "description": "Path to the .py file." },
"name": { "type": "string", "description": "Name of symbol or 'module' for the file docstring." }
},
"required": ["path", "name"]
}
},
{
"name": "get_tree",
"description": "Returns a directory structure up to a max depth.",
"parameters": {
"type": "object",
"properties": {
"path": { "type": "string", "description": "Directory path." },
"max_depth": { "type": "integer", "description": "Maximum depth to recurse (default 2)." }
},
"required": ["path"]
}
}
]
--- File: mma_prompts.py ---
"""
MMA Structured System Prompts for Tier 1 (PM) and Tier 2 (Tech Lead).
Contains templates and static strings for hierarchical orchestration.
"""
from typing import Dict
# --- Tier 1 (Strategic/Orchestration: PM) ---
TIER1_BASE_SYSTEM: str = """
You are the Tier 1 Orchestrator (Product Manager) for the Manual Slop project.
Your role is high-level strategic planning, architecture enforcement, and cross-module delegation.
You operate strictly on metadata, summaries, and executive-level directives.
NEVER request or attempt to read raw implementation code unless specifically provided in a Macro-Diff.
Maintain a "Godot ECS Flat List format" (JSON array of objects) for structural outputs.
"""
TIER1_EPIC_INIT: str = TIER1_BASE_SYSTEM + """
PATH: Epic Initialization (Project Planning)
GOAL: Break down a massive feature request into discrete Implementation Tracks.
CONSTRAINTS:
- IGNORE all source code, AST skeletons, and previous micro-task histories.
- FOCUS ONLY on the Repository Map and Project Meta-State.
OUTPUT REQUIREMENT:
Return a JSON array of 'Tracks'. Each track object must follow the Godot ECS Flat List format:
[
{
"id": "track_unique_id",
"type": "Track",
"module": "target_module_name",
"persona": "required_tech_lead_persona",
"severity": "Low|Medium|High",
"goal": "Descriptive goal",
"acceptance_criteria": ["criteria_1", "criteria_2"]
},
...
]
"""
TIER1_TRACK_DELEGATION: str = TIER1_BASE_SYSTEM + """
PATH: Track Delegation (Sprint Kickoff)
GOAL: Compile a 'Track Brief' for a Tier 2 Tech Lead.
CONSTRAINTS:
- IGNORE unrelated module docs and original massive user prompt.
- USE AST Skeleton Views (class/function definitions only) for allowed modules.
OUTPUT REQUIREMENT:
Generate a comprehensive 'Track Brief' (JSON or Markdown) which includes:
1. A tailored System Prompt for the Tier 2 Tech Lead.
2. A curated list of files (the "Allowlist") they are authorized to modify.
3. Explicit architectural constraints derived from the Skeleton View.
"""
TIER1_MACRO_MERGE: str = TIER1_BASE_SYSTEM + """
PATH: Macro-Merge & Acceptance Review
GOAL: Review high-severity changes and merge into the project history.
CONSTRAINTS:
- IGNORE Tier 3 trial-and-error histories and Tier 4 error logs.
- FOCUS on the Macro-Diff and the Executive Summary.
OUTPUT REQUIREMENT:
Return "Approved" (commits to memory) OR "Rejected".
If Rejected, provide specific architectural feedback focusing on integration breaks or logic violations.
"""
# --- Tier 2 (Architectural/Tech Lead: Conductor) ---
TIER2_BASE_SYSTEM: str = """
You are the Tier 2 Track Conductor (Tech Lead) for the Manual Slop project.
Your role is module-specific planning, code review, and worker management.
You bridge high-level architecture with code syntax using AST-aware Skeleton Views.
Enforce Interface-Driven Development (IDD) and manage Topological Dependency Graphs.
"""
TIER2_SPRINT_PLANNING: str = TIER2_BASE_SYSTEM + """
PATH: Sprint Planning (Task Delegation)
GOAL: Break down a Track Brief into discrete Tier 3 Tickets.
CONSTRAINTS:
- IGNORE the PM's overarching project-planning logic.
- USE Curated Implementation View (AST-extracted class structures + [HOT] function bodies) for target modules.
- USE Skeleton View (signatures only) for foreign modules.
OUTPUT REQUIREMENT:
Return a JSON array of 'Tickets' in Godot ECS Flat List format.
Include 'depends_on' pointers to construct an execution DAG (Directed Acyclic Graph).
[
{
"id": "ticket_id",
"type": "Ticket",
"goal": "Surgical implementation task",
"target_file": "path/to/file",
"depends_on": ["other_ticket_id"],
"context_requirements": ["list_of_needed_skeletons"]
},
...
]
"""
TIER2_CODE_REVIEW: str = TIER2_BASE_SYSTEM + """
PATH: Code Review (Local Integration)
GOAL: Review Tier 3 diffs and ensure they meet the Ticket's goals.
CONSTRAINTS:
- IGNORE the Contributor's internal trial-and-error chat history.
- FOCUS on the Proposed Diff and Tier 4 (QA) logs.
OUTPUT REQUIREMENT:
Return "Approve" (merges diff) OR "Reject" (sends technical critique back to Tier 3).
"""
TIER2_TRACK_FINALIZATION: str = TIER2_BASE_SYSTEM + """
PATH: Track Finalization (Upward Reporting)
GOAL: Summarize the completed Track for the Tier 1 PM.
CONSTRAINTS:
- IGNORE back-and-forth review cycles.
- FOCUS on the Aggregated Track Diff and Dependency Delta.
OUTPUT REQUIREMENT:
Provide an Executive Summary (~200 words) and the final Macro-Diff.
"""
TIER2_CONTRACT_FIRST: str = TIER2_BASE_SYSTEM + """
PATH: Contract-First Delegation (Stub-and-Resolve)
GOAL: Resolve cross-module dependencies via Interface-Driven Development (IDD).
TASK:
You have detected a dependency on an undefined signature.
EXECUTION PLAN:
1. Define the Interface Contract.
2. Generate a 'Stub Ticket' (signature, types, docstring).
3. Generate a 'Consumer Ticket' (codes against skeleton).
4. Generate an 'Implementation Ticket' (fills logic).
OUTPUT REQUIREMENT:
Return the Ticket set in Godot ECS Flat List format (JSON array).
"""
PROMPTS: Dict[str, str] = {
"tier1_epic_init": TIER1_EPIC_INIT,
"tier1_track_delegation": TIER1_TRACK_DELEGATION,
"tier1_macro_merge": TIER1_MACRO_MERGE,
"tier2_sprint_planning": TIER2_SPRINT_PLANNING,
"tier2_code_review": TIER2_CODE_REVIEW,
"tier2_track_finalization": TIER2_TRACK_FINALIZATION,
"tier2_contract_first": TIER2_CONTRACT_FIRST,
}
--- File: models.py ---
from dataclasses import dataclass, field
from typing import List, Optional, Dict, Any
from datetime import datetime
@dataclass
class Ticket:
"""
Represents a discrete unit of work within a track.
"""
# Unique identifier; referenced by other tickets' depends_on lists.
id: str
description: str
# Lifecycle state.  Visible values: 'todo' (Track.get_executable_tickets),
# 'blocked' (mark_blocked), 'completed' (mark_complete).
status: str
assigned_to: str
target_file: Optional[str] = None
# Skeletons/files the worker needs in context (cf. TIER2_SPRINT_PLANNING).
context_requirements: List[str] = field(default_factory=list)
# Ticket ids forming the execution DAG edges (cf. Track.get_executable_tickets).
depends_on: List[str] = field(default_factory=list)
blocked_reason: Optional[str] = None
step_mode: bool = False
retry_count: int = 0
def mark_blocked(self, reason: str) -> None:
"""Sets the ticket status to 'blocked' and records the reason."""
...
def mark_complete(self) -> None:
"""Sets the ticket status to 'completed'."""
...
def get(self, key: str, default: Any = None) -> Any:
"""Helper to provide dictionary-like access to dataclass fields."""
...
# Serialization pair; presumably round-trips through plain dicts — confirm.
def to_dict(self) -> Dict[str, Any]:
...
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "Ticket":
...
@dataclass
class Track:
"""
Represents a collection of tickets that together form an architectural track or epic.
"""
id: str
description: str
tickets: List[Ticket] = field(default_factory=list)
def get_executable_tickets(self) -> List[Ticket]:
"""
Returns all 'todo' tickets whose dependencies are all 'completed'.
"""
...
@dataclass
class WorkerContext:
"""
Represents the context provided to a Tier 3 Worker for a specific ticket.
"""
ticket_id: str
model_name: str
messages: List[dict]
@dataclass
class Metadata:
id: str
name: str
status: str
created_at: datetime
updated_at: datetime
def to_dict(self) -> Dict[str, Any]:
...
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "Metadata":
...
@dataclass
class TrackState:
metadata: Metadata
discussion: List[Dict[str, Any]]
tasks: List[Ticket]
def to_dict(self) -> Dict[str, Any]:
...
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "TrackState":
...
--- File: multi_agent_conductor.py ---
import ai_client
import json
import asyncio
import time
import traceback
from typing import List, Optional, Tuple
from dataclasses import asdict
import events
from models import Ticket, Track, WorkerContext
from file_cache import ASTParser
from pathlib import Path
from dag_engine import TrackDAG, ExecutionEngine
class ConductorEngine:
"""
Orchestrates the execution of tickets within a track.
"""
def __init__(self, track: Track, event_queue: Optional[events.AsyncEventQueue] = None, auto_queue: bool = False) -> None:
...
async def _push_state(self, status: str = "running", active_tier: str = None) -> None:
...
def parse_json_tickets(self, json_str: str) -> None:
"""
Parses a JSON string of ticket definitions (Godot ECS Flat List format)
and populates the Track's ticket list.
"""
...
async def run(self, md_content: str = "") -> None:
"""
Main execution loop using the DAG engine.
Args:
md_content: The full markdown context (history + files) for AI workers.
"""
...
def _queue_put(event_queue: events.AsyncEventQueue, loop: asyncio.AbstractEventLoop, event_name: str, payload) -> None:
"""Thread-safe helper to push an event to the AsyncEventQueue from a worker thread."""
...
def confirm_execution(payload: str, event_queue: events.AsyncEventQueue, ticket_id: str, loop: asyncio.AbstractEventLoop = None) -> bool:
"""
Pushes an approval request to the GUI and waits for response.
"""
...
def confirm_spawn(role: str, prompt: str, context_md: str, event_queue: events.AsyncEventQueue, ticket_id: str, loop: asyncio.AbstractEventLoop = None) -> Tuple[bool, str, str]:
"""
Pushes a spawn approval request to the GUI and waits for response.
Returns (approved, modified_prompt, modified_context)
"""
...
def run_worker_lifecycle(ticket: Ticket, context: WorkerContext, context_files: List[str] | None = None, event_queue: events.AsyncEventQueue | None = None, engine: Optional['ConductorEngine'] = None, md_content: str = "", loop: asyncio.AbstractEventLoop = None) -> None:
"""
Simulates the lifecycle of a single agent working on a ticket.
Calls the AI client and updates the ticket status based on the response.
Args:
ticket: The ticket to process.
context: The worker context.
context_files: List of files to include in the context.
event_queue: Queue for pushing state updates and receiving approvals.
engine: The conductor engine.
md_content: The markdown context (history + files) for AI workers.
loop: The main asyncio event loop (required for thread-safe queue access).
"""
...
--- File: orchestrator_pm.py ---
import json
import ai_client
import mma_prompts
import aggregate
import summarize
from pathlib import Path
CONDUCTOR_PATH: Path = Path("conductor")
def get_track_history_summary() -> str:
"""
Scans conductor/archive/ and conductor/tracks/ to build a summary of past work.
"""
...
def generate_tracks(user_request: str, project_config: dict, file_items: list[dict], history_summary: str = None) -> list[dict]:
"""
Tier 1 (Strategic PM) call.
Analyzes the project state and user request to generate a list of Tracks.
"""
...
if __name__ == "__main__":
 # Quick CLI test: exercises the Tier 1 (Strategic PM) pipeline end-to-end
 # against the real project config. Requires manual_slop.toml on disk and a
 # working AI backend, so this is a smoke test, not a unit test.
 import project_manager
 proj = project_manager.load_project("manual_slop.toml")
 flat = project_manager.flat_config(proj)
 # Falls back to an empty file list when the config has no files.paths entry.
 file_items = aggregate.build_file_items(Path("."), flat.get("files", {}).get("paths", []))
 print("Testing Tier 1 Track Generation...")
 history = get_track_history_summary()
 tracks = generate_tracks("Implement a basic unit test for the ai_client.py module.", flat, file_items, history_summary=history)
 print(json.dumps(tracks, indent=2))
--- File: outline_tool.py ---
import ast
from pathlib import Path
class CodeOutliner:
def __init__(self) -> None:
...
def outline(self, code: str) -> str:
...
--- File: performance_monitor.py ---
from __future__ import annotations
import time
import psutil
import threading
from typing import Any
class PerformanceMonitor:
def __init__(self) -> None:
...
def _monitor_cpu(self) -> None:
...
def start_frame(self) -> None:
...
def record_input_event(self) -> None:
...
def start_component(self, name: str) -> None:
...
def end_component(self, name: str) -> None:
...
def end_frame(self) -> None:
...
def _check_alerts(self) -> None:
...
def get_metrics(self) -> dict[str, Any]:
...
def stop(self) -> None:
...
--- File: project_manager.py ---
# project_manager.py
"""
Note(Gemini):
Handles loading/saving of project .toml configurations.
Also handles serializing the discussion history into the TOML format using a special
@timestamp prefix to preserve the exact sequence of events.
"""
import subprocess
import datetime
import tomllib
import tomli_w
import re
import json
from typing import Any, Optional, TYPE_CHECKING, Union
from pathlib import Path
if TYPE_CHECKING:
from models import TrackState
TS_FMT: str = "%Y-%m-%dT%H:%M:%S"
def now_ts() -> str:
...
def parse_ts(s: str) -> Optional[datetime.datetime]:
...
def entry_to_str(entry: dict[str, Any]) -> str:
"""Serialise a disc entry dict -> stored string."""
...
def str_to_entry(raw: str, roles: list[str]) -> dict[str, Any]:
"""Parse a stored string back to a disc entry dict."""
...
def get_git_commit(git_dir: str) -> str:
...
def get_git_log(git_dir: str, n: int = 5) -> str:
...
def default_discussion() -> dict[str, Any]:
...
def default_project(name: str = "unnamed") -> dict[str, Any]:
...
def get_history_path(project_path: Union[str, Path]) -> Path:
"""Return the Path to the sibling history TOML file for a given project."""
...
def load_project(path: Union[str, Path]) -> dict[str, Any]:
"""
Load a project TOML file.
Automatically migrates legacy 'discussion' keys to a sibling history file.
"""
...
def load_history(project_path: Union[str, Path]) -> dict[str, Any]:
"""Load the segregated discussion history from its dedicated TOML file."""
...
def clean_nones(data: Any) -> Any:
"""Recursively remove None values from a dictionary/list."""
...
def save_project(proj: dict[str, Any], path: Union[str, Path], disc_data: Optional[dict[str, Any]] = None) -> None:
"""
Save the project TOML.
If 'discussion' is present in proj, it is moved to the sibling history file.
"""
...
def migrate_from_legacy_config(cfg: dict[str, Any]) -> dict[str, Any]:
"""Build a fresh project dict from a legacy flat config.toml. Does NOT save."""
...
def flat_config(proj: dict[str, Any], disc_name: Optional[str] = None, track_id: Optional[str] = None) -> dict[str, Any]:
"""Return a flat config dict compatible with aggregate.run()."""
...
def save_track_state(track_id: str, state: 'TrackState', base_dir: Union[str, Path] = ".") -> None:
"""
Saves a TrackState object to conductor/tracks/<track_id>/state.toml.
"""
...
def load_track_state(track_id: str, base_dir: Union[str, Path] = ".") -> Optional['TrackState']:
"""
Loads a TrackState object from conductor/tracks/<track_id>/state.toml.
"""
...
def load_track_history(track_id: str, base_dir: Union[str, Path] = ".") -> list[str]:
"""
Loads the discussion history for a specific track from its state.toml.
Returns a list of entry strings formatted with @timestamp.
"""
...
def save_track_history(track_id: str, history: list[str], base_dir: Union[str, Path] = ".") -> None:
"""
Saves the discussion history for a specific track to its state.toml.
'history' is expected to be a list of formatted strings.
"""
...
def get_all_tracks(base_dir: Union[str, Path] = ".") -> list[dict[str, Any]]:
"""
Scans the conductor/tracks/ directory and returns a list of dictionaries
containing track metadata: 'id', 'title', 'status', 'complete', 'total',
and 'progress' (0.0 to 1.0).
Handles missing or malformed metadata.json or state.toml by falling back
to available info or defaults.
"""
...
--- File: reproduce_missing_hints.py ---
import subprocess
import sys
def test_type_hints() -> None:
...
if __name__ == "__main__":
test_type_hints()
--- File: run_tests.py ---
import argparse
import sys
import tomllib
import pytest
from typing import Dict, List, Any
def load_manifest(path: str) -> Dict[str, Any]:
"""
Loads a manifest file (expected to be in TOML format) from the given path.
Args:
path: The path to the TOML manifest file.
Returns:
A dictionary representing the loaded manifest.
Raises:
FileNotFoundError: If the manifest file does not exist.
tomllib.TOMLDecodeError: If the manifest file is not valid TOML.
"""
...
def get_test_files(manifest: Dict[str, Any], category: str) -> List[str]:
"""
Determines the list of test files based on the manifest and a specified category.
Args:
manifest: The loaded manifest dictionary.
category: The category of tests to retrieve.
Returns:
A list of file paths corresponding to the tests in the given category.
Returns an empty list if the category is not found or has no tests.
"""
...
def main() -> None:
...
if __name__ == "__main__":
main()
--- File: scripts\__init__.py ---
--- File: scripts\ai_style_formatter.py ---
from __future__ import annotations
import tokenize
import io
import os
import sys
def format_code(source: str) -> str:
"""
Formats Python code to use exactly 1 space for indentation (including continuations),
max 1 blank line between top-level definitions, and 0 blank lines inside
function/method bodies.
"""
...
def process_file(file_path: str, write: bool) -> None:
...
def main() -> None:
...
if __name__ == "__main__":
main()
--- File: scripts\apply_type_hints.py ---
"""
Type hint applicator for gui_2.py.
Does a single-pass AST-guided line edit to add type annotations.
No dependency on mcp_client — operates directly on file lines.
Run: uv run python scripts/apply_type_hints.py
"""
import ast
import re
import sys
import os
from typing import Any
BASE: str = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
stats: dict[str, Any] = {"auto_none": 0, "manual_sig": 0, "vars": 0, "errors": []}
def abs_path(filename: str) -> str:
...
def has_value_return(node: ast.AST) -> bool:
"""Check if function has any 'return <expr>' (not bare return or return None)."""
...
def collect_auto_none(tree: ast.Module) -> list[tuple[str, ast.AST]]:
"""Collect functions that can safely get -> None annotation."""
 ...
def apply_return_none_single_pass(filepath: str) -> int:
 """Add '-> None' to the functions collected by collect_auto_none; returns count applied."""
 ...
def apply_manual_sigs(filepath: str, replacements: list[tuple[str, str]]) -> int:
 """Apply manual signature replacements (regex pattern, replacement); returns count applied."""
 ...
def apply_var_replacements(filepath: str, var_replacements: list[tuple[str, str]]) -> int:
 """Apply variable annotation replacements.
 var_replacements: list of (regex_pattern_for_old_decl, replacement_decl)
 """
 ...
def verify_syntax(filepath: str) -> str:
...
GUI2_MANUAL_SIGS: list[tuple[str, str]] = [
(r'def resolve_pending_action\(self, action_id: str, approved: bool\):',
r'def resolve_pending_action(self, action_id: str, approved: bool) -> bool:'),
(r'def _cb_start_track\(self, user_data=None\):',
r'def _cb_start_track(self, user_data: Any = None) -> None:'),
(r'def _start_track_logic\(self, track_data\):',
r'def _start_track_logic(self, track_data: dict[str, Any]) -> None:'),
(r'def _cb_ticket_retry\(self, ticket_id\):',
r'def _cb_ticket_retry(self, ticket_id: str) -> None:'),
(r'def _cb_ticket_skip\(self, ticket_id\):',
r'def _cb_ticket_skip(self, ticket_id: str) -> None:'),
(r'def _render_ticket_dag_node\(self, ticket, tickets_by_id, children_map, rendered\):',
r'def _render_ticket_dag_node(self, ticket: Ticket, tickets_by_id: dict[str, Ticket], children_map: dict[str, list[str]], rendered: set[str]) -> None:'),
]
# ============================================================
# gui_2.py variable type annotations
# ============================================================
GUI2_VAR_REPLACEMENTS: list[tuple[str, str]] = [
(r'^CONFIG_PATH = ', 'CONFIG_PATH: Path = '),
(r'^PROVIDERS = ', 'PROVIDERS: list[str] = '),
(r'^COMMS_CLAMP_CHARS = ', 'COMMS_CLAMP_CHARS: int = '),
(r'^C_OUT = ', 'C_OUT: tuple[float, ...] = '),
(r'^C_IN = ', 'C_IN: tuple[float, ...] = '),
(r'^C_REQ = ', 'C_REQ: tuple[float, ...] = '),
(r'^C_RES = ', 'C_RES: tuple[float, ...] = '),
(r'^C_TC = ', 'C_TC: tuple[float, ...] = '),
(r'^C_TR = ', 'C_TR: tuple[float, ...] = '),
(r'^C_TRS = ', 'C_TRS: tuple[float, ...] = '),
(r'^C_LBL = ', 'C_LBL: tuple[float, ...] = '),
(r'^C_VAL = ', 'C_VAL: tuple[float, ...] = '),
(r'^C_KEY = ', 'C_KEY: tuple[float, ...] = '),
(r'^C_NUM = ', 'C_NUM: tuple[float, ...] = '),
(r'^C_SUB = ', 'C_SUB: tuple[float, ...] = '),
(r'^DIR_COLORS = ', 'DIR_COLORS: dict[str, tuple[float, ...]] = '),
(r'^KIND_COLORS = ', 'KIND_COLORS: dict[str, tuple[float, ...]] = '),
(r'^HEAVY_KEYS = ', 'HEAVY_KEYS: set[str] = '),
(r'^DISC_ROLES = ', 'DISC_ROLES: list[str] = '),
(r'^AGENT_TOOL_NAMES = ', 'AGENT_TOOL_NAMES: list[str] = '),
]
if __name__ == "__main__":
 def _verify_or_exit(phase: str) -> None:
  """Re-parse gui_2.py after a phase; abort the whole run if syntax broke.
  (Previously this check was copy-pasted verbatim after Phases A and B.)"""
  r = verify_syntax("gui_2.py")
  if "Error" in r:
   print(f" ABORT: {r}")
   sys.exit(1)
  print(f" Syntax OK after {phase}")
 print("=== Phase A: Auto-apply -> None (single-pass AST) ===")
 n = apply_return_none_single_pass("gui_2.py")
 stats["auto_none"] += n
 print(f" gui_2.py: {n} applied")
 _verify_or_exit("Phase A")
 print("\n=== Phase B: Manual signatures (regex) ===")
 n = apply_manual_sigs("gui_2.py", GUI2_MANUAL_SIGS)
 stats["manual_sig"] += n
 print(f" gui_2.py: {n} applied")
 _verify_or_exit("Phase B")
 print("\n=== Phase C: Variable annotations (regex) ===")
 # Use re.MULTILINE so ^ matches line starts
 def apply_var_replacements_m(filepath: str, replacements: list[tuple[str, str]]) -> int:
  ...
 n = apply_var_replacements_m("gui_2.py", GUI2_VAR_REPLACEMENTS)
 stats["vars"] += n
 print(f" gui_2.py: {n} applied")
 print("\n=== Final Syntax Verification ===")
 # Final check reports rather than aborts, so the summary below always prints.
 r = verify_syntax("gui_2.py")
 print(f" gui_2.py: {r}")
 all_ok = "Error" not in r
 print("\n=== Summary ===")
 print(f" Auto -> None: {stats['auto_none']}")
 print(f" Manual sigs: {stats['manual_sig']}")
 print(f" Variables: {stats['vars']}")
 print(f" Errors: {len(stats['errors'])}")
 if stats['errors']:
  print("\n=== Errors ===")
  for e in stats['errors']:
   print(f" {e}")
 if all_ok:
  print("\nAll files pass syntax check.")
 else:
  print("\nSYNTAX ERRORS DETECTED — review and fix before committing.")
--- File: scripts\check_hints.py ---
# check_hints.py — crude textual scan for 'def' lines that lack a '->' return
# hint. Not AST-based: strings/comments containing 'def ', or defaults that
# contain '):' (e.g. x=(1,)), can fool it. Good enough for a quick audit.
files = ['ai_client.py', 'aggregate.py', 'mcp_client.py', 'shell_runner.py']
for file_path in files:
 print(f"Checking {file_path}...")
 with open(file_path, 'r', encoding='utf-8') as f:
  lines = f.readlines()
 for i, line in enumerate(lines):
  if not line.strip().startswith('def '):
   continue
  if '->' in line:
   continue
  # Accumulate continuation lines until the signature closes with '):'.
  # Single-line defs already contain '):' so the loop body never runs —
  # this unifies what used to be two branches with duplicated prints.
  full_def = line
  j = i + 1
  while '):' not in full_def and j < len(lines):
   full_def += lines[j]
   j += 1
  if '->' not in full_def:
   print(f" Missing hint at line {i+1}: {line.strip()}")
--- File: scripts\check_hints_v2.py ---
import re
# check_hints_v2.py — regex-based scan for missing type hints.
# Fixes over the previous version:
#  * parameters are split on TOP-LEVEL commas only, so nested generics such
#    as dict[str, Any] or tuple defaults no longer yield bogus reports;
#  * the two identical "missing arg type" branches are merged;
#  * the scan runs under a __main__ guard (same behavior when executed as a
#    script; importing the module no longer triggers file I/O).
files = ['ai_client.py', 'aggregate.py', 'mcp_client.py', 'shell_runner.py']
def split_top_level(args: str) -> list[str]:
 """Split a parameter string on commas not nested inside (), [] or {}."""
 parts: list[str] = []
 depth = 0
 buf: list[str] = []
 for ch in args:
  if ch in '([{':
   depth += 1
  elif ch in ')]}':
   depth -= 1
  if ch == ',' and depth == 0:
   parts.append(''.join(buf).strip())
   buf = []
  else:
   buf.append(ch)
 tail = ''.join(buf).strip()
 if tail:
  parts.append(tail)
 return parts
def check_file(file_path: str) -> None:
 """Report functions in *file_path* missing return or argument type hints."""
 print(f"Checking {file_path}...")
 with open(file_path, 'r', encoding='utf-8') as f:
  content = f.read()
 # Find all function definitions.
 # This regex is simplified and might miss some edge cases (decorators,
 # exotic multi-line signatures). But it's better than nothing.
 defs = re.finditer(r'def\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\((.*?)\)\s*(->\s*.*?)?:', content, re.DOTALL)
 for m in defs:
  name = m.group(1)
  args = m.group(2).strip()
  if not m.group(3):
   print(f" Missing return type: {name}({args})")
  if not args:
   continue
  for arg in split_top_level(args):
   if not arg or arg in ('self', 'cls'):
    continue
   # A parameter without ':' lacks a type, whether or not it has a default.
   if ':' not in arg:
    print(f" Missing arg type: {name} -> {arg}")
if __name__ == "__main__":
 for file_path in files:
  check_file(file_path)
--- File: scripts\claude_mma_exec.py ---
import argparse
import subprocess
import os
import ast
import datetime
import re
import tomllib
import tree_sitter
import tree_sitter_python
LOG_FILE: str = 'logs/claude_mma_delegation.log'
MODEL_MAP: dict[str, str] = {
'tier1-orchestrator': 'claude-opus-4-6',
'tier1': 'claude-opus-4-6',
'tier2-tech-lead': 'claude-sonnet-4-6',
'tier2': 'claude-sonnet-4-6',
'tier3-worker': 'claude-sonnet-4-6',
'tier3': 'claude-sonnet-4-6',
'tier4-qa': 'claude-haiku-4-5',
'tier4': 'claude-haiku-4-5',
}
def generate_skeleton(code: str) -> str:
"""
Parses Python code and replaces function/method bodies with '...',
preserving docstrings if present.
"""
...
--- File: scripts\claude_tool_bridge.py ---
import sys
import json
import logging
import os
# Add project root to sys.path so we can import api_hook_client
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if project_root not in sys.path:
sys.path.append(project_root)
try:
from api_hook_client import ApiHookClient
except ImportError:
print("FATAL: Failed to import ApiHookClient. Ensure it's in the Python path.", file=sys.stderr)
sys.exit(1)
def main() -> None:
...
if __name__ == "__main__":
main()
--- File: scripts\cli_tool_bridge.py ---
import sys
import json
import logging
import os
# Add project root to sys.path so we can import api_hook_client.
# This helps in cases where the script is run from different directories.
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if project_root not in sys.path:
 sys.path.append(project_root)
try:
 from api_hook_client import ApiHookClient
except ImportError:
 # The old "fallback" here retried the byte-identical import, which cannot
 # succeed once the first attempt has failed. Fail fast with a clear message
 # instead (same structure as claude_tool_bridge.py).
 # Use basic print for fatal errors since logging isn't set up yet.
 print("FATAL: Failed to import ApiHookClient. Ensure it's in the Python path.", file=sys.stderr)
 sys.exit(1)  # Exit if the core dependency cannot be imported
def main():
# Setup basic logging to stderr.
# Set level to DEBUG to capture all messages, including debug info.
...
if __name__ == "__main__":
main()
--- File: scripts\inject_tools.py ---
import re
with open('mcp_client.py', 'r', encoding='utf-8') as f:
content: str = f.read()
# 1. Add import os if not there
if 'import os' not in content:
content: str = content.replace('import summarize', 'import os\nimport summarize')
# 2. Add the functions before "# ------------------------------------------------------------------ web tools"
functions_code: str = r'''
def py_find_usages(path: str, name: str) -> str:
"""Finds exact string matches of a symbol in a given file or directory."""
p, err = _resolve_and_check(path)
if err: return err
try:
import re
pattern = re.compile(r"\b" + re.escape(name) + r"\b")
results = []
def _search_file(fp):
if fp.name == "history.toml" or fp.name.endswith("_history.toml"): return
if not _is_allowed(fp): return
try:
text = fp.read_text(encoding="utf-8")
lines = text.splitlines()
for i, line in enumerate(lines, 1):
if pattern.search(line):
rel = fp.relative_to(_primary_base_dir if _primary_base_dir else Path.cwd())
results.append(f"{rel}:{i}: {line.strip()[:100]}")
except Exception:
pass
if p.is_file():
_search_file(p)
else:
for root, dirs, files in os.walk(p):
dirs[:] = [d for d in dirs if not d.startswith('.') and d not in ('__pycache__', 'venv', 'env')]
for file in files:
if file.endswith(('.py', '.md', '.toml', '.txt', '.json')):
_search_file(Path(root) / file)
if not results:
return f"No usages found for '{name}' in {p}"
if len(results) > 100:
return "\n".join(results[:100]) + f"\n... (and {len(results)-100} more)"
return "\n".join(results)
except Exception as e:
return f"ERROR finding usages for '{name}': {e}"
def py_get_imports(path: str) -> str:
"""Parses a file's AST and returns a strict list of its dependencies."""
p, err = _resolve_and_check(path)
if err: return err
if not p.is_file() or p.suffix != ".py": return f"ERROR: not a python file: {path}"
try:
import ast
code = p.read_text(encoding="utf-8")
tree = ast.parse(code)
imports = []
for node in tree.body:
if isinstance(node, ast.Import):
for alias in node.names:
imports.append(alias.name)
elif isinstance(node, ast.ImportFrom):
module = node.module or ""
for alias in node.names:
imports.append(f"{module}.{alias.name}" if module else alias.name)
if not imports: return "No imports found."
return "Imports:\n" + "\n".join(f" - {i}" for i in imports)
except Exception as e:
return f"ERROR getting imports for '{path}': {e}"
def py_check_syntax(path: str) -> str:
"""Runs a quick syntax check on a Python file."""
p, err = _resolve_and_check(path)
if err: return err
if not p.is_file() or p.suffix != ".py": return f"ERROR: not a python file: {path}"
try:
import ast
code = p.read_text(encoding="utf-8")
ast.parse(code)
return f"Syntax OK: {path}"
except SyntaxError as e:
return f"SyntaxError in {path} at line {e.lineno}, offset {e.offset}: {e.msg}\n{e.text}"
except Exception as e:
return f"ERROR checking syntax for '{path}': {e}"
def py_get_hierarchy(path: str, class_name: str) -> str:
"""Scans the project to find subclasses of a given class."""
p, err = _resolve_and_check(path)
if err: return err
import ast
subclasses = []
def _search_file(fp):
if not _is_allowed(fp): return
try:
code = fp.read_text(encoding="utf-8")
tree = ast.parse(code)
for node in ast.walk(tree):
if isinstance(node, ast.ClassDef):
for base in node.bases:
if isinstance(base, ast.Name) and base.id == class_name:
subclasses.append(f"{fp.name}: class {node.name}({class_name})")
elif isinstance(base, ast.Attribute) and base.attr == class_name:
subclasses.append(f"{fp.name}: class {node.name}({base.value.id}.{class_name})")
except Exception:
pass
try:
if p.is_file():
_search_file(p)
else:
for root, dirs, files in os.walk(p):
dirs[:] = [d for d in dirs if not d.startswith('.') and d not in ('__pycache__', 'venv', 'env')]
for file in files:
if file.endswith('.py'):
_search_file(Path(root) / file)
if not subclasses:
return f"No subclasses of '{class_name}' found in {p}"
return f"Subclasses of '{class_name}':\n" + "\n".join(f" - {s}" for s in subclasses)
except Exception as e:
return f"ERROR finding subclasses of '{class_name}': {e}"
def py_get_docstring(path: str, name: str) -> str:
"""Extracts the docstring for a specific module, class, or function."""
p, err = _resolve_and_check(path)
if err: return err
if not p.is_file() or p.suffix != ".py": return f"ERROR: not a python file: {path}"
try:
import ast
code = p.read_text(encoding="utf-8")
tree = ast.parse(code)
if not name or name == "module":
doc = ast.get_docstring(tree)
return doc if doc else "No module docstring found."
node = _get_symbol_node(tree, name)
if not node: return f"ERROR: could not find symbol '{name}' in {path}"
doc = ast.get_docstring(node)
return doc if doc else f"No docstring found for '{name}'."
except Exception as e:
return f"ERROR getting docstring for '{name}': {e}"
def get_tree(path: str, max_depth: int = 2) -> str:
"""Returns a directory structure up to a max depth."""
p, err = _resolve_and_check(path)
if err: return err
if not p.is_dir(): return f"ERROR: not a directory: {path}"
try:
max_depth = int(max_depth)
def _build_tree(dir_path, current_depth, prefix=""):
if current_depth > max_depth: return []
lines = []
try:
entries = sorted(dir_path.iterdir(), key=lambda e: (e.is_file(), e.name.lower()))
except PermissionError:
return []
# Filter
entries = [e for e in entries if not e.name.startswith('.') and e.name not in ('__pycache__', 'venv', 'env') and e.name != "history.toml" and not e.name.endswith("_history.toml")]
for i, entry in enumerate(entries):
is_last = (i == len(entries) - 1)
connector = "└── " if is_last else "├── "
lines.append(f"{prefix}{connector}{entry.name}")
if entry.is_dir():
extension = " " if is_last else "│ "
lines.extend(_build_tree(entry, current_depth + 1, prefix + extension))
return lines
tree_lines = [f"{p.name}/"] + _build_tree(p, 1)
return "\n".join(tree_lines)
except Exception as e:
return f"ERROR generating tree for '{path}': {e}"
# ------------------------------------------------------------------ web tools'''
# 2. Splice the new tool functions in just above the 'web tools' banner
# (functions_code re-emits the banner as its last line, so it is preserved).
content: str = content.replace('# ------------------------------------------------------------------ web tools', functions_code)
# 3. Update TOOL_NAMES
# NOTE(review): [^}]* assumes TOOL_NAMES is a flat one-level set literal with
# no nested braces; if the pattern is not found this whole step is silently
# skipped (no error is raised).
old_tool_names_match: re.Match | None = re.search(r'TOOL_NAMES\s*=\s*\{([^}]*)\}', content)
if old_tool_names_match:
 old_names: str = old_tool_names_match.group(1)
 new_names: str = old_names + ', "py_find_usages", "py_get_imports", "py_check_syntax", "py_get_hierarchy", "py_get_docstring", "get_tree"'
 content: str = content.replace(old_tool_names_match.group(0), f'TOOL_NAMES = {{{new_names}}}')
dispatch_additions: str = r'''
if tool_name == "py_find_usages":
return py_find_usages(tool_input.get("path", ""), tool_input.get("name", ""))
if tool_name == "py_get_imports":
return py_get_imports(tool_input.get("path", ""))
if tool_name == "py_check_syntax":
return py_check_syntax(tool_input.get("path", ""))
if tool_name == "py_get_hierarchy":
return py_get_hierarchy(tool_input.get("path", ""), tool_input.get("class_name", ""))
if tool_name == "py_get_docstring":
return py_get_docstring(tool_input.get("path", ""), tool_input.get("name", ""))
if tool_name == "get_tree":
return get_tree(tool_input.get("path", ""), tool_input.get("max_depth", 2))
return f"ERROR: unknown MCP tool '{tool_name}'"
'''
content: str = re.sub(
r' return f"ERROR: unknown MCP tool \'{tool_name}\'"', dispatch_additions.strip(), content)
# 5. Update MCP_TOOL_SPECS
mcp_tool_specs_addition: str = r'''
{
"name": "py_find_usages",
"description": "Finds exact string matches of a symbol in a given file or directory.",
"parameters": {
"type": "object",
"properties": {
"path": { "type": "string", "description": "Path to file or directory to search." },
"name": { "type": "string", "description": "The symbol/string to search for." }
},
"required": ["path", "name"]
}
},
{
"name": "py_get_imports",
"description": "Parses a file's AST and returns a strict list of its dependencies.",
"parameters": {
"type": "object",
"properties": {
"path": { "type": "string", "description": "Path to the .py file." }
},
"required": ["path"]
}
},
{
"name": "py_check_syntax",
"description": "Runs a quick syntax check on a Python file.",
"parameters": {
"type": "object",
"properties": {
"path": { "type": "string", "description": "Path to the .py file." }
},
"required": ["path"]
}
},
{
"name": "py_get_hierarchy",
"description": "Scans the project to find subclasses of a given class.",
"parameters": {
"type": "object",
"properties": {
"path": { "type": "string", "description": "Directory path to search in." },
"class_name": { "type": "string", "description": "Name of the base class." }
},
"required": ["path", "class_name"]
}
},
{
"name": "py_get_docstring",
"description": "Extracts the docstring for a specific module, class, or function.",
"parameters": {
"type": "object",
"properties": {
"path": { "type": "string", "description": "Path to the .py file." },
"name": { "type": "string", "description": "Name of symbol or 'module' for the file docstring." }
},
"required": ["path", "name"]
}
},
{
"name": "get_tree",
"description": "Returns a directory structure up to a max depth.",
"parameters": {
"type": "object",
"properties": {
"path": { "type": "string", "description": "Directory path." },
"max_depth": { "type": "integer", "description": "Maximum depth to recurse (default 2)." }
},
"required": ["path"]
}
}
]
'''
# 5b. Replace the final ']' of the file (assumed to terminate MCP_TOOL_SPECS)
# with the new spec entries; the addition itself ends with ']'.
# NOTE(review): if the existing last spec dict does NOT end with a trailing
# comma before ']', the spliced result is '}' immediately followed by '{' and
# the rewritten mcp_client.py will not parse — verify trailing-comma style or
# prepend ',' to mcp_tool_specs_addition. Also, re.sub treats the addition as
# a replacement TEMPLATE ('\\' and '\\1' would be expanded); safe here only
# because the addition contains no backslashes.
content: str = re.sub(
 r'\]\s*$', mcp_tool_specs_addition.strip(), content)
with open('mcp_client.py', 'w', encoding='utf-8') as f:
 f.write(content)
print("Injected new tools.")
--- File: scripts\mcp_server.py ---
"""
MCP server exposing Manual Slop's custom tools (mcp_client.py) to Claude Code.
All 26 tools from mcp_client.MCP_TOOL_SPECS are served, plus run_powershell.
Delegates to mcp_client.dispatch() for all tools except run_powershell,
which routes through shell_runner.run_powershell() directly.
Usage (in .claude/settings.json mcpServers):
"command": "uv", "args": ["run", "python", "scripts/mcp_server.py"]
"""
import asyncio
import os
import sys
# Add project root to sys.path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import mcp_client
import shell_runner
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import Tool, TextContent
# run_powershell is handled by shell_runner, not mcp_client.dispatch()
# Define its spec here since it's not in MCP_TOOL_SPECS
RUN_POWERSHELL_SPEC = {
"name": "run_powershell",
"description": (
"Run a PowerShell script within the project base directory. "
"Returns combined stdout, stderr, and exit code. "
"60-second timeout. Use for builds, tests, and system commands."
),
"parameters": {
"type": "object",
"properties": {
"script": {
"type": "string",
"description": "PowerShell script content to execute."
}
},
"required": ["script"]
}
}
server = Server("manual-slop-tools")
@server.list_tools()
async def list_tools() -> list[Tool]:
...
@server.call_tool()
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
...
async def main() -> None:
# Configure mcp_client with the project root so py_* tools are not ACCESS DENIED
...
if __name__ == "__main__":
asyncio.run(main())
--- File: scripts\mma_exec.py ---
import argparse
import subprocess
import json
import os
import tomllib
import tree_sitter
import tree_sitter_python
import ast
import datetime
LOG_FILE: str = 'logs/errors/mma_delegation.log'
def generate_skeleton(code: str) -> str:
"""
Parses Python code and replaces function/method bodies with '...',
preserving docstrings if present.
"""
...
--- File: scripts\scan_all_hints.py ---
"""Scan all .py files for missing type hints. Writes scan_report.txt."""
import ast
import os
SKIP: set[str] = {'.git', '__pycache__', '.venv', 'venv', 'node_modules', '.claude', '.gemini'}
BASE: str = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
os.chdir(BASE)
results: dict[str, tuple[int, int, int, int]] = {}
for root, dirs, files in os.walk('.'):
dirs[:] = [d for d in dirs if d not in SKIP]
for f in files:
if not f.endswith('.py'):
continue
path: str = os.path.join(root, f).replace('\\', '/')
try:
with open(path, 'r', encoding='utf-8-sig') as fh:
tree = ast.parse(fh.read())
except Exception:
continue
counts: list[int] = [0, 0, 0] # nr, up, uv
def scan(scope: ast.AST, prefix: str = '') -> None:
# Iterate top-level nodes in this scope
...
scan(tree)
nr, up, uv = counts
total: int = nr + up + uv
if total > 0:
results[path] = (nr, up, uv, total)
# Render the per-file table (worst offenders first) and write scan_report.txt.
header: list[str] = [
 f'Files with untyped items: {len(results)}',
 '',
 f'{"File":<58} {"NoRet":>6} {"Params":>7} {"Vars":>5} {"Total":>6}',
 '-' * 85,
]
ranked = sorted(results.items(), key=lambda kv: kv[1][3], reverse=True)
rows: list[str] = []
gt: int = 0
for path, (nr, up, uv, t) in ranked:
 rows.append(f'{path:<58} {nr:>6} {up:>7} {uv:>5} {t:>6}')
 gt += t
footer: list[str] = [
 '-' * 85,
 f'{"TOTAL":<58} {"":>6} {"":>7} {"":>5} {gt:>6}',
]
report: str = '\n'.join(header + rows + footer)
with open('scan_report.txt', 'w', encoding='utf-8') as f:
 f.write(report)
--- File: scripts\slice_tools.py ---
import sys
import ast
def get_slice(filepath: str, start_line: int | str, end_line: int | str) -> str:
...
def set_slice(filepath: str, start_line: int | str, end_line: int | str, new_content: str) -> None:
...
def get_def(filepath: str, symbol_name: str) -> str:
...
def set_def(filepath: str, symbol_name: str, new_content: str) -> None:
...
if __name__ == "__main__":
 # CLI dispatcher for the slice/def tools above.
 if len(sys.argv) < 2:
  print("Usage: python slice_tools.py <command> [args...]")
  sys.exit(1)
 cmd = sys.argv[1]
 if cmd == "get_slice":
  print(get_slice(sys.argv[2], sys.argv[3], sys.argv[4]), end="")
 elif cmd == "set_slice":
  # argv[5] is a path to a file holding the replacement text.
  with open(sys.argv[5], 'r', encoding='utf-8') as f:
   new_content = f.read()
  set_slice(sys.argv[2], sys.argv[3], sys.argv[4], new_content)
 elif cmd == "get_def":
  print(get_def(sys.argv[2], sys.argv[3]), end="")
 elif cmd == "set_def":
  # argv[4] is a path to a file holding the new definition body.
  with open(sys.argv[4], 'r', encoding='utf-8') as f:
   new_content = f.read()
  set_def(sys.argv[2], sys.argv[3], new_content)
 else:
  # Previously an unknown command fell through silently; fail loudly so a
  # typo'd command is not mistaken for success.
  print(f"Unknown command: {cmd}")
  print("Usage: python slice_tools.py <command> [args...]")
  sys.exit(1)
--- File: scripts\temp_def.py ---
def format_code(source: str) -> str:
"""
Formats Python code to use exactly 1 space for indentation (including continuations),
max 1 blank line between top-level definitions, and 0 blank lines inside
function/method bodies.
Args:
source: The Python source code to format.
Returns:
The formatted source code.
"""
...
--- File: scripts\tool_call.py ---
import sys
import json
import os
import io
# Force UTF-8 for stdout/stderr to avoid encoding issues on Windows
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
# Add project root to sys.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
try:
 import mcp_client
 import shell_runner
except ImportError:
 # On failure, emit a JSON object on stdout — presumably the calling process
 # parses our stdout as JSON and expects structured errors; verify against
 # the caller before changing this shape.
 print(json.dumps({"error": "Failed to import required modules"}))
 sys.exit(1)
def main() -> None:
...
if __name__ == "__main__":
main()
--- File: scripts\tool_discovery.py ---
import json
import sys
import os
# Add project root to sys.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
try:
import mcp_client
except ImportError as e:
# Print the error to stderr to diagnose
print(f"ImportError in discovery: {e}", file=sys.stderr)
print("[]")
sys.exit(0)
def main() -> None:
...
if __name__ == "__main__":
main()
--- File: scripts\type_hint_scanner.py ---
import ast
import sys
def get_missing_hints(file_path: str):
 """Scan *file_path* for definitions lacking type hints.
 Returns an iterable of records; each record is a mapping with at least
 'lineno' and 'name' keys (as consumed by the CLI driver below).
 """
 ...
if __name__ == "__main__":
 # CLI: python type_hint_scanner.py <file.py>
 if len(sys.argv) > 1:
  for m in get_missing_hints(sys.argv[1]):
   print(f"Line {m['lineno']}: {m['name']}")
--- File: session_logger.py ---
# session_logger.py
"""
Opens timestamped log/script files at startup and keeps them open for the
lifetime of the process. The next run of the GUI creates new files; the
previous run's files are simply closed when the process exits.
File layout
-----------
logs/sessions/
comms_<ts>.log - every comms entry (direction/kind/payload) as JSON-L
toolcalls_<ts>.log - sequential record of every tool invocation
clicalls_<ts>.log - sequential record of every CLI subprocess call
scripts/generated/
<ts>_<seq:04d>.ps1 - each PowerShell script the AI generated, in order
Where <ts> = YYYYMMDD_HHMMSS of when this session was started.
"""
import atexit
import datetime
import json
import threading
from typing import Any, Optional, TextIO
from pathlib import Path
# ---- module-level session state ----
# Presumably populated by open_session() and released by close_session();
# lives for the whole process (see the module docstring above) — confirm.
_LOG_DIR: Path = Path("./logs/sessions")
_SCRIPTS_DIR: Path = Path("./scripts/generated")
_ts: str = ""  # session timestamp string e.g. "20260301_142233"
_session_id: str = ""  # YYYYMMDD_HHMMSS[_Label]
_session_dir: Optional[Path] = None  # Path to the sub-directory for this session
_seq: int = 0  # monotonic counter for script files this session
_seq_lock: threading.Lock = threading.Lock()  # guards _seq across threads
_comms_fh: Optional[TextIO] = None  # file handle: logs/sessions/<session_id>/comms.log
_tool_fh: Optional[TextIO] = None  # file handle: logs/sessions/<session_id>/toolcalls.log
_api_fh: Optional[TextIO] = None  # file handle: logs/sessions/<session_id>/apihooks.log
_cli_fh: Optional[TextIO] = None  # file handle: logs/sessions/<session_id>/clicalls.log
def _now_ts() -> str:
...
def open_session(label: Optional[str] = None) -> None:
"""
Called once at GUI startup. Creates the log directories if needed and
opens the log files for this session within a sub-directory.
"""
...
def close_session() -> None:
"""Flush and close all log files. Called on clean exit."""
...
def log_api_hook(method: str, path: str, payload: str) -> None:
"""Log an API hook invocation."""
...
def log_comms(entry: dict[str, Any]) -> None:
"""
Append one comms entry to the comms log file as a JSON-L line.
Thread-safe (GIL + line-buffered file).
"""
...
def log_tool_call(script: str, result: str, script_path: Optional[str]) -> Optional[str]:
"""
Append a tool-call record to the toolcalls log and write the PS1 script to
scripts/generated/. Returns the path of the written script file.
"""
...
def log_cli_call(command: str, stdin_content: Optional[str], stdout_content: Optional[str], stderr_content: Optional[str], latency: float) -> None:
"""Log details of a CLI subprocess execution."""
...
--- File: shell_runner.py ---
# shell_runner.py
import os
import subprocess
import shutil
from pathlib import Path
from typing import Callable, Optional
try:
    # tomllib is stdlib from Python 3.11; fall back to the tomli backport
    # (same API) on older interpreters.
    import tomllib
except ImportError:
    import tomli as tomllib  # type: ignore[no-redef]
TIMEOUT_SECONDS: int = 60  # presumably the subprocess timeout for run_powershell — confirm
_ENV_CONFIG: dict = {}  # cached mcp_env.toml contents (see _load_env_config)
def _load_env_config() -> dict:
"""Load mcp_env.toml from project root (sibling of this file or parent dir)."""
...
def _build_subprocess_env() -> dict[str, str]:
"""Build env dict for subprocess: current env + mcp_env.toml overrides."""
...
def run_powershell(script: str, base_dir: str, qa_callback: Optional[Callable[[str], str]] = None) -> str:
"""
Run a PowerShell script with working directory set to base_dir.
Returns a string combining stdout, stderr, and exit code.
Environment is configured via mcp_env.toml (project root).
If qa_callback is provided and the command fails or has stderr,
the callback is called with the stderr content and its result is appended.
"""
...
--- File: simulation\live_walkthrough.py ---
import os
import time
from api_hook_client import ApiHookClient
from simulation.workflow_sim import WorkflowSimulator
def main() -> None:
...
if __name__ == "__main__":
main()
--- File: simulation\ping_pong.py ---
import sys
import os
import time
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
from simulation.user_agent import UserSimAgent
def main():
...
if __name__ == "__main__":
main()
--- File: simulation\sim_ai_settings.py ---
import time
from simulation.sim_base import BaseSimulation, run_sim
class AISettingsSimulation(BaseSimulation):
def run(self) -> None:
...
if __name__ == "__main__":
run_sim(AISettingsSimulation)
--- File: simulation\sim_base.py ---
import sys
import os
import time
from typing import Any, Optional
from api_hook_client import ApiHookClient
from simulation.workflow_sim import WorkflowSimulator
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
class BaseSimulation:
def __init__(self, client: ApiHookClient = None) -> None:
...
def setup(self, project_name: str = "SimProject") -> None:
...
def teardown(self) -> None:
...
def get_value(self, tag: str) -> Any:
...
def wait_for_event(self, event_type: str, timeout: int = 5) -> Optional[dict]:
...
def assert_panel_visible(self, panel_tag: str, msg: str = None) -> None:
...
def wait_for_element(self, tag: str, timeout: int = 2) -> bool:
...
def run_sim(sim_class: type) -> None:
"""Helper to run a simulation class standalone."""
...
--- File: simulation\sim_context.py ---
import os
import time
from simulation.sim_base import BaseSimulation, run_sim
class ContextSimulation(BaseSimulation):
def run(self) -> None:
...
if __name__ == "__main__":
run_sim(ContextSimulation)
--- File: simulation\sim_execution.py ---
import os
import time
from simulation.sim_base import BaseSimulation, run_sim
class ExecutionSimulation(BaseSimulation):
def setup(self, project_name: str = "SimProject") -> None:
...
def run(self) -> None:
...
if __name__ == "__main__":
run_sim(ExecutionSimulation)
--- File: simulation\sim_tools.py ---
import time
from simulation.sim_base import BaseSimulation, run_sim
class ToolsSimulation(BaseSimulation):
def run(self) -> None:
...
if __name__ == "__main__":
run_sim(ToolsSimulation)
--- File: simulation\user_agent.py ---
import time
import random
from typing import Any, Callable
import ai_client
class UserSimAgent:
def __init__(self, hook_client: Any, model: str = "gemini-2.5-flash-lite") -> None:
...
def generate_response(self, conversation_history: list[dict]) -> str:
"""
Generates a human-like response based on the conversation history.
conversation_history: list of dicts with 'role' and 'content'
"""
...
def perform_action_with_delay(self, action_func: Callable, *args: Any, **kwargs: Any) -> Any:
"""
Executes an action with a human-like delay.
"""
...
--- File: simulation\workflow_sim.py ---
import time
from api_hook_client import ApiHookClient
from simulation.user_agent import UserSimAgent
class WorkflowSimulator:
def __init__(self, hook_client: ApiHookClient) -> None:
...
def setup_new_project(self, name: str, git_dir: str, project_path: str = None) -> None:
...
def create_discussion(self, name: str) -> None:
...
def switch_discussion(self, name: str) -> None:
...
def load_prior_log(self) -> None:
...
def truncate_history(self, pairs: int) -> None:
...
def run_discussion_turn(self, user_message: str = None) -> dict | None:
...
def run_discussion_turn_async(self, user_message: str = None) -> None:
...
def wait_for_ai_response(self, timeout: int = 60) -> dict | None:
...
--- File: summarize.py ---
# summarize.py
"""
Note(Gemini):
Local heuristic summariser. Doesn't use any AI or network.
Uses Python's AST to reliably pull out classes, methods, and functions.
Regex is used for TOML and Markdown.
The rationale here is simple: giving the AI the *structure* of a codebase is 90%
as good as giving it the full source, but costs 1% of the tokens.
If it needs the full source of a file after reading the summary, it can just call read_file.
"""
# summarize.py
"""
Local symbolic summariser — no AI calls, no network.
For each file, extracts structural information:
.py : imports, classes (with methods), top-level functions, global constants
.toml : top-level table keys + array lengths
.md : headings (h1-h3)
other : line count + first 8 lines as preview
Returns a compact markdown string per file, suitable for use as a low-token
context block that replaces full file contents in the initial <context> send.
"""
import ast
import re
from pathlib import Path
from typing import Callable
# ------------------------------------------------------------------ per-type extractors
def _summarise_python(path: Path, content: str) -> str:
...
def _summarise_toml(path: Path, content: str) -> str:
...
def _summarise_markdown(path: Path, content: str) -> str:
...
def _summarise_generic(path: Path, content: str) -> str:
...
# Dispatch table: file suffix -> extractor callable (path, content) -> summary.
# Presumably consulted by summarise_file() via Path.suffix; suffixes absent
# here get some fallback handling there — confirm against summarise_file.
_SUMMARISERS: dict[str, Callable[[Path, str], str]] = {
    ".py": _summarise_python,
    ".toml": _summarise_toml,
    ".md": _summarise_markdown,
    ".ini": _summarise_generic,  # ini/txt/ps1 share the generic preview summary
    ".txt": _summarise_generic,
    ".ps1": _summarise_generic,
}
def summarise_file(path: Path, content: str) -> str:
"""
Return a compact markdown summary string for a single file.
`content` is the already-read file text (or an error string).
"""
...
def summarise_items(file_items: list[dict]) -> list[dict]:
"""
Given a list of file_item dicts (as returned by aggregate.build_file_items),
return a parallel list of dicts with an added `summary` key.
"""
...
def build_summary_markdown(file_items: list[dict]) -> str:
"""
Build a compact markdown string of file summaries, suitable for the
initial <context> block instead of full file contents.
"""
...
--- File: test_mma_persistence.py ---
import unittest
from pathlib import Path
import project_manager
class TestMMAPersistence(unittest.TestCase):
def test_default_project_has_mma(self) -> None:
...
def test_save_load_mma(self) -> None:
...
if __name__ == "__main__":
unittest.main()
--- File: tests\conftest.py ---
import pytest
import asyncio
import subprocess
import time
import requests
import os
import signal
import sys
import datetime
import shutil
from pathlib import Path
from typing import Generator, Any
from unittest.mock import patch, MagicMock
# Import the App class after patching if necessary, but here we just need the type hint
from gui_2 import App
class VerificationLogger:
def __init__(self, test_name: str, script_name: str) -> None:
...
def log_state(self, field: str, before: Any, after: Any) -> None:
...
def finalize(self, title: str, status: str, result_msg: str) -> None:
...
@pytest.fixture
def vlogger(request) -> VerificationLogger:
"""Fixture to provide a VerificationLogger instance to a test."""
...
def kill_process_tree(pid: int | None) -> None:
"""Robustly kills a process and all its children."""
...
@pytest.fixture
def mock_app() -> App:
"""
Mock version of the App for simple unit tests that don't need a loop.
"""
...
@pytest.fixture
def app_instance() -> Generator[App, None, None]:
"""
Centralized App instance with all external side effects mocked.
Matches the pattern used in test_token_viz.py and test_gui_phase4.py.
"""
...
@pytest.fixture(scope="session")
def live_gui() -> Generator[tuple[subprocess.Popen, str], None, None]:
"""
Session-scoped fixture that starts gui_2.py with --enable-test-hooks.
Includes high-signal environment telemetry and workspace isolation.
"""
...
--- File: tests\mock_gemini_cli.py ---
import sys
import json
import os
def main() -> None:
...
if __name__ == "__main__":
main()
--- File: tests\test_agent_capabilities.py ---
import pytest
import sys
import os
from unittest.mock import patch, MagicMock
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import ai_client
def test_agent_capabilities_listing() -> None:
# Mock credentials
...
--- File: tests\test_agent_tools_wiring.py ---
import pytest
import sys
import os
import ai_client
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from ai_client import set_agent_tools, _build_anthropic_tools
def test_set_agent_tools() -> None:
...
def test_build_anthropic_tools_conversion() -> None:
...
--- File: tests\test_ai_client_cli.py ---
from unittest.mock import patch
import ai_client
def test_ai_client_send_gemini_cli() -> None:
"""
Verifies that 'ai_client.send' correctly interacts with 'GeminiCliAdapter'
when the 'gemini_cli' provider is specified.
"""
...
--- File: tests\test_ai_client_list_models.py ---
import ai_client
def test_list_models_gemini_cli() -> None:
"""
Verifies that 'ai_client.list_models' correctly returns a list of models
for the 'gemini_cli' provider.
"""
...
--- File: tests\test_ai_style_formatter.py ---
import textwrap
from scripts.ai_style_formatter import format_code
def test_basic_indentation() -> None:
...
def test_top_level_blank_lines() -> None:
...
def test_inner_blank_lines() -> None:
...
def test_multiline_string_safety() -> None:
...
def test_continuation_indentation() -> None:
...
def test_multiple_top_level_definitions(vlogger) -> None:
...
--- File: tests\test_api_events.py ---
import pytest
from typing import Any
from unittest.mock import MagicMock, patch
import ai_client
class MockUsage:
def __init__(self) -> None:
...
class MockPart:
def __init__(self, text: Any, function_call: Any) -> None:
...
class MockContent:
def __init__(self, parts: Any) -> None:
...
class MockCandidate:
def __init__(self, parts: Any) -> None:
...
def test_ai_client_event_emitter_exists() -> None:
# This should fail initially because 'events' won't exist on ai_client
...
def test_event_emission() -> None:
...
def test_send_emits_events_proper() -> None:
...
def test_send_emits_tool_events() -> None:
...
--- File: tests\test_api_hook_client.py ---
import pytest
from unittest.mock import patch
import sys
import os
# Ensure project root is in path for imports
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
def test_get_status_success(live_gui: tuple) -> None:
"""
Test that get_status successfully retrieves the server status
when the live GUI is running.
"""
...
def test_get_project_success(live_gui: tuple) -> None:
"""
Test successful retrieval of project data from the live GUI.
"""
...
def test_get_session_success(live_gui: tuple) -> None:
"""
Test successful retrieval of session data.
"""
...
def test_post_gui_success(live_gui: tuple) -> None:
"""
Test successful posting of GUI data.
"""
...
def test_get_performance_success(live_gui: tuple) -> None:
"""
Test successful retrieval of performance metrics.
"""
...
def test_unsupported_method_error() -> None:
"""
Test that calling an unsupported HTTP method raises a ValueError.
"""
...
def test_get_text_value() -> None:
"""
Test retrieval of string representation using get_text_value.
"""
...
def test_get_node_status() -> None:
"""
Test retrieval of DAG node status using get_node_status.
"""
...
--- File: tests\test_api_hook_extensions.py ---
import sys
import os
from typing import Any
from unittest.mock import MagicMock, patch
# Ensure project root is in path for imports
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
def test_api_client_has_extensions() -> None:
...
def test_select_tab_integration(live_gui: Any) -> None:
...
def test_select_list_item_integration(live_gui: Any) -> None:
...
def test_get_indicator_state_integration(live_gui: Any) -> None:
...
def test_app_processes_new_actions() -> None:
...
--- File: tests\test_arch_boundary_phase1.py ---
import os
import sys
import unittest
import unittest.mock as mock
import importlib
import inspect
import tempfile
import shutil
# Ensure scripts directory is in sys.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'scripts')))
import mma_exec
class TestArchBoundaryPhase1(unittest.TestCase):
def setUp(self):
...
def tearDown(self):
...
def test_unfettered_modules_constant_removed(self):
"""TEST 1: Check 'UNFETTERED_MODULES' string absent from execute_agent source."""
...
def test_full_module_context_never_injected(self):
"""TEST 2: Verify 'FULL MODULE CONTEXT' not in captured input for mcp_client."""
...
def test_skeleton_used_for_mcp_client(self):
"""TEST 3: Verify 'DEPENDENCY SKELETON' is used for mcp_client."""
...
def test_mma_exec_no_hardcoded_path(self):
"""TEST 4: mma_exec.execute_agent must not contain hardcoded machine paths."""
...
def test_claude_mma_exec_no_hardcoded_path(self):
"""TEST 5: claude_mma_exec.execute_agent must not contain hardcoded machine paths."""
...
if __name__ == '__main__':
unittest.main()
--- File: tests\test_arch_boundary_phase2.py ---
"""
Tests for architecture_boundary_hardening_20260302 — Phase 2.
Tasks 2.1-2.4: MCP tool config exposure + MUTATING_TOOLS + HITL enforcement.
"""
import tomllib
import pytest
from project_manager import default_project
# The four write-capable MCP tools; the tests below require them to be
# disabled by default in manual_slop.toml and default_project().
MUTATING_TOOLS = {"set_file_slice", "py_update_definition", "py_set_signature", "py_set_var_declaration"}
# Every tool name handled by mcp_client.dispatch(); the tests below assert
# that the TOML config, default_project() and gui_2.AGENT_TOOL_NAMES each
# expose this full set.
ALL_DISPATCH_TOOLS = {
    "run_powershell", "read_file", "list_directory", "search_files", "get_file_summary",
    "web_search", "fetch_url", "py_get_skeleton", "py_get_code_outline", "get_file_slice",
    "py_get_definition", "py_update_definition", "py_get_signature", "py_set_signature",
    "py_get_class_summary", "py_get_var_declaration", "py_set_var_declaration", "get_git_diff",
    "py_find_usages", "py_get_imports", "py_check_syntax", "py_get_hierarchy",
    "py_get_docstring", "get_tree", "get_ui_performance", "set_file_slice",
}
# ---------------------------------------------------------------------------
# Task 2.1: manual_slop.toml and default_project() expose all tools
# ---------------------------------------------------------------------------
def test_toml_exposes_all_dispatch_tools():
"""manual_slop.toml [agent.tools] must list every tool in mcp_client.dispatch()."""
...
def test_toml_mutating_tools_disabled_by_default():
"""Mutating tools must default to false in manual_slop.toml."""
...
def test_default_project_exposes_all_dispatch_tools():
"""default_project() agent.tools must list every tool in mcp_client.dispatch()."""
...
def test_default_project_mutating_tools_disabled():
"""Mutating tools must default to False in default_project()."""
...
# ---------------------------------------------------------------------------
# Task 2.2: AGENT_TOOL_NAMES in gui_2.py exposes all dispatch tools
# ---------------------------------------------------------------------------
def test_gui_agent_tool_names_exposes_all_dispatch_tools():
"""AGENT_TOOL_NAMES in gui_2.py must include every tool in mcp_client.dispatch()."""
...
# ---------------------------------------------------------------------------
# Task 2.3: MUTATING_TOOLS constant in mcp_client.py
# ---------------------------------------------------------------------------
def test_mcp_client_has_mutating_tools_constant():
"""mcp_client must expose a MUTATING_TOOLS frozenset."""
...
def test_mutating_tools_contains_write_tools():
"""MUTATING_TOOLS must include all four write tools."""
...
def test_mutating_tools_excludes_read_tools():
"""MUTATING_TOOLS must not include read-only tools."""
...
# ---------------------------------------------------------------------------
# Task 2.4: HITL enforcement in ai_client — mutating tools route through pre_tool_callback
# ---------------------------------------------------------------------------
def test_mutating_tool_triggers_pre_tool_callback(monkeypatch):
"""When a mutating tool is called and pre_tool_callback is set, it must be invoked."""
    ...
def test_rejected_tool_blocks_dispatch(monkeypatch):
    """When pre_tool_callback returns None (rejected), dispatch must NOT be called."""
    ...
def test_non_mutating_tool_skips_callback():
    """Read-only tools must NOT trigger pre_tool_callback."""
    ...
--- File: tests\test_arch_boundary_phase3.py ---
import pytest
from models import Ticket
from dag_engine import TrackDAG, ExecutionEngine
def test_cascade_blocks_simple() -> None:
"""Test that a blocked dependency blocks its immediate dependent."""
...
def test_cascade_blocks_multi_hop() -> None:
"""Test that blocking cascades through multiple levels: A(blocked) -> B -> C."""
...
def test_cascade_blocks_no_cascade_to_completed() -> None:
"""Test that completed tasks are not changed even if a dependency is blocked (though this shouldn't normally happen)."""
...
def test_cascade_blocks_partial_dependencies() -> None:
"""Test that if one dependency is blocked, the dependent is blocked even if others are completed."""
...
def test_cascade_blocks_already_in_progress() -> None:
"""Test that in_progress tasks are not blocked automatically (only todo)."""
...
def test_execution_engine_tick_cascades_blocks() -> None:
"""Test that ExecutionEngine.tick() triggers the cascading blocks."""
...
--- File: tests\test_ast_parser.py ---
import pytest
import tree_sitter
from file_cache import ASTParser
def test_ast_parser_initialization() -> None:
"""Verify that ASTParser can be initialized with a language string."""
...
def test_ast_parser_parse() -> None:
"""Verify that the parse method returns a tree_sitter.Tree."""
...
def test_ast_parser_get_skeleton_python() -> None:
"""Verify that get_skeleton replaces function bodies with '...' while preserving docstrings."""
...
def test_ast_parser_invalid_language() -> None:
"""Verify handling of unsupported or invalid languages."""
...
def test_ast_parser_get_curated_view() -> None:
"""Verify that get_curated_view preserves function bodies with @core_logic or # [HOT]."""
...
--- File: tests\test_async_events.py ---
import asyncio
from events import AsyncEventQueue
def test_async_event_queue_put_get() -> None:
"""Verify that an event can be asynchronously put and retrieved from the queue."""
...
--- File: tests\test_auto_whitelist.py ---
import pytest
from typing import Any
from datetime import datetime
from log_registry import LogRegistry
@pytest.fixture
def registry_setup(tmp_path: Any) -> Any:
...
def test_auto_whitelist_keywords(registry_setup: Any) -> None:
...
def test_auto_whitelist_message_count(registry_setup: Any) -> None:
...
def test_auto_whitelist_large_size(registry_setup: Any) -> None:
...
def test_no_auto_whitelist_insignificant(registry_setup: Any) -> None:
...
--- File: tests\test_cli_tool_bridge.py ---
import unittest
from unittest.mock import patch, MagicMock
import io
import json
import sys
import os
# Add project root to sys.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# Import after path fix
from scripts.cli_tool_bridge import main
class TestCliToolBridge(unittest.TestCase):
def setUp(self) -> None:
...
@patch('sys.stdin', new_callable=io.StringIO)
@patch('sys.stdout', new_callable=io.StringIO)
@patch('api_hook_client.ApiHookClient.request_confirmation')
def test_allow_decision(self, mock_request: MagicMock, mock_stdout: MagicMock, mock_stdin: MagicMock) -> None:
...
@patch('sys.stdin', new_callable=io.StringIO)
@patch('sys.stdout', new_callable=io.StringIO)
@patch('api_hook_client.ApiHookClient.request_confirmation')
def test_deny_decision(self, mock_request: MagicMock, mock_stdout: MagicMock, mock_stdin: MagicMock) -> None:
...
@patch('sys.stdin', new_callable=io.StringIO)
@patch('sys.stdout', new_callable=io.StringIO)
@patch('api_hook_client.ApiHookClient.request_confirmation')
def test_unreachable_hook_server(self, mock_request: MagicMock, mock_stdout: MagicMock, mock_stdin: MagicMock) -> None:
...
if __name__ == '__main__':
unittest.main()
--- File: tests\test_cli_tool_bridge_mapping.py ---
import unittest
from unittest.mock import patch, MagicMock
import io
import json
import sys
import os
# Add project root to sys.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# Import after path fix
from scripts.cli_tool_bridge import main
class TestCliToolBridgeMapping(unittest.TestCase):
def setUp(self) -> None:
...
@patch('sys.stdin', new_callable=io.StringIO)
@patch('sys.stdout', new_callable=io.StringIO)
@patch('api_hook_client.ApiHookClient.request_confirmation')
def test_mapping_from_api_format(self, mock_request: MagicMock, mock_stdout: MagicMock, mock_stdin: MagicMock) -> None:
"""
Verify that bridge correctly maps 'id', 'name', 'input' (Gemini API format)
into tool_name and tool_input for the hook client.
"""
...
if __name__ == '__main__':
unittest.main()
--- File: tests\test_conductor_api_hook_integration.py ---
from unittest.mock import patch
import os
import sys
from typing import Any
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
def simulate_conductor_phase_completion(client: ApiHookClient) -> dict[str, Any]:
"""
Simulates the Conductor agent's logic for phase completion using ApiHookClient.
"""
...
def test_conductor_integrates_api_hook_client_for_verification(live_gui: Any) -> None:
"""
Verify that Conductor's simulated phase completion logic properly integrates
and uses the ApiHookClient for verification against the live GUI.
"""
...
def test_conductor_handles_api_hook_failure(live_gui: Any) -> None:
"""
Verify Conductor handles a simulated API hook verification failure.
We patch the client's get_status to simulate failure even with live GUI.
"""
...
def test_conductor_handles_api_hook_connection_error() -> None:
"""
Verify Conductor handles a simulated API hook connection error (server down).
"""
...
--- File: tests\test_conductor_engine.py ---
import pytest
from unittest.mock import MagicMock, patch
from models import Ticket, Track, WorkerContext
import ai_client
# These tests define the expected interface for multi_agent_conductor.py
# which will be implemented in the next phase of TDD.
def test_conductor_engine_initialization() -> None:
"""
Test that ConductorEngine can be initialized with a Track.
"""
...
@pytest.mark.asyncio
async def test_conductor_engine_run_executes_tickets_in_order(monkeypatch: pytest.MonkeyPatch, vlogger) -> None:
"""
Test that run iterates through executable tickets and calls the worker lifecycle.
"""
    ...
@pytest.mark.asyncio
async def test_run_worker_lifecycle_calls_ai_client_send(monkeypatch: pytest.MonkeyPatch) -> None:
"""
Test that run_worker_lifecycle triggers the AI client and updates ticket status on success.
"""
...
@pytest.mark.asyncio
async def test_run_worker_lifecycle_context_injection(monkeypatch: pytest.MonkeyPatch) -> None:
"""
Test that run_worker_lifecycle can take a context_files list and injects AST views into the prompt.
"""
    ...
@pytest.mark.asyncio
async def test_run_worker_lifecycle_marks_ticket_blocked(monkeypatch: pytest.MonkeyPatch) -> None:
    """
    Test that run_worker_lifecycle marks the ticket as blocked if the AI indicates it cannot proceed.
"""
...
@pytest.mark.asyncio
async def test_run_worker_lifecycle_step_mode_confirmation(monkeypatch: pytest.MonkeyPatch) -> None:
"""
Test that run_worker_lifecycle passes confirm_execution to ai_client.send when step_mode is True.
Verify that if confirm_execution is called (simulated by mocking ai_client.send to call its callback),
the flow works as expected.
"""
    ...
@pytest.mark.asyncio
async def test_run_worker_lifecycle_step_mode_rejection(monkeypatch: pytest.MonkeyPatch) -> None:
    """
    Test that rejecting the confirmation logic (in ai_client, which we simulate here)
would prevent execution. In run_worker_lifecycle, we just check if it's passed.
"""
...
@pytest.mark.asyncio
async def test_conductor_engine_dynamic_parsing_and_execution(monkeypatch: pytest.MonkeyPatch, vlogger) -> None:
"""
Test that parse_json_tickets correctly populates the track and run executes them in dependency order.
"""
    ...
@pytest.mark.asyncio
async def test_run_worker_lifecycle_pushes_response_via_queue(monkeypatch: pytest.MonkeyPatch) -> None:
"""
Test that run_worker_lifecycle pushes a 'response' event with the correct stream_id
via _queue_put when event_queue and loop are provided.
"""
...
def test_run_worker_lifecycle_token_usage_from_comms_log(monkeypatch: pytest.MonkeyPatch) -> None:
"""
Test that run_worker_lifecycle reads token usage from the comms log and
updates engine.tier_usage['Tier 3'] with real input/output token counts.
"""
...
--- File: tests\test_conductor_tech_lead.py ---
import unittest
from unittest.mock import patch
import conductor_tech_lead
import pytest
class TestConductorTechLead(unittest.TestCase):
def test_generate_tickets_parse_error(self) -> None:
...
def test_generate_tickets_success(self) -> None:
...
class TestTopologicalSort(unittest.TestCase):
def test_topological_sort_linear(self) -> None:
...
def test_topological_sort_complex(self) -> None:
...
def test_topological_sort_cycle(self) -> None:
...
def test_topological_sort_empty(self) -> None:
...
def test_topological_sort_missing_dependency(self) -> None:
# If a ticket depends on something not in the list, we should probably handle it or let it fail.
# Usually in our context, we only care about dependencies within the same track.
...
@pytest.mark.asyncio
async def test_topological_sort_vlog(vlogger) -> None:
...
--- File: tests\test_cost_tracker.py ---
import cost_tracker
def test_estimate_cost():
# Test unknown model
...
if __name__ == "__main__":
test_estimate_cost()
print("All cost_tracker tests passed!")
--- File: tests\test_dag_engine.py ---
import pytest
from models import Ticket
from dag_engine import TrackDAG
def test_get_ready_tasks_linear() -> None:
...
def test_get_ready_tasks_branching() -> None:
...
def test_has_cycle_no_cycle() -> None:
...
def test_has_cycle_direct_cycle() -> None:
...
def test_has_cycle_indirect_cycle() -> None:
...
def test_has_cycle_complex_no_cycle() -> None:
...
def test_get_ready_tasks_multiple_deps() -> None:
...
def test_topological_sort() -> None:
...
def test_topological_sort_cycle() -> None:
...
--- File: tests\test_deepseek_infra.py ---
from typing import Any
import pytest
import os
import sys
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import ai_client
import project_manager
def test_credentials_error_mentions_deepseek(monkeypatch: pytest.MonkeyPatch) -> None:
"""
Verify that the error message shown when credentials.toml is missing
includes deepseek instructions.
"""
...
def test_default_project_includes_reasoning_role() -> None:
"""
Verify that 'Reasoning' is included in the default discussion roles
to support DeepSeek-R1 reasoning traces.
"""
...
def test_gui_providers_list() -> None:
"""
Check if 'deepseek' is in the GUI's provider list.
"""
...
def test_deepseek_model_listing() -> None:
"""
Verify that list_models for deepseek returns expected models.
"""
...
def test_gui_provider_list_via_hooks(live_gui: Any) -> None:
"""
Verify 'deepseek' is present in the GUI provider list using API hooks.
"""
...
--- File: tests\test_deepseek_provider.py ---
from unittest.mock import patch, MagicMock
import ai_client
def test_deepseek_model_selection() -> None:
"""
Verifies that ai_client.set_provider('deepseek', 'deepseek-chat') correctly updates the internal state.
"""
...
def test_deepseek_completion_logic() -> None:
"""
Verifies that ai_client.send() correctly calls the DeepSeek API and returns content.
"""
...
def test_deepseek_reasoning_logic() -> None:
"""
Verifies that reasoning_content is captured and wrapped in <thinking> tags.
"""
...
def test_deepseek_tool_calling() -> None:
"""
Verifies that DeepSeek provider correctly identifies and executes tool calls.
"""
...
def test_deepseek_streaming() -> None:
"""
Verifies that DeepSeek provider correctly aggregates streaming chunks.
"""
...
--- File: tests\test_execution_engine.py ---
import pytest
from models import Ticket
from dag_engine import TrackDAG, ExecutionEngine
def test_execution_engine_basic_flow() -> None:
...
def test_execution_engine_update_nonexistent_task() -> None:
...
def test_execution_engine_status_persistence() -> None:
...
def test_execution_engine_auto_queue() -> None:
...
def test_execution_engine_step_mode() -> None:
...
def test_execution_engine_approve_task() -> None:
...
--- File: tests\test_extended_sims.py ---
import pytest
from typing import Any
import time
import sys
import os
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
from simulation.sim_context import ContextSimulation
from simulation.sim_ai_settings import AISettingsSimulation
from simulation.sim_tools import ToolsSimulation
from simulation.sim_execution import ExecutionSimulation
@pytest.mark.integration
def test_context_sim_live(live_gui: Any) -> None:
"""Run the Context & Chat simulation against a live GUI."""
...
@pytest.mark.integration
def test_ai_settings_sim_live(live_gui: Any) -> None:
"""Run the AI Settings simulation against a live GUI."""
...
@pytest.mark.integration
def test_tools_sim_live(live_gui: Any) -> None:
"""Run the Tools & Search simulation against a live GUI."""
...
@pytest.mark.integration
def test_execution_sim_live(live_gui: Any) -> None:
"""Run the Execution & Modals simulation against a live GUI."""
...
--- File: tests\test_gemini_cli_adapter.py ---
import unittest
from typing import Any
from unittest.mock import patch, MagicMock
import json
import subprocess
import sys
import os
# Ensure the project root is in sys.path to resolve imports correctly
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from gemini_cli_adapter import GeminiCliAdapter
class TestGeminiCliAdapter(unittest.TestCase):
def setUp(self) -> None:
...
@patch('subprocess.Popen')
def test_send_starts_subprocess_with_correct_args(self, mock_popen: Any) -> None:
"""
Verify that send(message) correctly starts the subprocess with
--output-format stream-json and the provided message via stdin.
"""
...
@patch('subprocess.Popen')
def test_send_parses_jsonl_output(self, mock_popen: Any) -> None:
"""
Verify that it correctly parses multiple JSONL 'message' events
and returns the combined text.
"""
...
@patch('subprocess.Popen')
def test_send_handles_tool_use_events(self, mock_popen: Any) -> None:
"""
Verify that it correctly handles 'tool_use' events in the stream
by continuing to read until the final 'result' event.
"""
...
@patch('subprocess.Popen')
def test_send_captures_usage_metadata(self, mock_popen: Any) -> None:
"""
Verify that usage data is extracted from the 'result' event.
"""
...
if __name__ == '__main__':
unittest.main()
--- File: tests\test_gemini_cli_adapter_parity.py ---
import unittest
from unittest.mock import patch, MagicMock
import json
import sys
import os
import subprocess
# Ensure the project root is in sys.path to resolve imports correctly
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if project_root not in sys.path:
sys.path.append(project_root)
# Import the class to be tested
from gemini_cli_adapter import GeminiCliAdapter
class TestGeminiCliAdapterParity(unittest.TestCase):
def setUp(self) -> None:
"""Set up a fresh adapter instance and reset session state for each test."""
...
def tearDown(self) -> None:
...
@patch('subprocess.Popen')
def test_count_tokens_uses_estimation(self, mock_popen: MagicMock) -> None:
"""
Test that count_tokens uses character-based estimation.
"""
...
@patch('subprocess.Popen')
def test_send_with_safety_settings_no_flags_added(self, mock_popen: MagicMock) -> None:
"""
Test that the send method does NOT add --safety flags when safety_settings are provided,
as this functionality is no longer supported via CLI flags.
"""
...
@patch('subprocess.Popen')
def test_send_without_safety_settings_no_flags(self, mock_popen: MagicMock) -> None:
"""
Test that when safety_settings is None or an empty list, no --safety flags are added.
"""
...
@patch('subprocess.Popen')
def test_send_with_system_instruction_prepended_to_stdin(self, mock_popen: MagicMock) -> None:
"""
Test that the send method prepends the system instruction to the prompt
sent via stdin, and does NOT add a --system flag to the command.
"""
...
@patch('subprocess.Popen')
def test_send_with_model_parameter(self, mock_popen: MagicMock) -> None:
"""
Test that the send method correctly adds the -m <model> flag when a model is specified.
"""
...
@patch('subprocess.Popen')
def test_send_parses_tool_calls_from_streaming_json(self, mock_popen: MagicMock) -> None:
"""
Test that tool_use messages in the streaming JSON are correctly parsed.
"""
...
if __name__ == '__main__':
unittest.main()
--- File: tests\test_gemini_cli_edge_cases.py ---
import time
import os
import sys
import requests
from typing import Any
from api_hook_client import ApiHookClient
def test_gemini_cli_context_bleed_prevention(live_gui: Any) -> None:
"""
Test that the GeminiCliAdapter correctly filters out echoed 'user' messages
and only shows assistant content in the GUI history.
"""
...
def test_gemini_cli_parameter_resilience(live_gui: Any) -> None:
"""
Test that mcp_client correctly handles 'file_path' and 'dir_path' aliases
sent by the AI instead of 'path'.
"""
...
def test_gemini_cli_loop_termination(live_gui: Any) -> None:
"""
Test that multi-round tool calling correctly terminates and preserves
payload (session context) between rounds.
"""
...
--- File: tests\test_gemini_cli_integration.py ---
from typing import Any
import time
import os
import sys
import requests
from api_hook_client import ApiHookClient
def test_gemini_cli_full_integration(live_gui: Any) -> None:
"""
Integration test for the Gemini CLI provider and tool bridge.
Handles 'ask_received' events from the bridge and any other approval requests.
"""
...
def test_gemini_cli_rejection_and_history(live_gui: Any) -> None:
"""
Integration test for the Gemini CLI provider: Rejection flow and history.
"""
...
--- File: tests\test_gemini_cli_parity_regression.py ---
from typing import Any
from unittest.mock import patch
import ai_client
@patch('ai_client.GeminiCliAdapter')
def test_send_invokes_adapter_send(mock_adapter_class: Any) -> None:
...
@patch('ai_client.GeminiCliAdapter')
def test_get_history_bleed_stats(mock_adapter_class: Any) -> None:
...
--- File: tests\test_gemini_metrics.py ---
import os
import sys
from unittest.mock import MagicMock, patch
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# Import the necessary functions from ai_client, including the reset helper
from ai_client import get_gemini_cache_stats, reset_session
def test_get_gemini_cache_stats_with_mock_client() -> None:
"""
Test that get_gemini_cache_stats correctly processes cache lists
from a mocked client instance.
"""
...
--- File: tests\test_gui2_events.py ---
import pytest
from unittest.mock import patch
from typing import Generator
from gui_2 import App
import ai_client
from events import EventEmitter
@pytest.fixture
def app_instance() -> Generator[type[App], None, None]:
"""
Fixture to create an instance of the gui_2.App class for testing.
It mocks functions that would render a window or block execution.
"""
...
def test_app_subscribes_to_events(app_instance: type[App]) -> None:
"""
This test checks that the App's __init__ method subscribes the necessary
event handlers to the ai_client.events emitter.
This test will fail until the event subscription logic is added to gui_2.App.
"""
...
--- File: tests\test_gui2_layout.py ---
from typing import Generator
import pytest
from unittest.mock import patch
from gui_2 import App
def test_gui2_hubs_exist_in_show_windows(app_instance: App) -> None:
"""
Verifies that the new consolidated Hub windows are defined in the App's show_windows.
This ensures they will be available in the 'Windows' menu.
"""
...
def test_gui2_old_windows_removed_from_show_windows(app_instance: App) -> None:
"""
Verifies that the old fragmented windows are removed from show_windows.
"""
...
--- File: tests\test_gui2_mcp.py ---
import pytest
from unittest.mock import patch, MagicMock
from typing import Generator
from gui_2 import App
import ai_client
from events import EventEmitter
def test_mcp_tool_call_is_dispatched(app_instance: App) -> None:
"""
This test verifies that when the AI returns a tool call for an MCP function,
the ai_client correctly dispatches it to mcp_client.
This will fail until mcp_client is properly integrated.
"""
...
--- File: tests\test_gui2_parity.py ---
import pytest
from typing import Any
import time
import os
import uuid
from pathlib import Path
import sys
# Ensure project root is in path for imports
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
# Define a temporary file path for callback testing
TEST_CALLBACK_FILE = Path("tests/artifacts/temp_callback_output.txt")
@pytest.fixture(scope="function", autouse=True)
def cleanup_callback_file() -> None:
"""Ensures the test callback file is cleaned up before and after each test."""
...
def test_gui2_set_value_hook_works(live_gui: Any) -> None:
"""
Tests that the 'set_value' GUI hook is correctly implemented.
"""
...
def test_gui2_click_hook_works(live_gui: Any) -> None:
"""
Tests that the 'click' GUI hook for the 'Reset' button is implemented.
"""
...
def test_gui2_custom_callback_hook_works(live_gui: Any) -> None:
"""
Tests that the 'custom_callback' GUI hook is correctly implemented.
"""
...
--- File: tests\test_gui2_performance.py ---
import pytest
import time
import sys
import os
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
# Session-wide storage for comparing metrics
_shared_metrics = {}
def test_performance_benchmarking(live_gui: tuple) -> None:
"""
Collects performance metrics for the current GUI script.
"""
...
def test_performance_baseline_check() -> None:
"""
Verifies that we have performance metrics for gui_2.py.
"""
...
--- File: tests\test_gui_async_events.py ---
import pytest
from unittest.mock import MagicMock, patch
from gui_2 import App
from events import UserRequestEvent
@pytest.fixture
def mock_gui() -> App:
...
def test_handle_generate_send_pushes_event(mock_gui: App) -> None:
...
def test_user_request_event_payload() -> None:
...
@pytest.mark.asyncio
async def test_async_event_queue() -> None:
...
--- File: tests\test_gui_diagnostics.py ---
import pytest
from unittest.mock import patch, MagicMock
import importlib.util
import sys
import os
from typing import Any
# Ensure project root is in path for imports
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# Load gui_2.py as a module for testing
spec = importlib.util.spec_from_file_location("gui_2", "gui_2.py")
gui_2 = importlib.util.module_from_spec(spec)
sys.modules["gui_2"] = gui_2
spec.loader.exec_module(gui_2)
from gui_2 import App
@pytest.fixture
def app_instance() -> Any:
...
def test_diagnostics_panel_initialization(app_instance: Any) -> None:
...
def test_diagnostics_history_updates(app_instance: Any) -> None:
"""
Verifies that the internal performance history is updated correctly.
This logic is inside the render loop in gui_2.py, but we can test
the data structure and initialization.
"""
...
--- File: tests\test_gui_events.py ---
import pytest
import sys
import os
from unittest.mock import patch
from typing import Generator, Any
from gui_2 import App
import ai_client
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
@pytest.fixture
def app_instance() -> Generator[App, None, None]:
"""
Fixture to create an instance of the App class for testing.
"""
...
def test_gui_updates_on_event(app_instance: App) -> None:
...
--- File: tests\test_gui_performance_requirements.py ---
import time
import sys
import os
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
def test_idle_performance_requirements(live_gui) -> None:
"""
Requirement: GUI must maintain stable performance on idle.
"""
...
--- File: tests\test_gui_phase3.py ---
import os
import json
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
# Mocking modules that might fail in test env
import sys
sys.modules['imgui_bundle'] = MagicMock()
sys.modules['imgui_bundle.imgui'] = MagicMock()
sys.modules['imgui_bundle.immapp'] = MagicMock()
sys.modules['imgui_bundle.hello_imgui'] = MagicMock()
from gui_2 import App
def test_track_proposal_editing(app_instance):
# Setup some proposed tracks
...
def test_conductor_setup_scan(app_instance, tmp_path):
# Create a mock conductor directory
...
def test_create_track(app_instance, tmp_path):
...
--- File: tests\test_gui_phase4.py ---
import pytest
from unittest.mock import MagicMock, patch
from gui_2 import App
from models import Track
@pytest.fixture(autouse=True)
def setup_mock_app(mock_app: App):
...
def test_add_ticket_logic(mock_app: App):
    # Mock imgui calls to simulate clicking "Create" in the form
    ...
def test_complete_ticket_logic(mock_app: App):  # NOTE(review): name truncated in skeleton ("...ket_logic") — confirm against tests/test_gui_phase4.py
    # Setup tickets
    ...
def test_ticket_toggle(mock_app: App):  # NOTE(review): name truncated in skeleton ("...toggle") — confirm against tests/test_gui_phase4.py
    ...
--- File: tests\test_gui_streaming.py ---
import pytest
from unittest.mock import patch
from gui_2 import App
@pytest.mark.asyncio
async def test_mma_stream_event_routing(app_instance: App):
"""Verifies that 'mma_stream' events from AsyncEventQueue reach mma_streams."""
...
@pytest.mark.asyncio
async def test_mma_stream_multiple_workers(app_instance: App):
"""Verifies that streaming works for multiple concurrent workers."""
...
def test_handle_ai_response_resets_stream(app_instance: App):
"""Verifies that the final handle_ai_response (status=done) replaces/finalizes the stream."""
...
def test_handle_ai_response_streaming(app_instance: App):
"""Verifies that 'handle_ai_response' with status='streaming...' appends to mma_streams."""
...
--- File: tests\test_gui_stress_performance.py ---
import time
import sys
import os
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
def test_comms_volume_stress_performance(live_gui) -> None:
"""
Stress test: Inject many session entries and verify performance doesn't degrade.
"""
...
--- File: tests\test_gui_updates.py ---
import pytest
from unittest.mock import patch
import importlib.util
import sys
import os
from typing import Any
# Ensure project root is in path for imports
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# Load gui_2.py as a module for testing
spec = importlib.util.spec_from_file_location("gui_2", "gui_2.py")
gui_2 = importlib.util.module_from_spec(spec)
sys.modules["gui_2"] = gui_2
spec.loader.exec_module(gui_2)
from gui_2 import App
@pytest.fixture
def app_instance() -> Any:
"""
Fixture to create an instance of the App class for testing.
"""
...
def test_telemetry_data_updates_correctly(app_instance: Any) -> None:
"""
Tests that the _refresh_api_metrics method correctly updates
the internal state for display.
"""
...
def test_cache_data_display_updates_correctly(app_instance: Any) -> None:
"""
Tests that the _refresh_api_metrics method correctly updates the
internal cache text for display.
"""
...
--- File: tests\test_headless_service.py ---
import sys
import unittest
from unittest.mock import patch, MagicMock
import gui_2
import pytest
import importlib
from pathlib import Path
from fastapi.testclient import TestClient
class TestHeadlessAPI(unittest.TestCase):
def setUp(self) -> None:
...
def test_health_endpoint(self) -> None:
...
def test_status_endpoint_unauthorized(self) -> None:
...
def test_status_endpoint_authorized(self) -> None:
...
def test_generate_endpoint(self) -> None:
...
def test_pending_actions_endpoint(self) -> None:
...
def test_confirm_action_endpoint(self) -> None:
...
def test_list_sessions_endpoint(self) -> None:
...
def test_get_context_endpoint(self) -> None:
...
def test_endpoint_no_api_key_configured(self) -> None:
...
class TestHeadlessStartup(unittest.TestCase):
@patch('gui_2.immapp.run')
@patch('gui_2.api_hooks.HookServer')
@patch('gui_2.save_config')
@patch('gui_2.ai_client.cleanup')
@patch('uvicorn.run') # Mock uvicorn.run to prevent hanging
def test_headless_flag_prevents_gui_run(self, mock_uvicorn_run: MagicMock, mock_cleanup: MagicMock, mock_save_config: MagicMock, mock_hook_server: MagicMock, mock_immapp_run: MagicMock) -> None:
...
@patch('gui_2.immapp.run')
def test_normal_startup_calls_gui_run(self, mock_immapp_run: MagicMock) -> None:
...
def test_fastapi_installed() -> None:
"""Verify that fastapi is installed."""
...
def test_uvicorn_installed() -> None:
"""Verify that uvicorn is installed."""
...
if __name__ == "__main__":
unittest.main()
--- File: tests\test_headless_verification.py ---
from typing import Any
import pytest
from unittest.mock import MagicMock, patch
from models import Ticket, Track
import multi_agent_conductor
from multi_agent_conductor import ConductorEngine
@pytest.mark.asyncio
async def test_headless_verification_full_run(vlogger) -> None:
"""
1. Initialize a ConductorEngine with a Track containing multiple dependent Tickets.
2. Simulate a full execution run using engine.run().
3. Mock ai_client.send to simulate successful tool calls and final responses.
4. Specifically verify that 'Context Amnesia' is maintained.
"""
...
@pytest.mark.asyncio
async def test_headless_verification_error_and_qa_interceptor(vlogger) -> None:
"""
5. Simulate a shell error and verify that the Tier 4 QA interceptor is triggered
and its summary is injected into the worker's history for the next retry.
"""
...
--- File: tests\test_history_management.py ---
import sys
import os
import tomli_w
import tomllib
from pathlib import Path
# Ensure project root is in path for imports
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# Import necessary modules from the project
import aggregate
import project_manager
import mcp_client
import ai_client
# --- Tests for Aggregate Module ---
def test_aggregate_includes_segregated_history(tmp_path: Path) -> None:
"""
Tests if the aggregate function correctly includes history
when it's segregated into a separate file.
"""
...
def test_mcp_blacklist(tmp_path: Path) -> None:
"""
Tests that the MCP client correctly blacklists specified files
and prevents listing them.
"""
...
def test_aggregate_blacklist(tmp_path: Path) -> None:
"""
Tests that aggregate's path resolution respects blacklisting,
ensuring history files are not included by default.
"""
...
def test_migration_on_load(tmp_path: Path) -> None:
"""
Tests that project loading migrates discussion history from manual_slop.toml
to manual_slop_history.toml if it exists in the main config.
"""
...
def test_save_separation(tmp_path: Path) -> None:
"""
Tests that saving project data correctly separates discussion history
into manual_slop_history.toml.
"""
...
def test_history_persistence_across_turns(tmp_path: Path) -> None:
"""
Tests that discussion history is correctly persisted across multiple save/load cycles.
"""
...
def test_get_history_bleed_stats_basic() -> None:
"""
Tests basic retrieval of history bleed statistics from the AI client.
"""
...
--- File: tests\test_hooks.py ---
import os
import sys
from unittest.mock import patch
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
import gui_2
def test_hooks_enabled_via_cli() -> None:
...
def test_hooks_disabled_by_default() -> None:
...
def test_live_hook_server_responses(live_gui) -> None:
"""
Verifies the live hook server (started via fixture) responds correctly to all major endpoints.
"""
...
--- File: tests\test_layout_reorganization.py ---
import pytest
from typing import Any
import sys
import os
import importlib.util
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# Load gui_2.py
spec = importlib.util.spec_from_file_location("gui_2", "gui_2.py")
gui_2 = importlib.util.module_from_spec(spec)
sys.modules["gui_2"] = gui_2
spec.loader.exec_module(gui_2)
from gui_2 import App
def test_new_hubs_defined_in_show_windows() -> None:
"""
Verifies that the new consolidated Hub windows are defined in the App's show_windows.
This ensures they will be available in the 'Windows' menu.
"""
...
def test_old_windows_removed_from_gui2(app_instance_simple: Any) -> None:
"""
Verifies that the old fragmented windows are removed or renamed.
"""
...
@pytest.fixture
def app_instance_simple() -> Any:
...
def test_hub_windows_exist_in_gui2(app_instance_simple: Any) -> None:
"""
Verifies that the new Hub windows are present in the show_windows dictionary.
"""
...
def test_indicators_logic_exists(app_instance_simple: Any) -> None:
"""
Verifies that the status indicators logic exists in the App.
"""
...
--- File: tests\test_live_gui_integration.py ---
import pytest
from unittest.mock import patch, ANY
import asyncio
import time
from gui_2 import App
from events import UserRequestEvent
@pytest.mark.timeout(10)
@pytest.mark.asyncio
async def test_user_request_integration_flow(mock_app: App) -> None:
"""
Verifies that pushing a UserRequestEvent to the event_queue:
1. Triggers ai_client.send
2. Results in a 'response' event back to the queue
3. Eventually updates the UI state (ai_response, ai_status) after processing GUI tasks.
"""
...
@pytest.mark.timeout(10)
@pytest.mark.asyncio
async def test_user_request_error_handling(mock_app: App) -> None:
"""
Verifies that if ai_client.send raises an exception, the UI is updated with the error state.
"""
...
--- File: tests\test_live_workflow.py ---
import pytest
import time
import sys
import os
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
@pytest.mark.integration
def test_full_live_workflow(live_gui) -> None:
"""
Integration test that drives the GUI through a full workflow.
"""
...
--- File: tests\test_log_management_ui.py ---
import pytest
from unittest.mock import MagicMock, patch
from pathlib import Path
# We can safely import gui_2 if we don't instantiate App without mocking its threads
from gui_2 import App
@pytest.fixture
def mock_config(tmp_path: Path) -> Path:
...
@pytest.fixture
def mock_project(tmp_path: Path) -> Path:
...
@pytest.fixture
def app_instance(mock_config: Path, mock_project: Path, monkeypatch: pytest.MonkeyPatch) -> App:
...
def test_log_management_init(app_instance: App) -> None:
...
def test_render_log_management_logic(app_instance: App) -> None:
...
--- File: tests\test_log_pruner.py ---
from typing import Tuple
import pytest
from pathlib import Path
from datetime import datetime, timedelta
from log_registry import LogRegistry
from log_pruner import LogPruner
@pytest.fixture
def pruner_setup(tmp_path: Path) -> Tuple[LogPruner, LogRegistry, Path]:
...
def test_prune_old_insignificant_logs(pruner_setup: Tuple[LogPruner, LogRegistry, Path]) -> None:
...
--- File: tests\test_log_registry.py ---
import unittest
import tempfile
import os
from datetime import datetime, timedelta
# This import is expected to fail with ImportError until log_registry.py is created.
from log_registry import LogRegistry
class TestLogRegistry(unittest.TestCase):
def setUp(self) -> None:
"""Set up a temporary directory and registry file for each test."""
...
def tearDown(self) -> None:
"""Clean up the temporary directory and its contents after each test."""
...
def test_instantiation(self) -> None:
"""Test LogRegistry instantiation with a file path."""
...
def test_register_session(self) -> None:
"""Test registering a new session."""
...
def test_update_session_metadata(self) -> None:
"""Test updating session metadata."""
...
def test_is_session_whitelisted(self) -> None:
"""Test checking if a session is whitelisted."""
...
def test_get_old_non_whitelisted_sessions(self) -> None:
"""Test retrieving old, non-whitelisted sessions."""
...
--- File: tests\test_logging_e2e.py ---
import pytest
from typing import Any
from pathlib import Path
from datetime import datetime, timedelta
import session_logger
from log_registry import LogRegistry
from log_pruner import LogPruner
@pytest.fixture
def e2e_setup(tmp_path: Path, monkeypatch: Any) -> Any:
# Ensure closed before starting
...
def test_logging_e2e(e2e_setup: Any) -> None:
...
--- File: tests\test_mcp_perf_tool.py ---
import sys
import os
from unittest.mock import patch
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import mcp_client
def test_mcp_perf_tool_retrieval() -> None:
...
--- File: tests\test_mma_agent_focus_phase1.py ---
"""
Tests for mma_agent_focus_ux_20260302 — Phase 1: Tier Tagging at Emission.
These tests are written RED-first: they fail before implementation.
"""
from typing import Generator
import pytest
from unittest.mock import patch
import ai_client
from gui_2 import App
@pytest.fixture(autouse=True)
def reset_tier():
"""Reset current_tier before and after each test."""
...
# ---------------------------------------------------------------------------
# Task 1.1 / 1.2: current_tier variable and source_tier in _append_comms
# ---------------------------------------------------------------------------
def test_current_tier_variable_exists():
"""ai_client must expose a module-level current_tier variable."""
...
def test_append_comms_has_source_tier_key():
"""_append_comms entries must contain a 'source_tier' key."""
...
def test_append_comms_source_tier_none_when_unset():
"""source_tier must be None when current_tier is not set."""
...
def test_append_comms_source_tier_set_when_current_tier_set():
"""source_tier must reflect current_tier when it is set."""
...
def test_append_comms_source_tier_tier2():
"""source_tier must reflect Tier 2 when current_tier = 'Tier 2'."""
...
# ---------------------------------------------------------------------------
# Task 1.5: _tool_log stores dicts with source_tier
# ---------------------------------------------------------------------------
def test_append_tool_log_stores_dict(app_instance):
"""_append_tool_log must store a dict, not a tuple."""
...
def test_append_tool_log_dict_has_source_tier(app_instance):
"""Dict entry must have 'source_tier' key."""
...
def test_append_tool_log_dict_keys(app_instance):
"""Dict entry must have script, result, ts, source_tier keys."""
...
--- File: tests\test_mma_agent_focus_phase3.py ---
"""
Tests for mma_agent_focus_ux_20260302 — Phase 3: Focus Agent UI + Filter Logic.
"""
from typing import Generator
import pytest
from unittest.mock import patch
from gui_2 import App
def test_ui_focus_agent_state_var_exists(app_instance):
"""App.__init__ must expose ui_focus_agent: str | None = None."""
...
def test_tool_log_filter_all(app_instance):
"""When ui_focus_agent is None, all tool log entries are visible."""
...
def test_tool_log_filter_tier3_only(app_instance):
"""When ui_focus_agent='Tier 3', only Tier 3 entries are shown."""
...
def test_tool_log_filter_excludes_none_tier(app_instance):
"""Filtering to Tier 2 excludes entries with source_tier=None."""
...
def test_comms_log_filter_tier3_only(app_instance):
"""When ui_focus_agent='Tier 3', comms filter excludes other tiers."""
...
def test_comms_log_filter_not_applied_for_prior_session(app_instance):
"""Focus filter must NOT apply when viewing prior session log."""
...
--- File: tests\test_mma_approval_indicators.py ---
from __future__ import annotations
from unittest.mock import patch, MagicMock
from gui_2 import App
def _make_app(**kwargs):
...
def _make_imgui_mock():
...
def _collect_text_colored_args(imgui_mock):
"""Return a single joined string of all text_colored second-arg strings."""
...
class TestMMAApprovalIndicators:
def test_no_approval_badge_when_idle(self):
"""No 'APPROVAL PENDING' badge when all pending attrs are None/False."""
...
def test_approval_badge_shown_when_spawn_pending(self):
"""'APPROVAL PENDING' badge must appear when _pending_mma_spawn is set."""
...
def test_approval_badge_shown_when_mma_approval_pending(self):
"""'APPROVAL PENDING' badge must appear when _pending_mma_approval is set."""
...
def test_approval_badge_shown_when_ask_dialog_pending(self):
"""'APPROVAL PENDING' badge must appear when _pending_ask_dialog is True."""
...
--- File: tests\test_mma_dashboard_refresh.py ---
import pytest
from unittest.mock import patch, MagicMock
from typing import Any
from gui_2 import App
@pytest.fixture
def app_instance() -> Any:
# We patch the dependencies of App.__init__ to avoid side effects
...
def test_mma_dashboard_refresh(app_instance: Any) -> None:
...
def test_mma_dashboard_initialization_refresh(app_instance: Any) -> None:
"""
Checks that _refresh_from_project is called during initialization if
_load_active_project is NOT mocked to skip it (but here it IS mocked in fixture).
This test verifies that calling it manually works as expected for initialization scenarios.
"""
...
--- File: tests\test_mma_dashboard_streams.py ---
from __future__ import annotations
from unittest.mock import patch, MagicMock
from gui_2 import App
def _make_app(**kwargs):
...
def _make_imgui_mock():
...
class TestMMADashboardStreams:
def test_tier1_renders_stream_content(self):
"""_render_tier_stream_panel for Tier 1 must call text_wrapped with the stream content."""
...
def test_tier3_renders_worker_subheaders(self):
"""_render_tier_stream_panel for Tier 3 must render a sub-header for each worker stream key."""
...
--- File: tests\test_mma_models.py ---
from models import Ticket, Track, WorkerContext
def test_ticket_instantiation() -> None:
"""
Verifies that a Ticket can be instantiated with its required fields:
id, description, status, assigned_to.
"""
...
def test_ticket_with_dependencies() -> None:
"""
Verifies that a Ticket can store dependencies.
"""
...
def test_track_instantiation() -> None:
"""
Verifies that a Track can be instantiated with its required fields:
id, description, and a list of Tickets.
"""
...
def test_track_can_handle_empty_tickets() -> None:
"""
Verifies that a Track can be instantiated with an empty list of tickets.
"""
...
def test_worker_context_instantiation() -> None:
"""
Verifies that a WorkerContext can be instantiated with ticket_id,
model_name, and messages.
"""
...
def test_ticket_mark_blocked() -> None:
"""
Verifies that ticket.mark_blocked(reason) sets the status to 'blocked'.
Note: The reason field might need to be added to the Ticket class.
"""
...
def test_ticket_mark_complete() -> None:
"""
Verifies that ticket.mark_complete() sets the status to 'completed'.
"""
...
def test_track_get_executable_tickets() -> None:
"""
Verifies that track.get_executable_tickets() returns only 'todo' tickets
whose dependencies are all 'completed'.
"""
...
def test_track_get_executable_tickets_complex() -> None:
"""
Verifies executable tickets with complex dependency chains.
Chain: T1 (comp) -> T2 (todo) -> T3 (todo)
T4 (comp) -> T3
T5 (todo) -> T3
"""
...
--- File: tests\test_mma_orchestration_gui.py ---
import pytest
import json
from unittest.mock import patch
import time
from gui_2 import App
def test_mma_ui_state_initialization(app_instance: App) -> None:
"""Verifies that the new MMA UI state variables are initialized correctly."""
...
def test_process_pending_gui_tasks_show_track_proposal(app_instance: App) -> None:
"""Verifies that the 'show_track_proposal' action correctly updates the UI state."""
...
def test_cb_plan_epic_launches_thread(app_instance: App) -> None:
"""Verifies that _cb_plan_epic launches a thread and eventually queues a task."""
...
def test_process_pending_gui_tasks_mma_spawn_approval(app_instance: App) -> None:
"""Verifies that the 'mma_spawn_approval' action correctly updates the UI state."""
...
def test_handle_ai_response_with_stream_id(app_instance: App) -> None:
"""Verifies routing to mma_streams."""
...
def test_handle_ai_response_fallback(app_instance: App) -> None:
"""Verifies fallback to ai_response when stream_id is missing."""
...
--- File: tests\test_mma_prompts.py ---
from mma_prompts import PROMPTS
def test_tier1_epic_init_constraints() -> None:
...
def test_tier1_track_delegation_constraints() -> None:
...
def test_tier1_macro_merge_constraints() -> None:
...
def test_tier2_sprint_planning_constraints() -> None:
...
def test_tier2_code_review_constraints() -> None:
...
def test_tier2_track_finalization_constraints() -> None:
...
def test_tier2_contract_first_constraints() -> None:
...
--- File: tests\test_mma_ticket_actions.py ---
from typing import Generator
import pytest
from unittest.mock import patch, MagicMock
from gui_2 import App
def test_cb_ticket_retry(app_instance: App) -> None:
...
def test_cb_ticket_skip(app_instance: App) -> None:
...
--- File: tests\test_mock_gemini_cli.py ---
import subprocess
import json
def get_message_content(stdout):
...
def run_mock(prompt):
...
def test_epic_prompt_returns_track_json():
...
def test_sprint_prompt_returns_ticket_json():
...
def test_worker_prompt_returns_plain_text():
...
def test_tool_result_prompt_returns_plain_text():
...
--- File: tests\test_orchestration_logic.py ---
import pytest
from unittest.mock import patch
import json
from typing import Any
import orchestrator_pm
import conductor_tech_lead
import multi_agent_conductor
from models import Track, Ticket
@pytest.fixture
def mock_ai_client() -> Any:
...
def test_generate_tracks(mock_ai_client: Any) -> None:
# Tier 1 (PM) response mock
...
def test_generate_tickets(mock_ai_client: Any) -> None:
...
def test_topological_sort() -> None:
...
def test_topological_sort_circular() -> None:
...
def test_track_executable_tickets() -> None:
...
@pytest.mark.asyncio
async def test_conductor_engine_run(vlogger) -> None:
...ine_parse_json_tickets() -> None:
...
def test_run_worker_lifecycle_blocked(mock_ai_client: Any) -> None:
...
--- File: tests\test_orchestrator_pm.py ---
import unittest
from typing import Any
from unittest.mock import patch
import json
import orchestrator_pm
import mma_prompts
class TestOrchestratorPM(unittest.TestCase):
@patch('summarize.build_summary_markdown')
@patch('ai_client.send')
def test_generate_tracks_success(self, mock_send: Any, mock_summarize: Any) -> None:
# Setup mocks
...
@patch('summarize.build_summary_markdown')
@patch('ai_client.send')
def test_generate_tracks_markdown_wrapped(self, mock_send: Any, mock_summarize: Any) -> None:
...
@patch('summarize.build_summary_markdown')
@patch('ai_client.send')
def test_generate_tracks_malformed_json(self, mock_send: Any, mock_summarize: Any) -> None:
...
if __name__ == '__main__':
unittest.main()
--- File: tests\test_orchestrator_pm_history.py ---
import unittest
from unittest.mock import patch, MagicMock
import shutil
import json
from pathlib import Path
import orchestrator_pm
class TestOrchestratorPMHistory(unittest.TestCase):
def setUp(self) -> None:
...
def tearDown(self) -> None:
...
def create_track(self, parent_dir: Path, track_id: str, title: str, status: str, overview: str) -> None:
...
@patch('orchestrator_pm.CONDUCTOR_PATH', Path("test_conductor"))
def test_get_track_history_summary(self) -> None:
...
@patch('orchestrator_pm.CONDUCTOR_PATH', Path("test_conductor"))
def test_get_track_history_summary_missing_files(self) -> None:
...
@patch('orchestrator_pm.summarize.build_summary_markdown')
@patch('ai_client.send')
def test_generate_tracks_with_history(self, mock_send: MagicMock, mock_summarize: MagicMock) -> None:
...
if __name__ == '__main__':
unittest.main()
--- File: tests\test_performance_monitor.py ---
import sys
import os
import time
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from performance_monitor import PerformanceMonitor
def test_perf_monitor_basic_timing() -> None:
...
def test_perf_monitor_component_timing() -> None:
...
--- File: tests\test_phase6_engine.py ---
import pytest
from unittest.mock import MagicMock, patch, AsyncMock
from multi_agent_conductor import ConductorEngine, run_worker_lifecycle
from models import Ticket, Track, WorkerContext
def test_worker_streaming_intermediate():
...
--- File: tests\test_process_pending_gui_tasks.py ---
from typing import Generator
import pytest
from unittest.mock import patch
import ai_client
from gui_2 import App
@pytest.fixture
def app_instance() -> Generator[App, None, None]:
...
def test_redundant_calls_in_process_pending_gui_tasks(app_instance: App) -> None:
...
def test_gcli_path_updates_adapter(app_instance: App) -> None:
...
--- File: tests\test_project_manager_tracks.py ---
import pytest
from typing import Any
import json
from project_manager import get_all_tracks, save_track_state
from models import TrackState, Metadata, Ticket
from datetime import datetime
def test_get_all_tracks_empty(tmp_path: Any) -> None:
...
def test_get_all_tracks_with_state(tmp_path: Any) -> None:
...
def test_get_all_tracks_with_metadata_json(tmp_path: Any) -> None:
...
def test_get_all_tracks_malformed(tmp_path: Any) -> None:
...
--- File: tests\test_session_logging.py ---
import pytest
import tomllib
from pathlib import Path
from typing import Generator
import session_logger
@pytest.fixture
def temp_logs(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> Generator[Path, None, None]:
# Ensure closed before starting
...
def test_open_session_creates_subdir_and_registry(temp_logs: Path) -> None:
...
--- File: tests\test_sim_ai_settings.py ---
from unittest.mock import MagicMock, patch
import os
import sys
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.sim_ai_settings import AISettingsSimulation
def test_ai_settings_simulation_run() -> None:
...
--- File: tests\test_sim_base.py ---
from unittest.mock import MagicMock, patch
import os
import sys
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.sim_base import BaseSimulation
def test_base_simulation_init() -> None:
...
def test_base_simulation_setup() -> None:
...
--- File: tests\test_sim_context.py ---
from unittest.mock import MagicMock, patch
import os
import sys
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.sim_context import ContextSimulation
def test_context_simulation_run() -> None:
...
--- File: tests\test_sim_execution.py ---
from unittest.mock import MagicMock, patch
import os
import sys
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.sim_execution import ExecutionSimulation
def test_execution_simulation_run() -> None:
...
--- File: tests\test_sim_tools.py ---
from unittest.mock import MagicMock, patch
import os
import sys
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.sim_tools import ToolsSimulation
def test_tools_simulation_run() -> None:
...
--- File: tests\test_spawn_interception.py ---
import pytest
from unittest.mock import MagicMock, patch
import multi_agent_conductor
from models import Ticket, WorkerContext
import events
import asyncio
import concurrent.futures
class MockDialog:
def __init__(self, approved: bool, final_payload: dict | None = None) -> None:
...
def wait(self) -> dict:
...
@pytest.fixture
def mock_ai_client() -> None:
...
@pytest.mark.asyncio
async def test_confirm_spawn_pushed_to_queue() -> None:
... MagicMock, mock_ai_client: MagicMock, app_instance) -> None:
...
@patch("multi_agent_conductor.confirm_spawn")
def test_run_worker_lifecycle_rejected(mock_confirm: MagicMock, mock_ai_client: MagicMock, app_instance) -> None:
...
--- File: tests\test_sync_hooks.py ---
import threading
import time
import requests
from api_hook_client import ApiHookClient
def test_api_ask_client_method(live_gui) -> None:
"""
Tests the request_confirmation method in ApiHookClient.
"""
...
--- File: tests\test_tier4_interceptor.py ---
from unittest.mock import MagicMock, patch
from shell_runner import run_powershell
def test_run_powershell_qa_callback_on_failure(vlogger) -> None:
"""
Test that qa_callback is called when a powershell command fails (non-zero exit code).
The result of the callback should be appended to the output.
"""
...
def test_run_powershell_qa_callback_on_stderr_only(vlogger) -> None:
"""
Test that qa_callback is called when a command has stderr even if exit code is 0.
"""
...
def test_run_powershell_no_qa_callback_on_success() -> None:
"""
Test that qa_callback is NOT called when the command succeeds without stderr.
"""
...
def test_run_powershell_optional_qa_callback() -> None:
"""
Test that run_powershell still works without providing a qa_callback.
"""
...
def test_end_to_end_tier4_integration(vlogger) -> None:
"""
Verifies that shell_runner.run_powershell correctly uses ai_client.run_tier4_analysis.
"""
...
def test_ai_client_passes_qa_callback() -> None:
"""
Verifies that ai_client.send passes the qa_callback down to the provider function.
"""
...
def test_gemini_provider_passes_qa_callback_to_run_script() -> None:
"""
Verifies that _send_gemini passes the qa_callback to _run_script.
"""
...
--- File: tests\test_tiered_context.py ---
from typing import Any
from pathlib import Path
from aggregate import build_tier1_context, build_tier2_context, build_tier3_context
def test_build_tier1_context_exists() -> None:
...
def test_build_tier2_context_exists() -> None:
...
def test_build_tier3_context_ast_skeleton(monkeypatch: Any) -> None:
...
def test_build_tier3_context_exists() -> None:
...
def test_build_file_items_with_tiers(tmp_path: Any) -> None:
...
def test_build_files_section_with_dicts(tmp_path: Any) -> None:
...
def test_tiered_context_by_tier_field() -> None:
...
--- File: tests\test_token_usage.py ---
import pytest
import sys
import os
import hashlib
from unittest.mock import patch, MagicMock
from types import SimpleNamespace
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import ai_client
def test_token_usage_tracking() -> None:
...
--- File: tests\test_token_viz.py ---
"""Tests for context & token visualization (Track: context_token_viz_20260301)."""
from typing import Generator
from unittest.mock import patch
import pytest
import ai_client
from ai_client import _add_bleed_derived, get_history_bleed_stats
from gui_2 import App
# --- _add_bleed_derived unit tests ---
def test_add_bleed_derived_aliases() -> None:
...
def test_add_bleed_derived_headroom() -> None:
...
def test_add_bleed_derived_would_trim_false() -> None:
...
def test_add_bleed_derived_would_trim_true() -> None:
...
def test_add_bleed_derived_breakdown() -> None:
...
def test_add_bleed_derived_history_clamped_to_zero() -> None:
"""history_tokens should not go negative when sys+tool > current."""
...
def test_add_bleed_derived_headroom_clamped_to_zero() -> None:
...
# --- get_history_bleed_stats returns all required keys ---
REQUIRED_KEYS = [
"provider", "limit", "current", "percentage",
"estimated_prompt_tokens", "max_prompt_tokens", "utilization_pct",
"headroom_tokens", "would_trim", "system_tokens", "tools_tokens", "history_tokens",
]
def test_get_history_bleed_stats_returns_all_keys_unknown_provider() -> None:
"""Fallback path (unknown provider) must still return all derived keys."""
...
# --- App initialization ---
def test_app_token_stats_initialized_empty(app_instance: App) -> None:
...
def test_app_last_stable_md_initialized_empty(app_instance: App) -> None:
...
def test_app_has_render_token_budget_panel(app_instance: App) -> None:
...
def test_render_token_budget_panel_empty_stats_no_crash(app_instance: App) -> None:
"""With empty _token_stats, _render_token_budget_panel must not raise."""
...
# --- Trim warning logic ---
def test_would_trim_boundary_exact() -> None:
"""would_trim is False when headroom == 20000 (threshold is strictly < 20000)."""
...
def test_would_trim_just_below_threshold() -> None:
...
def test_would_trim_just_above_threshold() -> None:
...
# --- Cache status fields available from ai_client ---
def test_gemini_cache_fields_accessible() -> None:
"""_gemini_cache, _gemini_cache_created_at, _GEMINI_CACHE_TTL must be accessible."""
...
def test_anthropic_history_lock_accessible() -> None:
"""_anthropic_history_lock must be accessible for cache hint rendering."""
...
--- File: tests\test_track_state_persistence.py ---
from datetime import datetime
# Import the real models
from models import TrackState, Metadata, Ticket
# Import the persistence functions from project_manager
from project_manager import save_track_state, load_track_state
def test_track_state_persistence(tmp_path) -> None:
"""
Tests saving and loading a TrackState object to/from a TOML file.
1. Create a TrackState object with sample metadata, discussion, and tasks.
2. Call save_track_state('test_track', state, base_dir).
3. Verify that base_dir/conductor/tracks/test_track/state.toml exists.
4. Call load_track_state('test_track', base_dir) and verify it returns an identical TrackState object.
"""
...
--- File: tests\test_track_state_schema.py ---
from datetime import datetime, timezone, timedelta
# Import necessary classes from models.py
from models import Metadata, TrackState, Ticket
# --- Pytest Tests ---
def test_track_state_instantiation() -> None:
"""Test creating a TrackState object."""
...
def test_track_state_to_dict() -> None:
"""Test the to_dict() method for serialization."""
...
def test_track_state_from_dict() -> None:
"""Test the from_dict() class method for deserialization."""
...
def test_track_state_from_dict_empty_and_missing() -> None:
"""Test from_dict with empty lists and missing optional keys."""
...
def test_track_state_to_dict_with_none() -> None:
"""Test to_dict with None values in optional fields."""
...
--- File: tests\test_tree_sitter_setup.py ---
import tree_sitter_python as tspython
from tree_sitter import Language, Parser
def test_tree_sitter_python_setup() -> None:
"""
Verifies that tree-sitter and tree-sitter-python are correctly installed
and can parse a simple Python function string.
"""
...
--- File: tests\test_user_agent.py ---
import sys
import os
# Ensure project root is in path for imports
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.user_agent import UserSimAgent
def test_user_agent_instantiation() -> None:
...
def test_perform_action_with_delay() -> None:
...
--- File: tests\test_visual_mma.py ---
import pytest
import time
from api_hook_client import ApiHookClient
def test_visual_mma_components(live_gui):
"""
Refactored visual MMA verification using the live_gui fixture.
Ensures the MMA dashboard and tickets are correctly rendered.
"""
...
--- File: tests\test_visual_orchestration.py ---
import pytest
import time
import sys
import os
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
@pytest.mark.integration
def test_mma_epic_lifecycle(live_gui) -> None:
"""
Integration test for the full MMA Epic lifecycle.
1. Start App.
2. Trigger 'New Epic' request.
3. Verify Tier 1 generates tracks.
4. Trigger 'Start Track' for one of the tracks.
5. Verify Tier 2 generates tickets.
6. Verify execution loop starts.
"""
...
if __name__ == "__main__":
# If run directly, try to use pytest
import subprocess
# Using sys.executable to ensure we use the same environment
subprocess.run([sys.executable, "-m", "pytest", "-v", __file__])
--- File: tests\test_visual_sim_gui_ux.py ---
import pytest
import time
import sys
import os
import json
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
@pytest.mark.integration
@pytest.mark.timeout(60)
def test_gui_ux_event_routing(live_gui) -> None:
...
@pytest.mark.integration
@pytest.mark.timeout(60)
def test_gui_track_creation(live_gui) -> None:
...
if __name__ == "__main__":
pass
--- File: tests\test_visual_sim_mma_v2.py ---
import pytest
import time
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _drain_approvals(client: ApiHookClient, status: dict) -> None:
"""Auto-approve any pending approval gate found in status."""
...
def _poll(client: ApiHookClient, timeout: int, condition, label: str) -> tuple[bool, dict]:
"""Poll get_mma_status() until condition(status) is True or timeout."""
...
# ---------------------------------------------------------------------------
# Test
# ---------------------------------------------------------------------------
@pytest.mark.integration
@pytest.mark.timeout(300)
def test_mma_complete_lifecycle(live_gui) -> None:
"""
End-to-end MMA lifecycle using real Gemini API (gemini-2.5-flash-lite).
Incorporates frame-sync sleeps and explicit state-transition waits per
simulation_hardening_20260301 spec (Issues 2 & 3).
"""
...
--- File: tests\test_vlogger_availability.py ---
import pytest
def test_vlogger_available(vlogger):
...
--- File: tests\test_workflow_sim.py ---
import sys
import os
from unittest.mock import MagicMock
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.workflow_sim import WorkflowSimulator
def test_simulator_instantiation() -> None:
...
def test_setup_new_project() -> None:
...
def test_discussion_switching() -> None:
...
def test_history_truncation() -> None:
...
--- File: theme.py ---
# theme.py
"""
Theming support for manual_slop GUI.
Palettes
--------
Each palette is a dict mapping semantic names to (R,G,B) or (R,G,B,A) tuples.
The names correspond to dpg theme colour / style constants.
Font handling
-------------
Call apply_font(path, size) to load a TTF and bind it as the global default.
Call set_scale(factor) to set the global font scale (DPI scaling).
Usage
-----
import theme
theme.apply("10x") # apply a named palette
theme.apply_font("C:/Windows/Fonts/CascadiaCode.ttf", 15)
theme.set_scale(1.25)
"""
import dearpygui.dearpygui as dpg
from pathlib import Path
# ------------------------------------------------------------------ palettes
# Colour key names match the DPG mvThemeCol_* constants (string lookup below).
# Only keys that differ from DPG defaults need to be listed.
_PALETTES: dict[str, dict] = {
"DPG Default": {}, # empty = reset to DPG built-in defaults
"10x Dark": {
# Window / frame chrome
"WindowBg": ( 34, 32, 28),
"ChildBg": ( 30, 28, 24),
"PopupBg": ( 35, 30, 20),
"Border": ( 60, 55, 50),
"BorderShadow": ( 0, 0, 0, 0),
"FrameBg": ( 45, 42, 38),
"FrameBgHovered": ( 60, 56, 50),
"FrameBgActive": ( 75, 70, 62),
# Title bars
"TitleBg": ( 40, 35, 25),
"TitleBgActive": ( 60, 45, 15),
"TitleBgCollapsed": ( 30, 27, 20),
# Menu bar
"MenuBarBg": ( 35, 30, 20),
# Scrollbar
"ScrollbarBg": ( 30, 28, 24),
"ScrollbarGrab": ( 80, 78, 72),
"ScrollbarGrabHovered": (100, 100, 92),
"ScrollbarGrabActive": (120, 118, 110),
# Check marks / radio buttons
"CheckMark": (194, 164, 74),
# Sliders
"SliderGrab": (126, 78, 14),
"SliderGrabActive": (194, 140, 30),
# Buttons
"Button": ( 83, 76, 60),
"ButtonHovered": (126, 78, 14),
"ButtonActive": (115, 90, 70),
# Headers (collapsing headers, selectables, listbox items)
"Header": ( 83, 76, 60),
"HeaderHovered": (126, 78, 14),
"HeaderActive": (115, 90, 70),
# Separator
"Separator": ( 70, 65, 55),
"SeparatorHovered": (126, 78, 14),
"SeparatorActive": (194, 164, 74),
# Resize grip
"ResizeGrip": ( 60, 55, 44),
"ResizeGripHovered": (126, 78, 14),
"ResizeGripActive": (194, 164, 74),
# Tab bar
"Tab": ( 83, 83, 70),
"TabHovered": (126, 77, 25),
"TabActive": (126, 77, 25),
"TabUnfocused": ( 60, 58, 50),
"TabUnfocusedActive": ( 90, 80, 55),
# Docking
"DockingPreview": (126, 78, 14, 180),
"DockingEmptyBg": ( 20, 20, 20),
# Text
"Text": (200, 200, 200),
"TextDisabled": (130, 130, 120),
# Input text cursor / selection
"TextSelectedBg": ( 59, 86, 142, 180),
# Plot / table lines
"TableHeaderBg": ( 55, 50, 38),
"TableBorderStrong": ( 70, 65, 55),
"TableBorderLight": ( 50, 47, 42),
"TableRowBg": ( 0, 0, 0, 0),
"TableRowBgAlt": ( 40, 38, 34, 40),
# Misc
"NavHighlight": (126, 78, 14),
"NavWindowingHighlight":(194, 164, 74, 180),
"NavWindowingDimBg": ( 20, 20, 20, 80),
"ModalWindowDimBg": ( 10, 10, 10, 100),
},
"Nord Dark": {
"WindowBg": ( 36, 41, 49),
"ChildBg": ( 30, 34, 42),
"PopupBg": ( 36, 41, 49),
"Border": ( 59, 66, 82),
"BorderShadow": ( 0, 0, 0, 0),
"FrameBg": ( 46, 52, 64),
"FrameBgHovered": ( 59, 66, 82),
"FrameBgActive": ( 67, 76, 94),
"TitleBg": ( 36, 41, 49),
"TitleBgActive": ( 59, 66, 82),
"TitleBgCollapsed": ( 30, 34, 42),
"MenuBarBg": ( 46, 52, 64),
"ScrollbarBg": ( 30, 34, 42),
"ScrollbarGrab": ( 76, 86, 106),
"ScrollbarGrabHovered": ( 94, 129, 172),
"ScrollbarGrabActive": (129, 161, 193),
"CheckMark": (136, 192, 208),
"SliderGrab": ( 94, 129, 172),
"SliderGrabActive": (129, 161, 193),
"Button": ( 59, 66, 82),
"ButtonHovered": ( 94, 129, 172),
"ButtonActive": (129, 161, 193),
"Header": ( 59, 66, 82),
"HeaderHovered": ( 94, 129, 172),
"HeaderActive": (129, 161, 193),
"Separator": ( 59, 66, 82),
"SeparatorHovered": ( 94, 129, 172),
"SeparatorActive": (136, 192, 208),
"ResizeGrip": ( 59, 66, 82),
"ResizeGripHovered": ( 94, 129, 172),
"ResizeGripActive": (136, 192, 208),
"Tab": ( 46, 52, 64),
"TabHovered": ( 94, 129, 172),
"TabActive": ( 76, 86, 106),
"TabUnfocused": ( 36, 41, 49),
"TabUnfocusedActive": ( 59, 66, 82),
"DockingPreview": ( 94, 129, 172, 180),
"DockingEmptyBg": ( 20, 22, 28),
"Text": (216, 222, 233),
"TextDisabled": (116, 128, 150),
"TextSelectedBg": ( 94, 129, 172, 180),
"TableHeaderBg": ( 59, 66, 82),
"TableBorderStrong": ( 76, 86, 106),
"TableBorderLight": ( 59, 66, 82),
"TableRowBg": ( 0, 0, 0, 0),
"TableRowBgAlt": ( 46, 52, 64, 40),
"NavHighlight": (136, 192, 208),
"ModalWindowDimBg": ( 10, 12, 16, 100),
},
"Monokai": {
"WindowBg": ( 39, 40, 34),
"ChildBg": ( 34, 35, 29),
"PopupBg": ( 39, 40, 34),
"Border": ( 60, 61, 52),
"BorderShadow": ( 0, 0, 0, 0),
"FrameBg": ( 50, 51, 44),
"FrameBgHovered": ( 65, 67, 56),
"FrameBgActive": ( 80, 82, 68),
"TitleBg": ( 39, 40, 34),
"TitleBgActive": ( 73, 72, 62),
"TitleBgCollapsed": ( 30, 31, 26),
"MenuBarBg": ( 50, 51, 44),
"ScrollbarBg": ( 34, 35, 29),
"ScrollbarGrab": ( 80, 80, 72),
"ScrollbarGrabHovered": (102, 217, 39),
"ScrollbarGrabActive": (166, 226, 46),
"CheckMark": (166, 226, 46),
"SliderGrab": (102, 217, 39),
"SliderGrabActive": (166, 226, 46),
"Button": ( 73, 72, 62),
"ButtonHovered": (249, 38, 114),
"ButtonActive": (198, 30, 92),
"Header": ( 73, 72, 62),
"HeaderHovered": (249, 38, 114),
"HeaderActive": (198, 30, 92),
"Separator": ( 60, 61, 52),
"SeparatorHovered": (249, 38, 114),
"SeparatorActive": (166, 226, 46),
"ResizeGrip": ( 73, 72, 62),
"ResizeGripHovered": (249, 38, 114),
"ResizeGripActive": (166, 226, 46),
"Tab": ( 73, 72, 62),
"TabHovered": (249, 38, 114),
"TabActive": (249, 38, 114),
"TabUnfocused": ( 50, 51, 44),
"TabUnfocusedActive": ( 90, 88, 76),
"DockingPreview": (249, 38, 114, 180),
"DockingEmptyBg": ( 20, 20, 18),
"Text": (248, 248, 242),
"TextDisabled": (117, 113, 94),
"TextSelectedBg": (249, 38, 114, 150),
"TableHeaderBg": ( 60, 61, 52),
"TableBorderStrong": ( 73, 72, 62),
"TableBorderLight": ( 55, 56, 48),
"TableRowBg": ( 0, 0, 0, 0),
"TableRowBgAlt": ( 50, 51, 44, 40),
"NavHighlight": (166, 226, 46),
"ModalWindowDimBg": ( 10, 10, 8, 100),
},
}
PALETTE_NAMES: list[str] = list(_PALETTES.keys())
# ------------------------------------------------------------------ colour key -> mvThemeCol_* mapping
# Maps our friendly name -> dpg constant name
_COL_MAP: dict[str, str] = {
"Text": "mvThemeCol_Text",
"TextDisabled": "mvThemeCol_TextDisabled",
"WindowBg": "mvThemeCol_WindowBg",
"ChildBg": "mvThemeCol_ChildBg",
"PopupBg": "mvThemeCol_PopupBg",
"Border": "mvThemeCol_Border",
"BorderShadow": "mvThemeCol_BorderShadow",
"FrameBg": "mvThemeCol_FrameBg",
"FrameBgHovered": "mvThemeCol_FrameBgHovered",
"FrameBgActive": "mvThemeCol_FrameBgActive",
"TitleBg": "mvThemeCol_TitleBg",
"TitleBgActive": "mvThemeCol_TitleBgActive",
"TitleBgCollapsed": "mvThemeCol_TitleBgCollapsed",
"MenuBarBg": "mvThemeCol_MenuBarBg",
"ScrollbarBg": "mvThemeCol_ScrollbarBg",
"ScrollbarGrab": "mvThemeCol_ScrollbarGrab",
"ScrollbarGrabHovered": "mvThemeCol_ScrollbarGrabHovered",
"ScrollbarGrabActive": "mvThemeCol_ScrollbarGrabActive",
"CheckMark": "mvThemeCol_CheckMark",
"SliderGrab": "mvThemeCol_SliderGrab",
"SliderGrabActive": "mvThemeCol_SliderGrabActive",
"Button": "mvThemeCol_Button",
"ButtonHovered": "mvThemeCol_ButtonHovered",
"ButtonActive": "mvThemeCol_ButtonActive",
"Header": "mvThemeCol_Header",
"HeaderHovered": "mvThemeCol_HeaderHovered",
"HeaderActive": "mvThemeCol_HeaderActive",
"Separator": "mvThemeCol_Separator",
"SeparatorHovered": "mvThemeCol_SeparatorHovered",
"SeparatorActive": "mvThemeCol_SeparatorActive",
"ResizeGrip": "mvThemeCol_ResizeGrip",
"ResizeGripHovered": "mvThemeCol_ResizeGripHovered",
"ResizeGripActive": "mvThemeCol_ResizeGripActive",
"Tab": "mvThemeCol_Tab",
"TabHovered": "mvThemeCol_TabHovered",
"TabActive": "mvThemeCol_TabActive",
"TabUnfocused": "mvThemeCol_TabUnfocused",
"TabUnfocusedActive": "mvThemeCol_TabUnfocusedActive",
"DockingPreview": "mvThemeCol_DockingPreview",
"DockingEmptyBg": "mvThemeCol_DockingEmptyBg",
"TextSelectedBg": "mvThemeCol_TextSelectedBg",
"TableHeaderBg": "mvThemeCol_TableHeaderBg",
"TableBorderStrong": "mvThemeCol_TableBorderStrong",
"TableBorderLight": "mvThemeCol_TableBorderLight",
"TableRowBg": "mvThemeCol_TableRowBg",
"TableRowBgAlt": "mvThemeCol_TableRowBgAlt",
"NavHighlight": "mvThemeCol_NavHighlight",
"NavWindowingHighlight": "mvThemeCol_NavWindowingHighlight",
"NavWindowingDimBg": "mvThemeCol_NavWindowingDimBg",
"ModalWindowDimBg": "mvThemeCol_ModalWindowDimBg",
}
# ------------------------------------------------------------------ state
_current_theme_tag: str | None = None
_current_font_tag: str | None = None
_font_registry_tag: str | None = None
_current_palette: str = "DPG Default"
_current_font_path: str = ""
_current_font_size: float = 14.0
_current_scale: float = 1.0
# ------------------------------------------------------------------ public API
def get_palette_names() -> list[str]:
...
def get_current_palette() -> str:
...
def get_current_font_path() -> str:
...
def get_current_font_size() -> float:
...
def get_current_scale() -> float:
...
def get_palette_colours(name: str) -> dict:
"""Return a copy of the colour dict for the named palette."""
...
def apply(palette_name: str, overrides: dict | None = None) -> None:
"""
Build a global DPG theme from the named palette plus optional per-colour
overrides, and bind it as the default theme.
overrides: {colour_key: (R,G,B) or (R,G,B,A)} — merged on top of palette.
"""
...
def apply_font(font_path: str, size: float = 14.0) -> None:
"""
Load the TTF at font_path at the given point size and bind it globally.
Safe to call multiple times. Uses a single persistent font_registry; only
the font *item* tag is tracked. Passing an empty path or a missing file
resets to the DPG built-in font.
"""
...
def set_scale(factor: float) -> None:
"""Set the global Dear PyGui font/UI scale factor."""
...
def save_to_config(config: dict) -> None:
"""Persist theme settings into the config dict under [theme]."""
...
def load_from_config(config: dict) -> None:
"""Read [theme] from config and apply everything."""
...
--- File: theme_2.py ---
# theme_2.py
"""
Theming support for manual_slop GUI — imgui-bundle port.
Replaces theme.py (DearPyGui-specific) with imgui-bundle equivalents.
Palettes are applied via imgui.get_style().set_color_() calls.
Font loading uses hello_imgui.load_font().
Scale uses imgui.get_style().font_scale_main.
"""
from imgui_bundle import imgui
# ------------------------------------------------------------------ palettes
# Each palette maps imgui color enum values to (R, G, B, A) floats [0..1].
# Only keys that differ from the ImGui dark defaults need to be listed.
def _c(r: int, g: int, b: int, a: int = 255) -> tuple[float, float, float, float]:
"""Convert 0-255 RGBA to 0.0-1.0 floats."""
...
_PALETTES: dict[str, dict[int, tuple]] = {
"ImGui Dark": {}, # empty = use imgui dark defaults
"10x Dark": {
imgui.Col_.window_bg: _c( 34, 32, 28),
imgui.Col_.child_bg: _c( 30, 28, 24),
imgui.Col_.popup_bg: _c( 35, 30, 20),
imgui.Col_.border: _c( 60, 55, 50),
imgui.Col_.border_shadow: _c( 0, 0, 0, 0),
imgui.Col_.frame_bg: _c( 45, 42, 38),
imgui.Col_.frame_bg_hovered: _c( 60, 56, 50),
imgui.Col_.frame_bg_active: _c( 75, 70, 62),
imgui.Col_.title_bg: _c( 40, 35, 25),
imgui.Col_.title_bg_active: _c( 60, 45, 15),
imgui.Col_.title_bg_collapsed: _c( 30, 27, 20),
imgui.Col_.menu_bar_bg: _c( 35, 30, 20),
imgui.Col_.scrollbar_bg: _c( 30, 28, 24),
imgui.Col_.scrollbar_grab: _c( 80, 78, 72),
imgui.Col_.scrollbar_grab_hovered: _c(100, 100, 92),
imgui.Col_.scrollbar_grab_active: _c(120, 118, 110),
imgui.Col_.check_mark: _c(194, 164, 74),
imgui.Col_.slider_grab: _c(126, 78, 14),
imgui.Col_.slider_grab_active: _c(194, 140, 30),
imgui.Col_.button: _c( 83, 76, 60),
imgui.Col_.button_hovered: _c(126, 78, 14),
imgui.Col_.button_active: _c(115, 90, 70),
imgui.Col_.header: _c( 83, 76, 60),
imgui.Col_.header_hovered: _c(126, 78, 14),
imgui.Col_.header_active: _c(115, 90, 70),
imgui.Col_.separator: _c( 70, 65, 55),
imgui.Col_.separator_hovered: _c(126, 78, 14),
imgui.Col_.separator_active: _c(194, 164, 74),
imgui.Col_.resize_grip: _c( 60, 55, 44),
imgui.Col_.resize_grip_hovered: _c(126, 78, 14),
imgui.Col_.resize_grip_active: _c(194, 164, 74),
imgui.Col_.tab: _c( 83, 83, 70),
imgui.Col_.tab_hovered: _c(126, 77, 25),
imgui.Col_.tab_selected: _c(126, 77, 25),
imgui.Col_.tab_dimmed: _c( 60, 58, 50),
imgui.Col_.tab_dimmed_selected: _c( 90, 80, 55),
imgui.Col_.docking_preview: _c(126, 78, 14, 180),
imgui.Col_.docking_empty_bg: _c( 20, 20, 20),
imgui.Col_.text: _c(200, 200, 200),
imgui.Col_.text_disabled: _c(130, 130, 120),
imgui.Col_.text_selected_bg: _c( 59, 86, 142, 180),
imgui.Col_.table_header_bg: _c( 55, 50, 38),
imgui.Col_.table_border_strong: _c( 70, 65, 55),
imgui.Col_.table_border_light: _c( 50, 47, 42),
imgui.Col_.table_row_bg: _c( 0, 0, 0, 0),
imgui.Col_.table_row_bg_alt: _c( 40, 38, 34, 40),
imgui.Col_.nav_cursor: _c(126, 78, 14),
imgui.Col_.nav_windowing_highlight: _c(194, 164, 74, 180),
imgui.Col_.nav_windowing_dim_bg: _c( 20, 20, 20, 80),
imgui.Col_.modal_window_dim_bg: _c( 10, 10, 10, 100),
},
"Nord Dark": {
imgui.Col_.window_bg: _c( 36, 41, 49),
imgui.Col_.child_bg: _c( 30, 34, 42),
imgui.Col_.popup_bg: _c( 36, 41, 49),
imgui.Col_.border: _c( 59, 66, 82),
imgui.Col_.border_shadow: _c( 0, 0, 0, 0),
imgui.Col_.frame_bg: _c( 46, 52, 64),
imgui.Col_.frame_bg_hovered: _c( 59, 66, 82),
imgui.Col_.frame_bg_active: _c( 67, 76, 94),
imgui.Col_.title_bg: _c( 36, 41, 49),
imgui.Col_.title_bg_active: _c( 59, 66, 82),
imgui.Col_.title_bg_collapsed: _c( 30, 34, 42),
imgui.Col_.menu_bar_bg: _c( 46, 52, 64),
imgui.Col_.scrollbar_bg: _c( 30, 34, 42),
imgui.Col_.scrollbar_grab: _c( 76, 86, 106),
imgui.Col_.scrollbar_grab_hovered: _c( 94, 129, 172),
imgui.Col_.scrollbar_grab_active: _c(129, 161, 193),
imgui.Col_.check_mark: _c(136, 192, 208),
imgui.Col_.slider_grab: _c( 94, 129, 172),
imgui.Col_.slider_grab_active: _c(129, 161, 193),
imgui.Col_.button: _c( 59, 66, 82),
imgui.Col_.button_hovered: _c( 94, 129, 172),
imgui.Col_.button_active: _c(129, 161, 193),
imgui.Col_.header: _c( 59, 66, 82),
imgui.Col_.header_hovered: _c( 94, 129, 172),
imgui.Col_.header_active: _c(129, 161, 193),
imgui.Col_.separator: _c( 59, 66, 82),
imgui.Col_.separator_hovered: _c( 94, 129, 172),
imgui.Col_.separator_active: _c(136, 192, 208),
imgui.Col_.resize_grip: _c( 59, 66, 82),
imgui.Col_.resize_grip_hovered: _c( 94, 129, 172),
imgui.Col_.resize_grip_active: _c(136, 192, 208),
imgui.Col_.tab: _c( 46, 52, 64),
imgui.Col_.tab_hovered: _c( 94, 129, 172),
imgui.Col_.tab_selected: _c( 76, 86, 106),
imgui.Col_.tab_dimmed: _c( 36, 41, 49),
imgui.Col_.tab_dimmed_selected: _c( 59, 66, 82),
imgui.Col_.docking_preview: _c( 94, 129, 172, 180),
imgui.Col_.docking_empty_bg: _c( 20, 22, 28),
imgui.Col_.text: _c(216, 222, 233),
imgui.Col_.text_disabled: _c(116, 128, 150),
imgui.Col_.text_selected_bg: _c( 94, 129, 172, 180),
imgui.Col_.table_header_bg: _c( 59, 66, 82),
imgui.Col_.table_border_strong: _c( 76, 86, 106),
imgui.Col_.table_border_light: _c( 59, 66, 82),
imgui.Col_.table_row_bg: _c( 0, 0, 0, 0),
imgui.Col_.table_row_bg_alt: _c( 46, 52, 64, 40),
imgui.Col_.nav_cursor: _c(136, 192, 208),
imgui.Col_.modal_window_dim_bg: _c( 10, 12, 16, 100),
},
"Monokai": {
imgui.Col_.window_bg: _c( 39, 40, 34),
imgui.Col_.child_bg: _c( 34, 35, 29),
imgui.Col_.popup_bg: _c( 39, 40, 34),
imgui.Col_.border: _c( 60, 61, 52),
imgui.Col_.border_shadow: _c( 0, 0, 0, 0),
imgui.Col_.frame_bg: _c( 50, 51, 44),
imgui.Col_.frame_bg_hovered: _c( 65, 67, 56),
imgui.Col_.frame_bg_active: _c( 80, 82, 68),
imgui.Col_.title_bg: _c( 39, 40, 34),
imgui.Col_.title_bg_active: _c( 73, 72, 62),
imgui.Col_.title_bg_collapsed: _c( 30, 31, 26),
imgui.Col_.menu_bar_bg: _c( 50, 51, 44),
imgui.Col_.scrollbar_bg: _c( 34, 35, 29),
imgui.Col_.scrollbar_grab: _c( 80, 80, 72),
imgui.Col_.scrollbar_grab_hovered: _c(102, 217, 39),
imgui.Col_.scrollbar_grab_active: _c(166, 226, 46),
imgui.Col_.check_mark: _c(166, 226, 46),
imgui.Col_.slider_grab: _c(102, 217, 39),
imgui.Col_.slider_grab_active: _c(166, 226, 46),
imgui.Col_.button: _c( 73, 72, 62),
imgui.Col_.button_hovered: _c(249, 38, 114),
imgui.Col_.button_active: _c(198, 30, 92),
imgui.Col_.header: _c( 73, 72, 62),
imgui.Col_.header_hovered: _c(249, 38, 114),
imgui.Col_.header_active: _c(198, 30, 92),
imgui.Col_.separator: _c( 60, 61, 52),
imgui.Col_.separator_hovered: _c(249, 38, 114),
imgui.Col_.separator_active: _c(166, 226, 46),
imgui.Col_.resize_grip: _c( 73, 72, 62),
imgui.Col_.resize_grip_hovered: _c(249, 38, 114),
imgui.Col_.resize_grip_active: _c(166, 226, 46),
imgui.Col_.tab: _c( 73, 72, 62),
imgui.Col_.tab_hovered: _c(249, 38, 114),
imgui.Col_.tab_selected: _c(249, 38, 114),
imgui.Col_.tab_dimmed: _c( 50, 51, 44),
imgui.Col_.tab_dimmed_selected: _c( 90, 88, 76),
imgui.Col_.docking_preview: _c(249, 38, 114, 180),
imgui.Col_.docking_empty_bg: _c( 20, 20, 18),
imgui.Col_.text: _c(248, 248, 242),
imgui.Col_.text_disabled: _c(117, 113, 94),
imgui.Col_.text_selected_bg: _c(249, 38, 114, 150),
imgui.Col_.table_header_bg: _c( 60, 61, 52),
imgui.Col_.table_border_strong: _c( 73, 72, 62),
imgui.Col_.table_border_light: _c( 55, 56, 48),
imgui.Col_.table_row_bg: _c( 0, 0, 0, 0),
imgui.Col_.table_row_bg_alt: _c( 50, 51, 44, 40),
imgui.Col_.nav_cursor: _c(166, 226, 46),
imgui.Col_.modal_window_dim_bg: _c( 10, 10, 8, 100),
},
}
PALETTE_NAMES: list[str] = list(_PALETTES.keys())
# ------------------------------------------------------------------ state
_current_palette: str = "ImGui Dark"
_current_font_path: str = ""
_current_font_size: float = 16.0
_current_scale: float = 1.0
_custom_font: imgui.ImFont = None # type: ignore
# ------------------------------------------------------------------ public API
def get_palette_names() -> list[str]:
...
def get_current_palette() -> str:
...
def get_current_font_path() -> str:
...
def get_current_font_size() -> float:
...
def get_current_scale() -> float:
...
def apply(palette_name: str) -> None:
"""
Apply a named palette by setting all ImGui style colors.
Call this once per frame if you want dynamic switching, or once at startup.
In practice we call it once when the user picks a palette, and imgui retains the style.
"""
...
def set_scale(factor: float) -> None:
"""Set the global font/UI scale factor."""
...
def save_to_config(config: dict) -> None:
"""Persist theme settings into the config dict under [theme]."""
...
def load_from_config(config: dict) -> None:
"""Read [theme] from config and apply palette + scale. Font is handled separately at startup."""
...
def apply_current() -> None:
"""Apply the loaded palette and scale. Call after imgui context exists."""
...
def get_font_loading_params() -> tuple[str, float]:
"""Return (font_path, font_size) for use during hello_imgui font loading callback."""
...
--- File: verify_pm_changes.py ---