From 2ffb2b2e1fac87da2fcd95985bbb1f548fc355dd Mon Sep 17 00:00:00 2001 From: Ed_ Date: Sun, 8 Mar 2026 03:11:11 -0400 Subject: [PATCH] docs --- src/api_hook_client.py | 33 +++++++++++++++++++++++ src/conductor_tech_lead.py | 35 ++++++++++++++++++++++++ src/cost_tracker.py | 33 +++++++++++++++++++++++ src/file_cache.py | 39 +++++++++++++++++++++++---- src/gemini_cli_adapter.py | 36 +++++++++++++++++++++++++ src/log_registry.py | 40 +++++++++++++++++++++++++++ src/outline_tool.py | 33 +++++++++++++++++++++++ src/paths.py | 43 +++++++++++++++++++++++++++++ src/performance_monitor.py | 55 ++++++++++++++++++++++++++++++++++++++ 9 files changed, 342 insertions(+), 5 deletions(-) diff --git a/src/api_hook_client.py b/src/api_hook_client.py index 40d2497..88d73bb 100644 --- a/src/api_hook_client.py +++ b/src/api_hook_client.py @@ -1,3 +1,36 @@ +""" +API Hook Client - Python client for the Hook API. + +This module provides a Python client for interacting with the Hook API exposed by the application on port 8999. + It is used for: + - Automated GUI testing via the `live_gui` pytest fixture + - External tool integration + - Remote control of the application + +Architecture: + - Uses requests library for HTTP communication + - All methods return dict[str, Any] or None + - Handles connection errors gracefully (returns None on failure) + +Key Method Categories: + 1. Connection: wait_for_server, get_status + 2. State Query: get_project, get_session, get_performance, get_mma_status + 3. GUI Manipulation: click, set_value, select_tab, select_list_item + 4. Polling: wait_for_event + 5. 
HITL: request_confirmation + +Timeout Handling: + - Standard operations: 5s timeout + - HITL dialogs: 60s timeout (waits for human input) + +Integration: + - Used by simulation tests (tests/visual_sim_mma_v2.py) + - Used by external tools for automation + +See Also: + - src/api_hooks.py for the server implementation + - docs/guide_tools.md for Hook API documentation +""" from __future__ import annotations import requests # type: ignore[import-untyped] import time diff --git a/src/conductor_tech_lead.py b/src/conductor_tech_lead.py index 680e3cd..0d23f55 100644 --- a/src/conductor_tech_lead.py +++ b/src/conductor_tech_lead.py @@ -1,3 +1,38 @@ +""" +Conductor Tech Lead - Tier 2 ticket generation for MMA orchestration. + +This module implements the Tier 2 (Tech Lead) function for generating implementation tickets from track briefs. + It uses the LLM to analyze the track requirements and produce structured ticket definitions. + +Architecture: + - Uses ai_client.send() for LLM communication + - Uses mma_prompts.PROMPTS["tier2_sprint_planning"] for system prompt + - Returns JSON array of ticket definitions + +Ticket Format: + Each ticket is a dict with: + - id: Unique identifier + - description: Task description + - depends_on: List of dependency ticket IDs + - step_mode: Whether to pause for approval between steps + +Dependencies: + - Uses TrackDAG from dag_engine.py for topological sorting + - Uses Ticket from models.py for validation + +Error Handling: + - Retries JSON parsing errors up to 3 times + - Raises RuntimeError if all retries fail + +Thread Safety: + - NOT thread-safe. Should only be called from the main GUI thread. 
+ - Modifies ai_client state (custom_system_prompt, current_tier) + +See Also: + - docs/guide_mma.md for MMA orchestration documentation + - src/mma_prompts.py for Tier-specific prompts + - src/dag_engine.py for TrackDAG +""" import json from src import ai_client from src import mma_prompts diff --git a/src/cost_tracker.py b/src/cost_tracker.py index 16f9709..82cc129 100644 --- a/src/cost_tracker.py +++ b/src/cost_tracker.py @@ -1,3 +1,36 @@ +""" +Cost Tracker - Token cost estimation for API calls. + +This module provides cost estimation for different LLM providers based on per-token pricing. + It is used to display estimated costs in the MMA Dashboard. + +Pricing Data (per 1M tokens): + - gemini-2.5-flash-lite: $0.075 input / $0.30 output + - gemini-3-flash-preview: $0.15 input / $0.60 output + - gemini-3.1-pro-preview: $3.50 input / $10.50 output + - claude-*-sonnet: $3.0 input / $15.0 output + - claude-*-opus: $15.0 input / $75.0 output + - deepseek-v3: $0.27 input / $1.10 output + +Usage: + from src.cost_tracker import estimate_cost + + total = estimate_cost("gemini-2.5-flash-lite", 50000, 10000) + # Returns: 0.007 (approx) + +Accuracy: + - Pricing data may be outdated + - Uses regex matching for model identification + - Returns 0.0 for unknown models + +Integration: + - Used by gui_2.py for MMA dashboard cost display + - Called after each API call + +See Also: + - src/ai_client.py for token tracking + - docs/guide_mma.md for MMA dashboard documentation +""" import re # Pricing per 1M tokens in USD diff --git a/src/file_cache.py b/src/file_cache.py index dde7372..f08b91d 100644 --- a/src/file_cache.py +++ b/src/file_cache.py @@ -1,10 +1,39 @@ -# file_cache.py -""" -Stub — the Anthropic Files API path has been removed. -All context is now sent as inline chunked text via _send_anthropic_chunked. -This file is kept so that any stale imports do not break. """ +File Cache - ASTParser (tree-sitter) for Python source code analysis. 
+This module provides AST-based code analysis using the tree-sitter library. + It is used to generate compressed "views" of Python code that preserve + structure while reducing token consumption. + +Key Components: + - ASTParser: Main parser class using tree-sitter + - get_skeleton(): Compress function bodies to `...` + - get_curated_view(): Preserve `@core_logic` and `[HOT]` functions + - get_targeted_view(): Extract only specified symbols + dependencies + +Caching: + - Module-level `_ast_cache` stores parsed trees with mtime invalidation + - Cache limit: 10 entries (simple LRU eviction) + - Cache key: file path + mtime + +Thread Safety: + - Not thread-safe. Use separate ASTParser instances per thread. + - Cache is module-level shared across instances. + +Views: + 1. Skeleton: Signatures + docstrings only, bodies replaced with `...` + 2. Curated: Skeleton + bodies marked with `@core_logic` or `[HOT]` + 3. Targeted: Only specified symbols + their dependencies (depth 2) + +Integration: + - Used by mcp_client.py for py_get_skeleton, py_get_curated_view + - Used by multi_agent_conductor.py for worker context injection + - Used by aggregate.py for summary generation + +See Also: + - docs/guide_tools.md for AST tool documentation + - src/summarize.py for heuristic summaries +""" from pathlib import Path from typing import Optional, Any, List, Tuple, Dict import tree_sitter diff --git a/src/gemini_cli_adapter.py b/src/gemini_cli_adapter.py index 8ec937d..4af0c92 100644 --- a/src/gemini_cli_adapter.py +++ b/src/gemini_cli_adapter.py @@ -1,3 +1,38 @@ +""" +Gemini CLI Adapter - Subprocess wrapper for the `gemini` CLI tool. + +This module provides an adapter for running the Google Gemini CLI as a subprocess, +parsing its streaming JSON output, and handling session management. 
+ +Key Features: + - Streaming JSON output parsing (init, message, chunk, tool_use, result) + - Session persistence via --resume flag + - Non-blocking line-by-line reading with stream_callback + - Token estimation via character count heuristic (4 chars/token) + - CLI call logging via session_logger + +Integration: + - Used by ai_client.py as the 'gemini_cli' provider + - Enables synchronous HITL bridge via GEMINI_CLI_HOOK_CONTEXT env var + +Thread Safety: + - Each GeminiCliAdapter instance maintains its own session_id + - Not thread-safe. Use separate instances per thread. + +Configuration: + - binary_path: Path to the `gemini` CLI (from project config [gemini_cli].binary_path) + +Output Protocol: + The CLI emits JSON-L lines: + {"type": "init", "session_id": "..."} + {"type": "message", "content": "...", "role": "assistant"} + {"type": "tool_use", "name": "...", "parameters": {...}} + {"type": "result", "status": "success", "stats": {"total_tokens": N}} + +See Also: + - docs/guide_architecture.md for CLI adapter integration + - src/ai_client.py for provider dispatch +""" import subprocess import json import os @@ -6,6 +41,7 @@ import sys from src import session_logger from typing import Optional, Callable, Any + class GeminiCliAdapter: """ Adapter for the Gemini CLI that parses streaming JSON output. diff --git a/src/log_registry.py b/src/log_registry.py index c458194..db64915 100644 --- a/src/log_registry.py +++ b/src/log_registry.py @@ -1,3 +1,42 @@ +""" +Log Registry - Session metadata persistence for log management. + +This module provides the LogRegistry class for tracking session logs +in a persistent TOML registry file. It supports session registration, +metadata updates, whitelisting, and age-based pruning queries. 
+ +Key Features: + - Persistent TOML-based registry (log_registry.toml) + - Session registration with path and start time + - Automatic whitelisting based on heuristics (errors, message count, size) + - Age-based session queries for log pruning + - Thread-safe file operations (via atomic TOML writes) + +Registry File Format (log_registry.toml): + [session_id] + path = "logs/sessions/session_id" + start_time = "2024-01-15T10:30:00" + whitelisted = false + [session_id.metadata] + message_count = 42 + errors = 0 + size_kb = 15 + reason = "High message count: 42" + +Integration: + - Used by session_logger.py for session registration + - Used by log_pruner.py for age-based cleanup + - Called from gui_2.py for log management UI + +Thread Safety: + - File operations use atomic write (tomli_w.dump) + - In-memory data dict is not thread-safe for concurrent access + +See Also: + - src/session_logger.py for session lifecycle + - src/log_pruner.py for automated cleanup + - src/paths.py for registry path resolution +""" from __future__ import annotations import tomli_w import tomllib @@ -5,6 +44,7 @@ from datetime import datetime import os from typing import Any + class LogRegistry: """ Manages a persistent registry of session logs using a TOML file. diff --git a/src/outline_tool.py b/src/outline_tool.py index 786466e..9752f1f 100644 --- a/src/outline_tool.py +++ b/src/outline_tool.py @@ -1,6 +1,39 @@ +""" +Outline Tool - Hierarchical code outline extraction via stdlib ast. + +This module provides the CodeOutliner class for generating a hierarchical +outline of Python source code, showing classes, methods, and functions +with their line ranges and docstrings. 
+ +Key Features: + - Uses Python's built-in ast module (no external dependencies) + - Extracts class and function definitions with line ranges + - Includes first line of docstrings for each definition + - Distinguishes between methods and top-level functions + +Usage: + outliner = CodeOutliner() + outline = outliner.outline(python_code) + +Output Format: + [Class] ClassName (Lines 10-50) + """First line of class docstring""" + [Method] __init__ (Lines 11-20) + [Method] process (Lines 22-35) + [Func] top_level_function (Lines 55-70) + +Integration: + - Used by mcp_client.py for py_get_code_outline tool + - Used by simulation tests for code structure verification + +See Also: + - src/file_cache.py for ASTParser (tree-sitter based) + - src/summarize.py for heuristic file summaries +""" import ast from pathlib import Path + class CodeOutliner: def __init__(self) -> None: pass diff --git a/src/paths.py b/src/paths.py index ba96e8f..089fa51 100644 --- a/src/paths.py +++ b/src/paths.py @@ -1,3 +1,46 @@ +""" +Paths - Centralized path resolution for configuration and environment variables. + +This module provides centralized path resolution for all configurable paths in the application. + All paths can be overridden via environment variables or config.toml. 
+ +Environment Variables: + SLOP_CONFIG: Path to config.toml + SLOP_CONDUCTOR_DIR: Path to conductor directory + SLOP_LOGS_DIR: Path to logs directory + SLOP_SCRIPTS_DIR: Path to generated scripts directory + +Configuration (config.toml): + [paths] + conductor_dir = "conductor" + logs_dir = "logs/sessions" + scripts_dir = "scripts/generated" + +Path Functions: + get_config_path() -> Path to config.toml + get_conductor_dir() -> Path to conductor directory + get_logs_dir() -> Path to logs/sessions + get_scripts_dir() -> Path to scripts/generated + get_tracks_dir() -> Path to conductor/tracks + get_track_state_dir(track_id) -> Path to conductor/tracks/<track_id> + get_archive_dir() -> Path to conductor/archive + +Resolution Order: + 1. Check environment variable + 2. Check config.toml [paths] section + 3. Fall back to default + +Usage: + from src.paths import get_logs_dir, get_scripts_dir + + logs_dir = get_logs_dir() + scripts_dir = get_scripts_dir() + +See Also: + - docs/guide_tools.md for configuration documentation + - src/session_logger.py for logging paths + - src/project_manager.py for project paths +""" from pathlib import Path import os import tomllib diff --git a/src/performance_monitor.py b/src/performance_monitor.py index cb30726..c0e1868 100644 --- a/src/performance_monitor.py +++ b/src/performance_monitor.py @@ -1,3 +1,57 @@ +""" +Performance Monitor - Real-time FPS, frame time, and CPU usage tracking. + +This module provides the PerformanceMonitor singleton class for tracking +application performance metrics with efficient O(1) moving averages. + +Key Features: + - FPS and frame time tracking with rolling history + - CPU percentage monitoring via background thread + - Per-component timing with start_component() / end_component() + - Efficient moving average using deque + running sum + - Thread-safe metric collection + +Usage: + perf = get_monitor() + perf.enabled = True + + # In render loop: + perf.start_frame() + perf.start_component('panel_a') + # ... 
render panel A ... + perf.end_component('panel_a') + perf.end_frame() + + # Get metrics: + metrics = perf.get_metrics() + fps = metrics['fps'] + avg_frame_time = metrics['frame_time_ms_avg'] + +Metrics Available: + - fps: Instantaneous frames per second + - fps_avg: Rolling average FPS + - last_frame_time_ms: Last frame duration in milliseconds + - frame_time_ms_avg: Rolling average frame time + - cpu_percent: Current CPU usage + - cpu_percent_avg: Rolling average CPU usage + - input_lag_ms: Input latency estimate + - time_<component>_ms: Per-component timing + - time_<component>_ms_avg: Per-component rolling average + +Thread Safety: + - All public methods are thread-safe + - Uses threading.Lock for state mutations + - Background CPU thread polls every 1 second + +Configuration: + - history_size: Number of samples for rolling averages (default: 300) + - sample_interval: Minimum time between history samples (default: 100ms) + +Integration: + - Instantiated as singleton via get_monitor() + - Used by gui_2.py for Diagnostics Panel + - Exposed via Hook API at /api/performance +""" from __future__ import annotations import time import psutil @@ -7,6 +61,7 @@ from collections import deque _instance: Optional[PerformanceMonitor] = None + def get_monitor() -> PerformanceMonitor: global _instance if _instance is None: