feat(perf): Expand instrumentation with context manager and extended metrics

This commit is contained in:
2026-05-06 14:30:22 -04:00
parent 022c39888c
commit 23c1e21661
5 changed files with 240 additions and 136 deletions
+5
View File
@@ -21,6 +21,7 @@ from src import summarize
from src import project_manager
from src import beads_client
from src.file_cache import ASTParser
from src.performance_monitor import get_monitor
def find_next_increment(output_dir: Path, namespace: str) -> int:
pattern = re.compile(rf"^{re.escape(namespace)}_(\d+)\.md$")
@@ -132,6 +133,7 @@ def build_file_items(base_dir: Path, files: list[str | dict[str, Any]]) -> list[
auto_aggregate : bool
force_full : bool
"""
with get_monitor().scope("build_file_items"):
items: list[dict[str, Any]] = []
for entry_raw in files:
if isinstance(entry_raw, dict):
@@ -176,6 +178,7 @@ def build_summary_section(base_dir: Path, files: list[str | dict[str, Any]]) ->
Build a compact summary section using summarize.py — one short block per file.
Used as the initial <context> block instead of full file contents.
"""
with get_monitor().scope("build_summary_section"):
items = build_file_items(base_dir, files)
return summarize.build_summary_markdown(items)
@@ -297,6 +300,7 @@ def build_tier3_context(file_items: list[dict[str, Any]], screenshot_base_dir: P
Tier 3 Context: Execution/Worker.
Full content for focus_files and files with tier=3, summaries/skeletons for others.
"""
with get_monitor().scope("build_tier3_context"):
parts = []
if file_items:
sections = []
@@ -336,6 +340,7 @@ def build_tier3_context(file_items: list[dict[str, Any]], screenshot_base_dir: P
return "\n\n---\n\n".join(parts)
def build_markdown(base_dir: Path, files: list[str | dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], history: list[str], summary_only: bool = False, execution_mode: str = "standard") -> str:
with get_monitor().scope("build_markdown"):
parts = []
# STATIC PREFIX: Files and Screenshots must go first to maximize Cache Hits
if files:
+4 -1
View File
@@ -28,6 +28,7 @@ See Also:
"""
from typing import List
from src.models import Ticket
from src.performance_monitor import get_monitor
class TrackDAG:
"""
@@ -87,6 +88,7 @@ class TrackDAG:
Returns:
True if a cycle is detected, False otherwise.
"""
with get_monitor().scope("dag_has_cycle"):
visited = set()
rec_stack = set()
@@ -119,6 +121,7 @@ class TrackDAG:
Raises:
ValueError: If a dependency cycle is detected.
"""
with get_monitor().scope("dag_topological_sort"):
if self.has_cycle():
raise ValueError("Dependency cycle detected")
visited = set()
@@ -161,6 +164,7 @@ class ExecutionEngine:
Returns:
A list of ready Ticket objects.
"""
with get_monitor().scope("dag_tick"):
self.dag.cascade_blocks()
ready = self.dag.get_ready_tasks()
return ready
@@ -185,4 +189,3 @@ class ExecutionEngine:
ticket = self.dag.ticket_map.get(task_id)
if ticket:
ticket.status = status
+13 -1
View File
@@ -2348,15 +2348,21 @@ class App:
if self.perf_profiling_enabled:
imgui.separator()
imgui.text("Detailed Component Timings (Moving Average)")
if imgui.begin_table("comp_timings", 3, imgui.TableFlags_.borders):
if imgui.begin_table("comp_timings", 6, imgui.TableFlags_.borders):
imgui.table_setup_column("Component")
imgui.table_setup_column("Avg (ms)")
imgui.table_setup_column("Count")
imgui.table_setup_column("Max (ms)")
imgui.table_setup_column("Min (ms)")
imgui.table_setup_column("Graph")
imgui.table_headers_row()
for key, val in metrics.items():
if key.startswith("time_") and key.endswith("_ms") and not key.endswith("_avg"):
comp_name = key[5:-3]
avg_val = metrics.get(f"{key}_avg", val)
count = int(metrics.get(f"count_{comp_name}", 0))
max_val = metrics.get(f"max_{comp_name}_ms", 0.0)
min_val = metrics.get(f"min_{comp_name}_ms", 0.0)
imgui.table_next_row()
imgui.table_next_column()
imgui.text(comp_name)
@@ -2366,6 +2372,12 @@ class App:
else:
imgui.text(f"{avg_val:.2f}")
imgui.table_next_column()
imgui.text(f"{count}")
imgui.table_next_column()
imgui.text(f"{max_val:.2f}")
imgui.table_next_column()
imgui.text(f"{min_val:.2f}")
imgui.table_next_column()
self.perf_show_graphs.setdefault(comp_name, False)
_, self.perf_show_graphs[comp_name] = imgui.checkbox(f"##g_{comp_name}", self.perf_show_graphs[comp_name])
imgui.end_table()
+30
View File
@@ -62,6 +62,18 @@ from collections import deque
_instance: Optional[PerformanceMonitor] = None
class PerformanceScope:
    """Times one named component for the duration of a ``with`` block.

    Entering the block starts the monitor's timer for ``name``; leaving it —
    normally or via an exception — stops the timer.  Exceptions are never
    suppressed (``__exit__`` returns ``None``, which is falsy).
    """

    def __init__(self, monitor: PerformanceMonitor, name: str) -> None:
        # Keep a handle on the owning monitor and the component label.
        self.monitor, self.name = monitor, name

    def __enter__(self) -> PerformanceScope:
        self.monitor.start_component(self.name)
        return self

    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        # Always close out the timing, even when the body raised.
        self.monitor.end_component(self.name)
def get_monitor() -> PerformanceMonitor:
global _instance
if _instance is None:
@@ -90,6 +102,9 @@ class PerformanceMonitor:
self._component_starts: dict[str, float] = {}
self._component_timings: dict[str, float] = {}
self._component_counts: dict[str, int] = {}
self._component_max: dict[str, float] = {}
self._component_min: dict[str, float] = {}
# Rolling history and running sums for O(1) average calculation
# deques are thread-safe for appends and pops.
@@ -192,6 +207,11 @@ class PerformanceMonitor:
elapsed = (now - start) * 1000
with self._lock:
self._component_timings[name] = elapsed
self._component_counts[name] = self._component_counts.get(name, 0) + 1
if name not in self._component_max or elapsed > self._component_max[name]:
self._component_max[name] = elapsed
if name not in self._component_min or elapsed < self._component_min[name]:
self._component_min[name] = elapsed
self._add_to_history(f'comp_{name}', elapsed)
def get_metrics(self) -> dict[str, float]:
@@ -203,6 +223,9 @@ class PerformanceMonitor:
ilag = self._input_lag_ms
last_calc_fps = self._last_calculated_fps
timings_snapshot = dict(self._component_timings)
counts_snapshot = dict(self._component_counts)
max_snapshot = dict(self._component_max)
min_snapshot = dict(self._component_min)
metrics = {
'fps': fps,
@@ -217,6 +240,9 @@ class PerformanceMonitor:
for name, elapsed in timings_snapshot.items():
metrics[f'time_{name}_ms'] = elapsed
metrics[f'time_{name}_ms_avg'] = self._get_avg(f'comp_{name}')
metrics[f'count_{name}'] = float(counts_snapshot.get(name, 0))
metrics[f'max_{name}_ms'] = max_snapshot.get(name, 0.0)
metrics[f'min_{name}_ms'] = min_snapshot.get(name, 0.0)
return metrics
def get_history(self, key: str) -> List[float]:
@@ -228,6 +254,10 @@ class PerformanceMonitor:
return list(self._history[f'comp_{key}'])
return []
def scope(self, name: str) -> PerformanceScope:
    """Return a context manager that times the named component.

    Equivalent to calling ``start_component(name)`` on entry and
    ``end_component(name)`` on exit of the ``with`` block.
    """
    return PerformanceScope(monitor=self, name=name)
def stop(self) -> None:
self._stop_event.set()
if self._cpu_thread.is_alive():
+54
View File
@@ -26,3 +26,57 @@ def test_perf_monitor_component_timing() -> None:
metrics = pm.get_metrics()
assert metrics['time_test_comp_ms'] >= 10.0
pm.stop()
def test_perf_monitor_scope_context_manager() -> None:
    """scope() must record timings on both the normal and the error path."""
    monitor = PerformanceMonitor()
    monitor.enabled = True

    # Happy path: the recorded time covers at least the slept duration.
    with monitor.scope("test_scope"):
        time.sleep(0.01)
    assert monitor.get_metrics()['time_test_scope_ms'] >= 10.0

    # Error path: __exit__ still closes out the timing when the body raises.
    try:
        with monitor.scope("test_error"):
            time.sleep(0.01)
            raise ValueError("test error")
    except ValueError:
        pass
    assert monitor.get_metrics()['time_test_error_ms'] >= 10.0

    monitor.stop()
def test_perf_monitor_extended_metrics() -> None:
    """count/max/min metrics must reflect all recorded samples."""
    monitor = PerformanceMonitor()
    monitor.enabled = True

    # Record three samples for one component: ~10ms, ~30ms, ~20ms.
    for duration in (0.01, 0.03, 0.02):
        monitor.start_component("test_comp")
        time.sleep(duration)
        monitor.end_component("test_comp")

    metrics = monitor.get_metrics()
    assert metrics['count_test_comp'] == 3.0
    assert metrics['max_test_comp_ms'] >= 30.0
    assert metrics['min_test_comp_ms'] >= 10.0
    # The fastest sample must sit below the middle one.
    assert metrics['min_test_comp_ms'] < 20.0
    monitor.stop()