feat(perf): Expand instrumentation with context manager and extended metrics

This commit is contained in:
2026-05-06 14:30:22 -04:00
parent 022c39888c
commit 23c1e21661
5 changed files with 240 additions and 136 deletions
+98 -93
View File
@@ -21,6 +21,7 @@ from src import summarize
from src import project_manager from src import project_manager
from src import beads_client from src import beads_client
from src.file_cache import ASTParser from src.file_cache import ASTParser
from src.performance_monitor import get_monitor
def find_next_increment(output_dir: Path, namespace: str) -> int: def find_next_increment(output_dir: Path, namespace: str) -> int:
pattern = re.compile(rf"^{re.escape(namespace)}_(\d+)\.md$") pattern = re.compile(rf"^{re.escape(namespace)}_(\d+)\.md$")
@@ -132,52 +133,54 @@ def build_file_items(base_dir: Path, files: list[str | dict[str, Any]]) -> list[
auto_aggregate : bool auto_aggregate : bool
force_full : bool force_full : bool
""" """
items: list[dict[str, Any]] = [] with get_monitor().scope("build_file_items"):
for entry_raw in files: items: list[dict[str, Any]] = []
if isinstance(entry_raw, dict): for entry_raw in files:
entry = cast(str, entry_raw.get("path", "")) if isinstance(entry_raw, dict):
tier = entry_raw.get("tier") entry = cast(str, entry_raw.get("path", ""))
auto_aggregate = entry_raw.get("auto_aggregate", True) tier = entry_raw.get("tier")
force_full = entry_raw.get("force_full", False) auto_aggregate = entry_raw.get("auto_aggregate", True)
elif hasattr(entry_raw, "path"): force_full = entry_raw.get("force_full", False)
entry = entry_raw.path elif hasattr(entry_raw, "path"):
tier = getattr(entry_raw, "tier", None) entry = entry_raw.path
auto_aggregate = getattr(entry_raw, "auto_aggregate", True) tier = getattr(entry_raw, "tier", None)
force_full = getattr(entry_raw, "force_full", False) auto_aggregate = getattr(entry_raw, "auto_aggregate", True)
else: force_full = getattr(entry_raw, "force_full", False)
entry = entry_raw else:
tier = None entry = entry_raw
auto_aggregate = True tier = None
force_full = False auto_aggregate = True
if not entry or not isinstance(entry, str): force_full = False
continue if not entry or not isinstance(entry, str):
paths = resolve_paths(base_dir, entry) continue
if not paths: paths = resolve_paths(base_dir, entry)
items.append({"path": None, "entry": entry, "content": f"ERROR: no files matched: {entry}", "error": True, "mtime": 0.0, "tier": tier, "auto_aggregate": auto_aggregate, "force_full": force_full}) if not paths:
continue items.append({"path": None, "entry": entry, "content": f"ERROR: no files matched: {entry}", "error": True, "mtime": 0.0, "tier": tier, "auto_aggregate": auto_aggregate, "force_full": force_full})
for path in paths: continue
try: for path in paths:
content = path.read_text(encoding="utf-8") try:
mtime = path.stat().st_mtime content = path.read_text(encoding="utf-8")
error = False mtime = path.stat().st_mtime
except FileNotFoundError: error = False
content = f"ERROR: file not found: {path}" except FileNotFoundError:
mtime = 0.0 content = f"ERROR: file not found: {path}"
error = True mtime = 0.0
except Exception as e: error = True
content = f"ERROR: {e}" except Exception as e:
mtime = 0.0 content = f"ERROR: {e}"
error = True mtime = 0.0
items.append({"path": path, "entry": entry, "content": content, "error": error, "mtime": mtime, "tier": tier, "auto_aggregate": auto_aggregate, "force_full": force_full}) error = True
return items items.append({"path": path, "entry": entry, "content": content, "error": error, "mtime": mtime, "tier": tier, "auto_aggregate": auto_aggregate, "force_full": force_full})
return items
def build_summary_section(base_dir: Path, files: list[str | dict[str, Any]]) -> str: def build_summary_section(base_dir: Path, files: list[str | dict[str, Any]]) -> str:
""" """
Build a compact summary section using summarize.py — one short block per file. Build a compact summary section using summarize.py — one short block per file.
Used as the initial <context> block instead of full file contents. Used as the initial <context> block instead of full file contents.
""" """
items = build_file_items(base_dir, files) with get_monitor().scope("build_summary_section"):
return summarize.build_summary_markdown(items) items = build_file_items(base_dir, files)
return summarize.build_summary_markdown(items)
def _build_files_section_from_items(file_items: list[dict[str, Any]]) -> str: def _build_files_section_from_items(file_items: list[dict[str, Any]]) -> str:
"""Build the files markdown section from pre-read file items (avoids double I/O).""" """Build the files markdown section from pre-read file items (avoids double I/O)."""
@@ -297,62 +300,64 @@ def build_tier3_context(file_items: list[dict[str, Any]], screenshot_base_dir: P
Tier 3 Context: Execution/Worker. Tier 3 Context: Execution/Worker.
Full content for focus_files and files with tier=3, summaries/skeletons for others. Full content for focus_files and files with tier=3, summaries/skeletons for others.
""" """
parts = [] with get_monitor().scope("build_tier3_context"):
if file_items: parts = []
sections = [] if file_items:
for item in file_items: sections = []
if not item.get("auto_aggregate", True): for item in file_items:
continue if not item.get("auto_aggregate", True):
path = cast(Path, item.get("path")) continue
entry = cast(str, item.get("entry", "")) path = cast(Path, item.get("path"))
path_str = str(path) if path else "" entry = cast(str, item.get("entry", ""))
# Check if this file is in focus_files (by name or path) path_str = str(path) if path else ""
is_focus = False # Check if this file is in focus_files (by name or path)
for focus in focus_files: is_focus = False
if focus == entry or (path and focus == path.name) or (path_str and focus in path_str): for focus in focus_files:
is_focus = True if focus == entry or (path and focus == path.name) or (path_str and focus in path_str):
break is_focus = True
if is_focus or item.get("tier") == 3 or item.get("force_full"): break
sections.append("### `" + (entry or path_str) + "`\n\n" + if is_focus or item.get("tier") == 3 or item.get("force_full"):
f"```{path.suffix.lstrip('.') if path and path.suffix else 'text'}\n{item.get('content', '')}\n```") sections.append("### `" + (entry or path_str) + "`\n\n" +
else: f"```{path.suffix.lstrip('.') if path and path.suffix else 'text'}\n{item.get('content', '')}\n```")
content = cast(str, item.get("content", ""))
if path and path.suffix == ".py" and not item.get("error"):
try:
parser = ASTParser("python")
skeleton = parser.get_skeleton(content)
sections.append(f"### `{entry or path_str}` (AST Skeleton)\n\n```python\n{skeleton}\n```")
except Exception:
# Fallback to summary if AST parsing fails
sections.append(f"### `{entry or path_str}`\n\n" + summarize.summarise_file(path, content))
else: else:
if path: content = cast(str, item.get("content", ""))
sections.append(f"### `{entry or path_str}`\n\n" + summarize.summarise_file(path, content)) if path and path.suffix == ".py" and not item.get("error"):
parts.append("## Files (Tier 3 - Focused)\n\n" + "\n\n---\n\n".join(sections)) try:
if screenshots: parser = ASTParser("python")
parts.append("## Screenshots\n\n" + build_screenshots_section(screenshot_base_dir, screenshots)) skeleton = parser.get_skeleton(content)
if history: sections.append(f"### `{entry or path_str}` (AST Skeleton)\n\n```python\n{skeleton}\n```")
parts.append("## Discussion History\n\n" + build_discussion_section(history)) except Exception:
return "\n\n---\n\n".join(parts) # Fallback to summary if AST parsing fails
sections.append(f"### `{entry or path_str}`\n\n" + summarize.summarise_file(path, content))
else:
if path:
sections.append(f"### `{entry or path_str}`\n\n" + summarize.summarise_file(path, content))
parts.append("## Files (Tier 3 - Focused)\n\n" + "\n\n---\n\n".join(sections))
if screenshots:
parts.append("## Screenshots\n\n" + build_screenshots_section(screenshot_base_dir, screenshots))
if history:
parts.append("## Discussion History\n\n" + build_discussion_section(history))
return "\n\n---\n\n".join(parts)
def build_markdown(base_dir: Path, files: list[str | dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], history: list[str], summary_only: bool = False, execution_mode: str = "standard") -> str: def build_markdown(base_dir: Path, files: list[str | dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], history: list[str], summary_only: bool = False, execution_mode: str = "standard") -> str:
parts = [] with get_monitor().scope("build_markdown"):
# STATIC PREFIX: Files and Screenshots must go first to maximize Cache Hits parts = []
if files: # STATIC PREFIX: Files and Screenshots must go first to maximize Cache Hits
if summary_only: if files:
parts.append("## Files (Summary)\n\n" + build_summary_section(base_dir, files)) if summary_only:
else: parts.append("## Files (Summary)\n\n" + build_summary_section(base_dir, files))
parts.append("## Files\n\n" + build_files_section(base_dir, files)) else:
if screenshots: parts.append("## Files\n\n" + build_files_section(base_dir, files))
parts.append("## Screenshots\n\n" + build_screenshots_section(screenshot_base_dir, screenshots)) if screenshots:
if execution_mode == "beads": parts.append("## Screenshots\n\n" + build_screenshots_section(screenshot_base_dir, screenshots))
beads_md = build_beads_section(base_dir) if execution_mode == "beads":
if beads_md: beads_md = build_beads_section(base_dir)
parts.append(beads_md) if beads_md:
# DYNAMIC SUFFIX: History changes every turn, must go last parts.append(beads_md)
if history: # DYNAMIC SUFFIX: History changes every turn, must go last
parts.append("## Discussion History\n\n" + build_discussion_section(history)) if history:
return "\n\n---\n\n".join(parts) parts.append("## Discussion History\n\n" + build_discussion_section(history))
return "\n\n---\n\n".join(parts)
def run(config: dict[str, Any], aggregation_strategy: str = "auto") -> tuple[str, Path, list[dict[str, Any]]]: def run(config: dict[str, Any], aggregation_strategy: str = "auto") -> tuple[str, Path, list[dict[str, Any]]]:
namespace = config.get("project", {}).get("name") namespace = config.get("project", {}).get("name")
+45 -42
View File
@@ -28,6 +28,7 @@ See Also:
""" """
from typing import List from typing import List
from src.models import Ticket from src.models import Ticket
from src.performance_monitor import get_monitor
class TrackDAG: class TrackDAG:
""" """
@@ -87,29 +88,30 @@ class TrackDAG:
Returns: Returns:
True if a cycle is detected, False otherwise. True if a cycle is detected, False otherwise.
""" """
visited = set() with get_monitor().scope("dag_has_cycle"):
rec_stack = set() visited = set()
rec_stack = set()
def is_cyclic(ticket_id: str) -> bool: def is_cyclic(ticket_id: str) -> bool:
"""Internal recursive helper for cycle detection.""" """Internal recursive helper for cycle detection."""
if ticket_id in rec_stack: if ticket_id in rec_stack:
return True
if ticket_id in visited:
return False
visited.add(ticket_id)
rec_stack.add(ticket_id)
ticket = self.ticket_map.get(ticket_id)
if ticket:
for neighbor in ticket.depends_on:
if is_cyclic(neighbor):
return True
rec_stack.remove(ticket_id)
return False
for ticket in self.tickets:
if ticket.id not in visited:
if is_cyclic(ticket.id):
return True return True
return False if ticket_id in visited:
return False
visited.add(ticket_id)
rec_stack.add(ticket_id)
ticket = self.ticket_map.get(ticket_id)
if ticket:
for neighbor in ticket.depends_on:
if is_cyclic(neighbor):
return True
rec_stack.remove(ticket_id)
return False
for ticket in self.tickets:
if ticket.id not in visited:
if is_cyclic(ticket.id):
return True
return False
def topological_sort(self) -> List[str]: def topological_sort(self) -> List[str]:
""" """
@@ -119,24 +121,25 @@ class TrackDAG:
Raises: Raises:
ValueError: If a dependency cycle is detected. ValueError: If a dependency cycle is detected.
""" """
if self.has_cycle(): with get_monitor().scope("dag_topological_sort"):
raise ValueError("Dependency cycle detected") if self.has_cycle():
visited = set() raise ValueError("Dependency cycle detected")
stack = [] visited = set()
stack = []
def visit(ticket_id: str) -> None: def visit(ticket_id: str) -> None:
"""Internal recursive helper for topological sorting.""" """Internal recursive helper for topological sorting."""
if ticket_id in visited: if ticket_id in visited:
return return
visited.add(ticket_id) visited.add(ticket_id)
ticket = self.ticket_map.get(ticket_id) ticket = self.ticket_map.get(ticket_id)
if ticket: if ticket:
for dep_id in ticket.depends_on: for dep_id in ticket.depends_on:
visit(dep_id) visit(dep_id)
stack.append(ticket_id) stack.append(ticket_id)
for ticket in self.tickets: for ticket in self.tickets:
visit(ticket.id) visit(ticket.id)
return stack return stack
class ExecutionEngine: class ExecutionEngine:
""" """
@@ -161,9 +164,10 @@ class ExecutionEngine:
Returns: Returns:
A list of ready Ticket objects. A list of ready Ticket objects.
""" """
self.dag.cascade_blocks() with get_monitor().scope("dag_tick"):
ready = self.dag.get_ready_tasks() self.dag.cascade_blocks()
return ready ready = self.dag.get_ready_tasks()
return ready
def approve_task(self, task_id: str) -> None: def approve_task(self, task_id: str) -> None:
""" """
@@ -185,4 +189,3 @@ class ExecutionEngine:
ticket = self.dag.ticket_map.get(task_id) ticket = self.dag.ticket_map.get(task_id)
if ticket: if ticket:
ticket.status = status ticket.status = status
+13 -1
View File
@@ -2348,15 +2348,21 @@ class App:
if self.perf_profiling_enabled: if self.perf_profiling_enabled:
imgui.separator() imgui.separator()
imgui.text("Detailed Component Timings (Moving Average)") imgui.text("Detailed Component Timings (Moving Average)")
if imgui.begin_table("comp_timings", 3, imgui.TableFlags_.borders): if imgui.begin_table("comp_timings", 6, imgui.TableFlags_.borders):
imgui.table_setup_column("Component") imgui.table_setup_column("Component")
imgui.table_setup_column("Avg (ms)") imgui.table_setup_column("Avg (ms)")
imgui.table_setup_column("Count")
imgui.table_setup_column("Max (ms)")
imgui.table_setup_column("Min (ms)")
imgui.table_setup_column("Graph") imgui.table_setup_column("Graph")
imgui.table_headers_row() imgui.table_headers_row()
for key, val in metrics.items(): for key, val in metrics.items():
if key.startswith("time_") and key.endswith("_ms") and not key.endswith("_avg"): if key.startswith("time_") and key.endswith("_ms") and not key.endswith("_avg"):
comp_name = key[5:-3] comp_name = key[5:-3]
avg_val = metrics.get(f"{key}_avg", val) avg_val = metrics.get(f"{key}_avg", val)
count = int(metrics.get(f"count_{comp_name}", 0))
max_val = metrics.get(f"max_{comp_name}_ms", 0.0)
min_val = metrics.get(f"min_{comp_name}_ms", 0.0)
imgui.table_next_row() imgui.table_next_row()
imgui.table_next_column() imgui.table_next_column()
imgui.text(comp_name) imgui.text(comp_name)
@@ -2366,6 +2372,12 @@ class App:
else: else:
imgui.text(f"{avg_val:.2f}") imgui.text(f"{avg_val:.2f}")
imgui.table_next_column() imgui.table_next_column()
imgui.text(f"{count}")
imgui.table_next_column()
imgui.text(f"{max_val:.2f}")
imgui.table_next_column()
imgui.text(f"{min_val:.2f}")
imgui.table_next_column()
self.perf_show_graphs.setdefault(comp_name, False) self.perf_show_graphs.setdefault(comp_name, False)
_, self.perf_show_graphs[comp_name] = imgui.checkbox(f"##g_{comp_name}", self.perf_show_graphs[comp_name]) _, self.perf_show_graphs[comp_name] = imgui.checkbox(f"##g_{comp_name}", self.perf_show_graphs[comp_name])
imgui.end_table() imgui.end_table()
+30
View File
@@ -62,6 +62,18 @@ from collections import deque
_instance: Optional[PerformanceMonitor] = None _instance: Optional[PerformanceMonitor] = None
class PerformanceScope:
    """Context manager that times one named component on a PerformanceMonitor.

    Entering the ``with`` block starts the component timer; leaving it stops
    the timer, so a timing sample is recorded even when the guarded body
    raises.  Exceptions are never suppressed (``__exit__`` returns ``None``).
    """

    def __init__(self, monitor: PerformanceMonitor, name: str) -> None:
        # Only keep references here; no timing work happens until __enter__.
        self.monitor, self.name = monitor, name

    def __enter__(self) -> PerformanceScope:
        self.monitor.start_component(self.name)
        return self

    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        # Always close out the component; a falsy return propagates exceptions.
        self.monitor.end_component(self.name)
def get_monitor() -> PerformanceMonitor: def get_monitor() -> PerformanceMonitor:
global _instance global _instance
if _instance is None: if _instance is None:
@@ -90,6 +102,9 @@ class PerformanceMonitor:
self._component_starts: dict[str, float] = {} self._component_starts: dict[str, float] = {}
self._component_timings: dict[str, float] = {} self._component_timings: dict[str, float] = {}
self._component_counts: dict[str, int] = {}
self._component_max: dict[str, float] = {}
self._component_min: dict[str, float] = {}
# Rolling history and running sums for O(1) average calculation # Rolling history and running sums for O(1) average calculation
# deques are thread-safe for appends and pops. # deques are thread-safe for appends and pops.
@@ -192,6 +207,11 @@ class PerformanceMonitor:
elapsed = (now - start) * 1000 elapsed = (now - start) * 1000
with self._lock: with self._lock:
self._component_timings[name] = elapsed self._component_timings[name] = elapsed
self._component_counts[name] = self._component_counts.get(name, 0) + 1
if name not in self._component_max or elapsed > self._component_max[name]:
self._component_max[name] = elapsed
if name not in self._component_min or elapsed < self._component_min[name]:
self._component_min[name] = elapsed
self._add_to_history(f'comp_{name}', elapsed) self._add_to_history(f'comp_{name}', elapsed)
def get_metrics(self) -> dict[str, float]: def get_metrics(self) -> dict[str, float]:
@@ -203,6 +223,9 @@ class PerformanceMonitor:
ilag = self._input_lag_ms ilag = self._input_lag_ms
last_calc_fps = self._last_calculated_fps last_calc_fps = self._last_calculated_fps
timings_snapshot = dict(self._component_timings) timings_snapshot = dict(self._component_timings)
counts_snapshot = dict(self._component_counts)
max_snapshot = dict(self._component_max)
min_snapshot = dict(self._component_min)
metrics = { metrics = {
'fps': fps, 'fps': fps,
@@ -217,6 +240,9 @@ class PerformanceMonitor:
for name, elapsed in timings_snapshot.items(): for name, elapsed in timings_snapshot.items():
metrics[f'time_{name}_ms'] = elapsed metrics[f'time_{name}_ms'] = elapsed
metrics[f'time_{name}_ms_avg'] = self._get_avg(f'comp_{name}') metrics[f'time_{name}_ms_avg'] = self._get_avg(f'comp_{name}')
metrics[f'count_{name}'] = float(counts_snapshot.get(name, 0))
metrics[f'max_{name}_ms'] = max_snapshot.get(name, 0.0)
metrics[f'min_{name}_ms'] = min_snapshot.get(name, 0.0)
return metrics return metrics
def get_history(self, key: str) -> List[float]: def get_history(self, key: str) -> List[float]:
@@ -228,6 +254,10 @@ class PerformanceMonitor:
return list(self._history[f'comp_{key}']) return list(self._history[f'comp_{key}'])
return [] return []
def scope(self, name: str) -> PerformanceScope:
    """Return a ``with``-able timer that brackets start/end_component for *name*."""
    return PerformanceScope(monitor=self, name=name)
def stop(self) -> None: def stop(self) -> None:
self._stop_event.set() self._stop_event.set()
if self._cpu_thread.is_alive(): if self._cpu_thread.is_alive():
+54
View File
@@ -26,3 +26,57 @@ def test_perf_monitor_component_timing() -> None:
metrics = pm.get_metrics() metrics = pm.get_metrics()
assert metrics['time_test_comp_ms'] >= 10.0 assert metrics['time_test_comp_ms'] >= 10.0
pm.stop() pm.stop()
def test_perf_monitor_scope_context_manager() -> None:
    """scope() must record timings on both the happy path and the error path."""
    pm = PerformanceMonitor()
    pm.enabled = True

    # Happy path: the wrapped component gets a timing sample.
    with pm.scope("test_scope"):
        time.sleep(0.01)
    metrics = pm.get_metrics()
    # Lower bound is safe: time.sleep() sleeps *at least* the requested time.
    assert metrics['time_test_scope_ms'] >= 10.0

    # Error path: the exception must propagate out of the scope (a scope that
    # swallowed it would previously slip through this test unnoticed) AND the
    # timing must still be recorded by __exit__.
    raised = False
    try:
        with pm.scope("test_error"):
            time.sleep(0.01)
            raise ValueError("test error")
    except ValueError:
        raised = True
    assert raised, "pm.scope() must not suppress exceptions"
    metrics = pm.get_metrics()
    assert metrics['time_test_error_ms'] >= 10.0

    pm.stop()
def test_perf_monitor_extended_metrics() -> None:
    """count/max/min metrics must reflect three recorded samples."""
    pm = PerformanceMonitor()
    pm.enabled = True

    # Record three samples of different nominal lengths: 10ms, 30ms, 20ms.
    for secs in (0.01, 0.03, 0.02):
        pm.start_component("test_comp")
        time.sleep(secs)
        pm.end_component("test_comp")

    metrics = pm.get_metrics()
    assert metrics['count_test_comp'] == 3.0
    # Lower bounds are safe assertions: time.sleep() guarantees *at least* the
    # requested duration, never less.
    assert metrics['max_test_comp_ms'] >= 30.0
    assert metrics['min_test_comp_ms'] >= 10.0
    # No hard upper bound on min (the old `< 20.0` check was flaky — sleep can
    # overshoot arbitrarily on a loaded machine); require only that the
    # min/max relationship is internally consistent.
    assert metrics['min_test_comp_ms'] <= metrics['max_test_comp_ms']

    pm.stop()