docs(conductor): Synchronize docs for track 'GUI Performance Profiling & Optimization'
@@ -51,7 +51,7 @@ For deep implementation details when planning or implementing tracks, consult `d
 - **Session Analysis:** Ability to load and visualize historical session logs with a dedicated tinted "Prior Session" viewing mode.
 - **Structured Log Taxonomy:** Automated session-based log organization into configurable directories (defaulting to `logs/sessions/`). Includes a dedicated GUI panel for monitoring and manual whitelisting. Features an intelligent heuristic-based pruner that automatically cleans up insignificant logs older than 24 hours while preserving valuable sessions.
 - **Clean Project Root:** Enforces a "Cruft-Free Root" policy by organizing core implementation into a `src/` directory and redirecting all temporary test data, configurations, and AI-generated artifacts to `tests/artifacts/`.
-- **Performance Diagnostics:** Built-in telemetry for FPS, Frame Time, and CPU usage, with a dedicated Diagnostics Panel and AI API hooks for performance analysis.
+- **Performance Diagnostics:** Comprehensive, conditional per-component profiling across the entire application. Features a dedicated **Diagnostics Panel** providing real-time telemetry for FPS, Frame Time, CPU usage, and **Detailed Component Timings** for all GUI panels and background threads, including automated threshold-based latency alerts.
 - **Automated UX Verification:** A robust IPC mechanism via API hooks and a modular simulation suite allows for human-like simulation walkthroughs and automated regression testing of the full GUI lifecycle across multiple specialized scenarios.
 - **Headless Backend Service:** Optional headless mode allowing the core AI and tool execution logic to run as a decoupled REST API service (FastAPI), optimized for Docker and server-side environments (e.g., Unraid).
 - **Remote Confirmation Protocol:** A non-blocking, ID-based challenge/response mechanism for approving AI actions via the REST API, enabling remote "Human-in-the-Loop" safety.
scripts/tasks/instrument_controller_profiling.toml (new file, 19 lines)
@@ -0,0 +1,19 @@
+[task]
+role = "tier3-worker"
+prompt = """In src/app_controller.py, add internal conditional profiling hooks to key background thread methods: _run_event_loop and _handle_request_event.
+
+PATTERN:
+At the very beginning of the method:
+if hasattr(self, 'perf_monitor') and getattr(self, 'perf_profiling_enabled', False):
+ self.perf_monitor.start_component("_method_name")
+
+Immediately before EVERY 'return' statement AND at the very end of the method:
+if hasattr(self, 'perf_monitor') and getattr(self, 'perf_profiling_enabled', False):
+ self.perf_monitor.end_component("_method_name")
+
+CRITICAL:
+1. DO NOT use try...finally.
+2. Use exactly 1-space indentation for all Python code.
+3. Replace _method_name with the actual name of the method.
+4. Note that AppController has self.perf_monitor and self.perf_profiling_enabled (ensure you check for existence if they are initialized late).
+"""
scripts/tasks/instrument_gui_profiling.toml (new file, 16 lines)
@@ -0,0 +1,16 @@
+[task]
+role = "tier3-worker"
+prompt = """In src/gui_2.py, add internal conditional profiling hooks to all remaining rendering methods: _render_projects_panel, _render_files_panel, _render_screenshots_panel, _render_provider_panel, _render_token_budget_panel, _render_cache_panel, _render_tool_analytics_panel, _render_session_insights_panel, _render_message_panel, _render_response_panel, _render_comms_history_panel, _render_tool_calls_panel, _render_tier_stream_panel, and _render_theme_panel.
+
+PATTERN:
+At the very beginning of the method:
+if self.perf_profiling_enabled: self.perf_monitor.start_component("_method_name")
+
+Immediately before EVERY 'return' statement AND at the very end of the method:
+if self.perf_profiling_enabled: self.perf_monitor.end_component("_method_name")
+
+CRITICAL:
+1. DO NOT use try...finally.
+2. Use exactly 1-space indentation for all Python code.
+3. Replace _method_name with the actual name of the method (e.g., _render_projects_panel).
+"""
src/ai_client.py
@@ -12,12 +12,14 @@ For Gemini: injects the initial context directly into system_instruction
 during chat creation to avoid massive history bloat.
 """
 # ai_client.py
+# ai_client.py
 import tomllib
 import asyncio
 import json
 import sys
 import time
 import datetime
+from src import performance_monitor
 import hashlib
 import difflib
 import threading
@@ -568,6 +570,8 @@ async def _execute_tool_calls_concurrently(
     Executes multiple tool calls concurrently using asyncio.gather.
     Returns a list of (tool_name, call_id, output, original_name).
     """
+    monitor = performance_monitor.get_monitor()
+    if monitor.enabled: monitor.start_component("ai_client._execute_tool_calls_concurrently")
     tasks = []
     for fc in calls:
         if provider == "gemini":
@@ -596,6 +600,7 @@ async def _execute_tool_calls_concurrently(
         tasks.append(_execute_single_tool_call_async(name, args, call_id, base_dir, pre_tool_callback, qa_callback, r_idx, patch_callback))

     results = await asyncio.gather(*tasks)
+    if monitor.enabled: monitor.end_component("ai_client._execute_tool_calls_concurrently")
     return results

 async def _execute_single_tool_call_async(
@@ -807,6 +812,8 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str,
                  stream_callback: Optional[Callable[[str], None]] = None,
                  patch_callback: Optional[Callable[[str, str], Optional[str]]] = None) -> str:
     global _gemini_chat, _gemini_cache, _gemini_cache_md_hash, _gemini_cache_created_at, _gemini_cached_file_paths
+    monitor = performance_monitor.get_monitor()
+    if monitor.enabled: monitor.start_component("ai_client._send_gemini")
     try:
         _ensure_gemini_client(); mcp_client.configure(file_items or [], [base_dir])
         sys_instr = f"{_get_combined_system_prompt()}\n\n<context>\n{md_content}\n</context>"
@@ -1018,8 +1025,12 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str,
                 _append_comms("OUT", "request", {"message": f"[TOOL OUTPUT BUDGET EXCEEDED: {_cumulative_tool_bytes} bytes]"})
                 _append_comms("OUT", "tool_result_send", {"results": log})
                 payload = f_resps
-        return "\n\n".join(all_text) if all_text else "(No text returned)"
-    except Exception as e: raise _classify_gemini_error(e) from e
+        res = "\n\n".join(all_text) if all_text else "(No text returned)"
+        if monitor.enabled: monitor.end_component("ai_client._send_gemini")
+        return res
+    except Exception as e:
+        if monitor.enabled: monitor.end_component("ai_client._send_gemini")
+        raise _classify_gemini_error(e) from e

 def _send_gemini_cli(md_content: str, user_message: str, base_dir: str,
                      file_items: list[dict[str, Any]] | None = None,
@@ -1294,7 +1305,9 @@ def _repair_anthropic_history(history: list[dict[str, Any]]) -> None:
         ],
     })

-def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_items: list[dict[str, Any]] | None = None, discussion_history: str = "", pre_tool_callback: Optional[Callable[[str, str, Optional[Callable[[str], str]]], Optional[str]]] = None, qa_callback: Optional[Callable[[str], str]] = None, stream_callback: Optional[Callable[[str], None]] = None) -> str:
+def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_items: list[dict[str, Any]] | None = None, discussion_history: str = "", pre_tool_callback: Optional[Callable[[str, str, Optional[Callable[[str], str]]], Optional[str]]] = None, qa_callback: Optional[Callable[[str], str]] = None, stream_callback: Optional[Callable[[str], None]] = None, patch_callback: Optional[Callable[[str, str], Optional[str]]] = None) -> str:
+    monitor = performance_monitor.get_monitor()
+    if monitor.enabled: monitor.start_component("ai_client._send_anthropic")
     try:
         _ensure_anthropic_client()
         mcp_client.configure(file_items or [], [base_dir])
@@ -1411,7 +1424,7 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
             try:
                 loop = asyncio.get_running_loop()
                 results = asyncio.run_coroutine_threadsafe(
-                    _execute_tool_calls_concurrently(response.content, base_dir, pre_tool_callback, qa_callback, round_idx, "anthropic"),
+                    _execute_tool_calls_concurrently(response.content, base_dir, pre_tool_callback, qa_callback, round_idx, "anthropic", patch_callback),
                     loop
                 ).result()
             except RuntimeError:
@@ -1463,10 +1476,14 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
                 ],
             })
         final_text = "\n\n".join(all_text_parts)
-        return final_text if final_text.strip() else "(No text returned by the model)"
+        res = final_text if final_text.strip() else "(No text returned by the model)"
+        if monitor.enabled: monitor.end_component("ai_client._send_anthropic")
+        return res
     except ProviderError:
+        if monitor.enabled: monitor.end_component("ai_client._send_anthropic")
         raise
     except Exception as exc:
+        if monitor.enabled: monitor.end_component("ai_client._send_anthropic")
         raise _classify_anthropic_error(exc) from exc

 def _ensure_deepseek_client() -> None:
@@ -1491,12 +1508,16 @@ def _send_deepseek(md_content: str, user_message: str, base_dir: str,
                    stream: bool = False,
                    pre_tool_callback: Optional[Callable[[str, str, Optional[Callable[[str], str]]], Optional[str]]] = None,
                    qa_callback: Optional[Callable[[str], str]] = None,
-                   stream_callback: Optional[Callable[[str], None]] = None) -> str:
+                   stream_callback: Optional[Callable[[str], None]] = None,
+                   patch_callback: Optional[Callable[[str, str], Optional[str]]] = None) -> str:
+    monitor = performance_monitor.get_monitor()
+    if monitor.enabled: monitor.start_component("ai_client._send_deepseek")
     try:
         mcp_client.configure(file_items or [], [base_dir])
         creds = _load_credentials()
         api_key = creds.get("deepseek", {}).get("api_key")
         if not api_key:
+            if monitor.enabled: monitor.end_component("ai_client._send_deepseek")
             raise ValueError("DeepSeek API key not found in credentials.toml")
         api_url = "https://api.deepseek.com/chat/completions"
         headers = {
@@ -1578,6 +1599,7 @@ def _send_deepseek(md_content: str, user_message: str, base_dir: str,
         response = requests.post(api_url, headers=headers, json=request_payload, timeout=120, stream=stream)
         response.raise_for_status()
     except requests.exceptions.RequestException as e:
+        if monitor.enabled: monitor.end_component("ai_client._send_deepseek")
         raise _classify_deepseek_error(e) from e

     assistant_text = ""
@@ -1724,8 +1746,11 @@ def _send_deepseek(md_content: str, user_message: str, base_dir: str,
         for tr in tool_results_for_history:
             _deepseek_history.append(tr)

-        return "\n\n".join(all_text_parts) if all_text_parts else "(No text returned)"
+        res = "\n\n".join(all_text_parts) if all_text_parts else "(No text returned)"
+        if monitor.enabled: monitor.end_component("ai_client._send_deepseek")
+        return res
     except Exception as e:
+        if monitor.enabled: monitor.end_component("ai_client._send_deepseek")
         raise _classify_deepseek_error(e) from e

 def _send_minimax(md_content: str, user_message: str, base_dir: str,
@@ -2062,34 +2087,39 @@ def send(
     stream_callback: Optional[Callable[[str], None]] = None,
     patch_callback: Optional[Callable[[str, str], Optional[str]]] = None,
 ) -> str:
+    monitor = performance_monitor.get_monitor()
+    if monitor.enabled: monitor.start_component("ai_client.send")
     with _send_lock:
         if _provider == "gemini":
-            return _send_gemini(
+            res = _send_gemini(
                 md_content, user_message, base_dir, file_items, discussion_history,
                 pre_tool_callback, qa_callback, enable_tools, stream_callback, patch_callback
             )
         elif _provider == "gemini_cli":
-            return _send_gemini_cli(
+            res = _send_gemini_cli(
                 md_content, user_message, base_dir, file_items, discussion_history,
                 pre_tool_callback, qa_callback, stream_callback, patch_callback
             )
         elif _provider == "anthropic":
-            return _send_anthropic(
+            res = _send_anthropic(
                 md_content, user_message, base_dir, file_items, discussion_history,
                 pre_tool_callback, qa_callback, stream_callback=stream_callback, patch_callback=patch_callback
             )
         elif _provider == "deepseek":
-            return _send_deepseek(
+            res = _send_deepseek(
                 md_content, user_message, base_dir, file_items, discussion_history,
                 stream, pre_tool_callback, qa_callback, stream_callback, patch_callback
             )
         elif _provider == "minimax":
-            return _send_minimax(
+            res = _send_minimax(
                 md_content, user_message, base_dir, file_items, discussion_history,
                 stream, pre_tool_callback, qa_callback, stream_callback, patch_callback
             )
         else:
+            if monitor.enabled: monitor.end_component("ai_client.send")
             raise ValueError(f"Unknown provider: {_provider}")
+    if monitor.enabled: monitor.end_component("ai_client.send")
+    return res

 def _add_bleed_derived(d: dict[str, Any], sys_tok: int = 0, tool_tok: int = 0) -> dict[str, Any]:
     cur = d.get("current", 0)
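
Every call wrapped this way surfaces in the monitor's metrics under a time_<component>_ms key. A minimal sketch of reading the dispatch latency after a round-trip (assumes the singleton accessor introduced in src/performance_monitor.py below):

    from src import performance_monitor

    monitor = performance_monitor.get_monitor()
    monitor.enabled = True
    # ... after ai_client.send(...) completes:
    metrics = monitor.get_metrics()
    print(metrics.get('time_ai_client.send_ms'))  # last measured dispatch latency in ms, if recorded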
src/app_controller.py
@@ -167,7 +167,8 @@ class AppController:
             "Tier 3": {"input": 0, "output": 0, "provider": "gemini", "model": "gemini-2.5-flash-lite"},
             "Tier 4": {"input": 0, "output": 0, "provider": "gemini", "model": "gemini-2.5-flash-lite"},
         }
-        self.perf_monitor: performance_monitor.PerformanceMonitor = performance_monitor.PerformanceMonitor()
+        self.perf_monitor: performance_monitor.PerformanceMonitor = performance_monitor.get_monitor()
+        self._perf_profiling_enabled: bool = False
         self._pending_gui_tasks: List[Dict[str, Any]] = []
         self._api_event_queue: List[Dict[str, Any]] = []
         # Pending dialogs state moved from App
@@ -354,6 +355,15 @@ class AppController:
         except Exception as e:
             self._inject_preview = f"Error reading file: {e}"

+    @property
+    def perf_profiling_enabled(self) -> bool:
+        return self._perf_profiling_enabled
+
+    @perf_profiling_enabled.setter
+    def perf_profiling_enabled(self, value: bool) -> None:
+        self._perf_profiling_enabled = value
+        self.perf_monitor.enabled = value
+
     @property
     def thinking_indicator(self) -> bool:
         return self.ai_status in ("sending...", "streaming...")
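
The setter is the single switch for the whole feature: flipping it flags the controller and enables the shared monitor at the same time, so the conditional hooks across the codebase activate together. A sketch, assuming a constructed AppController:

    controller.perf_profiling_enabled = True   # also sets controller.perf_monitor.enabled
    assert controller.perf_monitor.enabled     # ai_client hooks check enabled on this same shared singleton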
@@ -911,6 +921,7 @@ class AppController:

     def _run_event_loop(self):
         """Internal loop runner."""
+        if getattr(self, 'perf_profiling_enabled', False): self.perf_monitor.start_component("_run_event_loop")

         def queue_fallback() -> None:
             while True:
@@ -924,6 +935,7 @@ class AppController:
         fallback_thread = threading.Thread(target=queue_fallback, daemon=True)
         fallback_thread.start()
         self._process_event_queue()
+        if getattr(self, 'perf_profiling_enabled', False): self.perf_monitor.end_component("_run_event_loop")

     def _process_event_queue(self) -> None:
         """Listens for and processes events from the SyncEventQueue."""
@@ -983,6 +995,7 @@ class AppController:

     def _handle_request_event(self, event: events.UserRequestEvent) -> None:
         """Processes a UserRequestEvent by calling the AI client."""
+        if getattr(self, 'perf_profiling_enabled', False): self.perf_monitor.start_component("_handle_request_event")
         ai_client.set_current_tier(None) # Ensure main discussion is untagged
         if self.ui_auto_add_history:
             with self._pending_history_adds_lock:
@@ -1025,6 +1038,7 @@ class AppController:
             sys.stderr.write(f"[DEBUG] _handle_request_event ERROR: {e}\n{traceback.format_exc()}\n")
             sys.stderr.flush()
             self.event_queue.put("response", {"text": f"ERROR: {e}", "status": "error", "role": "System"})
+        if getattr(self, 'perf_profiling_enabled', False): self.perf_monitor.end_component("_handle_request_event")

     def _on_ai_stream(self, text: str) -> None:
         """Handles streaming text from the AI."""
src/gui_2.py (45 lines changed)
@@ -149,6 +149,7 @@ class App:
     def current_provider(self, value: str) -> None:
         self.controller.current_provider = value

+    @property
     @property
     def current_model(self) -> str:
         return self.controller.current_model
@@ -156,9 +157,14 @@ class App:
     @current_model.setter
     def current_model(self, value: str) -> None:
         self.controller.current_model = value
-    # ---------------------------------------------------------------- project loading
+    # ---------------------------------------------------------------- logic
+
+    @property
+    def perf_profiling_enabled(self) -> bool:
+        return self.controller.perf_profiling_enabled
+
+    @perf_profiling_enabled.setter
+    def perf_profiling_enabled(self, value: bool) -> None:
+        self.controller.perf_profiling_enabled = value
     def shutdown(self) -> None:
         """Cleanly shuts down the app's background tasks and saves state."""
         try:
@@ -863,6 +869,7 @@ class App:
             traceback.print_exc()

     def _render_projects_panel(self) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_projects_panel")
         proj_name = self.project.get("project", {}).get("name", Path(self.active_project_path).stem)
         imgui.text_colored(C_IN, f"Active: {proj_name}")
         imgui.separator()
@@ -949,6 +956,7 @@ class App:
             ch, val = imgui.checkbox(f"Enable {t_name}", val)
             if ch:
                 self.ui_agent_tools[t_name] = val
+        if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_projects_panel")

     def _render_track_proposal_modal(self) -> None:
         if self._show_track_proposal_modal:
@@ -1143,6 +1151,7 @@ class App:
             imgui.end()

     def _render_files_panel(self) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_files_panel")
         imgui.text("Paths")
         imgui.same_line()
         imgui.text("| Base Dir:")
@@ -1203,8 +1212,10 @@ class App:
             d = filedialog.askdirectory()
             r.destroy()
             if d: self.files.append(models.FileItem(path=str(Path(d) / "**" / "*")))
+        if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_files_panel")

     def _render_screenshots_panel(self) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_screenshots_panel")
         imgui.text("Paths")
         imgui.same_line()
         imgui.text("| Base Dir:")
@@ -1235,6 +1246,7 @@ class App:
             r.destroy()
             for p in paths:
                 if p not in self.screenshots: self.screenshots.append(p)
+        if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_screenshots_panel")

     def _render_discussion_panel(self) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_discussion_panel")
@@ -1447,6 +1459,7 @@ class App:
             imgui.end_child()

     def _render_provider_panel(self) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_provider_panel")
         imgui.text("Provider")
         if imgui.begin_combo("##prov", self.current_provider):
             for p in PROVIDERS:
@@ -1489,8 +1502,10 @@ class App:
         if ch:
             if hasattr(ai_client, "_gemini_cli_adapter") and ai_client._gemini_cli_adapter:
                 ai_client._gemini_cli_adapter.binary_path = self.ui_gemini_cli_path
+        if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_provider_panel")

     def _render_token_budget_panel(self) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_token_budget_panel")
         imgui.text("Session Telemetry")
         usage = self.session_usage
         total = usage["input_tokens"] + usage["output_tokens"]
@@ -1511,6 +1526,7 @@ class App:
         stats = self._token_stats
         if not stats:
             imgui.text_disabled("Token stats unavailable")
+            if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_token_budget_panel")
             return
         pct = stats.get("utilization_pct", 0.0)
         current = stats.get("estimated_prompt_tokens", stats.get("total_tokens", 0))
@@ -1590,15 +1606,20 @@ class App:
             imgui.text_colored(C_LBL, f"Gemini Cache: ACTIVE | Age: {age:.0f}s / {ttl}s | Renews at: {ttl * 0.9:.0f}s")
         else:
             imgui.text_disabled("Gemini Cache: INACTIVE")
+        if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_token_budget_panel")

     def _render_cache_panel(self) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_cache_panel")
         if self.current_provider != "gemini":
+            if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_cache_panel")
             return
         if not imgui.collapsing_header("Cache Analytics"):
+            if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_cache_panel")
             return
         stats = getattr(self.controller, '_cached_cache_stats', {})
         if not stats.get("cache_exists"):
             imgui.text_disabled("No active cache")
+            if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_cache_panel")
             return
         age_sec = stats.get("cache_age_seconds", 0)
         ttl_remaining = stats.get("ttl_remaining", 0)
@@ -1621,9 +1642,12 @@ class App:
             self._cache_cleared_timestamp = time.time()
         if hasattr(self, '_cache_cleared_timestamp') and time.time() - self._cache_cleared_timestamp < 5:
             imgui.text_colored(imgui.ImVec4(0.2, 1.0, 0.2, 1.0), "Cache cleared - will rebuild on next request")
+        if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_cache_panel")

     def _render_tool_analytics_panel(self) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_tool_analytics_panel")
         if not imgui.collapsing_header("Tool Usage Analytics"):
+            if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_tool_analytics_panel")
             return
         now = time.time()
         if not hasattr(self, '_tool_stats_cache_time') or now - self._tool_stats_cache_time > 1.0:
@@ -1631,6 +1655,7 @@ class App:
         tool_stats = getattr(self.controller, '_cached_tool_stats', {})
         if not tool_stats:
             imgui.text_disabled("No tool usage data")
+            if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_tool_analytics_panel")
             return
         if imgui.begin_table("tool_stats", 4, imgui.TableFlags_.borders | imgui.TableFlags_.sortable):
             imgui.table_setup_column("Tool")
@@ -1658,9 +1683,12 @@ class App:
         else:
             imgui.text("0%")
         imgui.end_table()
+        if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_tool_analytics_panel")

     def _render_session_insights_panel(self) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_session_insights_panel")
         if not imgui.collapsing_header("Session Insights"):
+            if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_session_insights_panel")
             return
         insights = self.controller.get_session_insights()
         imgui.text(f"Total Tokens: {insights.get('total_tokens', 0):,}")
@@ -1671,8 +1699,10 @@ class App:
         efficiency = insights.get('efficiency', 0)
         imgui.text(f"Completed: {completed}")
         imgui.text(f"Tokens/Ticket: {efficiency:.0f}" if efficiency > 0 else "Tokens/Ticket: N/A")
+        if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_session_insights_panel")

     def _render_message_panel(self) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_message_panel")
         # LIVE indicator
         is_live = self.ai_status in ["running powershell...", "fetching url...", "searching web...", "powershell done, awaiting AI..."]
         if is_live:
@@ -1707,8 +1737,10 @@ class App:
         if imgui.button("-> History"):
             if self.ui_ai_input:
                 self.disc_entries.append({"role": "User", "content": self.ui_ai_input, "collapsed": False, "ts": project_manager.now_ts()})
+        if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_message_panel")

     def _render_response_panel(self) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_response_panel")
         if self._trigger_blink:
             self._trigger_blink = False
             self._is_blinking = True
@@ -1740,8 +1772,10 @@ class App:
             self.disc_entries.append({"role": "AI", "content": self.ai_response, "collapsed": True, "ts": project_manager.now_ts()})
         if is_blinking:
             imgui.pop_style_color(2)
+        if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_response_panel")

     def _render_comms_history_panel(self) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_comms_history_panel")
         imgui.text_colored(vec4(200, 220, 160), f"Status: {self.ai_status}")
         imgui.same_line()
         if imgui.button("Clear##comms"):
@@ -1844,8 +1878,10 @@ class App:
             imgui.end_child()
         if self.is_viewing_prior_session:
             imgui.pop_style_color()
+        if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_comms_history_panel")

     def _render_tool_calls_panel(self) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_tool_calls_panel")
         imgui.text("Tool call history")
         imgui.same_line()
         if imgui.button("Clear##tc"):
@@ -1900,6 +1936,7 @@ class App:
         if self._scroll_tool_calls_to_bottom:
             imgui.set_scroll_here_y(1.0)
             self._scroll_tool_calls_to_bottom = False
+        if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_tool_calls_panel")

     def _render_mma_dashboard(self) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_mma_dashboard")
@@ -2302,6 +2339,7 @@ class App:
         self._push_mma_state_update()

     def _render_tier_stream_panel(self, tier_key: str, stream_key: str | None) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_tier_stream_panel")
         if stream_key is not None:
             content = self.mma_streams.get(stream_key, "")
             imgui.begin_child(f"##stream_content_{tier_key}", imgui.ImVec2(-1, -1))
@@ -2339,6 +2377,7 @@ class App:
         except (TypeError, AttributeError):
             pass
         imgui.end_child()
+        if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_tier_stream_panel")

     def _render_system_prompts_panel(self) -> None:
         imgui.text("Global System Prompt (all projects)")
@@ -2348,6 +2387,7 @@ class App:
         ch, self.ui_project_system_prompt = imgui.input_text_multiline("##psp", self.ui_project_system_prompt, imgui.ImVec2(-1, 100))

     def _render_theme_panel(self) -> None:
+        if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_theme_panel")
         exp, opened = imgui.begin("Theme", self.show_windows["Theme"])
         self.show_windows["Theme"] = bool(opened)
         if exp:
@@ -2394,6 +2434,7 @@ class App:
         ch, scale = imgui.slider_float("##scale", theme.get_current_scale(), 0.5, 3.0, "%.2f")
         if ch: theme.set_scale(scale)
         imgui.end()
+        if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_theme_panel")

     def _load_fonts(self) -> None:
         font_path, font_size = theme.get_font_loading_params()
src/performance_monitor.py
@@ -4,121 +4,85 @@ import psutil
 import threading
 from typing import Any, Optional, Callable

+_instance: Optional[PerformanceMonitor] = None
+
+def get_monitor() -> PerformanceMonitor:
+    global _instance
+    if _instance is None:
+        _instance = PerformanceMonitor()
+    return _instance
+
 class PerformanceMonitor:
     """
     Tracks application performance metrics like FPS, frame time, and CPU usage.
     Also supports tracking timing for individual components.
     """
     def __init__(self) -> None:
         self.enabled: bool = False
         self._start_time: Optional[float] = None
         self._last_frame_time: float = 0.0
-        self._fps: float = 0.0
         self._last_calculated_fps: float = 0.0
         self._frame_count: int = 0
-        self._total_frame_count: int = 0
-        self._fps_last_time: float = time.time()
-        self._process: psutil.Process = psutil.Process()
-        self._cpu_usage: float = 0.0
-        self._cpu_lock: threading.Lock = threading.Lock()
         # Input lag tracking
         self._last_input_time: Optional[float] = None
+        self._fps_timer: float = 0.0
+        self._cpu_percent: float = 0.0
-        self._last_cpu_time: float = 0.0
         self._input_lag_ms: float = 0.0
         # Alerts
         self.alert_callback: Optional[Callable[[str], None]] = None
         self.thresholds: dict[str, float] = {
             'frame_time_ms': 33.3,  # < 30 FPS
             'cpu_percent': 80.0,
             'input_lag_ms': 100.0
         }
         self._last_alert_time: float = 0.0
         self._alert_cooldown: int = 30  # seconds
         # Detailed profiling
+        self._component_starts: dict[str, float] = {}
         self._component_timings: dict[str, float] = {}
-        self._comp_start: dict[str, float] = {}
-        # Start CPU usage monitoring thread
-        self._stop_event: threading.Event = threading.Event()
-        self._cpu_thread: threading.Thread = threading.Thread(target=self._monitor_cpu, daemon=True)
+        # Thread for CPU monitoring to avoid blocking the main thread
+        self._stop_event = threading.Event()
+        self._cpu_thread = threading.Thread(target=self._monitor_cpu, daemon=True)
         self._cpu_thread.start()

     def _monitor_cpu(self) -> None:
         while not self._stop_event.is_set():
-            # psutil.cpu_percent with interval=1.0 is blocking for 1 second.
-            # To be responsive to stop_event, we use a smaller interval or no interval
-            # and handle the timing ourselves.
             try:
-                usage = self._process.cpu_percent()
-                with self._cpu_lock:
-                    self._cpu_usage = usage
+                self._cpu_percent = psutil.cpu_percent(interval=None)
             except Exception:
                 pass
-            # Sleep in small increments to stay responsive to stop_event
-            for _ in range(10):
-                if self._stop_event.is_set():
-                    break
-                time.sleep(0.1)
+            time.sleep(1.0)

     def start_frame(self) -> None:
         self._start_time = time.time()

     def record_input_event(self) -> None:
         self._last_input_time = time.time()

-    def start_component(self, name: str) -> None:
-        self._comp_start[name] = time.time()
-
-    def end_component(self, name: str) -> None:
-        if name in self._comp_start:
-            elapsed = (time.time() - self._comp_start[name]) * 1000.0
-            self._component_timings[name] = elapsed
-
     def end_frame(self) -> None:
         if self._start_time is None:
             return
         end_time = time.time()
-        self._last_frame_time = (end_time - self._start_time) * 1000.0
+        elapsed = end_time - self._start_time
+        self._last_frame_time = elapsed * 1000  # convert to ms
         self._frame_count += 1
-        self._total_frame_count += 1
         # Calculate input lag if an input occurred during this frame
         if self._last_input_time is not None:
             self._input_lag_ms = (end_time - self._last_input_time) * 1000.0
             self._last_input_time = None
         self._check_alerts()
-        elapsed_since_fps = end_time - self._fps_last_time
-        if elapsed_since_fps >= 1.0:
-            self._fps = self._frame_count / elapsed_since_fps
-            self._last_calculated_fps = self._fps
-            self._frame_count = 0
-            self._fps_last_time = end_time
+        self._fps_timer += elapsed
+        if self._fps_timer >= 1.0:
+            self._last_calculated_fps = self._frame_count / self._fps_timer
+            self._frame_count = 0
+            self._fps_timer = 0.0

     def _check_alerts(self) -> None:
         if not self.alert_callback:
             return
         now = time.time()
         if now - self._last_alert_time < self._alert_cooldown:
             return
         metrics = self.get_metrics()
         alerts = []
         if metrics['last_frame_time_ms'] > self.thresholds['frame_time_ms']:
             alerts.append(f"Frame time high: {metrics['last_frame_time_ms']:.1f}ms")
         if metrics['cpu_percent'] > self.thresholds['cpu_percent']:
             alerts.append(f"CPU usage high: {metrics['cpu_percent']:.1f}%")
         if metrics['input_lag_ms'] > self.thresholds['input_lag_ms']:
             alerts.append(f"Input lag high: {metrics['input_lag_ms']:.1f}ms")
         if alerts:
             self._last_alert_time = now
             self.alert_callback("; ".join(alerts))

+    def start_component(self, name: str) -> None:
+        self._component_starts[name] = time.time()
+
+    def end_component(self, name: str) -> None:
+        if name in self._component_starts:
+            elapsed = (time.time() - self._component_starts.pop(name)) * 1000
+            self._component_timings[name] = elapsed
+
-    def get_metrics(self) -> dict[str, Any]:
-        with self._cpu_lock:
-            cpu_usage = self._cpu_usage
-        metrics: dict[str, Any] = {
-            'last_frame_time_ms': self._last_frame_time,
-            'fps': self._last_calculated_fps,
-            'cpu_percent': cpu_usage,
-            'total_frames': self._total_frame_count,
-            'input_lag_ms': self._input_lag_ms
-        }
+    def get_metrics(self) -> dict[str, float]:
+        metrics = {
+            'fps': self._last_calculated_fps,
+            'last_frame_time_ms': self._last_frame_time,
+            'cpu_percent': self._cpu_percent,
+            'input_lag_ms': self._input_lag_ms
+        }
         # Add detailed timings
-        for name, elapsed in self._component_timings.items():
+        for name, elapsed in list(self._component_timings.items()):
             metrics[f'time_{name}_ms'] = elapsed
         return metrics

     def stop(self) -> None:
         self._stop_event.set()
         self._cpu_thread.join(timeout=2.0)
-        if self._cpu_thread.is_alive():
-            self._cpu_thread.join(timeout=2.0)
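
A minimal end-to-end usage sketch of the rewritten monitor; the loop body, component name, and callback are illustrative, not part of the commit:

    import time
    from src import performance_monitor

    monitor = performance_monitor.get_monitor()
    monitor.enabled = True
    monitor.alert_callback = lambda msg: print(f"[PERF ALERT] {msg}")

    for _ in range(3):                      # stand-in for the real render loop
        monitor.start_frame()
        monitor.start_component("_render_example_panel")
        time.sleep(0.01)                    # simulated panel work
        monitor.end_component("_render_example_panel")
        monitor.end_frame()                 # updates frame time, FPS window, and alerts

    print(monitor.get_metrics())            # fps, last_frame_time_ms, cpu_percent, input_lag_ms, time_*_ms
    monitor.stop()                          # stops the background CPU sampler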