diff --git a/src/ai_client.py b/src/ai_client.py
index 557d3b1..b64d0a5 100644
--- a/src/ai_client.py
+++ b/src/ai_client.py
@@ -2089,7 +2089,7 @@ def send(
 ) -> str:
     monitor = performance_monitor.get_monitor()
     if monitor.enabled: monitor.start_component("ai_client.send")
-    _append_comms("OUT", "request", {"prompt": user_message, "system": _get_combined_system_prompt()})
+    _append_comms("OUT", "request", {"message": user_message, "system": _get_combined_system_prompt()})
     with _send_lock:
         if _provider == "gemini":
             res = _send_gemini(
diff --git a/src/app_controller.py b/src/app_controller.py
index 74ea325..141d3cf 100644
--- a/src/app_controller.py
+++ b/src/app_controller.py
@@ -159,7 +159,6 @@ class AppController:
             "output_tokens": 0,
             "cache_read_input_tokens": 0,
             "cache_creation_input_tokens": 0,
-            "total_tokens": 0,
             "last_latency": 0.0
         }
         self.mma_tier_usage: Dict[str, Dict[str, Any]] = {
@@ -327,20 +326,8 @@ class AppController:
             '_inject_preview': '_inject_preview',
             '_show_inject_modal': '_show_inject_modal'
         })
-        self.perf_monitor = performance_monitor.get_monitor()
-        self._perf_profiling_enabled = False
         self._init_actions()
 
-    @property
-    def perf_profiling_enabled(self) -> bool:
-        return self._perf_profiling_enabled
-
-    @perf_profiling_enabled.setter
-    def perf_profiling_enabled(self, value: bool) -> None:
-        self._perf_profiling_enabled = value
-        if hasattr(self, 'perf_monitor'):
-            self.perf_monitor.enabled = value
-
     def _update_inject_preview(self) -> None:
         """Updates the preview content based on the selected file and injection mode."""
         if not self._inject_file_path:
@@ -428,9 +415,7 @@ class AppController:
         })
 
     def _process_pending_gui_tasks(self) -> None:
-        if getattr(self, 'perf_profiling_enabled', False): self.perf_monitor.start_component("controller._process_pending_gui_tasks")
         if not self._pending_gui_tasks:
-            if getattr(self, 'perf_profiling_enabled', False): self.perf_monitor.end_component("controller._process_pending_gui_tasks")
             return
         sys.stderr.write(f"[DEBUG] _process_pending_gui_tasks: processing {len(self._pending_gui_tasks)} tasks\n")
         sys.stderr.flush()
@@ -440,6 +425,11 @@ class AppController:
         for task in tasks:
             try:
                 action = task.get("action")
+                sys.stderr.write(f"[DEBUG] Processing GUI task: action={action}\n")
+                sys.stderr.flush()
+                if action:
+                    session_logger.log_api_hook("PROCESS_TASK", action, str(task))
+                # ...
                 if action == "refresh_api_metrics":
                     self._refresh_api_metrics(task.get("payload", {}), md_content=self.last_md or None)
                 elif action == "set_ai_status":
@@ -942,11 +932,9 @@ class AppController:
 
         while True:
             event_name, payload = self.event_queue.get()
-            if getattr(self, 'perf_profiling_enabled', False): self.perf_monitor.start_component("controller._process_event")
             sys.stderr.write(f"[DEBUG] _process_event_queue got event: {event_name} with payload: {str(payload)[:100]}\n")
             sys.stderr.flush()
             if event_name == "shutdown":
-                if getattr(self, 'perf_profiling_enabled', False): self.perf_monitor.end_component("controller._process_event")
                 break
             if event_name == "user_request":
                 threading.Thread(target=self._handle_request_event, args=(payload,), daemon=True).start()
@@ -992,7 +980,6 @@ class AppController:
                 "action": "ticket_completed",
                 "payload": payload
             })
-        if getattr(self, 'perf_profiling_enabled', False): self.perf_monitor.end_component("controller._process_event")
 
     def _handle_request_event(self, event: events.UserRequestEvent) -> None:
         """Processes a UserRequestEvent by calling the AI client."""
@@ -1101,8 +1088,8 @@ class AppController:
 
     def _on_api_event(self, event_name: str = "generic_event", **kwargs: Any) -> None:
         payload = kwargs.get("payload", {})
-        # Push to background event queue for processing, NOT GUI queue
-        self.event_queue.put("refresh_api_metrics", payload)
+        with self._pending_gui_tasks_lock:
+            self._pending_gui_tasks.append({"action": "refresh_api_metrics", "payload": payload})
         if self.test_hooks_enabled:
             with self._api_event_queue_lock:
                 self._api_event_queue.append({"type": event_name, "payload": payload})
@@ -1849,6 +1836,7 @@ class AppController:
         self._update_cached_stats()
 
     def _update_cached_stats(self) -> None:
+        import ai_client
         self._cached_cache_stats = ai_client.get_gemini_cache_stats()
         self._cached_tool_stats = dict(self._tool_stats)
 
diff --git a/src/gui_2.py b/src/gui_2.py
index 153b0ec..8ff7b95 100644
--- a/src/gui_2.py
+++ b/src/gui_2.py
@@ -202,9 +202,9 @@ class App:
             return
 
         if len(content) > COMMS_CLAMP_CHARS:
-            # Use a fixed-height multi-line input box for large text to avoid expensive frame-by-frame wrapping
+            # Use a fixed-height child window with unformatted text for large text to avoid expensive frame-by-frame wrapping or input_text_multiline overhead
            imgui.begin_child(f"heavy_text_child_{label}_{hash(content)}", imgui.ImVec2(0, 80), True)
-            imgui.input_text_multiline(f"##{label}_input", content, imgui.ImVec2(-1, -1), imgui.InputTextFlags_.read_only)
+            imgui.text_unformatted(content)
             imgui.end_child()
         else:
             if self.ui_word_wrap:
@@ -1817,6 +1817,8 @@ class App:
             # Optimized content rendering using _render_heavy_text logic
             if kind == "request":
                 self._render_heavy_text("message", payload.get("message", ""))
+                if payload.get("system"):
+                    self._render_heavy_text("system", payload.get("system", ""))
             elif kind == "response":
                 r = payload.get("round", 0)
                 sr = payload.get("stop_reason", "STOP")
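
Note on the `_on_api_event` change: API callbacks can fire on worker threads, so the handler no longer pushes to the background event queue and instead appends to `self._pending_gui_tasks` under `self._pending_gui_tasks_lock`, leaving `_process_pending_gui_tasks` to drain the list on the GUI side. A minimal sketch of that handoff pattern, with illustrative names rather than the project's actual classes:

```python
import threading

class GuiTaskQueue:
    """Sketch of the lock-guarded handoff used by _on_api_event /
    _process_pending_gui_tasks. post() may be called from any thread;
    drain() is expected to run only on the GUI thread."""

    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._pending: list[dict] = []

    def post(self, action: str, payload: dict) -> None:
        # Producer side (worker/API threads): touch the list only under the lock.
        with self._lock:
            self._pending.append({"action": action, "payload": payload})

    def drain(self) -> list[dict]:
        # Consumer side (once per GUI tick): swap the list out under the lock
        # so the tasks themselves are processed without holding it.
        with self._lock:
            tasks, self._pending = self._pending, []
        return tasks

# GUI-loop usage sketch:
# for task in queue.drain():
#     if task["action"] == "refresh_api_metrics":
#         refresh_api_metrics(task["payload"])
```

Swapping the list out inside the lock keeps the critical section tiny, which matters when the drain runs every frame.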
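The gui_2.py change in sketch form: above the clamp threshold, rendering switches from a read-only `input_text_multiline` to `text_unformatted` inside a fixed-height child, skipping both per-frame word wrapping and the input widget's buffer handling. Assuming imgui_bundle-style bindings (as the surrounding code suggests) and an illustrative threshold value, the pattern looks roughly like:

```python
from imgui_bundle import imgui

COMMS_CLAMP_CHARS = 4000  # illustrative; the real constant lives in gui_2.py

def render_heavy_text(label: str, content: str, word_wrap: bool = True) -> None:
    """Sketch of the _render_heavy_text strategy; must be called inside an
    active ImGui frame. word_wrap stands in for the app's self.ui_word_wrap."""
    if len(content) > COMMS_CLAMP_CHARS:
        # Fixed-height scrollable child + unformatted text: no wrapping pass,
        # no input-buffer copy, so large payloads stay cheap every frame.
        imgui.begin_child(f"heavy_text_child_{label}_{hash(content)}", imgui.ImVec2(0, 80), True)
        imgui.text_unformatted(content)
        imgui.end_child()
    else:
        if word_wrap:
            imgui.text_wrapped(content)
        else:
            imgui.text_unformatted(content)
```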