feat(types): Complete strict static analysis and typing track

This commit is contained in:
2026-03-04 09:46:02 -05:00
parent c6c2a1b40c
commit fe2114a2e0
46 changed files with 606 additions and 795 deletions

gui_2.py (348 lines changed)
View File

@@ -9,7 +9,7 @@ import json
import sys
import os
import uuid
import requests
import requests # type: ignore[import-untyped]
from pathlib import Path
from tkinter import filedialog, Tk
from typing import Optional, Callable, Any
@@ -61,21 +61,21 @@ def hide_tk_root() -> Tk:
def vec4(r: float, g: float, b: float, a: float = 1.0) -> imgui.ImVec4: return imgui.ImVec4(r/255, g/255, b/255, a)
C_OUT: tuple[float, ...] = vec4(100, 200, 255)
C_IN: tuple[float, ...] = vec4(140, 255, 160)
C_REQ: tuple[float, ...] = vec4(255, 220, 100)
C_RES: tuple[float, ...] = vec4(180, 255, 180)
C_TC: tuple[float, ...] = vec4(255, 180, 80)
C_TR: tuple[float, ...] = vec4(180, 220, 255)
C_TRS: tuple[float, ...] = vec4(200, 180, 255)
C_LBL: tuple[float, ...] = vec4(180, 180, 180)
C_VAL: tuple[float, ...] = vec4(220, 220, 220)
C_KEY: tuple[float, ...] = vec4(140, 200, 255)
C_NUM: tuple[float, ...] = vec4(180, 255, 180)
C_SUB: tuple[float, ...] = vec4(220, 200, 120)
C_OUT: imgui.ImVec4 = vec4(100, 200, 255)
C_IN: imgui.ImVec4 = vec4(140, 255, 160)
C_REQ: imgui.ImVec4 = vec4(255, 220, 100)
C_RES: imgui.ImVec4 = vec4(180, 255, 180)
C_TC: imgui.ImVec4 = vec4(255, 180, 80)
C_TR: imgui.ImVec4 = vec4(180, 220, 255)
C_TRS: imgui.ImVec4 = vec4(200, 180, 255)
C_LBL: imgui.ImVec4 = vec4(180, 180, 180)
C_VAL: imgui.ImVec4 = vec4(220, 220, 220)
C_KEY: imgui.ImVec4 = vec4(140, 200, 255)
C_NUM: imgui.ImVec4 = vec4(180, 255, 180)
C_SUB: imgui.ImVec4 = vec4(220, 200, 120)
DIR_COLORS: dict[str, tuple[float, ...]] = {"OUT": C_OUT, "IN": C_IN}
KIND_COLORS: dict[str, tuple[float, ...]] = {"request": C_REQ, "response": C_RES, "tool_call": C_TC, "tool_result": C_TR, "tool_result_send": C_TRS}
DIR_COLORS: dict[str, imgui.ImVec4] = {"OUT": C_OUT, "IN": C_IN}
KIND_COLORS: dict[str, imgui.ImVec4] = {"request": C_REQ, "response": C_RES, "tool_call": C_TC, "tool_result": C_TR, "tool_result_send": C_TRS}
HEAVY_KEYS: set[str] = {"message", "text", "script", "output", "content"}
DISC_ROLES: list[str] = ["User", "AI", "Vendor API", "System"]
@@ -173,19 +173,19 @@ class App:
def __init__(self) -> None:
# Initialize locks first to avoid initialization order issues
self._send_thread_lock = threading.Lock()
self._disc_entries_lock = threading.Lock()
self._pending_comms_lock = threading.Lock()
self._pending_tool_calls_lock = threading.Lock()
self._pending_history_adds_lock = threading.Lock()
self._pending_gui_tasks_lock = threading.Lock()
self._pending_dialog_lock = threading.Lock()
self._api_event_queue_lock = threading.Lock()
self._send_thread_lock: threading.Lock = threading.Lock()
self._disc_entries_lock: threading.Lock = threading.Lock()
self._pending_comms_lock: threading.Lock = threading.Lock()
self._pending_tool_calls_lock: threading.Lock = threading.Lock()
self._pending_history_adds_lock: threading.Lock = threading.Lock()
self._pending_gui_tasks_lock: threading.Lock = threading.Lock()
self._pending_dialog_lock: threading.Lock = threading.Lock()
self._api_event_queue_lock: threading.Lock = threading.Lock()
self.config = load_config()
self.event_queue = events.AsyncEventQueue()
self._loop = asyncio.new_event_loop()
self._loop_thread = threading.Thread(target=self._run_event_loop, daemon=True)
self.config: dict[str, Any] = load_config()
self.event_queue: events.AsyncEventQueue = events.AsyncEventQueue()
self._loop: asyncio.AbstractEventLoop = asyncio.new_event_loop()
self._loop_thread: threading.Thread = threading.Thread(target=self._run_event_loop, daemon=True)
self._loop_thread.start()
ai_cfg = self.config.get("ai", {})
self._current_provider: str = ai_cfg.get("provider", "gemini")
@@ -208,33 +208,33 @@ class App:
disc_data = disc_sec.get("discussions", {}).get(self.active_discussion, {})
with self._disc_entries_lock:
self.disc_entries: list[dict[str, Any]] = _parse_history_entries(disc_data.get("history", []), self.disc_roles)
self.ui_output_dir = self.project.get("output", {}).get("output_dir", "./md_gen")
self.ui_files_base_dir = self.project.get("files", {}).get("base_dir", ".")
self.ui_shots_base_dir = self.project.get("screenshots", {}).get("base_dir", ".")
self.ui_output_dir: str = self.project.get("output", {}).get("output_dir", "./md_gen")
self.ui_files_base_dir: str = self.project.get("files", {}).get("base_dir", ".")
self.ui_shots_base_dir: str = self.project.get("screenshots", {}).get("base_dir", ".")
proj_meta = self.project.get("project", {})
self.ui_project_git_dir = proj_meta.get("git_dir", "")
self.ui_project_main_context = proj_meta.get("main_context", "")
self.ui_project_system_prompt = proj_meta.get("system_prompt", "")
self.ui_gemini_cli_path = self.project.get("gemini_cli", {}).get("binary_path", "gemini")
self.ui_word_wrap = proj_meta.get("word_wrap", True)
self.ui_summary_only = proj_meta.get("summary_only", False)
self.ui_auto_add_history = disc_sec.get("auto_add", False)
self.ui_global_system_prompt = self.config.get("ai", {}).get("system_prompt", "")
self.ui_ai_input = ""
self.ui_disc_new_name_input = ""
self.ui_disc_new_role_input = ""
self.ui_epic_input = ""
self.ui_project_git_dir: str = proj_meta.get("git_dir", "")
self.ui_project_main_context: str = proj_meta.get("main_context", "")
self.ui_project_system_prompt: str = proj_meta.get("system_prompt", "")
self.ui_gemini_cli_path: str = self.project.get("gemini_cli", {}).get("binary_path", "gemini")
self.ui_word_wrap: bool = proj_meta.get("word_wrap", True)
self.ui_summary_only: bool = proj_meta.get("summary_only", False)
self.ui_auto_add_history: bool = disc_sec.get("auto_add", False)
self.ui_global_system_prompt: str = self.config.get("ai", {}).get("system_prompt", "")
self.ui_ai_input: str = ""
self.ui_disc_new_name_input: str = ""
self.ui_disc_new_role_input: str = ""
self.ui_epic_input: str = ""
self.proposed_tracks: list[dict[str, Any]] = []
self._show_track_proposal_modal = False
self.ui_new_track_name = ""
self.ui_new_track_desc = ""
self.ui_new_track_type = "feature"
self.ui_conductor_setup_summary = ""
self.ui_last_script_text = ""
self.ui_last_script_output = ""
self.ai_status = "idle"
self.ai_response = ""
self.last_md = ""
self._show_track_proposal_modal: bool = False
self.ui_new_track_name: str = ""
self.ui_new_track_desc: str = ""
self.ui_new_track_type: str = "feature"
self.ui_conductor_setup_summary: str = ""
self.ui_last_script_text: str = ""
self.ui_last_script_output: str = ""
self.ai_status: str = "idle"
self.ai_response: str = ""
self.last_md: str = ""
self.last_md_path: Path | None = None
self.last_file_items: list[Any] = []
self.send_thread: threading.Thread | None = None
@@ -255,82 +255,82 @@ class App:
"Diagnostics": False,
}
saved = self.config.get("gui", {}).get("show_windows", {})
self.show_windows = {k: saved.get(k, v) for k, v in _default_windows.items()}
self.show_script_output = False
self.show_text_viewer = False
self.text_viewer_title = ""
self.text_viewer_content = ""
self.show_windows: dict[str, bool] = {k: saved.get(k, v) for k, v in _default_windows.items()}
self.show_script_output: bool = False
self.show_text_viewer: bool = False
self.text_viewer_title: str = ""
self.text_viewer_content: str = ""
self._pending_dialog: ConfirmDialog | None = None
self._pending_dialog_open = False
self._pending_dialog_open: bool = False
self._pending_actions: dict[str, ConfirmDialog] = {}
self._pending_ask_dialog = False
self._ask_dialog_open = False
self._ask_request_id = None
self._ask_tool_data = None
self.mma_step_mode = False
self._pending_ask_dialog: bool = False
self._ask_dialog_open: bool = False
self._ask_request_id: str | None = None
self._ask_tool_data: dict[str, Any] | None = None
self.mma_step_mode: bool = False
self.active_track: Track | None = None
self.active_tickets: list[dict[str, Any]] = []
self.active_tier: str | None = None
self.ui_focus_agent: str | None = None
self.mma_status = "idle"
self.mma_status: str = "idle"
self._pending_mma_approval: dict[str, Any] | None = None
self._mma_approval_open = False
self._mma_approval_edit_mode = False
self._mma_approval_payload = ""
self._mma_approval_open: bool = False
self._mma_approval_edit_mode: bool = False
self._mma_approval_payload: str = ""
self._pending_mma_spawn: dict[str, Any] | None = None
self._mma_spawn_open = False
self._mma_spawn_edit_mode = False
self._mma_spawn_prompt = ''
self._mma_spawn_context = ''
self.mma_tier_usage = {
self._mma_spawn_open: bool = False
self._mma_spawn_edit_mode: bool = False
self._mma_spawn_prompt: str = ''
self._mma_spawn_context: str = ''
self.mma_tier_usage: dict[str, dict[str, Any]] = {
"Tier 1": {"input": 0, "output": 0, "model": "gemini-3.1-pro-preview"},
"Tier 2": {"input": 0, "output": 0, "model": "gemini-3-flash-preview"},
"Tier 3": {"input": 0, "output": 0, "model": "gemini-2.5-flash-lite"},
"Tier 4": {"input": 0, "output": 0, "model": "gemini-2.5-flash-lite"},
}
self._tool_log: list[dict] = []
self._tool_log: list[dict[str, Any]] = []
self._comms_log: list[dict[str, Any]] = []
self._pending_comms: list[dict[str, Any]] = []
self._pending_tool_calls: list[dict] = []
self._pending_tool_calls: list[dict[str, Any]] = []
self._pending_history_adds: list[dict[str, Any]] = []
self._trigger_blink = False
self._is_blinking = False
self._blink_start_time = 0.0
self._trigger_script_blink = False
self._is_script_blinking = False
self._script_blink_start_time = 0.0
self._scroll_disc_to_bottom = False
self._scroll_comms_to_bottom = False
self._scroll_tool_calls_to_bottom = False
self._trigger_blink: bool = False
self._is_blinking: bool = False
self._blink_start_time: float = 0.0
self._trigger_script_blink: bool = False
self._is_script_blinking: bool = False
self._script_blink_start_time: float = 0.0
self._scroll_disc_to_bottom: bool = False
self._scroll_comms_to_bottom: bool = False
self._scroll_tool_calls_to_bottom: bool = False
self._pending_gui_tasks: list[dict[str, Any]] = []
self.session_usage = {"input_tokens": 0, "output_tokens": 0, "cache_read_input_tokens": 0, "cache_creation_input_tokens": 0}
self._gemini_cache_text = ""
self.session_usage: dict[str, Any] = {"input_tokens": 0, "output_tokens": 0, "cache_read_input_tokens": 0, "cache_creation_input_tokens": 0, "last_latency": 0.0}
self._gemini_cache_text: str = ""
self._last_stable_md: str = ''
self._token_stats: dict = {}
self._token_stats: dict[str, Any] = {}
self._token_stats_dirty: bool = False
self.ui_disc_truncate_pairs: int = 2
self.ui_auto_scroll_comms = True
self.ui_auto_scroll_tool_calls = True
self.ui_auto_scroll_comms: bool = True
self.ui_auto_scroll_tool_calls: bool = True
agent_tools_cfg = self.project.get("agent", {}).get("tools", {})
self.ui_agent_tools: dict[str, bool] = {t: agent_tools_cfg.get(t, True) for t in AGENT_TOOL_NAMES}
self.tracks: list[dict[str, Any]] = []
self._show_add_ticket_form = False
self.ui_new_ticket_id = ""
self.ui_new_ticket_desc = ""
self.ui_new_ticket_target = ""
self.ui_new_ticket_deps = ""
self._track_discussion_active = False
self._show_add_ticket_form: bool = False
self.ui_new_ticket_id: str = ""
self.ui_new_ticket_desc: str = ""
self.ui_new_ticket_target: str = ""
self.ui_new_ticket_deps: str = ""
self._track_discussion_active: bool = False
self.mma_streams: dict[str, str] = {}
self._tier_stream_last_len: dict[str, int] = {}
self.is_viewing_prior_session = False
self.is_viewing_prior_session: bool = False
self.prior_session_entries: list[dict[str, Any]] = []
self.test_hooks_enabled = ("--enable-test-hooks" in sys.argv) or (os.environ.get("SLOP_TEST_HOOKS") == "1")
self.ui_manual_approve = False
self.perf_monitor = PerformanceMonitor()
self.perf_history = {"frame_time": [0.0]*100, "fps": [0.0]*100, "cpu": [0.0]*100, "input_lag": [0.0]*100}
self._perf_last_update = 0.0
self._autosave_interval = 60.0
self._last_autosave = time.time()
self.test_hooks_enabled: bool = ("--enable-test-hooks" in sys.argv) or (os.environ.get("SLOP_TEST_HOOKS") == "1")
self.ui_manual_approve: bool = False
self.perf_monitor: PerformanceMonitor = PerformanceMonitor()
self.perf_history: dict[str, list[float]] = {"frame_time": [0.0]*100, "fps": [0.0]*100, "cpu": [0.0]*100, "input_lag": [0.0]*100}
self._perf_last_update: float = 0.0
self._autosave_interval: float = 60.0
self._last_autosave: float = time.time()
label = self.project.get("project", {}).get("name", "")
session_logger.open_session(label=label)
self._prune_old_logs()
@@ -856,7 +856,7 @@ class App:
self._switch_discussion(remaining[0])
# ---------------------------------------------------------------- logic
def _on_comms_entry(self, entry: dict) -> None:
def _on_comms_entry(self, entry: dict[str, Any]) -> None:
# sys.stderr.write(f"[DEBUG] _on_comms_entry: {entry.get('kind')} {entry.get('direction')}\n")
session_logger.log_comms(entry)
entry["local_ts"] = time.time()
@@ -898,7 +898,7 @@ class App:
with self._pending_tool_calls_lock:
self._pending_tool_calls.append({"script": script, "result": result, "ts": time.time(), "source_tier": source_tier})
def _on_api_event(self, *args, **kwargs) -> None:
def _on_api_event(self, *args: Any, **kwargs: Any) -> None:
payload = kwargs.get("payload", {})
with self._pending_gui_tasks_lock:
self._pending_gui_tasks.append({"action": "refresh_api_metrics", "payload": payload})
@@ -992,16 +992,16 @@ class App:
setattr(self, attr_name, value)
if item == "gcli_path":
if not ai_client._gemini_cli_adapter:
ai_client._gemini_cli_adapter = ai_client.GeminiCliAdapter(binary_path=value)
ai_client._gemini_cli_adapter = ai_client.GeminiCliAdapter(binary_path=str(value))
else:
ai_client._gemini_cli_adapter.binary_path = value
ai_client._gemini_cli_adapter.binary_path = str(value)
elif action == "click":
item = task.get("item")
user_data = task.get("user_data")
if item == "btn_project_new_automated":
self._cb_new_project_automated(user_data)
elif item == "btn_mma_load_track":
self._cb_load_track(user_data)
self._cb_load_track(str(user_data or ""))
elif item in self._clickable_actions:
# Check if it's a method that accepts user_data
import inspect
@@ -1018,7 +1018,7 @@ class App:
item = task.get("listbox", task.get("item"))
value = task.get("item_value", task.get("value"))
if item == "disc_listbox":
self._switch_discussion(value)
self._switch_discussion(str(value or ""))
elif task.get("type") == "ask":
self._pending_ask_dialog = True
self._ask_request_id = task.get("request_id")
@@ -1037,18 +1037,18 @@ class App:
elif cb in self._predefined_callbacks:
self._predefined_callbacks[cb](*args)
elif action == "mma_step_approval":
dlg = MMAApprovalDialog(task.get("ticket_id"), task.get("payload"))
dlg = MMAApprovalDialog(str(task.get("ticket_id") or ""), str(task.get("payload") or ""))
self._pending_mma_approval = task
if "dialog_container" in task:
task["dialog_container"][0] = dlg
elif action == 'refresh_from_project':
self._refresh_from_project()
elif action == "mma_spawn_approval":
dlg = MMASpawnApprovalDialog(
task.get("ticket_id"),
task.get("role"),
task.get("prompt"),
task.get("context_md")
spawn_dlg = MMASpawnApprovalDialog(
str(task.get("ticket_id") or ""),
str(task.get("role") or ""),
str(task.get("prompt") or ""),
str(task.get("context_md") or "")
)
self._pending_mma_spawn = task
self._mma_spawn_prompt = task.get("prompt", "")
@@ -1056,7 +1056,7 @@ class App:
self._mma_spawn_open = True
self._mma_spawn_edit_mode = False
if "dialog_container" in task:
task["dialog_container"][0] = dlg
task["dialog_container"][0] = spawn_dlg
except Exception as e:
print(f"Error executing GUI task: {e}")
@@ -1143,7 +1143,7 @@ class App:
else:
print("[DEBUG] No pending spawn approval found")
def _handle_mma_respond(self, approved: bool, payload: str = None, abort: bool = False, prompt: str = None, context_md: str = None) -> None:
def _handle_mma_respond(self, approved: bool, payload: str | None = None, abort: bool = False, prompt: str | None = None, context_md: str | None = None) -> None:
if self._pending_mma_approval:
dlg = self._pending_mma_approval.get("dialog_container", [None])[0]
if dlg:
@@ -1155,17 +1155,17 @@ class App:
dlg._condition.notify_all()
self._pending_mma_approval = None
if self._pending_mma_spawn:
dlg = self._pending_mma_spawn.get("dialog_container", [None])[0]
if dlg:
with dlg._condition:
dlg._approved = approved
dlg._abort = abort
spawn_dlg = self._pending_mma_spawn.get("dialog_container", [None])[0]
if spawn_dlg:
with spawn_dlg._condition:
spawn_dlg._approved = approved
spawn_dlg._abort = abort
if prompt is not None:
dlg._prompt = prompt
spawn_dlg._prompt = prompt
if context_md is not None:
dlg._context_md = context_md
dlg._done = True
dlg._condition.notify_all()
spawn_dlg._context_md = context_md
spawn_dlg._done = True
spawn_dlg._condition.notify_all()
self._pending_mma_spawn = None
def _handle_approve_ask(self) -> None:
@@ -1173,7 +1173,7 @@ class App:
if not self._ask_request_id: return
request_id = self._ask_request_id
def do_post():
def do_post() -> None:
try:
requests.post(
"http://127.0.0.1:8999/api/ask/respond",
@@ -1191,7 +1191,7 @@ class App:
if not self._ask_request_id: return
request_id = self._ask_request_id
def do_post():
def do_post() -> None:
try:
requests.post(
"http://127.0.0.1:8999/api/ask/respond",
@@ -1268,7 +1268,7 @@ class App:
self._loop.create_task(self._process_event_queue())
# Fallback: process queues even if GUI thread is idling/stuck
async def queue_fallback():
async def queue_fallback() -> None:
while True:
try:
self._process_pending_gui_tasks()
@@ -1393,7 +1393,7 @@ class App:
usage[k] += u.get(k, 0) or 0
self.session_usage = usage
def _refresh_api_metrics(self, payload: dict, md_content: str | None = None) -> None:
def _refresh_api_metrics(self, payload: dict[str, Any], md_content: str | None = None) -> None:
if "latency" in payload:
self.session_usage["last_latency"] = payload["latency"]
self._recalculate_session_usage()
@@ -1567,7 +1567,7 @@ class App:
self.config["gui"] = {"show_windows": self.show_windows}
theme.save_to_config(self.config)
def _do_generate(self) -> tuple[str, Path, list, str, str]:
def _do_generate(self) -> tuple[str, Path, list[dict[str, Any]], str, str]:
"""Returns (full_md, output_path, file_items, stable_md, discussion_text)."""
self._flush_to_project()
self._save_active_project()
@@ -1589,7 +1589,7 @@ class App:
def _fetch_models(self, provider: str) -> None:
self.ai_status = "fetching models..."
def do_fetch():
def do_fetch() -> None:
try:
models = ai_client.list_models(provider)
self.available_models = models
@@ -1698,12 +1698,14 @@ class App:
self._tool_log.append(tc)
self._pending_tool_calls.clear()
if self.show_windows.get("Context Hub", False):
exp, self.show_windows["Context Hub"] = imgui.begin("Context Hub", self.show_windows["Context Hub"])
exp, opened = imgui.begin("Context Hub", self.show_windows["Context Hub"])
self.show_windows["Context Hub"] = bool(opened)
if exp:
self._render_projects_panel()
imgui.end()
if self.show_windows.get("Files & Media", False):
exp, self.show_windows["Files & Media"] = imgui.begin("Files & Media", self.show_windows["Files & Media"])
exp, opened = imgui.begin("Files & Media", self.show_windows["Files & Media"])
self.show_windows["Files & Media"] = bool(opened)
if exp:
if imgui.collapsing_header("Files"):
self._render_files_panel()
@@ -1711,7 +1713,8 @@ class App:
self._render_screenshots_panel()
imgui.end()
if self.show_windows.get("AI Settings", False):
exp, self.show_windows["AI Settings"] = imgui.begin("AI Settings", self.show_windows["AI Settings"])
exp, opened = imgui.begin("AI Settings", self.show_windows["AI Settings"])
self.show_windows["AI Settings"] = bool(opened)
if exp:
if imgui.collapsing_header("Provider & Model"):
self._render_provider_panel()
@@ -1721,34 +1724,40 @@ class App:
self._render_token_budget_panel()
imgui.end()
if self.show_windows.get("MMA Dashboard", False):
exp, self.show_windows["MMA Dashboard"] = imgui.begin("MMA Dashboard", self.show_windows["MMA Dashboard"])
exp, opened = imgui.begin("MMA Dashboard", self.show_windows["MMA Dashboard"])
self.show_windows["MMA Dashboard"] = bool(opened)
if exp:
self._render_mma_dashboard()
imgui.end()
if self.show_windows.get("Tier 1: Strategy", False):
exp, self.show_windows["Tier 1: Strategy"] = imgui.begin("Tier 1: Strategy", self.show_windows["Tier 1: Strategy"])
exp, opened = imgui.begin("Tier 1: Strategy", self.show_windows["Tier 1: Strategy"])
self.show_windows["Tier 1: Strategy"] = bool(opened)
if exp:
self._render_tier_stream_panel("Tier 1", "Tier 1")
imgui.end()
if self.show_windows.get("Tier 2: Tech Lead", False):
exp, self.show_windows["Tier 2: Tech Lead"] = imgui.begin("Tier 2: Tech Lead", self.show_windows["Tier 2: Tech Lead"])
exp, opened = imgui.begin("Tier 2: Tech Lead", self.show_windows["Tier 2: Tech Lead"])
self.show_windows["Tier 2: Tech Lead"] = bool(opened)
if exp:
self._render_tier_stream_panel("Tier 2", "Tier 2 (Tech Lead)")
imgui.end()
if self.show_windows.get("Tier 3: Workers", False):
exp, self.show_windows["Tier 3: Workers"] = imgui.begin("Tier 3: Workers", self.show_windows["Tier 3: Workers"])
exp, opened = imgui.begin("Tier 3: Workers", self.show_windows["Tier 3: Workers"])
self.show_windows["Tier 3: Workers"] = bool(opened)
if exp:
self._render_tier_stream_panel("Tier 3", None)
imgui.end()
if self.show_windows.get("Tier 4: QA", False):
exp, self.show_windows["Tier 4: QA"] = imgui.begin("Tier 4: QA", self.show_windows["Tier 4: QA"])
exp, opened = imgui.begin("Tier 4: QA", self.show_windows["Tier 4: QA"])
self.show_windows["Tier 4: QA"] = bool(opened)
if exp:
self._render_tier_stream_panel("Tier 4", "Tier 4 (QA)")
imgui.end()
if self.show_windows.get("Theme", False):
self._render_theme_panel()
if self.show_windows.get("Discussion Hub", False):
exp, self.show_windows["Discussion Hub"] = imgui.begin("Discussion Hub", self.show_windows["Discussion Hub"])
exp, opened = imgui.begin("Discussion Hub", self.show_windows["Discussion Hub"])
self.show_windows["Discussion Hub"] = bool(opened)
if exp:
# Top part for the history
imgui.begin_child("HistoryChild", size=(0, -200))
@@ -1765,7 +1774,8 @@ class App:
imgui.end_tab_bar()
imgui.end()
if self.show_windows.get("Operations Hub", False):
exp, self.show_windows["Operations Hub"] = imgui.begin("Operations Hub", self.show_windows["Operations Hub"])
exp, opened = imgui.begin("Operations Hub", self.show_windows["Operations Hub"])
self.show_windows["Operations Hub"] = bool(opened)
if exp:
imgui.text("Focus Agent:")
imgui.same_line()
@@ -1794,7 +1804,8 @@ class App:
if self.show_windows.get("Log Management", False):
self._render_log_management()
if self.show_windows["Diagnostics"]:
exp, self.show_windows["Diagnostics"] = imgui.begin("Diagnostics", self.show_windows["Diagnostics"])
exp, opened = imgui.begin("Diagnostics", self.show_windows["Diagnostics"])
self.show_windows["Diagnostics"] = bool(opened)
if exp:
now = time.time()
if now - self._perf_last_update >= 0.5:
@@ -1893,7 +1904,7 @@ class App:
else:
self._ask_dialog_open = False
if imgui.begin_popup_modal("Approve Tool Execution", None, imgui.WindowFlags_.always_auto_resize)[0]:
if not self._pending_ask_dialog:
if not self._pending_ask_dialog or self._ask_tool_data is None:
imgui.close_current_popup()
else:
tool_name = self._ask_tool_data.get("tool", "unknown")
@@ -2000,7 +2011,7 @@ class App:
self._is_script_blinking = True
self._script_blink_start_time = time.time()
try:
imgui.set_window_focus("Last Script Output")
imgui.set_window_focus("Last Script Output") # type: ignore[call-arg]
except Exception:
pass
if self._is_script_blinking:
@@ -2013,7 +2024,8 @@ class App:
imgui.push_style_color(imgui.Col_.frame_bg, vec4(0, 100, 255, alpha))
imgui.push_style_color(imgui.Col_.child_bg, vec4(0, 100, 255, alpha))
imgui.set_next_window_size(imgui.ImVec2(800, 600), imgui.Cond_.first_use_ever)
expanded, self.show_script_output = imgui.begin("Last Script Output", self.show_script_output)
expanded, opened = imgui.begin("Last Script Output", self.show_script_output)
self.show_script_output = bool(opened)
if expanded:
imgui.text("Script:")
imgui.same_line()
@@ -2043,7 +2055,8 @@ class App:
imgui.end()
if self.show_text_viewer:
imgui.set_next_window_size(imgui.ImVec2(900, 700), imgui.Cond_.first_use_ever)
expanded, self.show_text_viewer = imgui.begin(f"Text Viewer - {self.text_viewer_title}", self.show_text_viewer)
expanded, opened = imgui.begin(f"Text Viewer - {self.text_viewer_title}", self.show_text_viewer)
self.show_text_viewer = bool(opened)
if expanded:
if self.ui_word_wrap:
imgui.begin_child("tv_wrap", imgui.ImVec2(-1, -1), False)
@@ -2153,7 +2166,7 @@ class App:
self._cb_plan_epic()
def _cb_plan_epic(self) -> None:
def _bg_task():
def _bg_task() -> None:
try:
self.ai_status = "Planning Epic (Tier 1)..."
history = orchestrator_pm.get_track_history_summary()
@@ -2166,7 +2179,7 @@ class App:
_t1_resp = [e for e in _t1_new if e.get("direction") == "IN" and e.get("kind") == "response"]
_t1_in = sum(e.get("payload", {}).get("usage", {}).get("input_tokens", 0) for e in _t1_resp)
_t1_out = sum(e.get("payload", {}).get("usage", {}).get("output_tokens", 0) for e in _t1_resp)
def _push_t1_usage(i, o):
def _push_t1_usage(i: int, o: int) -> None:
self.mma_tier_usage["Tier 1"]["input"] += i
self.mma_tier_usage["Tier 1"]["output"] += o
with self._pending_gui_tasks_lock:
@@ -2194,7 +2207,7 @@ class App:
def _cb_accept_tracks(self) -> None:
self._show_track_proposal_modal = False
def _bg_task():
def _bg_task() -> None:
# Generate skeletons once
self.ai_status = "Phase 2: Generating skeletons for all tracks..."
parser = ASTParser(language="python")
@@ -2374,7 +2387,8 @@ class App:
imgui.end_popup()
def _render_log_management(self) -> None:
exp, self.show_windows["Log Management"] = imgui.begin("Log Management", self.show_windows["Log Management"])
exp, opened = imgui.begin("Log Management", self.show_windows["Log Management"])
self.show_windows["Log Management"] = bool(opened)
if not exp:
imgui.end()
return
@@ -2413,19 +2427,19 @@ class App:
if imgui.button(f"Unstar##{session_id}"):
registry.update_session_metadata(
session_id,
message_count=metadata.get("message_count"),
errors=metadata.get("errors"),
size_kb=metadata.get("size_kb"),
message_count=int(metadata.get("message_count") or 0),
errors=int(metadata.get("errors") or 0),
size_kb=int(metadata.get("size_kb") or 0),
whitelisted=False,
reason=metadata.get("reason")
reason=str(metadata.get("reason") or "")
)
else:
if imgui.button(f"Star##{session_id}"):
registry.update_session_metadata(
session_id,
message_count=metadata.get("message_count"),
errors=metadata.get("errors"),
size_kb=metadata.get("size_kb"),
message_count=int(metadata.get("message_count") or 0),
errors=int(metadata.get("errors") or 0),
size_kb=int(metadata.get("size_kb") or 0),
whitelisted=True,
reason="Manually whitelisted"
)
@@ -2867,7 +2881,7 @@ class App:
self._is_blinking = True
self._blink_start_time = time.time()
try:
imgui.set_window_focus("Response")
imgui.set_window_focus("Response") # type: ignore[call-arg]
except:
pass
is_blinking = False
@@ -3131,7 +3145,7 @@ class App:
imgui.pop_style_color()
imgui.table_next_column()
if imgui.button(f"Load##{track.get('id')}"):
self._cb_load_track(track.get("id"))
self._cb_load_track(str(track.get("id") or ""))
imgui.end_table()
# 1b. New Track Form
@@ -3248,14 +3262,14 @@ class App:
# 4. Task DAG Visualizer
imgui.text("Task DAG")
if self.active_track:
tickets_by_id = {t.get('id'): t for t in self.active_tickets}
tickets_by_id = {str(t.get('id') or ''): t for t in self.active_tickets}
all_ids = set(tickets_by_id.keys())
# Build children map
children_map = {}
children_map: dict[str, list[str]] = {}
for t in self.active_tickets:
for dep in t.get('depends_on', []):
if dep not in children_map: children_map[dep] = []
children_map[dep].append(t.get('id'))
children_map[dep].append(str(t.get('id') or ''))
# Roots are those whose depends_on elements are NOT in all_ids
roots = []
for t in self.active_tickets:
@@ -3263,7 +3277,7 @@ class App:
has_local_dep = any(d in all_ids for d in deps)
if not has_local_dep:
roots.append(t)
rendered = set()
rendered: set[str] = set()
for root in roots:
self._render_ticket_dag_node(root, tickets_by_id, children_map, rendered)
@@ -3341,7 +3355,7 @@ class App:
pass
imgui.end_child()
def _render_ticket_dag_node(self, ticket: Ticket, tickets_by_id: dict[str, Ticket], children_map: dict[str, list[str]], rendered: set[str]) -> None:
def _render_ticket_dag_node(self, ticket: dict[str, Any], tickets_by_id: dict[str, Any], children_map: dict[str, list[str]], rendered: set[str]) -> None:
tid = ticket.get('id', '??')
is_duplicate = tid in rendered
if not is_duplicate:
@@ -3552,8 +3566,10 @@ class App:
imgui.text("Project System Prompt")
ch, self.ui_project_system_prompt = imgui.input_text_multiline("##psp", self.ui_project_system_prompt, imgui.ImVec2(-1, 100))
def _render_theme_panel(self) -> None:
exp, self.show_windows["Theme"] = imgui.begin("Theme", self.show_windows["Theme"])
def _render_theme_panel(self) -> None:
exp, opened = imgui.begin("Theme", self.show_windows["Theme"])
self.show_windows["Theme"] = bool(opened)
if exp:
imgui.text("Palette")
cp = theme.get_current_palette()