fix(logging): Update GUI and controller to use correct session log paths and fix syntax errors.

This commit is contained in:
2026-03-06 14:22:41 -05:00
parent 0de50e216b
commit 2cfd0806cf
2 changed files with 86 additions and 159 deletions

View File

@@ -116,7 +116,7 @@ class AppController:
PROVIDERS: list[str] = ["gemini", "anthropic", "gemini_cli", "deepseek"]
def __init__(self):
# Initialize locks first to avoid initialization order issues
# Initialize locks first to avoid initialization order issues
self._send_thread_lock: threading.Lock = threading.Lock()
self._disc_entries_lock: threading.Lock = threading.Lock()
self._pending_comms_lock: threading.Lock = threading.Lock()
@@ -125,7 +125,6 @@ class AppController:
self._pending_gui_tasks_lock: threading.Lock = threading.Lock()
self._pending_dialog_lock: threading.Lock = threading.Lock()
self._api_event_queue_lock: threading.Lock = threading.Lock()
self.config: Dict[str, Any] = {}
self.project: Dict[str, Any] = {}
self.active_project_path: str = ""
@@ -135,19 +134,15 @@ class AppController:
self.disc_roles: List[str] = []
self.files: List[str] = []
self.screenshots: List[str] = []
self.event_queue: events.SyncEventQueue = events.SyncEventQueue()
self._loop_thread: Optional[threading.Thread] = None
self.tracks: List[Dict[str, Any]] = []
self.active_track: Optional[models.Track] = None
self.active_tickets: List[Dict[str, Any]] = []
self.mma_streams: Dict[str, str] = {}
self.mma_status: str = "idle"
self._tool_log: List[Dict[str, Any]] = []
self._comms_log: List[Dict[str, Any]] = []
self.session_usage: Dict[str, Any] = {
"input_tokens": 0,
"output_tokens": 0,
@@ -155,31 +150,26 @@ class AppController:
"cache_creation_input_tokens": 0,
"last_latency": 0.0
}
self.mma_tier_usage: Dict[str, Dict[str, Any]] = {
"Tier 1": {"input": 0, "output": 0, "model": "gemini-3.1-pro-preview"},
"Tier 2": {"input": 0, "output": 0, "model": "gemini-3-flash-preview"},
"Tier 3": {"input": 0, "output": 0, "model": "gemini-2.5-flash-lite"},
"Tier 4": {"input": 0, "output": 0, "model": "gemini-2.5-flash-lite"},
}
self.perf_monitor: performance_monitor.PerformanceMonitor = performance_monitor.PerformanceMonitor()
self._pending_gui_tasks: List[Dict[str, Any]] = []
self._api_event_queue: List[Dict[str, Any]] = []
# Pending dialogs state moved from App
self._pending_dialog: Optional[ConfirmDialog] = None
self._pending_dialog_open: bool = False
self._pending_actions: Dict[str, ConfirmDialog] = {}
self._pending_ask_dialog: bool = False
# AI settings state
self._current_provider: str = "gemini"
self._current_model: str = "gemini-2.5-flash-lite"
self.temperature: float = 0.0
self.max_tokens: int = 8192
self.history_trunc_limit: int = 8000
# UI-related state moved to controller
self.ui_ai_input: str = ""
self.ui_disc_new_name_input: str = ""
@@ -195,7 +185,6 @@ class AppController:
self.ui_new_ticket_desc: str = ""
self.ui_new_ticket_target: str = ""
self.ui_new_ticket_deps: str = ""
self.ui_output_dir: str = ""
self.ui_files_base_dir: str = ""
self.ui_shots_base_dir: str = ""
@@ -208,7 +197,6 @@ class AppController:
self.ui_auto_add_history: bool = False
self.ui_global_system_prompt: str = ""
self.ui_agent_tools: Dict[str, bool] = {}
self.available_models: List[str] = []
self.proposed_tracks: List[Dict[str, Any]] = []
self._show_track_proposal_modal: bool = False
@@ -231,7 +219,6 @@ class AppController:
self._perf_last_update: float = 0.0
self._autosave_interval: float = 60.0
self._last_autosave: float = time.time()
# More state moved from App
self._ask_dialog_open: bool = False
self._ask_request_id: Optional[str] = None
@@ -248,7 +235,6 @@ class AppController:
self._mma_spawn_edit_mode: bool = False
self._mma_spawn_prompt: str = ''
self._mma_spawn_context: str = ''
self._trigger_blink: bool = False
self._is_blinking: bool = False
self._blink_start_time: float = 0.0
@@ -272,7 +258,6 @@ class AppController:
self.prior_session_entries: List[Dict[str, Any]] = []
self.test_hooks_enabled: bool = ("--enable-test-hooks" in sys.argv) or (os.environ.get("SLOP_TEST_HOOKS") == "1")
self.ui_manual_approve: bool = False
self._settable_fields: Dict[str, str] = {
'ai_input': 'ui_ai_input',
'project_git_dir': 'ui_project_git_dir',
@@ -298,22 +283,20 @@ class AppController:
'ui_new_track_desc': 'ui_new_track_desc',
'manual_approve': 'ui_manual_approve'
}
self._gettable_fields = dict(self._settable_fields)
self._gettable_fields.update({
'ui_focus_agent': 'ui_focus_agent',
'active_discussion': 'active_discussion',
'_track_discussion_active': '_track_discussion_active',
'proposed_tracks': 'proposed_tracks',
'mma_streams': 'mma_streams',
'active_track': 'active_track',
'active_tickets': 'active_tickets',
'tracks': 'tracks',
'thinking_indicator': 'thinking_indicator',
'operations_live_indicator': 'operations_live_indicator',
'prior_session_indicator': 'prior_session_indicator'
})
'ui_focus_agent': 'ui_focus_agent',
'active_discussion': 'active_discussion',
'_track_discussion_active': '_track_discussion_active',
'proposed_tracks': 'proposed_tracks',
'mma_streams': 'mma_streams',
'active_track': 'active_track',
'active_tickets': 'active_tickets',
'tracks': 'tracks',
'thinking_indicator': 'thinking_indicator',
'operations_live_indicator': 'operations_live_indicator',
'prior_session_indicator': 'prior_session_indicator'
})
self._init_actions()
@property
@@ -329,7 +312,7 @@ class AppController:
return self.is_viewing_prior_session
def _init_actions(self) -> None:
# Set up state-related action maps
# Set up state-related action maps
self._clickable_actions: dict[str, Callable[..., Any]] = {
'btn_reset': self._handle_reset_session,
'btn_gen_send': self._handle_generate_send,
@@ -350,6 +333,7 @@ class AppController:
'_test_callback_func_write_to_file': self._test_callback_func_write_to_file,
'_set_env_var': lambda k, v: os.environ.update({k: v})
}
def _update_gcli_adapter(self, path: str) -> None:
sys.stderr.write(f"[DEBUG] _update_gcli_adapter called with: {path}\n")
sys.stderr.flush()
@@ -385,7 +369,7 @@ class AppController:
action = task.get("action")
if action:
session_logger.log_api_hook("PROCESS_TASK", action, str(task))
# ...
# ...
if action == "refresh_api_metrics":
self._refresh_api_metrics(task.get("payload", {}), md_content=self.last_md or None)
elif action == "set_ai_status":
@@ -419,7 +403,7 @@ class AppController:
self._trigger_blink = True
if not stream_id:
self._token_stats_dirty = True
# ONLY add to history when turn is complete
# ONLY add to history when turn is complete
if self.ui_auto_add_history and not stream_id and not is_streaming:
role = payload.get("role", "AI")
with self._pending_history_adds_lock:
@@ -430,7 +414,7 @@ class AppController:
"ts": project_manager.now_ts()
})
elif action in ("mma_stream", "mma_stream_append"):
# Some events might have these at top level, some in a 'payload' dict
# Some events might have these at top level, some in a 'payload' dict
stream_id = task.get("stream_id") or task.get("payload", {}).get("stream_id")
text = task.get("text") or task.get("payload", {}).get("text", "")
if stream_id:
@@ -441,11 +425,10 @@ class AppController:
self.proposed_tracks = task.get("payload", [])
self._show_track_proposal_modal = True
elif action == "mma_state_update":
# Handle both internal (nested) and hook-server (flattened) payloads
# Handle both internal (nested) and hook-server (flattened) payloads
payload = task.get("payload")
if not isinstance(payload, dict):
payload = task # Fallback to task if payload missing or wrong type
self.mma_status = payload.get("status", "idle")
self.active_tier = payload.get("active_tier")
self.mma_tier_usage = payload.get("tier_usage", self.mma_tier_usage)
@@ -604,24 +587,19 @@ class AppController:
self.temperature = ai_cfg.get("temperature", 0.0)
self.max_tokens = ai_cfg.get("max_tokens", 8192)
self.history_trunc_limit = ai_cfg.get("history_trunc_limit", 8000)
projects_cfg = self.config.get("projects", {})
self.project_paths = list(projects_cfg.get("paths", []))
self.active_project_path = projects_cfg.get("active", "")
self._load_active_project()
self.files = list(self.project.get("files", {}).get("paths", []))
self.screenshots = list(self.project.get("screenshots", {}).get("paths", []))
disc_sec = self.project.get("discussion", {})
self.disc_roles = list(disc_sec.get("roles", list(models.DISC_ROLES)))
self.active_discussion = disc_sec.get("active", "main")
disc_data = disc_sec.get("discussions", {}).get(self.active_discussion, {})
with self._disc_entries_lock:
self.disc_entries = models.parse_history_entries(disc_data.get("history", []), self.disc_roles)
# UI state
# UI state
self.ui_output_dir = self.project.get("output", {}).get("output_dir", "./md_gen")
self.ui_files_base_dir = self.project.get("files", {}).get("base_dir", ".")
self.ui_shots_base_dir = self.project.get("screenshots", {}).get("base_dir", ".")
@@ -635,7 +613,6 @@ class AppController:
self.ui_summary_only = proj_meta.get("summary_only", False)
self.ui_auto_add_history = disc_sec.get("auto_add", False)
self.ui_global_system_prompt = self.config.get("ai", {}).get("system_prompt", "")
_default_windows = {
"Context Hub": True,
"Files & Media": True,
@@ -653,10 +630,8 @@ class AppController:
}
saved = self.config.get("gui", {}).get("show_windows", {})
self.show_windows = {k: saved.get(k, v) for k, v in _default_windows.items()}
agent_tools_cfg = self.project.get("agent", {}).get("tools", {})
self.ui_agent_tools = {t: agent_tools_cfg.get(t, True) for t in models.AGENT_TOOL_NAMES}
label = self.project.get("project", {}).get("name", "")
session_logger.open_session(label=label)
@@ -664,7 +639,7 @@ class AppController:
root = hide_tk_root()
path = filedialog.askopenfilename(
title="Load Session Log",
initialdir="logs",
initialdir="logs/sessions",
filetypes=[("Log/JSONL", "*.log *.jsonl"), ("All Files", "*.*")]
)
root.destroy()
@@ -713,20 +688,21 @@ class AppController:
def _prune_old_logs(self) -> None:
    """Asynchronously prune old insignificant logs on startup.

    Spawns a daemon thread so pruning never blocks application startup.
    Any failure (missing registry file, locked files, import errors) is
    reported to stdout and otherwise ignored — pruning is best-effort.
    """
    def run_prune() -> None:
        try:
            # Local imports keep startup fast when pruning is never reached.
            from src import log_registry
            from src import log_pruner
            # Session logs now live under logs/sessions (one dir per session).
            registry = log_registry.LogRegistry("logs/sessions/log_registry.toml")
            pruner = log_pruner.LogPruner(registry, "logs/sessions")
            pruner.prune()
        except Exception as e:
            # Best-effort: a failed prune must never take down startup.
            print(f"Error during log pruning: {e}")
    thread = threading.Thread(target=run_prune, daemon=True)
    thread.start()
def _fetch_models(self, provider: str) -> None:
self._set_status("fetching models...")
def do_fetch() -> None:
try:
models_list = ai_client.list_models(provider)
@@ -777,12 +753,12 @@ class AppController:
ai_client.events.on("request_start", lambda **kw: self._on_api_event("request_start", **kw))
ai_client.events.on("response_received", lambda **kw: self._on_api_event("response_received", **kw))
ai_client.events.on("tool_execution", lambda **kw: self._on_api_event("tool_execution", **kw))
self.hook_server = api_hooks.HookServer(app if app else self)
self.hook_server.start()
def _run_event_loop(self):
"""Internal loop runner."""
def queue_fallback() -> None:
while True:
try:
@@ -792,7 +768,6 @@ class AppController:
self._process_pending_history_adds()
except: pass
time.sleep(0.1)
fallback_thread = threading.Thread(target=queue_fallback, daemon=True)
fallback_thread.start()
self._process_event_queue()
@@ -801,16 +776,14 @@ class AppController:
"""Listens for and processes events from the SyncEventQueue."""
sys.stderr.write("[DEBUG] _process_event_queue entered\n")
sys.stderr.flush()
def tick_perf():
while True:
self.perf_monitor.start_frame()
time.sleep(0.01) # Measurable frame time
self.perf_monitor.end_frame()
time.sleep(0.006) # Aim for ~60 FPS total
threading.Thread(target=tick_perf, daemon=True).start()
while True:
event_name, payload = self.event_queue.get()
sys.stderr.write(f"[DEBUG] _process_event_queue got event: {event_name} with payload: {str(payload)[:100]}\n")
@@ -821,8 +794,8 @@ class AppController:
threading.Thread(target=self._handle_request_event, args=(payload,), daemon=True).start()
elif event_name == "gui_task":
with self._pending_gui_tasks_lock:
# Directly append the task from the hook server.
# It already contains 'action' and any necessary fields.
# Directly append the task from the hook server.
# It already contains 'action' and any necessary fields.
self._pending_gui_tasks.append(payload)
elif event_name == "mma_state_update":
with self._pending_gui_tasks_lock:
@@ -838,7 +811,7 @@ class AppController:
})
elif event_name in ("mma_spawn_approval", "mma_step_approval"):
with self._pending_gui_tasks_lock:
# These payloads already contain the 'action' field
# These payloads already contain the 'action' field
self._pending_gui_tasks.append(payload)
elif event_name == "response":
with self._pending_gui_tasks_lock:
@@ -849,6 +822,7 @@ class AppController:
if self.test_hooks_enabled:
with self._api_event_queue_lock:
self._api_event_queue.append({"type": "response", "payload": payload})
def _handle_request_event(self, event: events.UserRequestEvent) -> None:
"""Processes a UserRequestEvent by calling the AI client."""
ai_client.set_current_tier(None) # Ensure main discussion is untagged
@@ -860,10 +834,8 @@ class AppController:
"collapsed": False,
"ts": project_manager.now_ts()
})
# Clear response area for new turn
# Clear response area for new turn
self.ai_response = ""
csp = filter(bool, [self.ui_global_system_prompt.strip(), self.ui_project_system_prompt.strip()])
ai_client.set_custom_system_prompt("\n\n".join(csp))
ai_client.set_model_params(self.temperature, self.max_tokens, self.history_trunc_limit)
@@ -946,6 +918,7 @@ class AppController:
if self.test_hooks_enabled:
with self._api_event_queue_lock:
self._api_event_queue.append({"type": event_name, "payload": payload})
def _on_performance_alert(self, message: str) -> None:
alert_text = f"[PERFORMANCE ALERT] {message}. Please consider optimizing recent changes or reducing load."
with self._pending_history_adds_lock:
@@ -966,7 +939,6 @@ class AppController:
self._append_tool_log(script, output)
self._set_status("powershell done, awaiting AI...")
return output
sys.stderr.write("[DEBUG] Creating ConfirmDialog.\n")
sys.stderr.flush()
dialog = ConfirmDialog(script, base_dir)
@@ -977,7 +949,6 @@ class AppController:
else:
with self._pending_dialog_lock:
self._pending_dialog = dialog
if self.test_hooks_enabled and hasattr(self, '_api_event_queue'):
with self._api_event_queue_lock:
self._api_event_queue.append({
@@ -989,7 +960,6 @@ class AppController:
})
sys.stderr.write(f"[DEBUG] Appended script_confirmation_required to _api_event_queue. ID={dialog._uid}\n")
sys.stderr.flush()
sys.stderr.write(f"[DEBUG] Waiting for dialog ID={dialog._uid}...\n")
sys.stderr.flush()
approved, final_script = dialog.wait()
@@ -1034,6 +1004,7 @@ class AppController:
dialog._condition.notify_all()
return True
return False
@property
def current_provider(self) -> str:
return self._current_provider
@@ -1063,7 +1034,6 @@ class AppController:
def create_api(self) -> FastAPI:
"""Creates and configures the FastAPI application for headless mode."""
api = FastAPI(title="Manual Slop Headless API")
API_KEY_NAME = "X-API-KEY"
api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)
@@ -1244,27 +1214,28 @@ class AppController:
@api.get("/api/v1/sessions", dependencies=[Depends(get_api_key)])
def list_sessions() -> list[str]:
    """Lists all session IDs.

    Each session is a subdirectory of logs/sessions; the directory
    name is the session ID. Returns [] when the log root is absent.
    """
    log_dir = Path("logs/sessions")
    if not log_dir.exists():
        return []
    # Only directories are sessions; stray files (e.g. registry) are skipped.
    return [d.name for d in log_dir.iterdir() if d.is_dir()]
@api.get("/api/v1/sessions/{session_id}", dependencies=[Depends(get_api_key)])
def get_session(session_id: str) -> dict[str, Any]:
    """Returns the content of the comms.log for a specific session.

    Raises:
        HTTPException: 404 if the session's comms.log does not exist
            or the ID attempts to escape the session log directory.
    """
    # session_id is untrusted API input joined into a filesystem path:
    # reject anything that could traverse out of logs/sessions.
    if session_id in ("", ".", "..") or "/" in session_id or "\\" in session_id:
        raise HTTPException(status_code=404, detail="Session log not found")
    log_path = Path("logs/sessions") / session_id / "comms.log"
    if not log_path.exists():
        raise HTTPException(status_code=404, detail="Session log not found")
    return {"id": session_id, "content": log_path.read_text(encoding="utf-8", errors="replace")}
@api.delete("/api/v1/sessions/{session_id}", dependencies=[Depends(get_api_key)])
def delete_session(session_id: str) -> dict[str, str]:
    """Deletes a specific session directory (recursively).

    Raises:
        HTTPException: 404 if the session directory does not exist
            or the ID attempts to escape the session log directory.
    """
    # Guard against path traversal: session_id == ".." would otherwise
    # make rmtree delete the entire logs/ tree.
    if session_id in ("", ".", "..") or "/" in session_id or "\\" in session_id:
        raise HTTPException(status_code=404, detail="Session directory not found")
    log_path = Path("logs/sessions") / session_id
    if not log_path.exists() or not log_path.is_dir():
        raise HTTPException(status_code=404, detail="Session directory not found")
    import shutil
    shutil.rmtree(log_path)
    return {"status": "deleted"}
@api.get("/api/v1/context", dependencies=[Depends(get_api_key)])
@@ -1288,7 +1259,6 @@ class AppController:
def token_stats() -> dict[str, Any]:
"""Returns current token usage and budget statistics."""
return self._token_stats
return api
def _cb_new_project_automated(self, user_data: Any) -> None:
@@ -1327,6 +1297,7 @@ class AppController:
return
self._refresh_from_project()
self._set_status(f"switched to: {Path(path).stem}")
def _refresh_from_project(self) -> None:
self.files = list(self.project.get("files", {}).get("paths", []))
self.screenshots = list(self.project.get("screenshots", {}).get("paths", []))
@@ -1559,7 +1530,6 @@ class AppController:
discussions = disc_sec.get("discussions", {})
for d_name in discussions:
discussions[d_name]["history"] = []
self._set_status("session reset")
self.ai_response = ""
self.ui_ai_input = ""
@@ -1568,7 +1538,6 @@ class AppController:
self._current_provider = "gemini"
self._current_model = "gemini-2.5-flash-lite"
ai_client.set_provider(self._current_provider, self._current_model)
with self._pending_history_adds_lock:
self._pending_history_adds.clear()
with self._api_event_queue_lock:
@@ -1578,6 +1547,7 @@ class AppController:
def _handle_md_only(self) -> None:
"""Logic for the 'MD Only' action."""
def worker():
try:
md, path, *_ = self._do_generate()
@@ -1592,6 +1562,7 @@ class AppController:
def _handle_generate_send(self) -> None:
"""Logic for the 'Gen + Send' action."""
def worker():
sys.stderr.write("[DEBUG] _handle_generate_send worker started\n")
sys.stderr.flush()
@@ -1601,7 +1572,6 @@ class AppController:
self.last_md = md
self.last_md_path = path
self.last_file_items = file_items
self._set_status("sending...")
user_msg = self.ui_ai_input
base_dir = self.ui_files_base_dir
@@ -1640,14 +1610,12 @@ class AppController:
if "latency" in payload:
self.session_usage["last_latency"] = payload["latency"]
self._recalculate_session_usage()
if md_content is not None:
stats = ai_client.get_token_stats(md_content)
# Ensure compatibility if keys are named differently
if "total_tokens" in stats and "estimated_prompt_tokens" not in stats:
stats["estimated_prompt_tokens"] = stats["total_tokens"]
self._token_stats = stats
cache_stats = payload.get("cache_stats")
if cache_stats:
count = cache_stats.get("cache_count", 0)
@@ -1736,6 +1704,7 @@ class AppController:
_t1_resp = [e for e in _t1_new if e.get("direction") == "IN" and e.get("kind") == "response"]
_t1_in = sum(e.get("payload", {}).get("usage", {}).get("input_tokens", 0) for e in _t1_resp)
_t1_out = sum(e.get("payload", {}).get("usage", {}).get("output_tokens", 0) for e in _t1_resp)
def _push_t1_usage(i: int, o: int) -> None:
self.mma_tier_usage["Tier 1"]["input"] += i
self.mma_tier_usage["Tier 1"]["output"] += o
@@ -1764,16 +1733,16 @@ class AppController:
def _cb_accept_tracks(self) -> None:
self._show_track_proposal_modal = False
def _bg_task() -> None:
sys.stderr.write("[DEBUG] _cb_accept_tracks _bg_task started\n")
# Generate skeletons once
self._set_status("Phase 2: Generating skeletons for all tracks...")
sys.stderr.write("[DEBUG] Creating ASTParser...\n")
parser = ASTParser(language="python")
generated_skeletons = ""
try:
# Use a local copy of files to avoid concurrent modification issues
# Use a local copy of files to avoid concurrent modification issues
files_to_scan = list(self.files)
sys.stderr.write(f"[DEBUG] Scanning {len(files_to_scan)} files for skeletons...\n")
for i, file_path in enumerate(files_to_scan):
@@ -1790,7 +1759,6 @@ class AppController:
sys.stderr.write(f"[DEBUG] Error in scan loop: {e}\n")
self._set_status(f"Error generating skeletons: {e}")
return # Exit if skeleton generation fails
sys.stderr.write("[DEBUG] Skeleton generation complete. Starting tracks...\n")
# Now loop through tracks and call _start_track_logic with generated skeletons
total_tracks = len(self.proposed_tracks)
@@ -1798,7 +1766,6 @@ class AppController:
title = track_data.get("title") or track_data.get("goal", "Untitled Track")
self._set_status(f"Processing track {i+1} of {total_tracks}: '{title}'...")
self._start_track_logic(track_data, skeletons_str=generated_skeletons) # Pass skeletons
sys.stderr.write("[DEBUG] All tracks started. Refreshing...\n")
with self._pending_gui_tasks_lock:
self._pending_gui_tasks.append({'action': 'refresh_from_project'}) # Ensure UI refresh after tracks are started
@@ -1807,14 +1774,13 @@ class AppController:
def _cb_start_track(self, user_data: Any = None) -> None:
if isinstance(user_data, str):
# If track_id is provided directly
# If track_id is provided directly
track_id = user_data
# Ensure it's loaded as active
if not self.active_track or self.active_track.id != track_id:
self._cb_load_track(track_id)
if self.active_track:
# Use the active track object directly to start execution
# Use the active track object directly to start execution
self._set_mma_status("running")
engine = multi_agent_conductor.ConductorEngine(self.active_track, self.event_queue, auto_queue=not self.mma_step_mode)
flat = project_manager.flat_config(self.project, self.active_discussion, track_id=self.active_track.id)
@@ -1822,7 +1788,6 @@ class AppController:
threading.Thread(target=engine.run, kwargs={"md_content": full_md}, daemon=True).start()
self._set_status(f"Track '{self.active_track.description}' started.")
return
idx = 0
if isinstance(user_data, int):
idx = user_data
@@ -1839,9 +1804,7 @@ class AppController:
goal = track_data.get("goal", "")
title = track_data.get("title") or track_data.get("goal", "Untitled Track")
self._set_status(f"Phase 2: Generating tickets for {title}...")
skeletons = skeletons_str or "" # Use provided skeletons or empty
self._set_status("Phase 2: Calling Tech Lead...")
_t2_baseline = len(ai_client.get_comms_log())
raw_tickets = conductor_tech_lead.generate_tickets(goal, skeletons)
@@ -1849,18 +1812,16 @@ class AppController:
_t2_resp = [e for e in _t2_new if e.get("direction") == "IN" and e.get("kind") == "response"]
_t2_in = sum(e.get("payload", {}).get("usage", {}).get("input_tokens", 0) for e in _t2_resp)
_t2_out = sum(e.get("payload", {}).get("usage", {}).get("output_tokens", 0) for e in _t2_resp)
def _push_t2_usage(i: int, o: int) -> None:
self.mma_tier_usage["Tier 2"]["input"] += i
self.mma_tier_usage["Tier 2"]["output"] += o
with self._pending_gui_tasks_lock:
self._pending_gui_tasks.append({
"action": "custom_callback",
"callback": _push_t2_usage,
"args": [_t2_in, _t2_out]
})
if not raw_tickets:
self._set_status(f"Error: No tickets generated for track: {title}")
print(f"Warning: No tickets generated for track: {title}")
@@ -1889,13 +1850,11 @@ class AppController:
meta = models.Metadata(id=track_id, name=title, status="todo", created_at=datetime.now(), updated_at=datetime.now())
state = models.TrackState(metadata=meta, discussion=[], tasks=tickets)
project_manager.save_track_state(track_id, state, self.ui_files_base_dir)
# Add to memory and notify UI
self.tracks.append({"id": track_id, "title": title, "status": "todo"})
with self._pending_gui_tasks_lock:
self._pending_gui_tasks.append({'action': 'refresh_from_project'})
# 4. Initialize ConductorEngine and run loop
# 4. Initialize ConductorEngine and run loop
engine = multi_agent_conductor.ConductorEngine(track, self.event_queue, auto_queue=not self.mma_step_mode)
# Use current full markdown context for the track execution
track_id_param = track.id
@@ -1963,23 +1922,22 @@ class AppController:
meta_file = track_dir / "metadata.json"
with open(meta_file, "w", encoding="utf-8") as f:
json.dump({
"id": track_id,
"title": name,
"description": desc,
"type": track_type,
"status": "new",
"progress": 0.0
}, f, indent=1)
# Refresh tracks from disk
"id": track_id,
"title": name,
"description": desc,
"type": track_type,
"status": "new",
"progress": 0.0
}, f, indent=1)
# Refresh tracks from disk
self.tracks = project_manager.get_all_tracks(self.ui_files_base_dir)
def _push_mma_state_update(self) -> None:
if not self.active_track:
return
# Sync active_tickets (list of dicts) back to active_track.tickets (list of models.Ticket objects)
# Sync active_tickets (list of dicts) back to active_track.tickets (list of models.Ticket objects)
self.active_track.tickets = [models.Ticket.from_dict(t) for t in self.active_tickets]
# Save the state to disk
existing = project_manager.load_track_state(self.active_track.id, self.ui_files_base_dir)
meta = models.Metadata(
id=self.active_track.id,