feat(ui): Implement Track Browser and progress visualization in MMA Dashboard

This commit is contained in:
2026-02-27 22:49:03 -05:00
parent a97eb2a222
commit 2b1cfbb34d
6 changed files with 315 additions and 3 deletions

View File

@@ -26,7 +26,7 @@ This file tracks all major tracks for the project. Each track has its own detail
---
- [ ] **Track: MMA Dashboard Visualization Overhaul**
- [~] **Track: MMA Dashboard Visualization Overhaul**
*Link: [./tracks/mma_dashboard_visualization_overhaul/](./tracks/mma_dashboard_visualization_overhaul/)*
---

View File

@@ -1,7 +1,7 @@
# Implementation Plan: MMA Dashboard Visualization Overhaul
## Phase 1: Track Browser Panel
- [ ] Task: Implement a list view in the MMA Dashboard that reads from the `tracks` directory.
- [~] Task: Implement a list view in the MMA Dashboard that reads from the `tracks` directory.
- [ ] Task: Add functionality to select an active track and load its state into the UI.
- [ ] Task: Display progress bars based on task completion within the active track.

View File

@@ -336,6 +336,9 @@ class App:
agent_tools_cfg = self.project.get("agent", {}).get("tools", {})
self.ui_agent_tools: dict[str, bool] = {t: agent_tools_cfg.get(t, True) for t in AGENT_TOOL_NAMES}
# MMA Tracks
self.tracks: list[dict] = []
# Prior session log viewing
self.is_viewing_prior_session = False
self.prior_session_entries: list[dict] = []
@@ -762,6 +765,9 @@ class App:
agent_tools_cfg = proj.get("agent", {}).get("tools", {})
self.ui_agent_tools = {t: agent_tools_cfg.get(t, True) for t in AGENT_TOOL_NAMES}
# MMA Tracks
self.tracks = project_manager.get_all_tracks(self.ui_files_base_dir)
# Restore MMA state
mma_sec = proj.get("mma", {})
self.ui_epic_input = mma_sec.get("epic", "")
@@ -790,6 +796,40 @@ class App:
if track_history:
self.disc_entries = _parse_history_entries(track_history, self.disc_roles)
def _cb_load_track(self, track_id: str):
    """Load a track's persisted state and make it the active track.

    Reads the track state via project_manager, normalizes its tasks into
    Ticket objects, swaps in the track-scoped discussion history, and
    updates the status line. Failures are reported via ai_status rather
    than raised.
    """
    state = project_manager.load_track_state(track_id, self.ui_files_base_dir)
    if not state:
        return
    try:
        # Persisted tasks may be raw dicts or Ticket instances; normalize.
        tickets = [Ticket(**t) if isinstance(t, dict) else t for t in state.tasks]
        self.active_track = Track(
            id=state.metadata.id,
            description=state.metadata.name,
            tickets=tickets,
        )
        # The UI table consumes plain dicts, so convert Ticket objects back.
        from dataclasses import asdict
        self.active_tickets = [t if isinstance(t, dict) else asdict(t) for t in tickets]
        # Replace the discussion pane with this track's history (empty if none).
        history = project_manager.load_track_history(track_id, self.ui_files_base_dir)
        if history:
            self.disc_entries = _parse_history_entries(history, self.disc_roles)
        else:
            self.disc_entries = []
        self._recalculate_session_usage()
        self.ai_status = f"Loaded track: {state.metadata.name}"
    except Exception as e:
        self.ai_status = f"Load track error: {e}"
        print(f"Error loading track {track_id}: {e}")
def _save_active_project(self):
if self.active_project_path:
try:
@@ -2752,7 +2792,36 @@ class App:
)
def _render_mma_dashboard(self):
# 1. Global Controls
# 1. Track Browser
imgui.text("Track Browser")
if imgui.begin_table("mma_tracks_table", 4, imgui.TableFlags_.borders | imgui.TableFlags_.row_bg | imgui.TableFlags_.resizable):
imgui.table_setup_column("Title")
imgui.table_setup_column("Status")
imgui.table_setup_column("Progress")
imgui.table_setup_column("Actions")
imgui.table_headers_row()
for track in self.tracks:
imgui.table_next_row()
imgui.table_next_column()
imgui.text(track.get("title", "Untitled"))
imgui.table_next_column()
imgui.text(track.get("status", "unknown"))
imgui.table_next_column()
progress = track.get("progress", 0.0)
imgui.progress_bar(progress, imgui.ImVec2(-1, 0), f"{int(progress*100)}%")
imgui.table_next_column()
if imgui.button(f"Load##{track.get('id')}"):
self._cb_load_track(track.get("id"))
imgui.end_table()
imgui.separator()
# 2. Global Controls
changed, self.mma_step_mode = imgui.checkbox("Step Mode (HITL)", self.mma_step_mode)
if changed:
# We could push an event here if the engine needs to know immediately

View File

@@ -10,6 +10,7 @@ import datetime
import tomllib
import tomli_w
import re
import json
from pathlib import Path
TS_FMT = "%Y-%m-%dT%H:%M:%S"
@@ -309,3 +310,85 @@ def save_track_history(track_id: str, history: list, base_dir: str | Path = ".")
entries = [str_to_entry(h, roles) for h in history]
state.discussion = entries
save_track_state(track_id, state, base_dir)
def get_all_tracks(base_dir: str | Path = ".") -> list[dict]:
"""
Scans the conductor/tracks/ directory and returns a list of dictionaries
containing track metadata: 'id', 'title', 'status', 'complete', 'total',
and 'progress' (0.0 to 1.0).
Handles missing or malformed metadata.json or state.toml by falling back
to available info or defaults.
"""
from models import TrackState
tracks_dir = Path(base_dir) / "conductor" / "tracks"
if not tracks_dir.exists():
return []
results = []
for entry in tracks_dir.iterdir():
if not entry.is_dir():
continue
track_id = entry.name
track_info = {
"id": track_id,
"title": track_id,
"status": "unknown",
"complete": 0,
"total": 0,
"progress": 0.0
}
state_found = False
# Try loading state.toml
try:
state = load_track_state(track_id, base_dir)
if state:
track_info["id"] = state.metadata.id or track_id
track_info["title"] = state.metadata.name or track_id
track_info["status"] = state.metadata.status or "unknown"
track_info["complete"] = len([t for t in state.tasks if t.status == "completed"])
track_info["total"] = len(state.tasks)
if track_info["total"] > 0:
track_info["progress"] = track_info["complete"] / track_info["total"]
state_found = True
except Exception:
pass
if not state_found:
# Try loading metadata.json
metadata_file = entry / "metadata.json"
if metadata_file.exists():
try:
with open(metadata_file, "r") as f:
data = json.load(f)
track_info["id"] = data.get("id", data.get("track_id", track_id))
track_info["title"] = data.get("title", data.get("name", data.get("description", track_id)))
track_info["status"] = data.get("status", "unknown")
except Exception:
pass
# Try parsing plan.md for complete/total if state was missing or empty
if track_info["total"] == 0:
plan_file = entry / "plan.md"
if plan_file.exists():
try:
with open(plan_file, "r", encoding="utf-8") as f:
content = f.read()
# Simple regex to count tasks
# - [ ] Task: ...
# - [x] Task: ...
# - [~] Task: ...
tasks = re.findall(r"^[ \t]*- \[[ x~]\] .*", content, re.MULTILINE)
completed_tasks = re.findall(r"^[ \t]*- \[x\] .*", content, re.MULTILINE)
track_info["total"] = len(tasks)
track_info["complete"] = len(completed_tasks)
if track_info["total"] > 0:
track_info["progress"] = float(track_info["complete"]) / track_info["total"]
except Exception:
pass
results.append(track_info)
return results

View File

@@ -0,0 +1,66 @@
import pytest
from unittest.mock import patch, MagicMock
from gui_2 import App
@pytest.fixture
def app_instance():
    """Build an App instance with all construction side effects patched out.

    Yields a (app, mock_pm) pair so tests can stub project_manager calls
    and inspect how App uses them.
    """
    # Patch every collaborator App.__init__ touches so no config files are
    # read/written, no UI starts, and no project is loaded.
    with (
        patch('gui_2.load_config', return_value={'ai': {}, 'projects': {}}),
        patch('gui_2.save_config'),
        patch('gui_2.project_manager') as mock_pm,
        patch('gui_2.session_logger'),
        patch('gui_2.immapp.run'),
        patch.object(App, '_load_active_project'),
        patch.object(App, '_fetch_models'),
        patch.object(App, '_load_fonts'),
        patch.object(App, '_post_init')
    ):
        app = App()
        # _load_active_project is patched out, so seed the attributes that
        # _refresh_from_project reads.
        app.project = {}
        app.ui_files_base_dir = "."
        # yield (not return) keeps the patches active for the test body.
        yield app, mock_pm
def test_mma_dashboard_refresh(app_instance):
    """_refresh_from_project should populate app.tracks from project_manager."""
    app, mock_pm = app_instance
    # Stub get_all_tracks to return two fake tracks.
    fake_tracks = [
        MagicMock(id="track_1", description="Track 1"),
        MagicMock(id="track_2", description="Track 2"),
    ]
    mock_pm.get_all_tracks.return_value = fake_tracks
    # Trigger the refresh and confirm the tracks were picked up verbatim.
    app._refresh_from_project()
    assert hasattr(app, 'tracks'), "App instance should have a 'tracks' attribute"
    assert app.tracks == fake_tracks
    assert len(app.tracks) == 2
    assert app.tracks[0].id == "track_1"
    assert app.tracks[1].id == "track_2"
    # The scan must be rooted at the project's files base dir.
    mock_pm.get_all_tracks.assert_called_with(app.ui_files_base_dir)
def test_mma_dashboard_initialization_refresh(app_instance):
    """Manually triggering a refresh (as a project load would) fills app.tracks.

    The fixture patches _load_active_project away, so the refresh that would
    normally run during initialization is exercised here by calling it
    directly.
    """
    app, mock_pm = app_instance
    stub_tracks = [MagicMock(id="init_track", description="Initial Track")]
    mock_pm.get_all_tracks.return_value = stub_tracks
    # Simulate the refresh that happens when a project is loaded.
    app._refresh_from_project()
    assert app.tracks == stub_tracks
    assert app.tracks[0].id == "init_track"

View File

@@ -0,0 +1,94 @@
import pytest
import json
from pathlib import Path
from project_manager import get_all_tracks, save_track_state
from models import TrackState, Metadata, Ticket
from datetime import datetime
def test_get_all_tracks_empty(tmp_path):
    """With no conductor/tracks directory, the scan yields an empty list."""
    result = get_all_tracks(tmp_path)
    assert result == []
def test_get_all_tracks_with_state(tmp_path):
    """A saved state.toml is the preferred source of track summary data."""
    track_id = "test_track_1"
    track_dir = tmp_path / "conductor" / "tracks" / track_id
    track_dir.mkdir(parents=True)
    # Persist a state with one completed and one pending task.
    state = TrackState(
        metadata=Metadata(
            id=track_id,
            name="Test Track 1",
            status="in_progress",
            created_at=datetime.now(),
            updated_at=datetime.now(),
        ),
        discussion=[],
        tasks=[
            Ticket(id="task1", description="desc1", status="completed", assigned_to="user"),
            Ticket(id="task2", description="desc2", status="todo", assigned_to="user"),
        ],
    )
    save_track_state(track_id, state, tmp_path)
    tracks = get_all_tracks(tmp_path)
    assert len(tracks) == 1
    summary = tracks[0]
    assert summary["id"] == track_id
    assert summary["title"] == "Test Track 1"
    assert summary["status"] == "in_progress"
    assert summary["complete"] == 1
    assert summary["total"] == 2
    assert summary["progress"] == 0.5
def test_get_all_tracks_with_metadata_json(tmp_path):
    """metadata.json plus plan.md checkbox counts serve as the fallback source."""
    tracks_dir = tmp_path / "conductor" / "tracks"
    tracks_dir.mkdir(parents=True)
    track_id = "test_track_2"
    track_dir = tracks_dir / track_id
    track_dir.mkdir()
    metadata = {
        "id": track_id,
        "title": "Test Track 2",
        "status": "planned"
    }
    with open(track_dir / "metadata.json", "w") as f:
        json.dump(metadata, f)
    # plan.md with one done, one todo, and one in-progress task.
    plan_content = """
# Plan
- [x] Task: Task 1
- [ ] Task: Task 2
- [~] Task: Task 3
"""
    with open(track_dir / "plan.md", "w") as f:
        f.write(plan_content)
    tracks = get_all_tracks(tmp_path)
    assert len(tracks) == 1
    track = tracks[0]
    assert track["id"] == track_id
    assert track["title"] == "Test Track 2"
    assert track["status"] == "planned"
    assert track["complete"] == 1
    assert track["total"] == 3
    # Compare against the exact expected ratio: approximating the truncated
    # literal 0.333333 puts the difference right at approx's default 1e-6
    # relative tolerance, making the original assertion fragile.
    assert track["progress"] == pytest.approx(1 / 3)
def test_get_all_tracks_malformed(tmp_path):
    """Unparseable metadata.json degrades to defaults instead of raising."""
    track_id = "malformed_track"
    track_dir = tmp_path / "conductor" / "tracks" / track_id
    track_dir.mkdir(parents=True)
    # Deliberately invalid JSON.
    with open(track_dir / "metadata.json", "w") as f:
        f.write("{ invalid json }")
    tracks = get_all_tracks(tmp_path)
    assert len(tracks) == 1
    entry = tracks[0]
    assert entry["id"] == track_id
    assert entry["status"] == "unknown"
    assert entry["complete"] == 0
    assert entry["total"] == 0