chore(mma): Implement visual simulation for Epic planning and fix UI refresh

This commit is contained in:
2026-02-28 21:07:46 -05:00
parent 3d861ecf08
commit d65fa79e26
11 changed files with 237 additions and 28 deletions

View File

@@ -1614,6 +1614,12 @@ def run_tier4_analysis(stderr: str) -> str:
return f"[QA ANALYSIS FAILED] {e}" return f"[QA ANALYSIS FAILED] {e}"
# ------------------------------------------------------------------ unified send # ------------------------------------------------------------------ unified send
import json
from typing import Any, Callable, Optional, List
# Assuming _model, _system_prompt, _provider, _send_lock are module-level variables
# and the _send_xxx functions are also defined at module level.
def send( def send(
md_content: str, md_content: str,
user_message: str, user_message: str,
@@ -1639,6 +1645,62 @@ def send(
pre_tool_callback : Optional callback (payload: str) -> bool called before tool execution pre_tool_callback : Optional callback (payload: str) -> bool called before tool execution
qa_callback : Optional callback (stderr: str) -> str called for Tier 4 error analysis qa_callback : Optional callback (stderr: str) -> str called for Tier 4 error analysis
""" """
# --- START MOCK LOGIC ---
# Assuming _model, _custom_system_prompt, and _system_prompt are module-level variables.
# If _model is not 'mock', proceed to original provider logic.
if _model == 'mock':
mock_response_content = None
# Use _custom_system_prompt for keyword detection
current_system_prompt = _custom_system_prompt # Assuming _custom_system_prompt is accessible and defined
if 'tier1_epic_init' in current_system_prompt:
mock_response_content = [
{
"id": "mock-track-1",
"type": "epic",
"module": "conductor",
"persona": "Tier 1 Orchestrator",
"severity": "high",
"goal": "Initialize a new track.",
"acceptance_criteria": "Track created successfully with required fields."
},
{
"id": "mock-track-2",
"type": "epic",
"module": "conductor",
"persona": "Tier 1 Orchestrator",
"severity": "medium",
"goal": "Initialize another track.",
"acceptance_criteria": "Second track created successfully."
}
]
elif 'tier2_sprint_planning' in current_system_prompt:
mock_response_content = [
{
"id": "mock-ticket-1",
"type": "story",
"goal": "Implement feature X.",
"target_file": "src/feature_x.py",
"depends_on": [],
"context_requirements": ["requirements.txt", "main.py"]
},
{
"id": "mock-ticket-2",
"type": "bug",
"goal": "Fix bug Y.",
"target_file": "src/bug_y.py",
"depends_on": ["mock-ticket-1"],
"context_requirements": ["tests/test_bug_y.py"]
}
]
else:
mock_response_content = "Mock AI Response"
# The function is typed to return 'str', so we return a JSON string.
# Ensure 'json' is imported at the module level.
return json.dumps(mock_response_content)
# --- END MOCK LOGIC ---
with _send_lock: with _send_lock:
if _provider == "gemini": if _provider == "gemini":
return _send_gemini(md_content, user_message, base_dir, file_items, discussion_history, pre_tool_callback, qa_callback) return _send_gemini(md_content, user_message, base_dir, file_items, discussion_history, pre_tool_callback, qa_callback)

View File

@@ -1,4 +1,4 @@
from __future__ import annotations from __future__ import annotations
import json import json
import threading import threading
import uuid import uuid
@@ -128,6 +128,9 @@ class HookHandler(BaseHTTPRequestHandler):
result["active_tickets"] = getattr(app, "active_tickets", []) result["active_tickets"] = getattr(app, "active_tickets", [])
result["mma_step_mode"] = getattr(app, "mma_step_mode", False) result["mma_step_mode"] = getattr(app, "mma_step_mode", False)
result["pending_approval"] = app._pending_mma_approval is not None result["pending_approval"] = app._pending_mma_approval is not None
# Added lines for tracks and proposed_tracks
result["tracks"] = getattr(app, "tracks", [])
result["proposed_tracks"] = getattr(app, "proposed_tracks", [])
finally: finally:
event.set() event.set()
with app._pending_gui_tasks_lock: with app._pending_gui_tasks_lock:

View File

@@ -5,7 +5,7 @@
- [x] Task: Implement helper methods in `ApiHookClient` for querying specific DearPyGui item states (e.g., `get_text_value`, `get_node_status`). 2a30e62 - [x] Task: Implement helper methods in `ApiHookClient` for querying specific DearPyGui item states (e.g., `get_text_value`, `get_node_status`). 2a30e62
## Phase 2: Epic & Track Verification ## Phase 2: Epic & Track Verification
- [~] Task: Write the simulation routine to trigger a new Epic and verify the Track Browser updates correctly. - [x] Task: Write the simulation routine to trigger a new Epic and verify the Track Browser updates correctly. 605dfc3
- [ ] Task: Verify that selecting a newly generated track successfully loads its initial (empty) state into the DAG visualizer. - [ ] Task: Verify that selecting a newly generated track successfully loads its initial (empty) state into the DAG visualizer.
## Phase 3: DAG & Spawn Interception Verification ## Phase 3: DAG & Spawn Interception Verification

View File

@@ -0,0 +1,50 @@
# Track definition file for the MMA planner.
# `discussion` holds the per-track chat history; empty for a newly created track.
discussion = []

# Track-level metadata: identity, display name, lifecycle status, and timestamps.
[metadata]
id = "track_51dabc55"
name = "Implement a robust mathematical engine for basic a"
status = "todo"
created_at = "2026-02-28T21:06:22.065199"
updated_at = "2026-02-28T21:06:22.065199"

# Tasks below form a strictly linear chain via `depends_on`:
# add -> subtract -> multiply -> divide.
[[tasks]]
id = "math_engine_add"
description = "Implement the addition operation for the mathematical engine."
status = "todo"
assigned_to = "unassigned"
context_requirements = []
depends_on = []
step_mode = false

[[tasks]]
id = "math_engine_subtract"
description = "Implement the subtraction operation for the mathematical engine."
status = "todo"
assigned_to = "unassigned"
context_requirements = []
# Blocked until the addition task completes.
depends_on = [
    "math_engine_add",
]
step_mode = false

[[tasks]]
id = "math_engine_multiply"
description = "Implement the multiplication operation for the mathematical engine."
status = "todo"
assigned_to = "unassigned"
context_requirements = []
# Blocked until the subtraction task completes.
depends_on = [
    "math_engine_subtract",
]
step_mode = false

[[tasks]]
id = "math_engine_divide"
description = "Implement the division operation for the mathematical engine, including handling division by zero."
status = "todo"
assigned_to = "unassigned"
context_requirements = []
# Blocked until the multiplication task completes; last task in the chain.
depends_on = [
    "math_engine_multiply",
]
step_mode = false

View File

@@ -0,0 +1,75 @@
# Track definition file for the MMA planner.
# `discussion` holds the per-track chat history; empty for a newly created track.
discussion = []

# Track-level metadata: identity, display name, lifecycle status, and timestamps.
[metadata]
id = "track_d01fdb6e"
name = "Implement a robust, testable arithmetic engine for"
status = "todo"
created_at = "2026-02-28T21:00:16.295678"
updated_at = "2026-02-28T21:00:16.295678"

# Task DAG shape (from `depends_on`): AE-001 is the root; AE-002..AE-005
# each fan out from AE-001 (independent of each other); AE-006 fans back in,
# depending on all four operation tasks.
[[tasks]]
id = "AE-001"
description = "Create the main ArithmeticEngine class with basic structure and initialization."
status = "todo"
assigned_to = "unassigned"
context_requirements = []
depends_on = []
step_mode = false

[[tasks]]
id = "AE-002"
description = "Implement the 'add' method in the ArithmeticEngine class."
status = "todo"
assigned_to = "unassigned"
context_requirements = []
depends_on = [
    "AE-001",
]
step_mode = false

[[tasks]]
id = "AE-003"
description = "Implement the 'subtract' method in the ArithmeticEngine class."
status = "todo"
assigned_to = "unassigned"
context_requirements = []
depends_on = [
    "AE-001",
]
step_mode = false

[[tasks]]
id = "AE-004"
description = "Implement the 'multiply' method in the ArithmeticEngine class."
status = "todo"
assigned_to = "unassigned"
context_requirements = []
depends_on = [
    "AE-001",
]
step_mode = false

[[tasks]]
id = "AE-005"
description = "Implement the 'divide' method in the ArithmeticEngine class, including division by zero handling."
status = "todo"
assigned_to = "unassigned"
context_requirements = []
depends_on = [
    "AE-001",
]
step_mode = false

[[tasks]]
id = "AE-006"
description = "Add comprehensive unit tests for all arithmetic operations."
status = "todo"
assigned_to = "unassigned"
context_requirements = []
# Fan-in task: requires every operation implementation before testing.
depends_on = [
    "AE-002",
    "AE-003",
    "AE-004",
    "AE-005",
]
step_mode = false

View File

@@ -77,3 +77,4 @@ if __name__ == "__main__":
test_skeletons = "class NewFeature: pass" test_skeletons = "class NewFeature: pass"
tickets = generate_tickets(test_brief, test_skeletons) tickets = generate_tickets(test_brief, test_skeletons)
print(json.dumps(tickets, indent=2)) print(json.dumps(tickets, indent=2))

View File

@@ -1,6 +1,6 @@
[ai] [ai]
provider = "gemini_cli" provider = "gemini_cli"
model = "gemini-2.5-flash-lite" model = "mock"
temperature = 0.0 temperature = 0.0
max_tokens = 8192 max_tokens = 8192
history_trunc_limit = 8000 history_trunc_limit = 8000

View File

@@ -935,6 +935,8 @@ class App:
self._pending_mma_approval = task self._pending_mma_approval = task
if "dialog_container" in task: if "dialog_container" in task:
task["dialog_container"][0] = dlg task["dialog_container"][0] = dlg
elif action == 'refresh_from_project':
self._refresh_from_project()
elif action == "mma_spawn_approval": elif action == "mma_spawn_approval":
dlg = MMASpawnApprovalDialog( dlg = MMASpawnApprovalDialog(
task.get("ticket_id"), task.get("ticket_id"),
@@ -1961,6 +1963,8 @@ class App:
def _bg_task(): def _bg_task():
for track_data in self.proposed_tracks: for track_data in self.proposed_tracks:
self._start_track_logic(track_data) self._start_track_logic(track_data)
with self._pending_gui_tasks_lock:
self._pending_gui_tasks.append({'action': 'refresh_from_project'}) # Ensure UI refresh after tracks are started
self.ai_status = "Tracks accepted and execution started." self.ai_status = "Tracks accepted and execution started."
threading.Thread(target=_bg_task, daemon=True).start() threading.Thread(target=_bg_task, daemon=True).start()

View File

@@ -37,6 +37,6 @@ web_search = true
fetch_url = true fetch_url = true
[mma] [mma]
epic = "" epic = "Build a simple calculator"
active_track_id = "" active_track_id = ""
tracks = [] tracks = []

View File

@@ -10,5 +10,5 @@ auto_add = true
[discussions.main] [discussions.main]
git_commit = "" git_commit = ""
last_updated = "2026-02-28T20:50:25" last_updated = "2026-02-28T21:00:47"
history = [] history = []

View File

@@ -28,12 +28,26 @@ def test_mma_epic_simulation(live_gui) -> None:
pass pass
client.set_value('mma_epic_input', 'Build a simple calculator') client.set_value('mma_epic_input', 'Build a simple calculator')
client.click('btn_mma_plan_epic') client.click('btn_mma_plan_epic')
# Poll client.get_mma_status() every 1 second (up to 30 seconds)
success = False # 1. Poll for Proposed Tracks
proposed_success = False
for i in range(30):
status = client.get_mma_status()
if status and status.get('proposed_tracks') and len(status['proposed_tracks']) > 0:
proposed_success = True
break
time.sleep(1)
assert proposed_success, "Failed to generate proposed tracks."
# 2. Accept Proposed Tracks
client.click('btn_mma_accept_tracks')
# 3. Poll for Final Tracks
tracks_success = False
for i in range(30): for i in range(30):
status = client.get_mma_status() status = client.get_mma_status()
if status and status.get('tracks') and len(status['tracks']) > 0: if status and status.get('tracks') and len(status['tracks']) > 0:
success = True tracks_success = True
break break
time.sleep(1) time.sleep(1)
assert success, "Failed to generate at least one track." assert tracks_success, "Failed to generate at least one track."