feat(ui): Support multiple concurrent AI response streams and strategy visualization
This commit is contained in:
@@ -1,4 +1,5 @@
|
||||
import pytest
|
||||
import json
|
||||
from unittest.mock import patch, MagicMock
|
||||
import threading
|
||||
import time
|
||||
@@ -29,9 +30,11 @@ def test_mma_ui_state_initialization(app_instance):
|
||||
assert hasattr(app_instance, 'ui_epic_input')
|
||||
assert hasattr(app_instance, 'proposed_tracks')
|
||||
assert hasattr(app_instance, '_show_track_proposal_modal')
|
||||
assert hasattr(app_instance, 'mma_streams')
|
||||
assert app_instance.ui_epic_input == ""
|
||||
assert app_instance.proposed_tracks == []
|
||||
assert app_instance._show_track_proposal_modal is False
|
||||
assert app_instance.mma_streams == {}
|
||||
|
||||
def test_process_pending_gui_tasks_show_track_proposal(app_instance):
|
||||
"""Verifies that the 'show_track_proposal' action correctly updates the UI state."""
|
||||
@@ -69,16 +72,21 @@ def test_cb_plan_epic_launches_thread(app_instance):
|
||||
app_instance._cb_plan_epic()
|
||||
|
||||
# Wait for the background thread to finish (it should be quick with mocks)
|
||||
# In a real test, we might need a more robust way to wait, but for now:
|
||||
max_wait = 5
|
||||
start_time = time.time()
|
||||
while len(app_instance._pending_gui_tasks) == 0 and time.time() - start_time < max_wait:
|
||||
while len(app_instance._pending_gui_tasks) < 2 and time.time() - start_time < max_wait:
|
||||
time.sleep(0.1)
|
||||
|
||||
assert len(app_instance._pending_gui_tasks) > 0
|
||||
task = app_instance._pending_gui_tasks[0]
|
||||
assert task['action'] == 'show_track_proposal'
|
||||
assert task['payload'] == mock_tracks
|
||||
assert len(app_instance._pending_gui_tasks) == 2
|
||||
|
||||
task1 = app_instance._pending_gui_tasks[0]
|
||||
assert task1['action'] == 'handle_ai_response'
|
||||
assert task1['payload']['stream_id'] == 'Tier 1'
|
||||
assert task1['payload']['text'] == json.dumps(mock_tracks, indent=2)
|
||||
|
||||
task2 = app_instance._pending_gui_tasks[1]
|
||||
assert task2['action'] == 'show_track_proposal'
|
||||
assert task2['payload'] == mock_tracks
|
||||
|
||||
mock_get_history.assert_called_once()
|
||||
mock_gen_tracks.assert_called_once()
|
||||
@@ -104,3 +112,36 @@ def test_process_pending_gui_tasks_mma_spawn_approval(app_instance):
|
||||
assert app_instance._mma_spawn_edit_mode is False
|
||||
assert task["dialog_container"][0] is not None
|
||||
assert task["dialog_container"][0]._ticket_id == "T1"
|
||||
|
||||
def test_handle_ai_response_with_stream_id(app_instance):
    """A payload carrying a stream_id is routed into mma_streams.

    The response text must land under the stream key, the status must be
    surfaced on ai_status, and the single-stream ai_response field must
    stay untouched (empty).
    """
    payload = {
        "text": "Tier 1 Strategy Content",
        "stream_id": "Tier 1",
        "status": "Thinking...",
    }
    app_instance._pending_gui_tasks.append(
        {"action": "handle_ai_response", "payload": payload}
    )

    # Drain the queued GUI task on the "main thread".
    app_instance._process_pending_gui_tasks()

    assert app_instance.mma_streams.get("Tier 1") == "Tier 1 Strategy Content"
    assert app_instance.ai_status == "Thinking..."
    assert app_instance.ai_response == ""
|
||||
|
||||
def test_handle_ai_response_fallback(app_instance):
    """Without a stream_id the response falls back to ai_response.

    The legacy single-response fields (ai_response / ai_status) must be
    updated and no entry may be created in mma_streams.
    """
    payload = {
        "text": "Regular AI Response",
        "status": "done",
    }
    app_instance._pending_gui_tasks.append(
        {"action": "handle_ai_response", "payload": payload}
    )

    # Drain the queued GUI task on the "main thread".
    app_instance._process_pending_gui_tasks()

    assert app_instance.ai_response == "Regular AI Response"
    assert app_instance.ai_status == "done"
    assert len(app_instance.mma_streams) == 0
|
||||
|
||||
Reference in New Issue
Block a user