feat(sdm): inject structural dependency mapping tags across codebase
Adds [C: caller] tags to function and method docstrings, and [M: mutation] / [U: usage] tags to class variables, based on cross-module call analysis.
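For reference, each injected tag is a single docstring line listing call sites as path:qualified_name pairs, as the hunks below show. A minimal sketch of the intended shape (the file names and the [M:]/[U:] placement here are illustrative assumptions, not taken from these hunks):

    class JobQueue:
        # Hypothetical class variable: [M: ...] marks writers, [U: ...] marks readers (format assumed).
        pending: int = 0  # [M: src/worker.py:Worker.take_job] [U: src/status_panel.py:render_status]

        def take_job(self) -> None:
            """
            Pop the next job off the queue.
            [C: src/worker.py:Worker.run, tests/test_queue.py:test_take_job]
            """
            self.pending -= 1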
@@ -4,7 +4,7 @@ from simulation.sim_base import BaseSimulation, run_sim
class AISettingsSimulation(BaseSimulation):
def run(self) -> None:
"""
[C: simulation/sim_base.py:run_sim, tests/conftest.py:kill_process_tree, tests/conftest.py:live_gui, tests/test_conductor_abort_event.py:test_conductor_abort_event_populated, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:get_vscode_processes, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_custom_window.py:test_app_window_is_borderless, tests/test_headless_simulation.py:module, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_mock_gemini_cli.py:run_mock, tests/test_orchestration_logic.py:test_conductor_engine_run, tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_sim_ai_settings.py:test_ai_settings_simulation_run, tests/test_sim_context.py:test_context_simulation_run, tests/test_sim_execution.py:test_execution_simulation_run, tests/test_sim_tools.py:test_tools_simulation_run]
[C: simulation/sim_base.py:run_sim, tests/conftest.py:kill_process_tree, tests/conftest.py:live_gui, tests/test_conductor_abort_event.py:test_conductor_abort_event_populated, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:get_vscode_processes, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_custom_window.py:test_app_window_is_borderless, tests/test_headless_simulation.py:module, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_mock_gemini_cli.py:run_mock, tests/test_orchestration_logic.py:test_conductor_engine_run, tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_sim_ai_settings.py:test_ai_settings_simulation_run, tests/test_sim_context.py:test_context_simulation_run, tests/test_sim_execution.py:test_execution_simulation_run, tests/test_sim_tools.py:test_tools_simulation_run]
"""
print("\n--- Running AI Settings Simulation (Gemini Only) ---")
# 1. Verify initial model

@@ -63,7 +63,7 @@ class BaseSimulation:

def setup(self, project_name: str = "SimProject") -> None:
"""
[C: simulation/sim_execution.py:ExecutionSimulation.setup, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_sim_base.py:test_base_simulation_setup]
[C: simulation/sim_execution.py:ExecutionSimulation.setup, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_sim_base.py:test_base_simulation_setup]
"""
print("\n[BaseSim] Connecting to GUI...")
if not self.client.wait_for_server(timeout=5):

@@ -92,7 +92,7 @@ class BaseSimulation:

def teardown(self) -> None:
"""
[C: tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live]
[C: tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live]
"""
if self.project_path and os.path.exists(self.project_path):
# We keep it for debugging if it failed, but usually we'd clean up

@@ -102,13 +102,13 @@ class BaseSimulation:

def get_value(self, tag: str) -> Any:
"""
[C: simulation/sim_context.py:ContextSimulation.run, simulation/sim_execution.py:ExecutionSimulation.run, simulation/workflow_sim.py:WorkflowSimulator.run_discussion_turn_async, simulation/workflow_sim.py:WorkflowSimulator.wait_for_ai_response, tests/smoke_status_hook.py:test_status_hook, tests/smoke_status_hook.py:wait_for_value, tests/test_auto_switch_sim.py:test_auto_switch_sim, tests/test_deepseek_infra.py:test_gui_provider_list_via_hooks, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_gui2_parity.py:test_gui2_click_hook_works, tests/test_gui2_parity.py:test_gui2_set_value_hook_works, tests/test_rag_phase4_final_verify.py:test_phase4_final_verify, tests/test_rag_phase4_stress.py:test_rag_large_codebase_verification_sim, tests/test_rag_visual_sim.py:test_rag_full_lifecycle_sim, tests/test_rag_visual_sim.py:test_rag_settings_persistence_sim, tests/test_selectable_ui.py:test_selectable_label_stability, tests/test_system_prompt_sim.py:test_system_prompt_sim, tests/test_undo_redo_sim.py:test_undo_redo_context_mutation, tests/test_undo_redo_sim.py:test_undo_redo_discussion_mutation, tests/test_undo_redo_sim.py:test_undo_redo_lifecycle, tests/test_visual_mma.py:test_visual_mma_components, tests/test_workspace_profiles_sim.py:test_workspace_profiles_restoration]
[C: simulation/sim_context.py:ContextSimulation.run, simulation/sim_execution.py:ExecutionSimulation.run, simulation/workflow_sim.py:WorkflowSimulator.run_discussion_turn_async, simulation/workflow_sim.py:WorkflowSimulator.wait_for_ai_response, tests/smoke_status_hook.py:test_status_hook, tests/smoke_status_hook.py:wait_for_value, tests/test_auto_switch_sim.py:test_auto_switch_sim, tests/test_deepseek_infra.py:test_gui_provider_list_via_hooks, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_gui2_parity.py:test_gui2_click_hook_works, tests/test_gui2_parity.py:test_gui2_set_value_hook_works, tests/test_rag_phase4_final_verify.py:test_phase4_final_verify, tests/test_rag_phase4_stress.py:test_rag_large_codebase_verification_sim, tests/test_rag_visual_sim.py:test_rag_full_lifecycle_sim, tests/test_rag_visual_sim.py:test_rag_settings_persistence_sim, tests/test_selectable_ui.py:test_selectable_label_stability, tests/test_system_prompt_sim.py:test_system_prompt_sim, tests/test_undo_redo_sim.py:test_undo_redo_context_mutation, tests/test_undo_redo_sim.py:test_undo_redo_discussion_mutation, tests/test_undo_redo_sim.py:test_undo_redo_lifecycle, tests/test_workspace_profiles_sim.py:test_workspace_profiles_restoration]
"""
return self.client.get_value(tag)

def wait_for_event(self, event_type: str, timeout: int = 5) -> Optional[dict]:
"""
[C: simulation/sim_execution.py:ExecutionSimulation.run, tests/test_z_negative_flows.py:test_mock_error_result, tests/test_z_negative_flows.py:test_mock_malformed_json, tests/test_z_negative_flows.py:test_mock_timeout]
[C: simulation/sim_execution.py:ExecutionSimulation.run, tests/test_z_negative_flows.py:test_mock_error_result, tests/test_z_negative_flows.py:test_mock_malformed_json, tests/test_z_negative_flows.py:test_mock_timeout]
"""
return self.client.wait_for_event(event_type, timeout)

@@ -128,7 +128,8 @@ class BaseSimulation:

def run_sim(sim_class: type) -> None:
"""
Helper to run a simulation class standalone.

Helper to run a simulation class standalone.
[C: simulation/sim_context.py:module, simulation/sim_execution.py:module, simulation/sim_tools.py:module]
"""
sim = sim_class()

@@ -5,7 +5,7 @@ from simulation.sim_base import BaseSimulation, run_sim
class ContextSimulation(BaseSimulation):
def run(self) -> None:
"""
[C: tests/conftest.py:kill_process_tree, tests/conftest.py:live_gui, tests/test_conductor_abort_event.py:test_conductor_abort_event_populated, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:get_vscode_processes, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_custom_window.py:test_app_window_is_borderless, tests/test_headless_simulation.py:module, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_mock_gemini_cli.py:run_mock, tests/test_orchestration_logic.py:test_conductor_engine_run, tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_sim_ai_settings.py:test_ai_settings_simulation_run, tests/test_sim_context.py:test_context_simulation_run, tests/test_sim_execution.py:test_execution_simulation_run, tests/test_sim_tools.py:test_tools_simulation_run]
[C: tests/conftest.py:kill_process_tree, tests/conftest.py:live_gui, tests/test_conductor_abort_event.py:test_conductor_abort_event_populated, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:get_vscode_processes, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_custom_window.py:test_app_window_is_borderless, tests/test_headless_simulation.py:module, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_mock_gemini_cli.py:run_mock, tests/test_orchestration_logic.py:test_conductor_engine_run, tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_sim_ai_settings.py:test_ai_settings_simulation_run, tests/test_sim_context.py:test_context_simulation_run, tests/test_sim_execution.py:test_execution_simulation_run, tests/test_sim_tools.py:test_tools_simulation_run]
"""
print("\n--- Running Context & Chat Simulation ---")
# 1. Skip Discussion Creation, use 'main'

@@ -5,7 +5,7 @@ from simulation.sim_base import BaseSimulation, run_sim
class ExecutionSimulation(BaseSimulation):
def setup(self, project_name: str = "SimProject") -> None:
"""
[C: tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_sim_base.py:test_base_simulation_setup]
[C: tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_sim_base.py:test_base_simulation_setup]
"""
super().setup(project_name)
if os.path.exists("hello.ps1"):

@@ -13,7 +13,7 @@ class ExecutionSimulation(BaseSimulation):

def run(self) -> None:
"""
[C: tests/conftest.py:kill_process_tree, tests/conftest.py:live_gui, tests/test_conductor_abort_event.py:test_conductor_abort_event_populated, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:get_vscode_processes, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_custom_window.py:test_app_window_is_borderless, tests/test_headless_simulation.py:module, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_mock_gemini_cli.py:run_mock, tests/test_orchestration_logic.py:test_conductor_engine_run, tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_sim_ai_settings.py:test_ai_settings_simulation_run, tests/test_sim_context.py:test_context_simulation_run, tests/test_sim_execution.py:test_execution_simulation_run, tests/test_sim_tools.py:test_tools_simulation_run]
[C: tests/conftest.py:kill_process_tree, tests/conftest.py:live_gui, tests/test_conductor_abort_event.py:test_conductor_abort_event_populated, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:get_vscode_processes, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_custom_window.py:test_app_window_is_borderless, tests/test_headless_simulation.py:module, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_mock_gemini_cli.py:run_mock, tests/test_orchestration_logic.py:test_conductor_engine_run, tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_sim_ai_settings.py:test_ai_settings_simulation_run, tests/test_sim_context.py:test_context_simulation_run, tests/test_sim_execution.py:test_execution_simulation_run, tests/test_sim_tools.py:test_tools_simulation_run]
"""
print("\n--- Running Execution & Modals Simulation ---")
# 1. Trigger script generation (Async so we don't block on the wait loop)

@@ -4,7 +4,7 @@ from simulation.sim_base import BaseSimulation, run_sim
class ToolsSimulation(BaseSimulation):
def run(self) -> None:
"""
[C: tests/conftest.py:kill_process_tree, tests/conftest.py:live_gui, tests/test_conductor_abort_event.py:test_conductor_abort_event_populated, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:get_vscode_processes, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_custom_window.py:test_app_window_is_borderless, tests/test_headless_simulation.py:module, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_mock_gemini_cli.py:run_mock, tests/test_orchestration_logic.py:test_conductor_engine_run, tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_sim_ai_settings.py:test_ai_settings_simulation_run, tests/test_sim_context.py:test_context_simulation_run, tests/test_sim_execution.py:test_execution_simulation_run, tests/test_sim_tools.py:test_tools_simulation_run]
[C: tests/conftest.py:kill_process_tree, tests/conftest.py:live_gui, tests/test_conductor_abort_event.py:test_conductor_abort_event_populated, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:get_vscode_processes, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_custom_window.py:test_app_window_is_borderless, tests/test_headless_simulation.py:module, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_mock_gemini_cli.py:run_mock, tests/test_orchestration_logic.py:test_conductor_engine_run, tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_sim_ai_settings.py:test_ai_settings_simulation_run, tests/test_sim_context.py:test_context_simulation_run, tests/test_sim_execution.py:test_execution_simulation_run, tests/test_sim_tools.py:test_tools_simulation_run]
"""
print("\n--- Running Tools Simulation ---")
# 1. Trigger list_directory tool

@@ -23,7 +23,7 @@ class UserSimAgent:

def wait_to_read(self, text: str) -> None:
"""
[C: simulation/workflow_sim.py:WorkflowSimulator.wait_for_ai_response]
[C: simulation/workflow_sim.py:WorkflowSimulator.wait_for_ai_response]
"""
if self.enable_delays:
delay = self.calculate_reading_delay(text)

@@ -31,7 +31,7 @@ class UserSimAgent:

def wait_to_think(self, probability: float = 0.2, min_delay: float = 2.0, max_delay: float = 5.0) -> None:
"""
[C: simulation/workflow_sim.py:WorkflowSimulator.wait_for_ai_response]
[C: simulation/workflow_sim.py:WorkflowSimulator.wait_for_ai_response]
"""
if self.enable_delays and random.random() < probability:
delay = random.uniform(min_delay, max_delay)

@@ -39,7 +39,7 @@ class UserSimAgent:

def simulate_typing(self, text: str, jitter_range: tuple[float, float] = (0.01, 0.05), batch_typing: bool = False) -> None:
"""
[C: simulation/workflow_sim.py:WorkflowSimulator.run_discussion_turn_async]
[C: simulation/workflow_sim.py:WorkflowSimulator.run_discussion_turn_async]
"""
if not self.enable_delays:
return

@@ -60,8 +60,9 @@ class UserSimAgent:
def generate_response(self, conversation_history: list[dict]) -> str:
"""

Generates a human-like response based on the conversation history.
conversation_history: list of dicts with 'role' and 'content'

Generates a human-like response based on the conversation history.
conversation_history: list of dicts with 'role' and 'content'
[C: simulation/workflow_sim.py:WorkflowSimulator.run_discussion_turn_async]
"""
last_ai_msg = ""

@@ -79,7 +80,8 @@ class UserSimAgent:
def perform_action_with_delay(self, action_func: Callable, *args: Any, **kwargs: Any) -> Any:
"""

Executes an action with a human-like delay if enabled.

Executes an action with a human-like delay if enabled.
[C: tests/test_user_agent.py:test_perform_action_with_delay]
"""
if self.enable_delays:

@@ -50,7 +50,7 @@ class WorkflowSimulator:

def setup_new_project(self, name: str, git_dir: str, project_path: str = None) -> None:
"""
[C: tests/test_workflow_sim.py:test_setup_new_project]
[C: tests/test_workflow_sim.py:test_setup_new_project]
"""
print(f"Setting up new project: {name}")
if project_path:

@@ -66,7 +66,7 @@ class WorkflowSimulator:

def create_discussion(self, name: str) -> None:
"""
[C: tests/test_workflow_sim.py:test_discussion_switching]
[C: tests/test_workflow_sim.py:test_discussion_switching]
"""
print(f"Creating discussion: {name}")
self.client.set_value("disc_new_name_input", name)

@@ -76,7 +76,7 @@ class WorkflowSimulator:

def switch_discussion(self, name: str) -> None:
"""
[C: tests/test_workflow_sim.py:test_discussion_switching]
[C: tests/test_workflow_sim.py:test_discussion_switching]
"""
print(f"Switching to discussion: {name}")
self.client.select_list_item("disc_listbox", name)

@@ -91,7 +91,7 @@ class WorkflowSimulator:

def truncate_history(self, pairs: int) -> None:
"""
[C: tests/test_workflow_sim.py:test_history_truncation]
[C: tests/test_workflow_sim.py:test_history_truncation]
"""
print(f"Truncating history to {pairs} pairs")
self.client.set_value("disc_truncate_pairs", pairs)

+39
-27

@@ -61,7 +61,10 @@ def resolve_paths(base_dir: Path, entry: str) -> list[Path]:
return sorted(filtered)

def group_files_by_dir(files: list[Any]) -> dict[str, list[Any]]:
"""Groups FileItem objects by their relative directory path."""
"""
Groups FileItem objects by their relative directory path.
[C: src/gui_2.py:App._render_context_files_table, tests/test_context_composition_phase3.py:test_group_files_by_dir]
"""
grouped = {}
for f in files:
path_str = f.path if hasattr(f, 'path') else str(f)

@@ -76,7 +79,10 @@ def group_files_by_dir(files: list[Any]) -> dict[str, list[Any]]:
return grouped

def compute_file_stats(abs_path: str) -> dict[str, int]:
"""Computes lines and basic AST stats for a file."""
"""
Computes lines and basic AST stats for a file.
[C: src/gui_2.py:App._stats_worker, tests/test_context_composition_phase3.py:test_compute_file_stats]
"""
stats = {"lines": 0, "ast_elements": 0}
try:
with open(abs_path, 'r', encoding='utf-8') as f:

@@ -97,8 +103,9 @@ def build_discussion_section(history: list[Any]) -> str:
"""

Builds a markdown section for discussion history.
Handles both legacy list[str] and new list[dict].

Builds a markdown section for discussion history.
Handles both legacy list[str] and new list[dict].
"""
sections = []
for i, entry in enumerate(history, start=1):

@@ -133,20 +140,21 @@ def build_file_items(base_dir: Path, files: list[str | dict[str, Any]]) -> list[
"""

Return a list of dicts describing each file, for use by ai_client when it
wants to upload individual files rather than inline everything as markdown.

Each dict has:
path : Path (resolved absolute path)
entry : str (original config entry string)
content : str (file text, or error string)
error : bool
mtime : float (last modification time, for skip-if-unchanged optimization)
tier : int | None (optional tier for context management)
auto_aggregate : bool
force_full : bool
view_mode : str (summary, full, skeleton, outline, none)
[C: src/app_controller.py:AppController._bg_task, src/orchestrator_pm.py:module, tests/test_aggregate_flags.py:test_auto_aggregate_skip, tests/test_aggregate_flags.py:test_force_full, tests/test_tiered_context.py:test_build_file_items_with_tiers]

Return a list of dicts describing each file, for use by ai_client when it
wants to upload individual files rather than inline everything as markdown.

Each dict has:
path : Path (resolved absolute path)
entry : str (original config entry string)
content : str (file text, or error string)
error : bool
mtime : float (last modification time, for skip-if-unchanged optimization)
tier : int | None (optional tier for context management)
auto_aggregate : bool
force_full : bool
view_mode : str (summary, full, skeleton, outline, none)
[C: src/app_controller.py:AppController._bg_task, src/orchestrator_pm.py:module, tests/test_aggregate_flags.py:test_auto_aggregate_skip, tests/test_aggregate_flags.py:test_force_full, tests/test_context_composition_phase6.py:test_view_mode_custom, tests/test_context_composition_phase6.py:test_view_mode_custom_empty_default_to_summary, tests/test_context_composition_phase6.py:test_view_mode_default_summary, tests/test_context_composition_phase6.py:test_view_mode_full, tests/test_context_composition_phase6.py:test_view_mode_none, tests/test_context_composition_phase6.py:test_view_mode_outline, tests/test_context_composition_phase6.py:test_view_mode_skeleton, tests/test_context_composition_phase6.py:test_view_mode_summary, tests/test_tiered_context.py:test_build_file_items_with_tiers, tests/test_tiered_context.py:test_build_files_section_with_dicts]
"""
with get_monitor().scope("build_file_items"):
items: list[dict[str, Any]] = []

@@ -245,8 +253,9 @@ def build_file_items(base_dir: Path, files: list[str | dict[str, Any]]) -> list[
def _build_files_section_from_items(file_items: list[dict[str, Any]]) -> str:
"""

Build the files markdown section from pre-read file items (avoids double I/O).
[C: tests/test_aggregate_flags.py:test_auto_aggregate_skip, tests/test_ui_summary_only_removal.py:test_aggregate_from_items_respects_auto_aggregate]

Build the files markdown section from pre-read file items (avoids double I/O).
[C: tests/test_aggregate_flags.py:test_auto_aggregate_skip, tests/test_context_composition_phase6.py:test_files_section_rendering, tests/test_tiered_context.py:test_build_files_section_with_dicts, tests/test_ui_summary_only_removal.py:test_aggregate_from_items_respects_auto_aggregate]
"""
sections = []
for item in file_items:

@@ -323,7 +332,8 @@ def build_markdown_from_items(file_items: list[dict[str, Any]], screenshot_base_
def build_markdown_no_history(file_items: list[dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], summary_only: bool = False, aggregation_strategy: str = "auto") -> str:
"""

Build markdown with only files + screenshots (no history). Used for stable caching.

Build markdown with only files + screenshots (no history). Used for stable caching.
[C: src/app_controller.py:AppController._do_generate, tests/test_history_management.py:test_aggregate_blacklist]
"""
return build_markdown_from_items(file_items, screenshot_base_dir, screenshots, history=[], summary_only=summary_only, aggregation_strategy=aggregation_strategy)

@@ -331,7 +341,8 @@ def build_markdown_no_history(file_items: list[dict[str, Any]], screenshot_base_
def build_discussion_text(history: list[str]) -> str:
"""

Build just the discussion history section text. Returns empty string if no history.

Build just the discussion history section text. Returns empty string if no history.
[C: src/app_controller.py:AppController._do_generate, tests/test_history_management.py:test_aggregate_includes_segregated_history]
"""
if not history:

@@ -342,9 +353,10 @@ def build_tier3_context(file_items: list[dict[str, Any]], screenshot_base_dir: P
"""

Tier 3 Context: Execution/Worker.
Full content for focus_files and files with tier=3, summaries/skeletons for others.
[C: tests/test_aggregate_flags.py:test_auto_aggregate_skip, tests/test_aggregate_flags.py:test_force_full, tests/test_perf_aggregate.py:test_build_tier3_context_scaling, tests/test_tiered_context.py:test_build_tier3_context_ast_skeleton, tests/test_tiered_context.py:test_build_tier3_context_exists, tests/test_tiered_context.py:test_tiered_context_by_tier_field]

Tier 3 Context: Execution/Worker.
Full content for focus_files and files with tier=3, summaries/skeletons for others.
[C: tests/test_aggregate_flags.py:test_auto_aggregate_skip, tests/test_aggregate_flags.py:test_force_full, tests/test_ast_masking_core.py:test_ast_masking_gencpp_samples, tests/test_gencpp_full_suite.py:test_gencpp_full_suite, tests/test_perf_aggregate.py:test_build_tier3_context_scaling, tests/test_tiered_context.py:test_build_tier3_context_ast_skeleton, tests/test_tiered_context.py:test_build_tier3_context_exists, tests/test_tiered_context.py:test_tiered_context_by_tier_field]
"""
with get_monitor().scope("build_tier3_context"):
focus_set = set(focus_files)

@@ -446,7 +458,7 @@ def build_markdown(base_dir: Path, files: list[str | dict[str, Any]], screenshot

def run(config: dict[str, Any], aggregation_strategy: str = "auto") -> tuple[str, Path, list[dict[str, Any]]]:
"""
[C: simulation/sim_base.py:run_sim, src/ai_client.py:_send_anthropic, src/ai_client.py:_send_deepseek, src/ai_client.py:_send_gemini, src/ai_client.py:_send_gemini_cli, src/ai_client.py:_send_minimax, src/app_controller.py:AppController._cb_start_track, src/app_controller.py:AppController._do_generate, src/app_controller.py:AppController._process_event_queue, src/app_controller.py:AppController._start_track_logic, src/external_editor.py:_find_vscode_in_registry, src/gui_2.py:App._render_snapshot_tab, src/gui_2.py:App.run, src/gui_2.py:main, src/mcp_client.py:get_git_diff, src/project_manager.py:get_git_commit, src/project_manager.py:get_git_log, src/rag_engine.py:RAGEngine._search_mcp, src/shell_runner.py:run_powershell, tests/conftest.py:kill_process_tree, tests/conftest.py:live_gui, tests/test_conductor_abort_event.py:test_conductor_abort_event_populated, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:get_vscode_processes, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_custom_window.py:test_app_window_is_borderless, tests/test_headless_simulation.py:module, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_mock_gemini_cli.py:run_mock, tests/test_orchestration_logic.py:test_conductor_engine_run, tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_sim_ai_settings.py:test_ai_settings_simulation_run, tests/test_sim_context.py:test_context_simulation_run, tests/test_sim_execution.py:test_execution_simulation_run, tests/test_sim_tools.py:test_tools_simulation_run]
[C: simulation/sim_base.py:run_sim, src/ai_client.py:_send_anthropic, src/ai_client.py:_send_deepseek, src/ai_client.py:_send_gemini, src/ai_client.py:_send_gemini_cli, src/ai_client.py:_send_minimax, src/app_controller.py:AppController._cb_start_track, src/app_controller.py:AppController._do_generate, src/app_controller.py:AppController._process_event_queue, src/app_controller.py:AppController._start_track_logic, src/external_editor.py:_find_vscode_in_registry, src/gui_2.py:App._render_snapshot_tab, src/gui_2.py:App.run, src/gui_2.py:main, src/mcp_client.py:get_git_diff, src/project_manager.py:get_git_commit, src/rag_engine.py:RAGEngine._search_mcp, src/shell_runner.py:run_powershell, tests/conftest.py:kill_process_tree, tests/conftest.py:live_gui, tests/test_conductor_abort_event.py:test_conductor_abort_event_populated, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:get_vscode_processes, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_custom_window.py:test_app_window_is_borderless, tests/test_headless_simulation.py:module, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_mock_gemini_cli.py:run_mock, tests/test_orchestration_logic.py:test_conductor_engine_run, tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_sim_ai_settings.py:test_ai_settings_simulation_run, tests/test_sim_context.py:test_context_simulation_run, tests/test_sim_execution.py:test_execution_simulation_run, tests/test_sim_tools.py:test_tools_simulation_run]
"""
namespace = config.get("project", {}).get("name")
if not namespace:

@@ -472,7 +484,7 @@ def run(config: dict[str, Any], aggregation_strategy: str = "auto") -> tuple[str
def main() -> None:
# Load global config to find active project
"""
[C: simulation/live_walkthrough.py:module, simulation/ping_pong.py:module, src/api_hooks.py:WebSocketServer._run_loop, src/gui_2.py:module, tests/mock_concurrent_mma.py:module, tests/mock_gemini_cli.py:module, tests/test_cli_tool_bridge.py:TestCliToolBridge.test_allow_decision, tests/test_cli_tool_bridge.py:TestCliToolBridge.test_deny_decision, tests/test_cli_tool_bridge.py:TestCliToolBridge.test_unreachable_hook_server, tests/test_cli_tool_bridge.py:module, tests/test_cli_tool_bridge_mapping.py:TestCliToolBridgeMapping.test_mapping_from_api_format, tests/test_cli_tool_bridge_mapping.py:module, tests/test_discussion_takes.py:module, tests/test_external_editor_gui.py:module, tests/test_headless_service.py:TestHeadlessStartup.test_headless_flag_triggers_run, tests/test_headless_service.py:TestHeadlessStartup.test_normal_startup_calls_app_run, tests/test_mma_skeleton.py:module, tests/test_orchestrator_pm.py:module, tests/test_orchestrator_pm_history.py:module, tests/test_post_process.py:module, tests/test_presets.py:module, tests/test_project_serialization.py:module, tests/test_run_worker_lifecycle_abort.py:module, tests/test_symbol_lookup.py:module, tests/test_system_prompt_exposure.py:module, tests/test_theme_nerv_fx.py:module]
[C: simulation/live_walkthrough.py:module, simulation/ping_pong.py:module, src/ai_server.py:module, src/api_hooks.py:WebSocketServer._run_loop, src/gui_2.py:module, tests/mock_concurrent_mma.py:module, tests/mock_gemini_cli.py:module, tests/test_cli_tool_bridge.py:TestCliToolBridge.test_allow_decision, tests/test_cli_tool_bridge.py:TestCliToolBridge.test_deny_decision, tests/test_cli_tool_bridge.py:TestCliToolBridge.test_unreachable_hook_server, tests/test_cli_tool_bridge.py:module, tests/test_cli_tool_bridge_mapping.py:TestCliToolBridgeMapping.test_mapping_from_api_format, tests/test_cli_tool_bridge_mapping.py:module, tests/test_discussion_takes.py:module, tests/test_external_editor_gui.py:module, tests/test_headless_service.py:TestHeadlessStartup.test_headless_flag_triggers_run, tests/test_headless_service.py:TestHeadlessStartup.test_normal_startup_calls_app_run, tests/test_mma_skeleton.py:module, tests/test_orchestrator_pm.py:module, tests/test_orchestrator_pm_history.py:module, tests/test_presets.py:module, tests/test_project_serialization.py:module, tests/test_run_worker_lifecycle_abort.py:module, tests/test_symbol_lookup.py:module, tests/test_system_prompt_exposure.py:module, tests/test_theme_nerv_fx.py:module]
"""
from src.paths import get_config_path
config_path = get_config_path()

+77
-46

@@ -53,12 +53,18 @@ events: EventEmitter = EventEmitter()

class ProviderError(Exception):
def __init__(self, kind: str, provider: str, original: Exception) -> None:
"""
[C: src/api_hooks.py:HookServerInstance.__init__, src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
"""
self.kind = kind
self.provider = provider
self.original = original
super().__init__(str(original))

def ui_message(self) -> str:
"""
[C: src/app_controller.py:AppController._handle_request_event, src/app_controller.py:_api_generate]
"""
labels = {
"quota": "QUOTA EXHAUSTED",
"rate_limit": "RATE LIMITED",

@@ -74,8 +80,9 @@ class ProviderError(Exception):

def set_model_params(temp: float, max_tok: int, trunc_limit: int = 8000, top_p: float = 1.0) -> None:
"""
Sets global generation parameters like temperature and max tokens.
[C: src/app_controller.py:AppController._handle_request_event, src/app_controller.py:AppController.generate]

Sets global generation parameters like temperature and max tokens.
[C: src/app_controller.py:AppController._handle_request_event, src/app_controller.py:_api_generate]
"""
global _temperature, _max_tokens, _history_trunc_limit, _top_p
_temperature = temp

@@ -130,21 +137,24 @@ _tool_approval_modes: dict[str, str] = {}

def get_current_tier() -> Optional[str]:
"""
Returns the current tier from thread-local storage.

Returns the current tier from thread-local storage.
[C: src/app_controller.py:AppController._on_tool_log, tests/test_ai_client_concurrency.py:intercepted_append]
"""
return getattr(_local_storage, "current_tier", None)

def set_current_tier(tier: Optional[str]) -> None:
"""
Sets the current tier in thread-local storage.

Sets the current tier in thread-local storage.
[C: src/app_controller.py:AppController._handle_request_event, src/conductor_tech_lead.py:generate_tickets, src/multi_agent_conductor.py:run_worker_lifecycle, tests/test_ai_client_concurrency.py:run_t1, tests/test_ai_client_concurrency.py:run_t2, tests/test_mma_agent_focus_phase1.py:reset_tier, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_none_when_unset, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_set_when_current_tier_set, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_tier2]
"""
_local_storage.current_tier = tier

def get_comms_log_callback() -> Optional[Callable[[dict[str, Any]], None]]:
"""
Returns the comms log callback (thread-local with global fallback).

Returns the comms log callback (thread-local with global fallback).
[C: src/multi_agent_conductor.py:run_worker_lifecycle]
"""
tl_cb = getattr(_local_storage, "comms_log_callback", None)

@@ -153,7 +163,8 @@ def get_comms_log_callback() -> Optional[Callable[[dict[str, Any]], None]]:

def set_comms_log_callback(cb: Optional[Callable[[dict[str, Any]], None]]) -> None:
"""
Sets the comms log callback (both global and thread-local).

Sets the comms log callback (both global and thread-local).
[C: src/app_controller.py:AppController._init_ai_and_hooks, src/multi_agent_conductor.py:run_worker_lifecycle]
"""
global comms_log_callback

@@ -196,29 +207,30 @@ _project_context_marker: str = ""

def set_custom_system_prompt(prompt: str) -> None:
"""
Sets a custom system prompt to be combined with the default instructions.
[C: simulation/user_agent.py:UserSimAgent.generate_response, src/app_controller.py:AppController._do_generate, src/app_controller.py:AppController._handle_request_event, src/app_controller.py:AppController.generate, src/conductor_tech_lead.py:generate_tickets, src/multi_agent_conductor.py:run_worker_lifecycle, src/orchestrator_pm.py:generate_tracks, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.setUp]

Sets a custom system prompt to be combined with the default instructions.
[C: simulation/user_agent.py:UserSimAgent.generate_response, src/app_controller.py:AppController._do_generate, src/app_controller.py:AppController._handle_request_event, src/app_controller.py:_api_generate, src/conductor_tech_lead.py:generate_tickets, src/multi_agent_conductor.py:run_worker_lifecycle, src/orchestrator_pm.py:generate_tracks, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.setUp]
"""
global _custom_system_prompt
_custom_system_prompt = prompt

def set_base_system_prompt(prompt: str) -> None:
"""
[C: src/app_controller.py:AppController._do_generate, src/app_controller.py:AppController._handle_request_event, src/app_controller.py:AppController.generate, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.setUp, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.test_ai_client_get_combined_respects_use_default, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.test_ai_client_set_base_overrides_when_default_false]
[C: src/app_controller.py:AppController._do_generate, src/app_controller.py:AppController._handle_request_event, src/app_controller.py:_api_generate, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.setUp, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.test_ai_client_get_combined_respects_use_default, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.test_ai_client_set_base_overrides_when_default_false]
"""
global _base_system_prompt_override
_base_system_prompt_override = prompt

def set_use_default_base_prompt(use_default: bool) -> None:
"""
[C: src/app_controller.py:AppController._do_generate, src/app_controller.py:AppController._handle_request_event, src/app_controller.py:AppController.generate, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.setUp, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.test_ai_client_get_combined_respects_use_default, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.test_ai_client_set_base_overrides_when_default_false]
[C: src/app_controller.py:AppController._do_generate, src/app_controller.py:AppController._handle_request_event, src/app_controller.py:_api_generate, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.setUp, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.test_ai_client_get_combined_respects_use_default, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.test_ai_client_set_base_overrides_when_default_false]
"""
global _use_default_base_system_prompt
_use_default_base_system_prompt = use_default

def set_project_context_marker(marker: str) -> None:
"""
[C: src/app_controller.py:AppController._do_generate, src/app_controller.py:AppController._handle_request_event, src/app_controller.py:AppController.generate]
[C: src/app_controller.py:AppController._do_generate, src/app_controller.py:AppController._handle_request_event, src/app_controller.py:_api_generate]
"""
global _project_context_marker
_project_context_marker = marker

@@ -228,7 +240,7 @@ def _get_context_marker() -> str:

def _get_combined_system_prompt(preset: Optional[ToolPreset] = None, bias: Optional[BiasProfile] = None) -> str:
"""
[C: tests/test_bias_efficacy.py:test_bias_efficacy_prompt_generation, tests/test_bias_integration.py:test_system_prompt_biasing, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.test_ai_client_get_combined_respects_use_default, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.test_ai_client_set_base_overrides_when_default_false]
[C: tests/test_bias_efficacy.py:test_bias_efficacy_prompt_generation, tests/test_bias_integration.py:test_system_prompt_biasing, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.test_ai_client_get_combined_respects_use_default, tests/test_system_prompt_exposure.py:TestSystemPromptExposure.test_ai_client_set_base_overrides_when_default_false]
"""
if preset is None: preset = _active_tool_preset
if bias is None: bias = _active_bias_profile

@@ -246,7 +258,7 @@ def _get_combined_system_prompt(preset: Optional[ToolPreset] = None, bias: Optio

def get_combined_system_prompt(preset: Optional[ToolPreset] = None, bias: Optional[BiasProfile] = None) -> str:
"""
[C: src/app_controller.py:AppController._do_generate, src/app_controller.py:AppController._handle_request_event]
[C: src/app_controller.py:AppController._do_generate, src/app_controller.py:AppController._handle_request_event]
"""
return _get_combined_system_prompt(preset, bias)

@@ -260,7 +272,7 @@ COMMS_CLAMP_CHARS: int = 300

def _append_comms(direction: str, kind: str, payload: dict[str, Any]) -> None:
"""
[C: tests/test_ai_client_concurrency.py:run_t1, tests/test_ai_client_concurrency.py:run_t2, tests/test_mma_agent_focus_phase1.py:test_append_comms_has_source_tier_key, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_none_when_unset, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_set_when_current_tier_set, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_tier2]
[C: tests/test_ai_client_concurrency.py:run_t1, tests/test_ai_client_concurrency.py:run_t2, tests/test_mma_agent_focus_phase1.py:test_append_comms_has_source_tier_key, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_none_when_unset, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_set_when_current_tier_set, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_tier2]
"""
entry: dict[str, Any] = {
"ts": datetime.datetime.now().strftime("%H:%M:%S"),

@@ -279,25 +291,25 @@ def _append_comms(direction: str, kind: str, payload: dict[str, Any]) -> None:

def get_comms_log() -> list[dict[str, Any]]:
"""
[C: src/app_controller.py:AppController._bg_task, src/app_controller.py:AppController._recalculate_session_usage, src/app_controller.py:AppController._start_track_logic, src/multi_agent_conductor.py:run_worker_lifecycle, tests/test_mma_agent_focus_phase1.py:test_append_comms_has_source_tier_key, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_none_when_unset, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_set_when_current_tier_set, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_tier2, tests/test_token_usage.py:test_token_usage_tracking]
[C: src/app_controller.py:AppController._bg_task, src/app_controller.py:AppController._recalculate_session_usage, src/app_controller.py:AppController._start_track_logic, src/multi_agent_conductor.py:run_worker_lifecycle, tests/test_mma_agent_focus_phase1.py:test_append_comms_has_source_tier_key, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_none_when_unset, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_set_when_current_tier_set, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_tier2, tests/test_token_usage.py:test_token_usage_tracking]
"""
return list(_comms_log)

def clear_comms_log() -> None:
"""
[C: src/app_controller.py:AppController._handle_reset_session, src/gui_2.py:App._render_comms_history_panel, src/gui_2.py:App._show_menus, tests/test_ai_client_concurrency.py:test_ai_client_tier_isolation, tests/test_token_usage.py:test_token_usage_tracking]
[C: src/app_controller.py:AppController._handle_reset_session, src/gui_2.py:App._render_comms_history_panel, src/gui_2.py:App._show_menus, tests/test_ai_client_concurrency.py:test_ai_client_tier_isolation, tests/test_token_usage.py:test_token_usage_tracking]
"""
_comms_log.clear()

def get_credentials_path() -> Path:
"""
[C: src/mcp_client.py:_is_allowed]
[C: src/mcp_client.py:_is_allowed]
"""
return Path(os.environ.get("SLOP_CREDENTIALS", str(Path(__file__).parent.parent / "credentials.toml")))

def _load_credentials() -> dict[str, Any]:
"""
[C: tests/test_deepseek_infra.py:test_credentials_error_mentions_deepseek, tests/test_minimax_provider.py:test_minimax_credentials_template]
[C: src/ai_server.py:_send_anthropic, src/ai_server.py:_send_deepseek, src/ai_server.py:_send_gemini, src/ai_server.py:_send_minimax, src/ai_server.py:handle_command, tests/test_deepseek_infra.py:test_credentials_error_mentions_deepseek, tests/test_minimax_provider.py:test_minimax_credentials_template]
"""
cred_path = get_credentials_path()
try:

@@ -433,7 +445,8 @@ def _classify_minimax_error(exc: Exception) -> ProviderError:

def set_provider(provider: str, model: str) -> None:
"""
Updates the active LLM provider and model name.

Updates the active LLM provider and model name.
[C: src/app_controller.py:AppController._handle_reset_session, src/app_controller.py:AppController._init_ai_and_hooks, src/app_controller.py:AppController.current_model, src/app_controller.py:AppController.current_provider, src/app_controller.py:AppController.do_fetch, src/multi_agent_conductor.py:run_worker_lifecycle, src/orchestrator_pm.py:generate_tracks, tests/conftest.py:reset_ai_client, tests/test_ai_cache_tracking.py:test_gemini_cache_tracking, tests/test_ai_client_cli.py:test_ai_client_send_gemini_cli, tests/test_api_events.py:test_send_emits_events_proper, tests/test_api_events.py:test_send_emits_tool_events, tests/test_deepseek_provider.py:test_deepseek_completion_logic, tests/test_deepseek_provider.py:test_deepseek_model_selection, tests/test_deepseek_provider.py:test_deepseek_payload_verification, tests/test_deepseek_provider.py:test_deepseek_reasoner_payload_verification, tests/test_deepseek_provider.py:test_deepseek_reasoning_logic, tests/test_deepseek_provider.py:test_deepseek_streaming, tests/test_deepseek_provider.py:test_deepseek_tool_calling, tests/test_gemini_cli_edge_cases.py:test_gemini_cli_loop_termination, tests/test_gemini_cli_integration.py:test_gemini_cli_full_integration, tests/test_gemini_cli_integration.py:test_gemini_cli_rejection_and_history, tests/test_gemini_cli_parity_regression.py:test_send_invokes_adapter_send, tests/test_gui2_mcp.py:test_mcp_tool_call_is_dispatched, tests/test_minimax_provider.py:test_minimax_default_model, tests/test_minimax_provider.py:test_minimax_model_selection, tests/test_mma_agent_focus_phase1.py:test_append_comms_has_source_tier_key, tests/test_rag_integration.py:test_rag_integration, tests/test_tier4_interceptor.py:test_ai_client_passes_qa_callback, tests/test_tier4_interceptor.py:test_gemini_provider_passes_qa_callback_to_run_script, tests/test_token_usage.py:test_token_usage_tracking]
"""
global _provider, _model

@@ -459,14 +472,16 @@ def set_provider(provider: str, model: str) -> None:

def get_provider() -> str:
"""
Returns the current active provider name.

Returns the current active provider name.
[C: src/multi_agent_conductor.py:run_worker_lifecycle]
"""
return _provider

def cleanup() -> None:
"""
Performs cleanup operations like deleting server-side Gemini caches.

Performs cleanup operations like deleting server-side Gemini caches.
[C: src/app_controller.py:AppController.clear_cache, src/app_controller.py:AppController.shutdown, tests/test_ai_cache_tracking.py:test_gemini_cache_tracking_cleanup, tests/test_log_registry.py:TestLogRegistry.tearDown, tests/test_project_serialization.py:TestProjectSerialization.tearDown]
"""
global _gemini_client, _gemini_cache, _gemini_cached_file_paths

@@ -479,8 +494,9 @@ def cleanup() -> None:

def reset_session() -> None:
"""
Clears conversation history and resets provider-specific session state.
[C: src/app_controller.py:AppController._handle_reset_session, src/app_controller.py:AppController.current_model, src/app_controller.py:AppController.current_provider, src/app_controller.py:AppController.init_state, src/gui_2.py:App._render_provider_panel, src/gui_2.py:App._show_menus, src/multi_agent_conductor.py:run_worker_lifecycle, tests/conftest.py:live_gui, tests/conftest.py:reset_ai_client, tests/test_ai_cache_tracking.py:test_gemini_cache_tracking, tests/test_ai_client_cli.py:test_ai_client_send_gemini_cli, tests/test_api_events.py:test_send_emits_events_proper, tests/test_api_events.py:test_send_emits_tool_events, tests/test_deepseek_provider.py:test_deepseek_payload_verification, tests/test_deepseek_provider.py:test_deepseek_reasoner_payload_verification, tests/test_gemini_cli_integration.py:test_gemini_cli_full_integration, tests/test_gemini_cli_integration.py:test_gemini_cli_rejection_and_history, tests/test_gemini_metrics.py:test_get_gemini_cache_stats_with_mock_client, tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation, tests/test_minimax_provider.py:test_minimax_history_bleed_stats, tests/test_mma_agent_focus_phase1.py:test_append_comms_has_source_tier_key, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_none_when_unset, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_set_when_current_tier_set, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_tier2, tests/test_session_logger_reset.py:test_reset_session, tests/test_token_usage.py:test_token_usage_tracking]

Clears conversation history and resets provider-specific session state.
[C: src/app_controller.py:AppController._handle_reset_session, src/app_controller.py:AppController.current_model, src/app_controller.py:AppController.current_provider, src/app_controller.py:AppController.init_state, src/gui_2.py:App._render_provider_panel, src/gui_2.py:App._show_menus, src/multi_agent_conductor.py:run_worker_lifecycle, tests/conftest.py:live_gui, tests/conftest.py:reset_ai_client, tests/test_ai_cache_tracking.py:test_gemini_cache_tracking, tests/test_ai_client_cli.py:test_ai_client_send_gemini_cli, tests/test_api_events.py:test_send_emits_events_proper, tests/test_api_events.py:test_send_emits_tool_events, tests/test_deepseek_provider.py:test_deepseek_payload_verification, tests/test_deepseek_provider.py:test_deepseek_reasoner_payload_verification, tests/test_gemini_cli_integration.py:test_gemini_cli_full_integration, tests/test_gemini_cli_integration.py:test_gemini_cli_rejection_and_history, tests/test_gemini_metrics.py:test_get_gemini_cache_stats_with_mock_client, tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation, tests/test_mma_agent_focus_phase1.py:test_append_comms_has_source_tier_key, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_none_when_unset, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_set_when_current_tier_set, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_tier2, tests/test_session_logger_reset.py:test_reset_session, tests/test_token_usage.py:test_token_usage_tracking]
"""
global _gemini_client, _gemini_chat, _gemini_cache
global _gemini_cache_md_hash, _gemini_cache_created_at, _gemini_cached_file_paths

@@ -521,7 +537,7 @@ def reset_session() -> None:

def list_models(provider: str) -> list[str]:
"""
[C: src/app_controller.py:AppController.do_fetch, tests/test_agent_capabilities.py:test_agent_capabilities_listing, tests/test_ai_client_list_models.py:test_list_models_gemini_cli, tests/test_deepseek_infra.py:test_deepseek_model_listing, tests/test_minimax_provider.py:test_minimax_list_models]
[C: src/app_controller.py:AppController.do_fetch, tests/test_agent_capabilities.py:test_agent_capabilities_listing, tests/test_ai_client_list_models.py:test_list_models_gemini_cli, tests/test_deepseek_infra.py:test_deepseek_model_listing, tests/test_minimax_provider.py:test_minimax_list_models]
"""
creds = _load_credentials()
if provider == "gemini":
@@ -546,8 +562,9 @@ _agent_tools: dict[str, bool] = {}
def set_agent_tools(tools: dict[str, bool]) -> None:
"""
Configures which tools are enabled for the AI agent.
[C: src/app_controller.py:AppController._handle_request_event, src/app_controller.py:AppController.generate, tests/test_agent_tools_wiring.py:test_build_anthropic_tools_conversion, tests/test_agent_tools_wiring.py:test_set_agent_tools, tests/test_tool_access_exclusion.py:test_build_anthropic_tools_excludes_disabled, tests/test_tool_access_exclusion.py:test_build_deepseek_tools_excludes_disabled, tests/test_tool_access_exclusion.py:test_gemini_tool_declaration_excludes_disabled, tests/test_tool_access_exclusion.py:test_set_agent_tools_clears_caches]
Configures which tools are enabled for the AI agent.
[C: src/app_controller.py:AppController._handle_request_event, src/app_controller.py:_api_generate, tests/test_agent_tools_wiring.py:test_build_anthropic_tools_conversion, tests/test_agent_tools_wiring.py:test_set_agent_tools, tests/test_tool_access_exclusion.py:test_build_anthropic_tools_excludes_disabled, tests/test_tool_access_exclusion.py:test_build_deepseek_tools_excludes_disabled, tests/test_tool_access_exclusion.py:test_gemini_tool_declaration_excludes_disabled, tests/test_tool_access_exclusion.py:test_set_agent_tools_clears_caches]
"""
global _agent_tools, _CACHED_ANTHROPIC_TOOLS, _CACHED_DEEPSEEK_TOOLS
_agent_tools = tools
@@ -556,7 +573,8 @@ def set_agent_tools(tools: dict[str, bool]) -> None:
def set_tool_preset(preset_name: Optional[str]) -> None:
"""
Loads a tool preset and applies it via set_agent_tools.
Loads a tool preset and applies it via set_agent_tools.
[C: src/app_controller.py:AppController.init_state, src/gui_2.py:App._render_persona_selector_panel, src/multi_agent_conductor.py:run_worker_lifecycle, tests/test_bias_integration.py:test_set_tool_preset_with_objects, tests/test_tool_preset_env.py:test_tool_preset_env_loading, tests/test_tool_preset_env.py:test_tool_preset_env_no_var, tests/test_tool_presets_execution.py:test_tool_ask_approval, tests/test_tool_presets_execution.py:test_tool_auto_approval, tests/test_tool_presets_execution.py:test_tool_rejection]
"""
global _agent_tools, _CACHED_ANTHROPIC_TOOLS, _CACHED_DEEPSEEK_TOOLS, _tool_approval_modes, _active_tool_preset
@@ -590,7 +608,8 @@ def set_tool_preset(preset_name: Optional[str]) -> None:
def set_bias_profile(profile_name: Optional[str]) -> None:
"""
Sets the active tool bias profile for tuning model behavior.
Sets the active tool bias profile for tuning model behavior.
[C: src/app_controller.py:AppController.init_state, src/gui_2.py:App._render_agent_tools_panel, src/gui_2.py:App._render_persona_selector_panel, src/multi_agent_conductor.py:run_worker_lifecycle]
"""
global _active_bias_profile
@@ -613,7 +632,7 @@ def get_bias_profile() -> Optional[str]:
def _build_anthropic_tools() -> list[dict[str, Any]]:
"""
[C: tests/test_agent_tools_wiring.py:test_build_anthropic_tools_conversion, tests/test_tool_access_exclusion.py:test_build_anthropic_tools_excludes_disabled]
[C: tests/test_agent_tools_wiring.py:test_build_anthropic_tools_conversion, tests/test_tool_access_exclusion.py:test_build_anthropic_tools_excludes_disabled]
"""
raw_tools: list[dict[str, Any]] = []
for spec in mcp_client.get_tool_schemas():
@@ -654,7 +673,7 @@ _CACHED_ANTHROPIC_TOOLS: Optional[list[dict[str, Any]]] = None
def _get_anthropic_tools() -> list[dict[str, Any]]:
"""
[C: tests/test_bias_efficacy.py:test_bias_efficacy_prompt_generation, tests/test_bias_efficacy.py:test_bias_parameter_nudging, tests/test_bias_integration.py:test_tool_declaration_biasing_anthropic]
[C: tests/test_bias_efficacy.py:test_bias_efficacy_prompt_generation, tests/test_bias_efficacy.py:test_bias_parameter_nudging, tests/test_bias_integration.py:test_tool_declaration_biasing_anthropic]
"""
global _CACHED_ANTHROPIC_TOOLS
if _CACHED_ANTHROPIC_TOOLS is None:
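The cached accessor above follows a build-once pattern, and test_set_agent_tools_clears_caches implies the cache is dropped whenever the enabled-tool set changes. A hedged sketch of that lazy cache plus invalidation (names and the placeholder build step below are illustrative, not the module's exact internals):

from typing import Any, Optional

_CACHED_TOOLS: Optional[list[dict[str, Any]]] = None

def _get_tools_sketch() -> list[dict[str, Any]]:
    # build the converted tool list once and reuse it until the cache is invalidated
    global _CACHED_TOOLS
    if _CACHED_TOOLS is None:
        _CACHED_TOOLS = [{"name": "example_tool"}]  # placeholder for the real schema conversion
    return _CACHED_TOOLS

def set_tools_sketch(tools: dict[str, bool]) -> None:
    # changing the enabled-tool set must drop any cached conversions
    global _CACHED_TOOLS
    _CACHED_TOOLS = None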
@@ -663,7 +682,7 @@ def _get_anthropic_tools() -> list[dict[str, Any]]:
def _gemini_tool_declaration() -> Optional[types.Tool]:
"""
[C: tests/test_tool_access_exclusion.py:test_gemini_tool_declaration_excludes_disabled]
[C: tests/test_tool_access_exclusion.py:test_gemini_tool_declaration_excludes_disabled]
"""
raw_tools: list[dict[str, Any]] = []
for spec in mcp_client.get_tool_schemas():
@@ -733,8 +752,9 @@ async def _execute_tool_calls_concurrently(
) -> list[tuple[str, str, str, str]]: # tool_name, call_id, output, original_name
"""
Executes multiple tool calls concurrently using asyncio.gather.
Returns a list of (tool_name, call_id, output, original_name).
Executes multiple tool calls concurrently using asyncio.gather.
Returns a list of (tool_name, call_id, output, original_name).
[C: tests/test_async_tools.py:test_execute_tool_calls_concurrently_exception_handling, tests/test_async_tools.py:test_execute_tool_calls_concurrently_timing]
"""
monitor = performance_monitor.get_monitor()
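The executor above fans tool calls out with asyncio.gather and returns per-call tuples; a minimal sketch of that pattern (illustrative only, the helper names and the stand-in dispatch are assumptions, not the module's actual internals):

import asyncio

async def run_calls_concurrently_sketch(calls):
    # calls: list of (tool_name, call_id, args) tuples -- hypothetical shape for illustration
    async def _one(tool_name, call_id, args):
        try:
            # stand-in for the real tool dispatch, run off the event loop thread
            output = await asyncio.to_thread(lambda: f"ran {tool_name} with {args}")
            return (tool_name, call_id, output, tool_name)
        except Exception as exc:
            # capture failures per call so one error does not cancel the whole batch
            return (tool_name, call_id, f"ERROR: {exc}", tool_name)

    return await asyncio.gather(*(_one(*c) for c in calls))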
@@ -783,7 +803,7 @@ async def _execute_single_tool_call_async(
patch_callback: Optional[Callable[[str, str], Optional[str]]] = None
) -> tuple[str, str, str, str]:
"""
[C: tests/test_external_mcp_e2e.py:test_external_mcp_e2e_refresh_and_call, tests/test_external_mcp_hitl.py:test_external_mcp_hitl_approval, tests/test_external_mcp_hitl.py:test_external_mcp_hitl_rejection, tests/test_tool_presets_execution.py:test_tool_ask_approval, tests/test_tool_presets_execution.py:test_tool_auto_approval, tests/test_tool_presets_execution.py:test_tool_rejection]
[C: tests/test_external_mcp_e2e.py:test_external_mcp_e2e_refresh_and_call, tests/test_external_mcp_hitl.py:test_external_mcp_hitl_approval, tests/test_external_mcp_hitl.py:test_external_mcp_hitl_rejection, tests/test_tool_presets_execution.py:test_tool_ask_approval, tests/test_tool_presets_execution.py:test_tool_auto_approval, tests/test_tool_presets_execution.py:test_tool_rejection]
"""
if tier:
set_current_tier(tier)
@@ -921,7 +941,7 @@ def _build_file_diff_text(changed_items: list[dict[str, Any]]) -> str:
def _build_deepseek_tools() -> list[dict[str, Any]]:
"""
[C: tests/test_tool_access_exclusion.py:test_build_deepseek_tools_excludes_disabled]
[C: tests/test_tool_access_exclusion.py:test_build_deepseek_tools_excludes_disabled]
"""
raw_tools: list[dict[str, Any]] = []
for spec in mcp_client.get_tool_schemas():
@@ -1061,7 +1081,7 @@ def _strip_stale_file_refreshes(history: list[dict[str, Any]]) -> None:
def _chunk_text(text: str, chunk_size: int) -> list[str]:
"""
[C: src/rag_engine.py:RAGEngine._chunk_code, src/rag_engine.py:RAGEngine.index_file]
[C: src/rag_engine.py:RAGEngine._chunk_code, src/rag_engine.py:RAGEngine.index_file]
"""
return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
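Since the body is a plain fixed-width slice, a quick self-contained check of its behaviour (the function below is copied from the hunk above, the asserts are just an example):

def _chunk_text(text: str, chunk_size: int) -> list[str]:
    return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]

# fixed-width slicing: the last chunk may be shorter than chunk_size, empty input yields no chunks
assert _chunk_text("abcdefghij", 4) == ["abcd", "efgh", "ij"]
assert _chunk_text("", 4) == []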
@@ -1178,6 +1198,9 @@ def _repair_anthropic_history(history: list[dict[str, Any]]) -> None:
})
def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_items: list[dict[str, Any]] | None = None, discussion_history: str = "", pre_tool_callback: Optional[Callable[[str, str, Optional[Callable[[str], str]]], Optional[str]]] = None, qa_callback: Optional[Callable[[str], str]] = None, stream_callback: Optional[Callable[[str], None]] = None, patch_callback: Optional[Callable[[str, str], Optional[str]]] = None) -> str:
"""
[C: src/ai_server.py:_handle_send]
"""
monitor = performance_monitor.get_monitor()
if monitor.enabled: monitor.start_component("ai_client._send_anthropic")
try:
@@ -1358,7 +1381,7 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
def get_gemini_cache_stats() -> dict[str, Any]:
"""
[C: src/app_controller.py:AppController._recalculate_session_usage, src/app_controller.py:AppController._update_cached_stats, tests/test_ai_cache_tracking.py:test_gemini_cache_tracking, tests/test_gemini_metrics.py:test_get_gemini_cache_stats_with_mock_client]
[C: src/app_controller.py:AppController._recalculate_session_usage, src/app_controller.py:AppController._update_cached_stats, tests/test_ai_cache_tracking.py:test_gemini_cache_tracking, tests/test_gemini_metrics.py:test_get_gemini_cache_stats_with_mock_client]
"""
_ensure_gemini_client()
if not _gemini_client:
@@ -1398,7 +1421,7 @@ def _list_gemini_models(api_key: str) -> list[str]:
def _ensure_gemini_client() -> None:
"""
[C: src/rag_engine.py:GeminiEmbeddingProvider.embed]
[C: src/rag_engine.py:GeminiEmbeddingProvider.embed]
"""
global _gemini_client
if _gemini_client is None:
@@ -1424,7 +1447,7 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str,
stream_callback: Optional[Callable[[str], None]] = None,
patch_callback: Optional[Callable[[str, str], Optional[str]]] = None) -> str:
"""
[C: tests/test_tier4_interceptor.py:test_gemini_provider_passes_qa_callback_to_run_script]
[C: src/ai_server.py:_handle_send, tests/test_tier4_interceptor.py:test_gemini_provider_passes_qa_callback_to_run_script]
"""
global _gemini_chat, _gemini_cache, _gemini_cache_md_hash, _gemini_cache_created_at, _gemini_cached_file_paths
monitor = performance_monitor.get_monitor()
@@ -1656,6 +1679,9 @@ def _send_gemini_cli(md_content: str, user_message: str, base_dir: str,
qa_callback: Optional[Callable[[str], str]] = None,
stream_callback: Optional[Callable[[str], None]] = None,
patch_callback: Optional[Callable[[str, str], Optional[str]]] = None) -> str:
"""
[C: src/ai_server.py:_handle_send]
"""
global _gemini_cli_adapter
try:
if _gemini_cli_adapter is None:
@@ -1802,6 +1828,9 @@ def _send_deepseek(md_content: str, user_message: str, base_dir: str,
qa_callback: Optional[Callable[[str], str]] = None,
stream_callback: Optional[Callable[[str], None]] = None,
patch_callback: Optional[Callable[[str, str], Optional[str]]] = None) -> str:
"""
[C: src/ai_server.py:_handle_send]
"""
monitor = performance_monitor.get_monitor()
if monitor.enabled: monitor.start_component("ai_client._send_deepseek")
try:
@@ -2081,6 +2110,9 @@ def _send_minimax(md_content: str, user_message: str, base_dir: str,
qa_callback: Optional[Callable[[str], str]] = None,
stream_callback: Optional[Callable[[str], None]] = None,
patch_callback: Optional[Callable[[str, str], Optional[str]]] = None) -> str:
"""
[C: src/ai_server.py:_handle_send]
"""
try:
mcp_client.configure(file_items or [], [base_dir])
creds = _load_credentials()
@@ -2297,7 +2329,6 @@ def _send_minimax(md_content: str, user_message: str, base_dir: str,
def run_tier4_analysis(stderr: str) -> str:
"""
[C: src/native_orchestrator.py:NativeOrchestrator.analyze_error]
"""
if not stderr or not stderr.strip():
return ""
@@ -2346,7 +2377,7 @@ def run_tier4_patch_callback(stderr: str, base_dir: str) -> Optional[str]:
def run_tier4_patch_generation(error: str, file_context: str) -> str:
"""
[C: src/gui_2.py:App.request_patch_from_tier4, src/native_orchestrator.py:NativeOrchestrator.run_tier4_patch, tests/test_tier4_patch_generation.py:test_run_tier4_patch_generation_calls_ai, tests/test_tier4_patch_generation.py:test_run_tier4_patch_generation_empty_error, tests/test_tier4_patch_generation.py:test_run_tier4_patch_generation_returns_diff]
[C: src/gui_2.py:App.request_patch_from_tier4, tests/test_tier4_patch_generation.py:test_run_tier4_patch_generation_calls_ai, tests/test_tier4_patch_generation.py:test_run_tier4_patch_generation_empty_error, tests/test_tier4_patch_generation.py:test_run_tier4_patch_generation_returns_diff]
"""
if not error or not error.strip():
return ""
@@ -2375,7 +2406,7 @@ def run_tier4_patch_generation(error: str, file_context: str) -> str:
def get_token_stats(md_content: str) -> dict[str, Any]:
"""
[C: src/app_controller.py:AppController._refresh_api_metrics]
[C: src/app_controller.py:AppController._refresh_api_metrics]
"""
global _provider, _gemini_client, _model, _CHARS_PER_TOKEN
total_tokens = 0
@@ -2424,7 +2455,7 @@ def send(
rag_engine: Optional[Any] = None,
) -> str:
"""
[C: simulation/user_agent.py:UserSimAgent.generate_response, src/api_hooks.py:WebSocketServer._handler, src/api_hooks.py:WebSocketServer.broadcast, src/app_controller.py:AppController._handle_request_event, src/app_controller.py:AppController.generate, src/conductor_tech_lead.py:generate_tickets, src/multi_agent_conductor.py:run_worker_lifecycle, src/native_orchestrator.py:NativeOrchestrator.execute_ticket, src/orchestrator_pm.py:generate_tracks, tests/test_ai_cache_tracking.py:test_gemini_cache_tracking, tests/test_ai_client_cli.py:test_ai_client_send_gemini_cli, tests/test_api_events.py:test_send_emits_events_proper, tests/test_api_events.py:test_send_emits_tool_events, tests/test_deepseek_provider.py:test_deepseek_completion_logic, tests/test_deepseek_provider.py:test_deepseek_payload_verification, tests/test_deepseek_provider.py:test_deepseek_reasoner_payload_verification, tests/test_deepseek_provider.py:test_deepseek_reasoning_logic, tests/test_deepseek_provider.py:test_deepseek_streaming, tests/test_deepseek_provider.py:test_deepseek_tool_calling, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_full_flow_integration, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_captures_usage_metadata, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_handles_tool_use_events, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_parses_jsonl_output, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_starts_subprocess_with_correct_args, tests/test_gemini_cli_adapter_parity.py:TestGeminiCliAdapterParity.test_send_parses_tool_calls_from_streaming_json, tests/test_gemini_cli_adapter_parity.py:TestGeminiCliAdapterParity.test_send_starts_subprocess_with_model, tests/test_gemini_cli_edge_cases.py:test_gemini_cli_context_bleed_prevention, tests/test_gemini_cli_edge_cases.py:test_gemini_cli_loop_termination, tests/test_gemini_cli_integration.py:test_gemini_cli_full_integration, tests/test_gemini_cli_integration.py:test_gemini_cli_rejection_and_history, tests/test_gemini_cli_parity_regression.py:test_get_history_bleed_stats, tests/test_gemini_cli_parity_regression.py:test_send_invokes_adapter_send, tests/test_gui2_mcp.py:test_mcp_tool_call_is_dispatched, tests/test_tier4_interceptor.py:test_ai_client_passes_qa_callback, tests/test_token_usage.py:test_token_usage_tracking, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
[C: simulation/user_agent.py:UserSimAgent.generate_response, src/api_hooks.py:WebSocketServer._handler, src/api_hooks.py:WebSocketServer.broadcast, src/app_controller.py:AppController._handle_request_event, src/app_controller.py:_api_generate, src/conductor_tech_lead.py:generate_tickets, src/multi_agent_conductor.py:run_worker_lifecycle, src/orchestrator_pm.py:generate_tracks, tests/test_ai_cache_tracking.py:test_gemini_cache_tracking, tests/test_ai_client_cli.py:test_ai_client_send_gemini_cli, tests/test_api_events.py:test_send_emits_events_proper, tests/test_api_events.py:test_send_emits_tool_events, tests/test_deepseek_provider.py:test_deepseek_completion_logic, tests/test_deepseek_provider.py:test_deepseek_payload_verification, tests/test_deepseek_provider.py:test_deepseek_reasoner_payload_verification, tests/test_deepseek_provider.py:test_deepseek_reasoning_logic, tests/test_deepseek_provider.py:test_deepseek_streaming, tests/test_deepseek_provider.py:test_deepseek_tool_calling, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_full_flow_integration, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_captures_usage_metadata, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_handles_tool_use_events, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_parses_jsonl_output, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_starts_subprocess_with_correct_args, tests/test_gemini_cli_adapter_parity.py:TestGeminiCliAdapterParity.test_send_parses_tool_calls_from_streaming_json, tests/test_gemini_cli_adapter_parity.py:TestGeminiCliAdapterParity.test_send_starts_subprocess_with_model, tests/test_gemini_cli_edge_cases.py:test_gemini_cli_context_bleed_prevention, tests/test_gemini_cli_edge_cases.py:test_gemini_cli_loop_termination, tests/test_gemini_cli_integration.py:test_gemini_cli_full_integration, tests/test_gemini_cli_integration.py:test_gemini_cli_rejection_and_history, tests/test_gemini_cli_parity_regression.py:test_send_invokes_adapter_send, tests/test_gui2_mcp.py:test_mcp_tool_call_is_dispatched, tests/test_tier4_interceptor.py:test_ai_client_passes_qa_callback, tests/test_token_usage.py:test_token_usage_tracking, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
"""
monitor = performance_monitor.get_monitor()
if monitor.enabled: monitor.start_component("ai_client.send")
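The provider back-ends above share one signature and send() routes between them; a minimal sketch of that dispatch (an assumed shape only: the provider key strings are guesses, and the real send() also handles monitoring, RAG and callbacks):

def dispatch_sketch(provider: str, md_content: str, user_message: str, base_dir: str, **kwargs) -> str:
    # illustrative routing; the module itself keeps the active provider in a module-level global
    handlers = {
        "anthropic": _send_anthropic,
        "gemini": _send_gemini,
        "gemini-cli": _send_gemini_cli,
        "deepseek": _send_deepseek,
        "minimax": _send_minimax,
    }
    if provider not in handlers:
        raise ValueError(f"Unsupported provider: {provider}")
    return handlers[provider](md_content, user_message, base_dir, **kwargs)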
@@ -2473,7 +2504,7 @@ def send(
def _add_bleed_derived(d: dict[str, Any], sys_tok: int = 0, tool_tok: int = 0) -> dict[str, Any]:
"""
[C: tests/test_token_viz.py:test_add_bleed_derived_aliases, tests/test_token_viz.py:test_add_bleed_derived_breakdown, tests/test_token_viz.py:test_add_bleed_derived_headroom, tests/test_token_viz.py:test_add_bleed_derived_headroom_clamped_to_zero, tests/test_token_viz.py:test_add_bleed_derived_history_clamped_to_zero, tests/test_token_viz.py:test_add_bleed_derived_would_trim_false, tests/test_token_viz.py:test_add_bleed_derived_would_trim_true, tests/test_token_viz.py:test_would_trim_boundary_exact, tests/test_token_viz.py:test_would_trim_just_above_threshold, tests/test_token_viz.py:test_would_trim_just_below_threshold]
[C: tests/test_token_viz.py:test_add_bleed_derived_aliases, tests/test_token_viz.py:test_add_bleed_derived_breakdown, tests/test_token_viz.py:test_add_bleed_derived_headroom, tests/test_token_viz.py:test_add_bleed_derived_headroom_clamped_to_zero, tests/test_token_viz.py:test_add_bleed_derived_history_clamped_to_zero, tests/test_token_viz.py:test_add_bleed_derived_would_trim_false, tests/test_token_viz.py:test_add_bleed_derived_would_trim_true, tests/test_token_viz.py:test_would_trim_boundary_exact, tests/test_token_viz.py:test_would_trim_just_above_threshold, tests/test_token_viz.py:test_would_trim_just_below_threshold]
"""
cur = d.get("current", 0)
lim = d.get("limit", 0)
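From the current/limit fields it reads, the helper derives headroom-style figures exercised by the test_token_viz tests (headroom, would_trim, breakdown). One plausible shape of that arithmetic, as an assumption only since the exact keys and trim threshold are not shown in this hunk:

def add_bleed_derived_sketch(d: dict, trim_ratio: float = 0.9) -> dict:
    # derive headroom and a would-trim flag from tracked usage; clamp at zero so
    # over-budget sessions do not report negative headroom
    cur = d.get("current", 0)
    lim = d.get("limit", 0)
    d["headroom"] = max(lim - cur, 0)
    d["would_trim"] = lim > 0 and cur >= lim * trim_ratio
    return d

print(add_bleed_derived_sketch({"current": 95, "limit": 100}))  # headroom 5, would_trim True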
@@ -2500,7 +2531,7 @@ if os.environ.get("SLOP_TOOL_PRESET"):
def run_subagent_summarization(file_path: str, content: str, is_code: bool, outline: str) -> str:
"""
Performs a stateless summarization request using a sub-agent prompt. [C: src/summarize.py:summarise_file, tests/test_subagent_summarization.py:test_run_subagent_summarization_anthropic, tests/test_subagent_summarization.py:test_run_subagent_summarization_gemini]
[C: src/summarize.py:summarise_file, tests/test_subagent_summarization.py:test_run_subagent_summarization_anthropic, tests/test_subagent_summarization.py:test_run_subagent_summarization_gemini]
"""
prompt_tmpl = mma_prompts.TIER4_SUMMARIZE_CODE_PROMPT if is_code else mma_prompts.TIER4_SUMMARIZE_TEXT_PROMPT
prompt = prompt_tmpl.format(file_path=file_path, outline=outline, content=content)
@@ -2549,4 +2580,4 @@ def run_subagent_summarization(file_path: str, content: str, is_code: bool, outl
return resp_data.get("text", "")
return "ERROR: Unsupported provider for sub-agent summarization"
#endregion: Subagent Summarization
#endregion: Subagent Summarization
+88 -51
@@ -39,14 +39,15 @@ from typing import Any
class ApiHookClient:
def __init__(self, base_url: str = "http://127.0.0.1:8999", api_key: str | None = None):
"""
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
"""
self.base_url = base_url.rstrip('/')
self.api_key = api_key
def _make_request(self, method: str, path: str, data: dict | None = None, timeout: float = 5.0) -> dict[str, Any] | None:
"""
Helper to make HTTP requests to the hook server.
Helper to make HTTP requests to the hook server.
[C: tests/test_api_hook_client.py:test_unsupported_method_error]
"""
url = f"{self.base_url}{path}"
@@ -71,8 +72,9 @@ class ApiHookClient:
def wait_for_server(self, timeout: int = 15) -> bool:
"""
Polls the health endpoint until the server responds or timeout occurs.
[C: simulation/live_walkthrough.py:main, simulation/ping_pong.py:main, simulation/sim_base.py:BaseSimulation.setup, tests/smoke_status_hook.py:test_status_hook, tests/test_ai_settings_layout.py:test_change_provider_via_hook, tests/test_ai_settings_layout.py:test_set_params_via_custom_callback, tests/test_auto_switch_sim.py:test_auto_switch_sim, tests/test_conductor_api_hook_integration.py:test_conductor_integrates_api_hook_client_for_verification, tests/test_deepseek_infra.py:test_gui_provider_list_via_hooks, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:test_button_click_is_received, tests/test_external_editor_gui.py:test_patch_modal_shows_with_configured_editor, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui2_parity.py:test_gui2_click_hook_works, tests/test_gui2_parity.py:test_gui2_custom_callback_hook_works, tests/test_gui2_parity.py:test_gui2_set_value_hook_works, tests/test_gui_context_presets.py:test_gui_context_preset_save_load, tests/test_hooks.py:test_live_hook_server_responses, tests/test_live_workflow.py:test_full_live_workflow, tests/test_mma_concurrent_tracks_sim.py:test_mma_concurrent_tracks_execution, tests/test_mma_concurrent_tracks_stress_sim.py:test_mma_concurrent_tracks_stress, tests/test_mma_step_mode_sim.py:test_mma_step_mode_approval_flow, tests/test_patch_modal_gui.py:test_patch_apply_modal_workflow, tests/test_patch_modal_gui.py:test_patch_modal_appears_on_trigger, tests/test_preset_windows_layout.py:test_api_hook_under_load, tests/test_preset_windows_layout.py:test_preset_windows_opening, tests/test_rag_phase4_final_verify.py:test_phase4_final_verify, tests/test_rag_phase4_stress.py:test_rag_large_codebase_verification_sim, tests/test_rag_visual_sim.py:test_rag_full_lifecycle_sim, tests/test_rag_visual_sim.py:test_rag_settings_persistence_sim, tests/test_selectable_ui.py:test_selectable_label_stability, tests/test_system_prompt_sim.py:test_system_prompt_sim, tests/test_tool_management_layout.py:test_tool_management_gettable_fields, tests/test_tool_management_layout.py:test_tool_management_state_updates, tests/test_ui_cache_controls_sim.py:test_ui_cache_controls, tests/test_undo_redo_sim.py:test_undo_redo_context_mutation, tests/test_undo_redo_sim.py:test_undo_redo_discussion_mutation, tests/test_undo_redo_sim.py:test_undo_redo_lifecycle, tests/test_visual_orchestration.py:test_mma_epic_lifecycle, tests/test_visual_sim_gui_ux.py:test_gui_track_creation, tests/test_visual_sim_gui_ux.py:test_gui_ux_event_routing, tests/test_visual_sim_mma_v2.py:test_mma_complete_lifecycle, tests/test_workspace_profiles_sim.py:test_workspace_profiles_restoration, tests/test_z_negative_flows.py:test_mock_error_result, tests/test_z_negative_flows.py:test_mock_malformed_json, tests/test_z_negative_flows.py:test_mock_timeout]
Polls the health endpoint until the server responds or timeout occurs.
[C: simulation/live_walkthrough.py:main, simulation/ping_pong.py:main, simulation/sim_base.py:BaseSimulation.setup, tests/smoke_status_hook.py:test_status_hook, tests/test_ai_settings_layout.py:test_change_provider_via_hook, tests/test_ai_settings_layout.py:test_set_params_via_custom_callback, tests/test_auto_switch_sim.py:test_auto_switch_sim, tests/test_conductor_api_hook_integration.py:test_conductor_integrates_api_hook_client_for_verification, tests/test_deepseek_infra.py:test_gui_provider_list_via_hooks, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:test_button_click_is_received, tests/test_external_editor_gui.py:test_patch_modal_shows_with_configured_editor, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui2_parity.py:test_gui2_click_hook_works, tests/test_gui2_parity.py:test_gui2_custom_callback_hook_works, tests/test_gui2_parity.py:test_gui2_set_value_hook_works, tests/test_gui_context_presets.py:test_gui_context_preset_save_load, tests/test_hooks.py:test_live_hook_server_responses, tests/test_live_workflow.py:test_full_live_workflow, tests/test_mma_concurrent_tracks_sim.py:test_mma_concurrent_tracks_execution, tests/test_mma_concurrent_tracks_stress_sim.py:test_mma_concurrent_tracks_stress, tests/test_mma_step_mode_sim.py:test_mma_step_mode_approval_flow, tests/test_patch_modal_gui.py:test_patch_apply_modal_workflow, tests/test_patch_modal_gui.py:test_patch_modal_appears_on_trigger, tests/test_phase6_simulation.py:test_ast_inspector_modal_opens, tests/test_phase6_simulation.py:test_batch_operations_shift_click, tests/test_phase6_simulation.py:test_slice_editor_add_remove, tests/test_preset_windows_layout.py:test_api_hook_under_load, tests/test_preset_windows_layout.py:test_preset_windows_opening, tests/test_rag_phase4_final_verify.py:test_phase4_final_verify, tests/test_rag_phase4_stress.py:test_rag_large_codebase_verification_sim, tests/test_rag_visual_sim.py:test_rag_full_lifecycle_sim, tests/test_rag_visual_sim.py:test_rag_settings_persistence_sim, tests/test_selectable_ui.py:test_selectable_label_stability, tests/test_system_prompt_sim.py:test_system_prompt_sim, tests/test_tool_management_layout.py:test_tool_management_gettable_fields, tests/test_tool_management_layout.py:test_tool_management_state_updates, tests/test_ui_cache_controls_sim.py:test_ui_cache_controls, tests/test_undo_redo_sim.py:test_undo_redo_context_mutation, tests/test_undo_redo_sim.py:test_undo_redo_discussion_mutation, tests/test_undo_redo_sim.py:test_undo_redo_lifecycle, tests/test_visual_mma.py:test_visual_mma_components, tests/test_visual_orchestration.py:test_mma_epic_lifecycle, tests/test_visual_sim_gui_ux.py:test_gui_track_creation, tests/test_visual_sim_gui_ux.py:test_gui_ux_event_routing, tests/test_visual_sim_mma_v2.py:test_mma_complete_lifecycle, tests/test_workspace_profiles_sim.py:test_workspace_profiles_restoration, tests/test_z_negative_flows.py:test_mock_error_result, tests/test_z_negative_flows.py:test_mock_malformed_json, tests/test_z_negative_flows.py:test_mock_timeout]
"""
start = time.time()
while time.time() - start < timeout:
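Only the top of the polling loop is visible in this hunk; a hedged completion of the poll-until-healthy pattern it describes (illustrative, not the file's exact code, and the response shape checked below is assumed):

import time

def wait_for_server_sketch(client, timeout: int = 15) -> bool:
    # retry the status endpoint until it answers or the deadline passes
    start = time.time()
    while time.time() - start < timeout:
        try:
            if client.get_status():  # any non-empty status payload counts as "up"
                return True
        except Exception:
            pass  # server not reachable yet
        time.sleep(0.5)
    return False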
@@ -84,8 +86,9 @@ class ApiHookClient:
def get_status(self) -> dict[str, Any]:
"""
Checks the health of the hook server.
[C: tests/test_api_hook_client.py:test_get_status_success, tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation, tests/test_hooks.py:test_live_hook_server_responses, tests/test_mma_concurrent_tracks_stress_sim.py:test_mma_concurrent_tracks_stress, tests/test_preset_windows_layout.py:make_request, tests/test_preset_windows_layout.py:test_preset_windows_opening, tests/test_ui_cache_controls_sim.py:test_ui_cache_controls]
Checks the health of the hook server.
[C: tests/test_api_hook_client.py:test_get_status_success, tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation, tests/test_hooks.py:test_live_hook_server_responses, tests/test_mma_concurrent_tracks_stress_sim.py:test_mma_concurrent_tracks_stress, tests/test_phase6_simulation.py:test_ast_inspector_modal_opens, tests/test_phase6_simulation.py:test_batch_operations_shift_click, tests/test_phase6_simulation.py:test_slice_editor_add_remove, tests/test_preset_windows_layout.py:make_request, tests/test_preset_windows_layout.py:test_preset_windows_opening, tests/test_ui_cache_controls_sim.py:test_ui_cache_controls]
"""
res = self._make_request('GET', '/status')
if res is None:
@@ -96,28 +99,32 @@ class ApiHookClient:
def post_project(self, project_data: dict) -> dict[str, Any]:
"""
Updates the current project configuration.
Updates the current project configuration.
[C: simulation/sim_context.py:ContextSimulation.run]
"""
return self._make_request('POST', '/api/project', data=project_data) or {}
def get_project(self) -> dict[str, Any]:
"""
Retrieves the current project state.
Retrieves the current project state.
[C: simulation/sim_context.py:ContextSimulation.run, tests/test_api_hook_client.py:test_get_project_success, tests/test_gui_context_presets.py:test_gui_context_preset_save_load, tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation, tests/test_hooks.py:test_live_hook_server_responses, tests/test_live_workflow.py:test_full_live_workflow]
"""
return self._make_request('GET', '/api/project') or {}
def get_session(self) -> dict[str, Any]:
"""
Retrieves the current discussion session history.
Retrieves the current discussion session history.
[C: simulation/ping_pong.py:main, simulation/sim_context.py:ContextSimulation.run, simulation/sim_execution.py:ExecutionSimulation.run, simulation/sim_tools.py:ToolsSimulation.run, simulation/workflow_sim.py:WorkflowSimulator.run_discussion_turn_async, simulation/workflow_sim.py:WorkflowSimulator.wait_for_ai_response, tests/test_api_hook_client.py:test_get_session_success, tests/test_gui_stress_performance.py:test_comms_volume_stress_performance, tests/test_live_workflow.py:test_full_live_workflow, tests/test_rag_phase4_final_verify.py:test_phase4_final_verify, tests/test_rag_phase4_stress.py:test_rag_large_codebase_verification_sim]
"""
return self._make_request('GET', '/api/session') or {}
def post_session(self, session_entries: list[dict]) -> dict[str, Any]:
"""
Updates the session history.
Updates the session history.
[C: tests/test_gui_stress_performance.py:test_comms_volume_stress_performance, tests/test_live_workflow.py:test_full_live_workflow]
"""
return self._make_request('POST', '/api/session', data={"session": {"entries": session_entries}}) or {}
@@ -129,7 +136,8 @@ class ApiHookClient:
def clear_events(self) -> list[dict[str, Any]]:
"""
Retrieves and clears the event queue.
Retrieves and clears the event queue.
[C: simulation/sim_base.py:BaseSimulation.setup]
"""
return self.get_events()
@@ -137,7 +145,7 @@ class ApiHookClient:
def wait_for_event(self, event_type: str, timeout: int = 5) -> dict[str, Any] | None:
"""
[C: simulation/sim_base.py:BaseSimulation.wait_for_event, simulation/sim_execution.py:ExecutionSimulation.run, tests/test_z_negative_flows.py:test_mock_error_result, tests/test_z_negative_flows.py:test_mock_malformed_json, tests/test_z_negative_flows.py:test_mock_timeout]
[C: simulation/sim_base.py:BaseSimulation.wait_for_event, simulation/sim_execution.py:ExecutionSimulation.run, tests/test_z_negative_flows.py:test_mock_error_result, tests/test_z_negative_flows.py:test_mock_malformed_json, tests/test_z_negative_flows.py:test_mock_timeout]
"""
start = time.time()
while time.time() - start < timeout:
@@ -150,69 +158,81 @@ class ApiHookClient:
def post_gui(self, payload: dict) -> dict[str, Any]:
"""
Pushes an event to the GUI's AsyncEventQueue via the /api/gui endpoint.
[C: tests/test_ai_settings_layout.py:test_set_params_via_custom_callback, tests/test_api_hook_client.py:test_post_gui_success, tests/test_gui2_parity.py:test_gui2_custom_callback_hook_works, tests/test_gui2_parity.py:test_gui2_set_value_hook_works, tests/test_visual_mma.py:test_visual_mma_components]
Pushes an event to the GUI's AsyncEventQueue via the /api/gui endpoint.
[C: tests/test_ai_settings_layout.py:test_set_params_via_custom_callback, tests/test_api_hook_client.py:test_post_gui_success, tests/test_gui2_parity.py:test_gui2_custom_callback_hook_works, tests/test_gui2_parity.py:test_gui2_set_value_hook_works]
"""
return self._make_request('POST', '/api/gui', data=payload) or {}
def push_event(self, action: str, payload: dict) -> dict[str, Any]:
"""
Convenience to push a GUI task.
[C: tests/test_auto_switch_sim.py:test_auto_switch_sim, tests/test_auto_switch_sim.py:trigger_tier, tests/test_external_editor_gui.py:test_button_click_is_received, tests/test_external_editor_gui.py:test_patch_modal_shows_with_configured_editor, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_context_presets.py:test_gui_context_preset_save_load, tests/test_gui_text_viewer.py:test_text_viewer_state_update, tests/test_patch_modal_gui.py:test_patch_apply_modal_workflow, tests/test_patch_modal_gui.py:test_patch_modal_appears_on_trigger, tests/test_preset_windows_layout.py:test_preset_windows_opening, tests/test_saved_presets_sim.py:test_preset_manager_modal, tests/test_saved_presets_sim.py:test_preset_switching, tests/test_tool_management_layout.py:test_tool_management_state_updates, tests/test_tool_presets_sim.py:test_tool_preset_switching, tests/test_visual_mma.py:test_visual_mma_components, tests/test_visual_sim_gui_ux.py:test_gui_ux_event_routing, tests/test_workspace_profiles_sim.py:test_workspace_profiles_restoration, tests/test_z_negative_flows.py:test_mock_error_result, tests/test_z_negative_flows.py:test_mock_malformed_json, tests/test_z_negative_flows.py:test_mock_timeout]
Convenience to push a GUI task.
[C: tests/test_auto_switch_sim.py:test_auto_switch_sim, tests/test_auto_switch_sim.py:trigger_tier, tests/test_external_editor_gui.py:test_button_click_is_received, tests/test_external_editor_gui.py:test_patch_modal_shows_with_configured_editor, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_context_presets.py:test_gui_context_preset_save_load, tests/test_gui_text_viewer.py:test_text_viewer_state_update, tests/test_patch_modal_gui.py:test_patch_apply_modal_workflow, tests/test_patch_modal_gui.py:test_patch_modal_appears_on_trigger, tests/test_preset_windows_layout.py:test_preset_windows_opening, tests/test_saved_presets_sim.py:test_preset_manager_modal, tests/test_saved_presets_sim.py:test_preset_switching, tests/test_tool_management_layout.py:test_tool_management_state_updates, tests/test_tool_presets_sim.py:test_tool_preset_switching, tests/test_visual_mma.py:test_visual_mma_components, tests/test_visual_sim_gui_ux.py:test_gui_track_creation, tests/test_visual_sim_gui_ux.py:test_gui_ux_event_routing, tests/test_workspace_profiles_sim.py:test_workspace_profiles_restoration, tests/test_z_negative_flows.py:test_mock_error_result, tests/test_z_negative_flows.py:test_mock_malformed_json, tests/test_z_negative_flows.py:test_mock_timeout]
"""
return self.post_gui({"action": action, **payload})
def click(self, item: str, user_data: Any = None) -> dict[str, Any]:
"""
Simulates a button click.
[C: simulation/live_walkthrough.py:main, simulation/ping_pong.py:main, simulation/sim_base.py:BaseSimulation.setup, simulation/sim_context.py:ContextSimulation.run, simulation/sim_execution.py:ExecutionSimulation.run, simulation/workflow_sim.py:WorkflowSimulator.create_discussion, simulation/workflow_sim.py:WorkflowSimulator.load_prior_log, simulation/workflow_sim.py:WorkflowSimulator.run_discussion_turn_async, simulation/workflow_sim.py:WorkflowSimulator.setup_new_project, simulation/workflow_sim.py:WorkflowSimulator.truncate_history, simulation/workflow_sim.py:WorkflowSimulator.wait_for_ai_response, tests/test_external_editor_gui.py:test_button_click_is_received, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui2_parity.py:test_gui2_click_hook_works, tests/test_live_workflow.py:test_full_live_workflow, tests/test_mma_concurrent_tracks_sim.py:test_mma_concurrent_tracks_execution, tests/test_mma_concurrent_tracks_stress_sim.py:test_mma_concurrent_tracks_stress, tests/test_mma_step_mode_sim.py:test_mma_step_mode_approval_flow, tests/test_rag_phase4_final_verify.py:test_phase4_final_verify, tests/test_rag_phase4_stress.py:test_rag_large_codebase_verification_sim, tests/test_rag_visual_sim.py:test_rag_full_lifecycle_sim, tests/test_system_prompt_sim.py:test_system_prompt_sim, tests/test_ui_cache_controls_sim.py:test_ui_cache_controls, tests/test_undo_redo_sim.py:test_undo_redo_context_mutation, tests/test_undo_redo_sim.py:test_undo_redo_discussion_mutation, tests/test_undo_redo_sim.py:test_undo_redo_lifecycle, tests/test_visual_orchestration.py:test_mma_epic_lifecycle, tests/test_visual_sim_gui_ux.py:test_gui_track_creation, tests/test_visual_sim_mma_v2.py:_drain_approvals, tests/test_visual_sim_mma_v2.py:test_mma_complete_lifecycle, tests/test_z_negative_flows.py:test_mock_error_result, tests/test_z_negative_flows.py:test_mock_malformed_json, tests/test_z_negative_flows.py:test_mock_timeout]
Simulates a button click.
[C: simulation/live_walkthrough.py:main, simulation/ping_pong.py:main, simulation/sim_base.py:BaseSimulation.setup, simulation/sim_context.py:ContextSimulation.run, simulation/sim_execution.py:ExecutionSimulation.run, simulation/workflow_sim.py:WorkflowSimulator.create_discussion, simulation/workflow_sim.py:WorkflowSimulator.load_prior_log, simulation/workflow_sim.py:WorkflowSimulator.run_discussion_turn_async, simulation/workflow_sim.py:WorkflowSimulator.setup_new_project, simulation/workflow_sim.py:WorkflowSimulator.truncate_history, simulation/workflow_sim.py:WorkflowSimulator.wait_for_ai_response, tests/test_external_editor_gui.py:test_button_click_is_received, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui2_parity.py:test_gui2_click_hook_works, tests/test_gui_text_viewer.py:test_text_viewer_state_update, tests/test_live_workflow.py:test_full_live_workflow, tests/test_mma_concurrent_tracks_sim.py:test_mma_concurrent_tracks_execution, tests/test_mma_concurrent_tracks_stress_sim.py:test_mma_concurrent_tracks_stress, tests/test_mma_step_mode_sim.py:test_mma_step_mode_approval_flow, tests/test_rag_phase4_final_verify.py:test_phase4_final_verify, tests/test_rag_phase4_stress.py:test_rag_large_codebase_verification_sim, tests/test_rag_visual_sim.py:test_rag_full_lifecycle_sim, tests/test_saved_presets_sim.py:test_preset_manager_modal, tests/test_saved_presets_sim.py:test_preset_switching, tests/test_system_prompt_sim.py:test_system_prompt_sim, tests/test_ui_cache_controls_sim.py:test_ui_cache_controls, tests/test_undo_redo_sim.py:test_undo_redo_context_mutation, tests/test_undo_redo_sim.py:test_undo_redo_discussion_mutation, tests/test_undo_redo_sim.py:test_undo_redo_lifecycle, tests/test_visual_mma.py:test_visual_mma_components, tests/test_visual_orchestration.py:test_mma_epic_lifecycle, tests/test_visual_sim_gui_ux.py:test_gui_track_creation, tests/test_visual_sim_gui_ux.py:test_gui_ux_event_routing, tests/test_visual_sim_mma_v2.py:_drain_approvals, tests/test_visual_sim_mma_v2.py:test_mma_complete_lifecycle, tests/test_z_negative_flows.py:test_mock_error_result, tests/test_z_negative_flows.py:test_mock_malformed_json, tests/test_z_negative_flows.py:test_mock_timeout]
"""
return self.post_gui({"action": "click", "item": item, "user_data": user_data})
def set_value(self, item: str, value: Any) -> dict[str, Any]:
"""
Sets the value of a GUI widget.
[C: simulation/live_walkthrough.py:main, simulation/ping_pong.py:main, simulation/sim_ai_settings.py:AISettingsSimulation.run, simulation/sim_base.py:BaseSimulation.setup, simulation/workflow_sim.py:WorkflowSimulator.create_discussion, simulation/workflow_sim.py:WorkflowSimulator.run_discussion_turn_async, simulation/workflow_sim.py:WorkflowSimulator.setup_new_project, simulation/workflow_sim.py:WorkflowSimulator.truncate_history, tests/smoke_status_hook.py:test_status_hook, tests/test_ai_settings_layout.py:test_change_provider_via_hook, tests/test_auto_switch_sim.py:test_auto_switch_sim, tests/test_deepseek_infra.py:test_gui_provider_list_via_hooks, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_gui2_parity.py:test_gui2_click_hook_works, tests/test_gui2_performance.py:test_performance_benchmarking, tests/test_live_gui_integration_v2.py:test_api_gui_state_live, tests/test_live_workflow.py:test_full_live_workflow, tests/test_mma_concurrent_tracks_sim.py:test_mma_concurrent_tracks_execution, tests/test_mma_concurrent_tracks_stress_sim.py:test_mma_concurrent_tracks_stress, tests/test_mma_step_mode_sim.py:test_mma_step_mode_approval_flow, tests/test_rag_phase4_final_verify.py:test_phase4_final_verify, tests/test_rag_phase4_stress.py:test_rag_large_codebase_verification_sim, tests/test_rag_visual_sim.py:test_rag_full_lifecycle_sim, tests/test_rag_visual_sim.py:test_rag_settings_persistence_sim, tests/test_saved_presets_sim.py:test_preset_manager_modal, tests/test_selectable_ui.py:test_selectable_label_stability, tests/test_system_prompt_sim.py:test_system_prompt_sim, tests/test_task_dag_popout_sim.py:test_task_dag_popout, tests/test_tool_presets_sim.py:test_tool_preset_switching, tests/test_undo_redo_sim.py:test_undo_redo_context_mutation, tests/test_undo_redo_sim.py:test_undo_redo_discussion_mutation, tests/test_undo_redo_sim.py:test_undo_redo_lifecycle, tests/test_usage_analytics_popout_sim.py:test_usage_analytics_popout, tests/test_visual_orchestration.py:test_mma_epic_lifecycle, tests/test_visual_sim_gui_ux.py:test_gui_track_creation, tests/test_visual_sim_mma_v2.py:test_mma_complete_lifecycle, tests/test_workspace_profiles_sim.py:test_workspace_profiles_restoration, tests/test_z_negative_flows.py:test_mock_error_result, tests/test_z_negative_flows.py:test_mock_malformed_json, tests/test_z_negative_flows.py:test_mock_timeout]
Sets the value of a GUI widget.
[C: simulation/live_walkthrough.py:main, simulation/ping_pong.py:main, simulation/sim_ai_settings.py:AISettingsSimulation.run, simulation/sim_base.py:BaseSimulation.setup, simulation/workflow_sim.py:WorkflowSimulator.create_discussion, simulation/workflow_sim.py:WorkflowSimulator.run_discussion_turn_async, simulation/workflow_sim.py:WorkflowSimulator.setup_new_project, simulation/workflow_sim.py:WorkflowSimulator.truncate_history, tests/smoke_status_hook.py:test_status_hook, tests/test_ai_settings_layout.py:test_change_provider_via_hook, tests/test_auto_switch_sim.py:test_auto_switch_sim, tests/test_deepseek_infra.py:test_gui_provider_list_via_hooks, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_gui2_parity.py:test_gui2_click_hook_works, tests/test_gui2_performance.py:test_performance_benchmarking, tests/test_live_gui_integration_v2.py:test_api_gui_state_live, tests/test_live_workflow.py:test_full_live_workflow, tests/test_mma_concurrent_tracks_sim.py:test_mma_concurrent_tracks_execution, tests/test_mma_concurrent_tracks_stress_sim.py:test_mma_concurrent_tracks_stress, tests/test_mma_step_mode_sim.py:test_mma_step_mode_approval_flow, tests/test_rag_phase4_final_verify.py:test_phase4_final_verify, tests/test_rag_phase4_stress.py:test_rag_large_codebase_verification_sim, tests/test_rag_visual_sim.py:test_rag_full_lifecycle_sim, tests/test_rag_visual_sim.py:test_rag_settings_persistence_sim, tests/test_saved_presets_sim.py:test_preset_manager_modal, tests/test_selectable_ui.py:test_selectable_label_stability, tests/test_system_prompt_sim.py:test_system_prompt_sim, tests/test_task_dag_popout_sim.py:test_task_dag_popout, tests/test_tool_presets_sim.py:test_tool_preset_switching, tests/test_undo_redo_sim.py:test_undo_redo_context_mutation, tests/test_undo_redo_sim.py:test_undo_redo_discussion_mutation, tests/test_undo_redo_sim.py:test_undo_redo_lifecycle, tests/test_usage_analytics_popout_sim.py:test_usage_analytics_popout, tests/test_visual_mma.py:test_visual_mma_components, tests/test_visual_orchestration.py:test_mma_epic_lifecycle, tests/test_visual_sim_mma_v2.py:test_mma_complete_lifecycle, tests/test_workspace_profiles_sim.py:test_workspace_profiles_restoration, tests/test_z_negative_flows.py:test_mock_error_result, tests/test_z_negative_flows.py:test_mock_malformed_json, tests/test_z_negative_flows.py:test_mock_timeout]
"""
return self.post_gui({"action": "set_value", "item": item, "value": value})
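Taken together, the methods above are enough for simple driver scripts against a running GUI; a short usage sketch (the widget tags are made-up examples, not real item names):

client = ApiHookClient()                         # defaults to http://127.0.0.1:8999
if client.wait_for_server(timeout=15):           # poll /status until the hook server is up
    client.set_value("provider_combo", "gemini")     # hypothetical widget tag
    client.click("send_button")                      # hypothetical widget tag
    print(client.get_value("status_label"))          # hypothetical widget tag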
def select_tab(self, item: str, value: str) -> dict[str, Any]:
"""
Selects a specific tab in a tab bar.
Selects a specific tab in a tab bar.
[C: simulation/live_walkthrough.py:main, tests/test_api_hook_extensions.py:test_select_tab_integration]
"""
return self.set_value(item, value)
def select_list_item(self, item: str, value: str) -> dict[str, Any]:
"""
Selects an item in a listbox or combo.
Selects an item in a listbox or combo.
[C: simulation/workflow_sim.py:WorkflowSimulator.create_discussion, simulation/workflow_sim.py:WorkflowSimulator.switch_discussion, tests/test_api_hook_extensions.py:test_select_list_item_integration, tests/test_live_workflow.py:test_full_live_workflow]
"""
return self.set_value(item, value)
def drag(self, src_item: str, dst_item: str) -> dict[str, Any]:
"""
Simulates a drag and drop operation.
Simulates a drag and drop operation.
[C: tests/test_api_hook_client.py:test_drag_success]
"""
return self.push_event("drag", {"src_item": src_item, "dst_item": dst_item})
def right_click(self, item: str) -> dict[str, Any]:
"""
Simulates a right-click on an item.
Simulates a right-click on an item.
[C: tests/test_api_hook_client.py:test_right_click_success]
"""
return self.push_event("right_click", {"item": item})
def get_gui_state(self) -> dict[str, Any]:
"""
Returns the full GUI state available via the hook API.
[C: tests/test_ai_settings_layout.py:test_change_provider_via_hook, tests/test_ai_settings_layout.py:test_set_params_via_custom_callback, tests/test_conductor_api_hook_integration.py:simulate_conductor_phase_completion, tests/test_external_editor_gui.py:test_button_click_is_received, tests/test_external_editor_gui.py:test_patch_modal_shows_with_configured_editor, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_text_viewer.py:test_text_viewer_state_update, tests/test_hooks.py:test_live_hook_server_responses, tests/test_live_gui_integration_v2.py:test_api_gui_state_live, tests/test_live_workflow.py:test_full_live_workflow, tests/test_live_workflow.py:wait_for_value, tests/test_patch_modal_gui.py:test_patch_apply_modal_workflow, tests/test_patch_modal_gui.py:test_patch_modal_appears_on_trigger, tests/test_rag_phase4_final_verify.py:test_phase4_final_verify, tests/test_rag_phase4_stress.py:test_rag_large_codebase_verification_sim, tests/test_saved_presets_sim.py:test_preset_manager_modal, tests/test_saved_presets_sim.py:test_preset_switching, tests/test_task_dag_popout_sim.py:test_task_dag_popout, tests/test_tool_management_layout.py:test_tool_management_gettable_fields, tests/test_tool_management_layout.py:test_tool_management_state_updates, tests/test_tool_presets_sim.py:test_tool_preset_switching, tests/test_usage_analytics_popout_sim.py:test_usage_analytics_popout]
Returns the full GUI state available via the hook API.
[C: tests/test_ai_settings_layout.py:test_change_provider_via_hook, tests/test_ai_settings_layout.py:test_set_params_via_custom_callback, tests/test_conductor_api_hook_integration.py:simulate_conductor_phase_completion, tests/test_external_editor_gui.py:test_button_click_is_received, tests/test_external_editor_gui.py:test_patch_modal_shows_with_configured_editor, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_text_viewer.py:test_text_viewer_state_update, tests/test_hooks.py:test_live_hook_server_responses, tests/test_live_gui_integration_v2.py:test_api_gui_state_live, tests/test_live_workflow.py:test_full_live_workflow, tests/test_live_workflow.py:wait_for_value, tests/test_patch_modal_gui.py:test_patch_apply_modal_workflow, tests/test_patch_modal_gui.py:test_patch_modal_appears_on_trigger, tests/test_rag_phase4_final_verify.py:test_phase4_final_verify, tests/test_rag_phase4_stress.py:test_rag_large_codebase_verification_sim, tests/test_saved_presets_sim.py:test_preset_manager_modal, tests/test_saved_presets_sim.py:test_preset_switching, tests/test_task_dag_popout_sim.py:test_task_dag_popout, tests/test_tool_management_layout.py:test_tool_management_gettable_fields, tests/test_tool_management_layout.py:test_tool_management_state_updates, tests/test_tool_presets_sim.py:test_tool_preset_switching, tests/test_usage_analytics_popout_sim.py:test_usage_analytics_popout, tests/test_visual_mma.py:test_visual_mma_components]
"""
return self._make_request('GET', '/api/gui/state') or {}
def get_value(self, item: str) -> Any:
"""
Gets the value of a GUI item via its mapped field.
[C: simulation/sim_ai_settings.py:AISettingsSimulation.run, simulation/sim_base.py:BaseSimulation.get_value, simulation/sim_base.py:BaseSimulation.setup, simulation/sim_base.py:BaseSimulation.wait_for_element, simulation/sim_context.py:ContextSimulation.run, simulation/sim_execution.py:ExecutionSimulation.run, simulation/workflow_sim.py:WorkflowSimulator.run_discussion_turn_async, simulation/workflow_sim.py:WorkflowSimulator.wait_for_ai_response, tests/smoke_status_hook.py:test_status_hook, tests/smoke_status_hook.py:wait_for_value, tests/test_auto_switch_sim.py:test_auto_switch_sim, tests/test_deepseek_infra.py:test_gui_provider_list_via_hooks, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_gui2_parity.py:test_gui2_click_hook_works, tests/test_gui2_parity.py:test_gui2_set_value_hook_works, tests/test_rag_phase4_final_verify.py:test_phase4_final_verify, tests/test_rag_phase4_stress.py:test_rag_large_codebase_verification_sim, tests/test_rag_visual_sim.py:test_rag_full_lifecycle_sim, tests/test_rag_visual_sim.py:test_rag_settings_persistence_sim, tests/test_selectable_ui.py:test_selectable_label_stability, tests/test_system_prompt_sim.py:test_system_prompt_sim, tests/test_undo_redo_sim.py:test_undo_redo_context_mutation, tests/test_undo_redo_sim.py:test_undo_redo_discussion_mutation, tests/test_undo_redo_sim.py:test_undo_redo_lifecycle, tests/test_visual_mma.py:test_visual_mma_components, tests/test_workspace_profiles_sim.py:test_workspace_profiles_restoration]
Gets the value of a GUI item via its mapped field.
[C: simulation/sim_ai_settings.py:AISettingsSimulation.run, simulation/sim_base.py:BaseSimulation.get_value, simulation/sim_base.py:BaseSimulation.setup, simulation/sim_base.py:BaseSimulation.wait_for_element, simulation/sim_context.py:ContextSimulation.run, simulation/sim_execution.py:ExecutionSimulation.run, simulation/workflow_sim.py:WorkflowSimulator.run_discussion_turn_async, simulation/workflow_sim.py:WorkflowSimulator.wait_for_ai_response, tests/smoke_status_hook.py:test_status_hook, tests/smoke_status_hook.py:wait_for_value, tests/test_auto_switch_sim.py:test_auto_switch_sim, tests/test_deepseek_infra.py:test_gui_provider_list_via_hooks, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_gui2_parity.py:test_gui2_click_hook_works, tests/test_gui2_parity.py:test_gui2_set_value_hook_works, tests/test_rag_phase4_final_verify.py:test_phase4_final_verify, tests/test_rag_phase4_stress.py:test_rag_large_codebase_verification_sim, tests/test_rag_visual_sim.py:test_rag_full_lifecycle_sim, tests/test_rag_visual_sim.py:test_rag_settings_persistence_sim, tests/test_selectable_ui.py:test_selectable_label_stability, tests/test_system_prompt_sim.py:test_system_prompt_sim, tests/test_undo_redo_sim.py:test_undo_redo_context_mutation, tests/test_undo_redo_sim.py:test_undo_redo_discussion_mutation, tests/test_undo_redo_sim.py:test_undo_redo_lifecycle, tests/test_workspace_profiles_sim.py:test_workspace_profiles_restoration]
"""
# Try state endpoint first (new preferred way)
state = self.get_gui_state()
@@ -235,7 +255,8 @@ class ApiHookClient:
def get_text_value(self, item_tag: str) -> str | None:
"""
Wraps get_value and returns its string representation, or None.
Wraps get_value and returns its string representation, or None.
[C: tests/test_api_hook_client.py:test_get_text_value]
"""
val = self.get_value(item_tag)
@@ -243,7 +264,8 @@ class ApiHookClient:
def get_indicator_state(self, item_tag: str) -> dict[str, bool]:
"""
Returns the visibility/active state of a status indicator.
Returns the visibility/active state of a status indicator.
[C: simulation/live_walkthrough.py:main, tests/test_api_hook_extensions.py:test_get_indicator_state_integration, tests/test_live_workflow.py:test_full_live_workflow]
"""
val = self.get_value(item_tag)
@@ -251,35 +273,40 @@ class ApiHookClient:
def get_gui_diagnostics(self) -> dict[str, Any]:
"""
Retrieves performance and diagnostic metrics.
[C: tests/test_api_hook_client.py:test_get_performance_success, tests/test_hooks.py:test_live_hook_server_responses, tests/test_selectable_ui.py:test_selectable_label_stability]
Retrieves performance and diagnostic metrics.
[C: tests/test_api_hook_client.py:test_get_performance_success, tests/test_hooks.py:test_live_hook_server_responses, tests/test_selectable_ui.py:test_selectable_label_stability, tests/test_visual_sim_gui_ux.py:test_gui_ux_event_routing]
"""
return self._make_request('GET', '/api/gui/diagnostics') or {}
def get_performance(self) -> dict[str, Any]:
"""
Retrieves performance metrics from the dedicated endpoint.
[C: tests/test_gui2_performance.py:test_performance_benchmarking, tests/test_gui_performance_requirements.py:test_idle_performance_requirements, tests/test_gui_stress_performance.py:test_comms_volume_stress_performance, tests/test_selectable_ui.py:test_selectable_label_stability, tests/test_visual_sim_gui_ux.py:test_gui_ux_event_routing]
Retrieves performance metrics from the dedicated endpoint.
[C: tests/test_gui2_performance.py:test_performance_benchmarking, tests/test_gui_performance_requirements.py:test_idle_performance_requirements, tests/test_gui_stress_performance.py:test_comms_volume_stress_performance, tests/test_selectable_ui.py:test_selectable_label_stability]
"""
return self._make_request('GET', '/api/performance') or {}
def get_mma_status(self) -> dict[str, Any]:
"""
Retrieves the dedicated MMA engine status.
[C: tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation, tests/test_live_workflow.py:test_full_live_workflow, tests/test_mma_concurrent_tracks_sim.py:_poll_mma_status, tests/test_mma_concurrent_tracks_sim.py:test_mma_concurrent_tracks_execution, tests/test_mma_concurrent_tracks_stress_sim.py:test_mma_concurrent_tracks_stress, tests/test_mma_step_mode_sim.py:_poll_mma_status, tests/test_mma_step_mode_sim.py:test_mma_step_mode_approval_flow, tests/test_visual_orchestration.py:test_mma_epic_lifecycle, tests/test_visual_sim_gui_ux.py:test_gui_ux_event_routing, tests/test_visual_sim_mma_v2.py:_poll]
Retrieves the dedicated MMA engine status.
[C: tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation, tests/test_live_workflow.py:test_full_live_workflow, tests/test_mma_concurrent_tracks_sim.py:_poll_mma_status, tests/test_mma_concurrent_tracks_sim.py:test_mma_concurrent_tracks_execution, tests/test_mma_concurrent_tracks_stress_sim.py:test_mma_concurrent_tracks_stress, tests/test_mma_step_mode_sim.py:_poll_mma_status, tests/test_mma_step_mode_sim.py:test_mma_step_mode_approval_flow, tests/test_visual_mma.py:test_visual_mma_components, tests/test_visual_orchestration.py:test_mma_epic_lifecycle, tests/test_visual_sim_gui_ux.py:test_gui_ux_event_routing, tests/test_visual_sim_mma_v2.py:_poll]
"""
return self._make_request('GET', '/api/gui/mma_status') or {}
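In the tests these status endpoints are typically polled until the engine reaches a desired state. A minimal polling sketch, assuming the status payload exposes a "phase" key (the key name and the way the client is obtained are assumptions, not confirmed by this diff):

import time

def wait_for_mma_phase(client, phase: str, timeout: float = 30.0) -> dict:
    # Poll get_mma_status() until the (assumed) "phase" key matches, or give up.
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = client.get_mma_status()
        if status.get("phase") == phase:
            return status
        time.sleep(0.5)
    raise TimeoutError(f"MMA never reached phase {phase!r}")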
def get_mma_workers(self) -> dict[str, Any]:
"""
Retrieves status for all active MMA workers.
Retrieves status for all active MMA workers.
[C: tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation, tests/test_mma_concurrent_tracks_sim.py:test_mma_concurrent_tracks_execution, tests/test_mma_concurrent_tracks_stress_sim.py:_poll_mma_workers]
"""
return self._make_request('GET', '/api/mma/workers') or {}
def get_context_state(self) -> dict[str, Any]:
"""
Retrieves the current file and screenshot context state.
Retrieves the current file and screenshot context state.
[C: tests/test_gui_context_presets.py:test_gui_context_preset_save_load]
"""
return self._make_request('GET', '/api/context/state') or {}
@@ -294,7 +321,8 @@ class ApiHookClient:
def get_node_status(self, node_id: str) -> dict[str, Any]:
"""
Retrieves status for a specific node in the MMA DAG.
Retrieves status for a specific node in the MMA DAG.
[C: tests/test_api_hook_client.py:test_get_node_status]
"""
return self._make_request('GET', f'/api/mma/node/{node_id}') or {}
@@ -302,8 +330,9 @@ class ApiHookClient:
def request_confirmation(self, tool_name: str, args: dict) -> bool | None:
"""
Pushes a manual confirmation request and waits for response.
Blocks for up to 60 seconds.
Pushes a manual confirmation request and waits for response.
Blocks for up to 60 seconds.
[C: tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation, tests/test_sync_hooks.py:test_api_ask_client_error, tests/test_sync_hooks.py:test_api_ask_client_method, tests/test_sync_hooks.py:test_api_ask_client_rejection]
"""
# Long timeout as this waits for human input (60 seconds)
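A caller-side sketch of how a test might consume the three possible outcomes (True approve, False reject, None timeout); the helper and its policy are illustrative assumptions:

def confirm_or_abort(client, tool_name: str, args: dict) -> bool:
    # Hypothetical helper: treat a timeout (None) the same as an explicit rejection.
    decision = client.request_confirmation(tool_name, args)
    return decision is True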
@@ -314,8 +343,9 @@ class ApiHookClient:
def reset_session(self) -> None:
"""
Resets the current session via button click.
[C: src/app_controller.py:AppController._handle_reset_session, src/app_controller.py:AppController.current_model, src/app_controller.py:AppController.current_provider, src/app_controller.py:AppController.init_state, src/gui_2.py:App._render_provider_panel, src/gui_2.py:App._show_menus, src/multi_agent_conductor.py:run_worker_lifecycle, tests/conftest.py:live_gui, tests/conftest.py:reset_ai_client, tests/test_ai_cache_tracking.py:test_gemini_cache_tracking, tests/test_ai_client_cli.py:test_ai_client_send_gemini_cli, tests/test_api_events.py:test_send_emits_events_proper, tests/test_api_events.py:test_send_emits_tool_events, tests/test_deepseek_provider.py:test_deepseek_payload_verification, tests/test_deepseek_provider.py:test_deepseek_reasoner_payload_verification, tests/test_gemini_cli_integration.py:test_gemini_cli_full_integration, tests/test_gemini_cli_integration.py:test_gemini_cli_rejection_and_history, tests/test_gemini_metrics.py:test_get_gemini_cache_stats_with_mock_client, tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation, tests/test_minimax_provider.py:test_minimax_history_bleed_stats, tests/test_mma_agent_focus_phase1.py:test_append_comms_has_source_tier_key, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_none_when_unset, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_set_when_current_tier_set, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_tier2, tests/test_session_logger_reset.py:test_reset_session, tests/test_token_usage.py:test_token_usage_tracking]
Resets the current session via button click.
[C: src/app_controller.py:AppController._handle_reset_session, src/app_controller.py:AppController.current_model, src/app_controller.py:AppController.current_provider, src/app_controller.py:AppController.init_state, src/gui_2.py:App._render_provider_panel, src/gui_2.py:App._show_menus, src/multi_agent_conductor.py:run_worker_lifecycle, tests/conftest.py:live_gui, tests/conftest.py:reset_ai_client, tests/test_ai_cache_tracking.py:test_gemini_cache_tracking, tests/test_ai_client_cli.py:test_ai_client_send_gemini_cli, tests/test_api_events.py:test_send_emits_events_proper, tests/test_api_events.py:test_send_emits_tool_events, tests/test_deepseek_provider.py:test_deepseek_payload_verification, tests/test_deepseek_provider.py:test_deepseek_reasoner_payload_verification, tests/test_gemini_cli_integration.py:test_gemini_cli_full_integration, tests/test_gemini_cli_integration.py:test_gemini_cli_rejection_and_history, tests/test_gemini_metrics.py:test_get_gemini_cache_stats_with_mock_client, tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation, tests/test_mma_agent_focus_phase1.py:test_append_comms_has_source_tier_key, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_none_when_unset, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_set_when_current_tier_set, tests/test_mma_agent_focus_phase1.py:test_append_comms_source_tier_tier2, tests/test_session_logger_reset.py:test_reset_session, tests/test_token_usage.py:test_token_usage_tracking]
"""
self.click("btn_reset")
@@ -328,14 +358,16 @@ class ApiHookClient:
def apply_patch(self) -> dict[str, Any]:
"""
Applies the pending patch.
Applies the pending patch.
[C: tests/test_patch_modal.py:test_apply_callback]
"""
return self._make_request('POST', '/api/patch/apply') or {}
def reject_patch(self) -> dict[str, Any]:
"""
Rejects the pending patch.
Rejects the pending patch.
[C: tests/test_patch_modal.py:test_reject_callback, tests/test_patch_modal.py:test_reject_patch]
"""
return self._make_request('POST', '/api/patch/reject') or {}
@@ -346,7 +378,8 @@ class ApiHookClient:
def spawn_mma_worker(self, data: dict) -> dict:
"""
Spawns a new MMA worker with the provided configuration.
Spawns a new MMA worker with the provided configuration.
[C: tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation]
"""
return self._make_request('POST', '/api/mma/workers/spawn', data=data) or {}
@@ -357,7 +390,8 @@ class ApiHookClient:
def pause_mma_pipeline(self) -> dict:
"""
Pauses the MMA execution pipeline.
Pauses the MMA execution pipeline.
[C: tests/test_mma_step_mode_sim.py:test_mma_step_mode_approval_flow]
"""
return self._make_request('POST', '/api/mma/pipeline/pause') or {}
@@ -368,21 +402,24 @@ class ApiHookClient:
def inject_context(self, data: dict) -> dict:
"""
Injects custom file context into the application.
Injects custom file context into the application.
[C: tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation]
"""
return self._make_request('POST', '/api/context/inject', data=data) or {}
def mutate_mma_dag(self, data: dict) -> dict:
"""
Mutates the MMA DAG (Directed Acyclic Graph) structure.
Mutates the MMA DAG (Directed Acyclic Graph) structure.
[C: tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation]
"""
return self._make_request('POST', '/api/mma/dag/mutate', data=data) or {}
def approve_mma_ticket(self, ticket_id: str) -> dict:
"""
Manually approves a specific ticket for execution in Step Mode.
Manually approves a specific ticket for execution in Step Mode.
[C: tests/test_mma_step_mode_sim.py:test_mma_step_mode_approval_flow]
"""
return self._make_request('POST', '/api/mma/ticket/approve', data={"ticket_id": ticket_id}) or {}
return self._make_request('POST', '/api/mma/ticket/approve', data={"ticket_id": ticket_id}) or {}
+15
-14
@@ -70,7 +70,8 @@ class HookServerInstance(ThreadingHTTPServer):
|
||||
"""Custom HTTPServer that carries a reference to the main App instance."""
|
||||
def __init__(self, server_address: tuple[str, int], RequestHandlerClass: type, app: Any) -> None:
|
||||
"""
|
||||
Initializes the server instance with an app reference.
|
||||
|
||||
Initializes the server instance with an app reference.
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
super().__init__(server_address, RequestHandlerClass)
|
||||
@@ -418,7 +419,7 @@ class HookHandler(BaseHTTPRequestHandler):
|
||||
result = {"status": "done"}
|
||||
def apply_patch():
|
||||
"""
|
||||
[C: tests/test_patch_modal.py:test_apply_callback]
|
||||
[C: tests/test_patch_modal.py:test_apply_callback]
|
||||
"""
|
||||
try:
|
||||
if hasattr(app, "_apply_pending_patch"):
|
||||
@@ -447,7 +448,7 @@ class HookHandler(BaseHTTPRequestHandler):
|
||||
result = {"status": "done"}
|
||||
def reject_patch():
|
||||
"""
|
||||
[C: tests/test_patch_modal.py:test_reject_callback, tests/test_patch_modal.py:test_reject_patch]
|
||||
[C: tests/test_patch_modal.py:test_reject_callback, tests/test_patch_modal.py:test_reject_patch]
|
||||
"""
|
||||
try:
|
||||
app._show_patch_modal = False
|
||||
@@ -566,7 +567,7 @@ class HookHandler(BaseHTTPRequestHandler):
|
||||
elif self.path == "/api/mma/workers/kill":
|
||||
def kill_worker():
|
||||
"""
|
||||
[C: src/app_controller.py:AppController.kill_worker, src/gui_2.py:App._cb_kill_ticket, tests/test_conductor_engine_abort.py:test_kill_worker_sets_abort_and_joins_thread]
|
||||
[C: src/app_controller.py:AppController.kill_worker, src/gui_2.py:App._cb_kill_ticket, tests/test_conductor_engine_abort.py:test_kill_worker_sets_abort_and_joins_thread]
|
||||
"""
|
||||
try:
|
||||
worker_id = data.get("worker_id")
|
||||
@@ -608,7 +609,7 @@ class HookHandler(BaseHTTPRequestHandler):
|
||||
elif self.path == "/api/context/inject":
|
||||
def inject_context():
|
||||
"""
|
||||
[C: tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation]
|
||||
[C: tests/test_headless_simulation.py:test_mma_track_lifecycle_simulation]
|
||||
"""
|
||||
files = _get_app_attr(app, "files")
|
||||
if isinstance(files, list):
|
||||
@@ -672,7 +673,7 @@ class HookHandler(BaseHTTPRequestHandler):
|
||||
class HookServer:
|
||||
def __init__(self, app: Any, port: int = 8999) -> None:
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self.app = app
|
||||
self.port = port
|
||||
@@ -682,7 +683,7 @@ class HookServer:
|
||||
|
||||
def start(self) -> None:
|
||||
"""
|
||||
[C: src/app_controller.py:AppController._cb_accept_tracks, src/app_controller.py:AppController._cb_plan_epic, src/app_controller.py:AppController._cb_start_track, src/app_controller.py:AppController._fetch_models, src/app_controller.py:AppController._handle_approve_ask, src/app_controller.py:AppController._handle_generate_send, src/app_controller.py:AppController._handle_md_only, src/app_controller.py:AppController._handle_reject_ask, src/app_controller.py:AppController._init_ai_and_hooks, src/app_controller.py:AppController._process_event_queue, src/app_controller.py:AppController._prune_old_logs, src/app_controller.py:AppController._rebuild_rag_index, src/app_controller.py:AppController._run_event_loop, src/app_controller.py:AppController._start_track_logic, src/app_controller.py:AppController.cb_prune_logs, src/app_controller.py:AppController.start_services, src/gui_2.py:App._render_discussion_panel, src/mcp_client.py:ExternalMCPManager.add_server, src/multi_agent_conductor.py:WorkerPool.spawn, src/performance_monitor.py:PerformanceMonitor.__init__, tests/test_ai_client_concurrency.py:test_ai_client_tier_isolation, tests/test_conductor_engine_abort.py:test_kill_worker_sets_abort_and_joins_thread, tests/test_conductor_engine_v2.py:side_effect, tests/test_spawn_interception_v2.py:test_confirm_spawn_pushed_to_queue, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
[C: src/app_controller.py:AppController._cb_accept_tracks, src/app_controller.py:AppController._cb_plan_epic, src/app_controller.py:AppController._cb_start_track, src/app_controller.py:AppController._fetch_models, src/app_controller.py:AppController._handle_approve_ask, src/app_controller.py:AppController._handle_generate_send, src/app_controller.py:AppController._handle_md_only, src/app_controller.py:AppController._handle_reject_ask, src/app_controller.py:AppController._init_ai_and_hooks, src/app_controller.py:AppController._process_event_queue, src/app_controller.py:AppController._prune_old_logs, src/app_controller.py:AppController._rebuild_rag_index, src/app_controller.py:AppController._run_event_loop, src/app_controller.py:AppController._start_track_logic, src/app_controller.py:AppController.cb_prune_logs, src/app_controller.py:AppController.init_state, src/app_controller.py:AppController.start_services, src/gui_2.py:App._render_discussion_entry_read_mode, src/gui_2.py:App._update_context_file_stats, src/mcp_client.py:ExternalMCPManager.add_server, src/multi_agent_conductor.py:WorkerPool.spawn, src/performance_monitor.py:PerformanceMonitor.__init__, tests/test_ai_client_concurrency.py:test_ai_client_tier_isolation, tests/test_conductor_engine_abort.py:test_kill_worker_sets_abort_and_joins_thread, tests/test_conductor_engine_v2.py:side_effect, tests/test_spawn_interception_v2.py:test_confirm_spawn_pushed_to_queue, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
"""
|
||||
if self.thread and self.thread.is_alive():
|
||||
return
|
||||
@@ -710,7 +711,7 @@ class HookServer:
|
||||
|
||||
def stop(self) -> None:
|
||||
"""
|
||||
[C: src/app_controller.py:AppController.shutdown, src/mcp_client.py:ExternalMCPManager.stop_all, tests/test_performance_monitor.py:test_perf_monitor_basic_timing, tests/test_performance_monitor.py:test_perf_monitor_component_timing, tests/test_performance_monitor.py:test_perf_monitor_extended_metrics, tests/test_performance_monitor.py:test_perf_monitor_scope_context_manager, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
[C: src/app_controller.py:AppController.shutdown, src/mcp_client.py:ExternalMCPManager.stop_all, tests/test_performance_monitor.py:test_perf_monitor_basic_timing, tests/test_performance_monitor.py:test_perf_monitor_component_timing, tests/test_performance_monitor.py:test_perf_monitor_extended_metrics, tests/test_performance_monitor.py:test_perf_monitor_scope_context_manager, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
"""
|
||||
if self.websocket_server:
|
||||
self.websocket_server.stop()
|
||||
@@ -725,7 +726,7 @@ class WebSocketServer:
|
||||
"""WebSocket gateway for real-time event streaming."""
|
||||
def __init__(self, app: Any, port: int = 9000) -> None:
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self.app = app
|
||||
self.port = port
|
||||
@@ -760,7 +761,7 @@ class WebSocketServer:
|
||||
self._stop_event = asyncio.Event()
|
||||
async def main():
|
||||
"""
|
||||
[C: simulation/live_walkthrough.py:module, simulation/ping_pong.py:module, src/gui_2.py:module, tests/mock_concurrent_mma.py:module, tests/mock_gemini_cli.py:module, tests/test_cli_tool_bridge.py:TestCliToolBridge.test_allow_decision, tests/test_cli_tool_bridge.py:TestCliToolBridge.test_deny_decision, tests/test_cli_tool_bridge.py:TestCliToolBridge.test_unreachable_hook_server, tests/test_cli_tool_bridge.py:module, tests/test_cli_tool_bridge_mapping.py:TestCliToolBridgeMapping.test_mapping_from_api_format, tests/test_cli_tool_bridge_mapping.py:module, tests/test_discussion_takes.py:module, tests/test_external_editor_gui.py:module, tests/test_headless_service.py:TestHeadlessStartup.test_headless_flag_triggers_run, tests/test_headless_service.py:TestHeadlessStartup.test_normal_startup_calls_app_run, tests/test_mma_skeleton.py:module, tests/test_orchestrator_pm.py:module, tests/test_orchestrator_pm_history.py:module, tests/test_post_process.py:module, tests/test_presets.py:module, tests/test_project_serialization.py:module, tests/test_run_worker_lifecycle_abort.py:module, tests/test_symbol_lookup.py:module, tests/test_system_prompt_exposure.py:module, tests/test_theme_nerv_fx.py:module]
|
||||
[C: simulation/live_walkthrough.py:module, simulation/ping_pong.py:module, src/gui_2.py:module, tests/mock_concurrent_mma.py:module, tests/mock_gemini_cli.py:module, tests/test_cli_tool_bridge.py:TestCliToolBridge.test_allow_decision, tests/test_cli_tool_bridge.py:TestCliToolBridge.test_deny_decision, tests/test_cli_tool_bridge.py:TestCliToolBridge.test_unreachable_hook_server, tests/test_cli_tool_bridge.py:module, tests/test_cli_tool_bridge_mapping.py:TestCliToolBridgeMapping.test_mapping_from_api_format, tests/test_cli_tool_bridge_mapping.py:module, tests/test_discussion_takes.py:module, tests/test_external_editor_gui.py:module, tests/test_headless_service.py:TestHeadlessStartup.test_headless_flag_triggers_run, tests/test_headless_service.py:TestHeadlessStartup.test_normal_startup_calls_app_run, tests/test_mma_skeleton.py:module, tests/test_orchestrator_pm.py:module, tests/test_orchestrator_pm_history.py:module, tests/test_presets.py:module, tests/test_project_serialization.py:module, tests/test_run_worker_lifecycle_abort.py:module, tests/test_symbol_lookup.py:module, tests/test_system_prompt_exposure.py:module, tests/test_theme_nerv_fx.py:module]
|
||||
"""
|
||||
async with serve(self._handler, "127.0.0.1", self.port) as server:
|
||||
self.server = server
|
||||
@@ -769,7 +770,7 @@ class WebSocketServer:
|
||||
|
||||
def start(self) -> None:
|
||||
"""
|
||||
[C: src/app_controller.py:AppController._cb_accept_tracks, src/app_controller.py:AppController._cb_plan_epic, src/app_controller.py:AppController._cb_start_track, src/app_controller.py:AppController._fetch_models, src/app_controller.py:AppController._handle_approve_ask, src/app_controller.py:AppController._handle_generate_send, src/app_controller.py:AppController._handle_md_only, src/app_controller.py:AppController._handle_reject_ask, src/app_controller.py:AppController._init_ai_and_hooks, src/app_controller.py:AppController._process_event_queue, src/app_controller.py:AppController._prune_old_logs, src/app_controller.py:AppController._rebuild_rag_index, src/app_controller.py:AppController._run_event_loop, src/app_controller.py:AppController._start_track_logic, src/app_controller.py:AppController.cb_prune_logs, src/app_controller.py:AppController.start_services, src/gui_2.py:App._render_discussion_panel, src/mcp_client.py:ExternalMCPManager.add_server, src/multi_agent_conductor.py:WorkerPool.spawn, src/performance_monitor.py:PerformanceMonitor.__init__, tests/test_ai_client_concurrency.py:test_ai_client_tier_isolation, tests/test_conductor_engine_abort.py:test_kill_worker_sets_abort_and_joins_thread, tests/test_conductor_engine_v2.py:side_effect, tests/test_spawn_interception_v2.py:test_confirm_spawn_pushed_to_queue, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
[C: src/app_controller.py:AppController._cb_accept_tracks, src/app_controller.py:AppController._cb_plan_epic, src/app_controller.py:AppController._cb_start_track, src/app_controller.py:AppController._fetch_models, src/app_controller.py:AppController._handle_approve_ask, src/app_controller.py:AppController._handle_generate_send, src/app_controller.py:AppController._handle_md_only, src/app_controller.py:AppController._handle_reject_ask, src/app_controller.py:AppController._init_ai_and_hooks, src/app_controller.py:AppController._process_event_queue, src/app_controller.py:AppController._prune_old_logs, src/app_controller.py:AppController._rebuild_rag_index, src/app_controller.py:AppController._run_event_loop, src/app_controller.py:AppController._start_track_logic, src/app_controller.py:AppController.cb_prune_logs, src/app_controller.py:AppController.init_state, src/app_controller.py:AppController.start_services, src/gui_2.py:App._render_discussion_entry_read_mode, src/gui_2.py:App._update_context_file_stats, src/mcp_client.py:ExternalMCPManager.add_server, src/multi_agent_conductor.py:WorkerPool.spawn, src/performance_monitor.py:PerformanceMonitor.__init__, tests/test_ai_client_concurrency.py:test_ai_client_tier_isolation, tests/test_conductor_engine_abort.py:test_kill_worker_sets_abort_and_joins_thread, tests/test_conductor_engine_v2.py:side_effect, tests/test_spawn_interception_v2.py:test_confirm_spawn_pushed_to_queue, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
"""
|
||||
if self.thread and self.thread.is_alive():
|
||||
return
|
||||
@@ -778,7 +779,7 @@ class WebSocketServer:
|
||||
|
||||
def stop(self) -> None:
|
||||
"""
|
||||
[C: src/app_controller.py:AppController.shutdown, src/mcp_client.py:ExternalMCPManager.stop_all, tests/test_performance_monitor.py:test_perf_monitor_basic_timing, tests/test_performance_monitor.py:test_perf_monitor_component_timing, tests/test_performance_monitor.py:test_perf_monitor_extended_metrics, tests/test_performance_monitor.py:test_perf_monitor_scope_context_manager, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
[C: src/app_controller.py:AppController.shutdown, src/mcp_client.py:ExternalMCPManager.stop_all, tests/test_performance_monitor.py:test_perf_monitor_basic_timing, tests/test_performance_monitor.py:test_perf_monitor_component_timing, tests/test_performance_monitor.py:test_perf_monitor_extended_metrics, tests/test_performance_monitor.py:test_perf_monitor_scope_context_manager, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
"""
|
||||
if self.loop and self._stop_event:
|
||||
self.loop.call_soon_threadsafe(self._stop_event.set)
|
||||
@@ -787,10 +788,10 @@ class WebSocketServer:
|
||||
|
||||
def broadcast(self, channel: str, payload: dict[str, Any]) -> None:
|
||||
"""
|
||||
[C: src/app_controller.py:AppController._process_pending_gui_tasks, src/events.py:AsyncEventQueue.put, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
[C: src/app_controller.py:AppController._process_pending_gui_tasks, src/events.py:AsyncEventQueue.put, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
"""
|
||||
if not self.loop or channel not in self.clients:
return
message = json.dumps({"channel": channel, "payload": payload})
for ws in list(self.clients[channel]):
asyncio.run_coroutine_threadsafe(ws.send(message), self.loop)
asyncio.run_coroutine_threadsafe(ws.send(message), self.loop)
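broadcast() is called from worker threads, so coroutines are handed to the server's event loop with run_coroutine_threadsafe rather than awaited directly. A standalone sketch of that pattern (not this class's actual wiring):

import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

async def deliver(message: str) -> None:
    print("delivered:", message)

# Schedule a coroutine onto the loop from a foreign thread and block on its result.
future = asyncio.run_coroutine_threadsafe(deliver("hello"), loop)
future.result(timeout=5)
loop.call_soon_threadsafe(loop.stop)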
+217
-155
File diff suppressed because it is too large
+11
-6
@@ -13,7 +13,7 @@ class Bead:
|
||||
class BeadsClient:
|
||||
def __init__(self, working_dir: Path):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self.working_dir = Path(working_dir)
|
||||
self.repo_dir = self.working_dir / ".beads_mock"
|
||||
@@ -21,7 +21,8 @@ class BeadsClient:
|
||||
|
||||
def init_repo(self) -> None:
|
||||
"""
|
||||
Initialize the mock repository.
|
||||
|
||||
Initialize the mock repository.
|
||||
[C: tests/test_aggregate_beads.py:test_build_beads_compaction, tests/test_beads_client.py:test_beads_init_and_query, tests/test_gui_dag_beads.py:test_load_active_tickets_from_beads, tests/test_mcp_client_beads.py:test_bd_mcp_tools]
|
||||
"""
|
||||
self.repo_dir.mkdir(parents=True, exist_ok=True)
|
||||
@@ -30,14 +31,16 @@ class BeadsClient:
|
||||
|
||||
def is_initialized(self) -> bool:
|
||||
"""
|
||||
Check if the repository is initialized.
|
||||
|
||||
Check if the repository is initialized.
|
||||
[C: src/mcp_client.py:dispatch, tests/test_beads_client.py:test_beads_init_and_query]
|
||||
"""
|
||||
return self.beads_file.exists()
|
||||
|
||||
def create_bead(self, title: str, description: str) -> str:
|
||||
"""
|
||||
Create a new bead and return its ID.
|
||||
|
||||
Create a new bead and return its ID.
|
||||
[C: src/mcp_client.py:dispatch, tests/test_aggregate_beads.py:test_build_beads_compaction, tests/test_beads_client.py:test_beads_init_and_query, tests/test_gui_dag_beads.py:test_load_active_tickets_from_beads]
|
||||
"""
|
||||
beads = self._read_beads()
|
||||
@@ -49,7 +52,8 @@ class BeadsClient:
|
||||
|
||||
def update_bead(self, bead_id: str, status: str) -> bool:
|
||||
"""
|
||||
Update the status of an existing bead.
|
||||
|
||||
Update the status of an existing bead.
|
||||
[C: src/mcp_client.py:dispatch, tests/test_aggregate_beads.py:test_build_beads_compaction, tests/test_beads_client.py:test_beads_init_and_query]
|
||||
"""
|
||||
beads = self._read_beads()
|
||||
@@ -62,7 +66,8 @@ class BeadsClient:
|
||||
|
||||
def list_beads(self) -> List[Bead]:
"""
List all beads.
List all beads.
[C: src/gui_2.py:App._render_beads_tab, src/mcp_client.py:dispatch, tests/test_beads_client.py:test_beads_init_and_query]
"""
return [Bead(**b) for b in self._read_beads()]
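Taken together, the mock client supports a simple create/update/list lifecycle. A usage sketch (the working directory and the status string are assumptions for illustration):

from pathlib import Path

client = BeadsClient(Path("/tmp/beads_demo"))
client.init_repo()
bead_id = client.create_bead("Add login form", "Implement the login form component")
client.update_bead(bead_id, "in_progress")  # status value is an assumed convention
for bead in client.list_beads():
    print(bead)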
+4
-4
@@ -8,7 +8,7 @@ from imgui_bundle import imgui, nanovg as nvg, hello_imgui
|
||||
class BackgroundShader:
|
||||
def __init__(self):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self.enabled = False
|
||||
self.start_time = time.time()
|
||||
@@ -16,7 +16,7 @@ class BackgroundShader:
|
||||
|
||||
def render(self, width: float, height: float):
|
||||
"""
|
||||
[C: src/gui_2.py:App._gui_func, src/gui_2.py:App._render_discussion_panel, src/gui_2.py:App._render_heavy_text, src/gui_2.py:App._render_markdown_test, src/gui_2.py:App._render_response_panel, src/gui_2.py:App._render_snapshot_tab, src/markdown_helper.py:MarkdownRenderer._render_code_block, src/markdown_helper.py:MarkdownRenderer.render, src/markdown_helper.py:render, tests/test_theme_nerv_alert.py:test_alert_pulsing_render_active, tests/test_theme_nerv_alert.py:test_alert_pulsing_render_inactive, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_alert_pulsing_render, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_disabled, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_render]
|
||||
[C: src/gui_2.py:App._gui_func, src/gui_2.py:App._render_discussion_entry_read_mode, src/gui_2.py:App._render_heavy_text, src/gui_2.py:App._render_markdown_test, src/gui_2.py:App._render_prior_session_view, src/gui_2.py:App._render_response_panel, src/gui_2.py:App._render_snapshot_tab, src/gui_2.py:App._render_text_viewer_window, src/markdown_helper.py:MarkdownRenderer._render_code_block, src/markdown_helper.py:MarkdownRenderer.render, src/markdown_helper.py:render, src/theme_2.py:render_post_fx, tests/test_theme_nerv_alert.py:test_alert_pulsing_render_active, tests/test_theme_nerv_alert.py:test_alert_pulsing_render_inactive, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_alert_pulsing_render, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_disabled, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_render]
|
||||
"""
|
||||
if not self.enabled or width <= 0 or height <= 0:
|
||||
return
|
||||
@@ -66,9 +66,9 @@ _bg: Optional[BackgroundShader] = None
|
||||
|
||||
def get_bg():
|
||||
"""
|
||||
[C: src/gui_2.py:App._gui_func, src/gui_2.py:App._render_theme_panel]
|
||||
[C: src/gui_2.py:App._gui_func, src/gui_2.py:App._render_theme_panel]
|
||||
"""
|
||||
global _bg
|
||||
if _bg is None:
|
||||
_bg = BackgroundShader()
|
||||
return _bg
|
||||
return _bg
|
||||
@@ -42,9 +42,10 @@ from typing import Any
|
||||
def generate_tickets(track_brief: str, module_skeletons: str) -> list[dict[str, Any]]:
|
||||
"""
|
||||
|
||||
Tier 2 (Tech Lead) call.
|
||||
Breaks down a Track Brief and module skeletons into discrete Tier 3 Tickets.
|
||||
[C: src/native_orchestrator.py:NativeOrchestrator.generate_tickets, tests/test_conductor_tech_lead.py:TestConductorTechLead.test_generate_tickets_retry_failure, tests/test_conductor_tech_lead.py:TestConductorTechLead.test_generate_tickets_retry_success, tests/test_conductor_tech_lead.py:TestConductorTechLead.test_generate_tickets_success, tests/test_orchestration_logic.py:test_generate_tickets]
|
||||
|
||||
Tier 2 (Tech Lead) call.
|
||||
Breaks down a Track Brief and module skeletons into discrete Tier 3 Tickets.
|
||||
[C: tests/test_conductor_tech_lead.py:TestConductorTechLead.test_generate_tickets_retry_failure, tests/test_conductor_tech_lead.py:TestConductorTechLead.test_generate_tickets_retry_success, tests/test_conductor_tech_lead.py:TestConductorTechLead.test_generate_tickets_success, tests/test_orchestration_logic.py:test_generate_tickets]
|
||||
"""
|
||||
# 1. Set Tier 2 Model (Tech Lead - Flash)
|
||||
# 2. Construct Prompt
|
||||
@@ -98,8 +99,9 @@ from src.models import Ticket
|
||||
def topological_sort(tickets: list[dict[str, Any]]) -> list[dict[str, Any]]:
|
||||
"""
|
||||
|
||||
Sorts a list of tickets based on their 'depends_on' field.
|
||||
Raises ValueError if a circular dependency or missing internal dependency is detected.
|
||||
|
||||
Sorts a list of tickets based on their 'depends_on' field.
|
||||
Raises ValueError if a circular dependency or missing internal dependency is detected.
|
||||
[C: tests/test_conductor_tech_lead.py:TestTopologicalSort.test_topological_sort_complex, tests/test_conductor_tech_lead.py:TestTopologicalSort.test_topological_sort_cycle, tests/test_conductor_tech_lead.py:TestTopologicalSort.test_topological_sort_empty, tests/test_conductor_tech_lead.py:TestTopologicalSort.test_topological_sort_linear, tests/test_conductor_tech_lead.py:TestTopologicalSort.test_topological_sort_missing_dependency, tests/test_conductor_tech_lead.py:test_topological_sort_vlog, tests/test_dag_engine.py:test_topological_sort, tests/test_dag_engine.py:test_topological_sort_cycle, tests/test_orchestration_logic.py:test_topological_sort, tests/test_orchestration_logic.py:test_topological_sort_circular, tests/test_perf_dag.py:test_dag_edge_cases, tests/test_perf_dag.py:test_dag_performance]
|
||||
"""
|
||||
# 1. Convert to Ticket objects for TrackDAG
|
||||
@@ -121,4 +123,4 @@ if __name__ == "__main__":
|
||||
test_brief = "Implement a new feature."
|
||||
test_skeletons = "class NewFeature: pass"
|
||||
tickets = generate_tickets(test_brief, test_skeletons)
|
||||
print(json.dumps(tickets, indent=2))
|
||||
print(json.dumps(tickets, indent=2))
|
||||
+5
-4
@@ -47,9 +47,10 @@ MODEL_PRICING = [
|
||||
def estimate_cost(model: str, input_tokens: int, output_tokens: int) -> float:
|
||||
"""
|
||||
|
||||
Estimate the cost of a model call based on input and output tokens.
|
||||
Returns the total cost in USD.
|
||||
[C: src/gui_2.py:App._render_mma_dashboard, src/gui_2.py:App._render_token_budget_panel, tests/test_cost_tracker.py:test_estimate_cost]
|
||||
|
||||
Estimate the cost of a model call based on input and output tokens.
|
||||
Returns the total cost in USD.
|
||||
[C: src/gui_2.py:App._render_mma_track_summary, src/gui_2.py:App._render_mma_usage_section, src/gui_2.py:App._render_token_budget_panel, tests/test_cost_tracker.py:test_estimate_cost]
"""
if not model:
return 0.0
@@ -60,4 +61,4 @@ def estimate_cost(model: str, input_tokens: int, output_tokens: int) -> float:
output_cost = (output_tokens / 1_000_000) * rates["output_per_mtok"]
return input_cost + output_cost
return 0.0
return 0.0
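A worked example of the per-million-token formula above, using made-up rates of $1.25 per 1M input tokens and $5.00 per 1M output tokens:

rates = {"input_per_mtok": 1.25, "output_per_mtok": 5.00}  # hypothetical pricing
input_tokens, output_tokens = 120_000, 8_000
input_cost = (input_tokens / 1_000_000) * rates["input_per_mtok"]     # 0.15 USD
output_cost = (output_tokens / 1_000_000) * rates["output_per_mtok"]  # 0.04 USD
print(round(input_cost + output_cost, 2))  # 0.19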
+47
-36
@@ -33,16 +33,18 @@ from src.performance_monitor import get_monitor
|
||||
class TrackDAG:
|
||||
"""
|
||||
|
||||
Manages a Directed Acyclic Graph of implementation tickets.
|
||||
Provides methods for dependency resolution, cycle detection, and topological sorting.
|
||||
|
||||
Manages a Directed Acyclic Graph of implementation tickets.
|
||||
Provides methods for dependency resolution, cycle detection, and topological sorting.
|
||||
"""
|
||||
|
||||
def __init__(self, tickets: List[Ticket]) -> None:
|
||||
"""
|
||||
|
||||
Initializes the TrackDAG with a list of Ticket objects.
|
||||
Args:
|
||||
tickets: A list of Ticket instances defining the graph nodes and edges.
|
||||
|
||||
Initializes the TrackDAG with a list of Ticket objects.
|
||||
Args:
|
||||
tickets: A list of Ticket instances defining the graph nodes and edges.
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self.tickets = tickets
|
||||
@@ -51,8 +53,9 @@ class TrackDAG:
|
||||
def cascade_blocks(self) -> None:
|
||||
"""
|
||||
|
||||
Transitively marks `todo` tickets as `blocked` if any dependency is `blocked`.
|
||||
Propagates 'blocked' status from initially blocked nodes to their dependents.
|
||||
|
||||
Transitively marks `todo` tickets as `blocked` if any dependency is `blocked`.
|
||||
Propagates 'blocked' status from initially blocked nodes to their dependents.
[C: tests/test_perf_dag.py:test_dag_performance]
"""
with get_monitor().scope("dag_cascade_blocks"):
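A standalone sketch of the propagation the docstring describes, written over plain dicts rather than Ticket objects (the traversal strategy is an assumption, not the actual method body):

def cascade_blocks(tickets: list[dict]) -> None:
    # Keep sweeping until no more 'todo' tickets can be proven blocked by a dependency.
    status = {t["id"]: t["status"] for t in tickets}
    changed = True
    while changed:
        changed = False
        for t in tickets:
            if t["status"] == "todo" and any(
                status.get(dep) == "blocked" for dep in t.get("depends_on", [])
            ):
                t["status"] = status[t["id"]] = "blocked"
                changed = True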
@@ -88,9 +91,10 @@ class TrackDAG:
|
||||
def get_ready_tasks(self) -> List[Ticket]:
|
||||
"""
|
||||
|
||||
Returns a list of tickets that are in 'todo' status and whose dependencies are all 'completed'.
|
||||
Returns:
|
||||
A list of Ticket objects ready for execution.
|
||||
|
||||
Returns a list of tickets that are in 'todo' status and whose dependencies are all 'completed'.
|
||||
Returns:
|
||||
A list of Ticket objects ready for execution.
|
||||
[C: src/models.py:Track.get_executable_tickets, tests/test_dag_engine.py:test_get_ready_tasks_branching, tests/test_dag_engine.py:test_get_ready_tasks_linear, tests/test_dag_engine.py:test_get_ready_tasks_multiple_deps, tests/test_orchestration_logic.py:test_track_executable_tickets]
"""
ready = []
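The readiness rule reduces to a set check; a minimal equivalent over plain dicts (an illustration of the contract, not the real implementation):

def get_ready_tasks(tickets: list[dict]) -> list[dict]:
    # Ready = still 'todo' and every dependency already 'completed'.
    done = {t["id"] for t in tickets if t["status"] == "completed"}
    return [
        t for t in tickets
        if t["status"] == "todo" and all(dep in done for dep in t.get("depends_on", []))
    ]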
@@ -102,9 +106,10 @@ class TrackDAG:
|
||||
def has_cycle(self) -> bool:
|
||||
"""
|
||||
|
||||
Performs an iterative Depth-First Search to detect cycles in the dependency graph.
|
||||
Returns:
|
||||
True if a cycle is detected, False otherwise.
|
||||
|
||||
Performs an iterative Depth-First Search to detect cycles in the dependency graph.
|
||||
Returns:
|
||||
True if a cycle is detected, False otherwise.
|
||||
[C: src/gui_2.py:App._render_task_dag_panel, tests/test_dag_engine.py:test_has_cycle_complex_no_cycle, tests/test_dag_engine.py:test_has_cycle_direct_cycle, tests/test_dag_engine.py:test_has_cycle_indirect_cycle, tests/test_dag_engine.py:test_has_cycle_no_cycle, tests/test_perf_dag.py:test_dag_edge_cases, tests/test_perf_dag.py:test_dag_performance]
"""
with get_monitor().scope("dag_has_cycle"):
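An iterative depth-first search with white/grey/black colouring is the usual way to do this without recursion; a self-contained sketch over an adjacency dict (the real method works on Ticket objects):

def has_cycle(edges: dict[str, list[str]]) -> bool:
    WHITE, GREY, BLACK = 0, 1, 2
    color = {node: WHITE for node in edges}
    for start in edges:
        if color[start] != WHITE:
            continue
        color[start] = GREY
        stack = [(start, iter(edges[start]))]
        while stack:
            node, neighbours = stack[-1]
            for nxt in neighbours:
                if color.get(nxt, WHITE) == GREY:
                    return True  # back edge: nxt is still on the current DFS path
                if color.get(nxt, WHITE) == WHITE:
                    color[nxt] = GREY
                    stack.append((nxt, iter(edges.get(nxt, []))))
                    break
            else:
                color[node] = BLACK
                stack.pop()
    return False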
@@ -135,12 +140,13 @@ class TrackDAG:
|
||||
def topological_sort(self) -> List[str]:
|
||||
"""
|
||||
|
||||
Returns a list of ticket IDs in topological order (dependencies before dependents).
|
||||
Uses Kahn's algorithm for efficient O(V+E) sorting and cycle detection.
|
||||
Returns:
|
||||
A list of ticket ID strings.
|
||||
Raises:
|
||||
ValueError: If a dependency cycle is detected.
|
||||
|
||||
Returns a list of ticket IDs in topological order (dependencies before dependents).
|
||||
Uses Kahn's algorithm for efficient O(V+E) sorting and cycle detection.
|
||||
Returns:
|
||||
A list of ticket ID strings.
|
||||
Raises:
|
||||
ValueError: If a dependency cycle is detected.
|
||||
[C: tests/test_conductor_tech_lead.py:TestTopologicalSort.test_topological_sort_complex, tests/test_conductor_tech_lead.py:TestTopologicalSort.test_topological_sort_cycle, tests/test_conductor_tech_lead.py:TestTopologicalSort.test_topological_sort_empty, tests/test_conductor_tech_lead.py:TestTopologicalSort.test_topological_sort_linear, tests/test_conductor_tech_lead.py:TestTopologicalSort.test_topological_sort_missing_dependency, tests/test_conductor_tech_lead.py:test_topological_sort_vlog, tests/test_dag_engine.py:test_topological_sort, tests/test_dag_engine.py:test_topological_sort_cycle, tests/test_orchestration_logic.py:test_topological_sort, tests/test_orchestration_logic.py:test_topological_sort_circular, tests/test_perf_dag.py:test_dag_edge_cases, tests/test_perf_dag.py:test_dag_performance]
"""
with get_monitor().scope("dag_topological_sort"):
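Kahn's algorithm as referenced in the docstring, sketched over a plain {ticket_id: [dependency_ids]} mapping (assumes every dependency id also appears as a key):

from collections import deque

def kahn_sort(depends_on: dict[str, list[str]]) -> list[str]:
    # Emit nodes whose dependencies are all satisfied; O(V+E) overall.
    indegree = {node: len(deps) for node, deps in depends_on.items()}
    dependents = {node: [] for node in depends_on}
    for node, deps in depends_on.items():
        for dep in deps:
            dependents[dep].append(node)
    queue = deque(node for node, deg in indegree.items() if deg == 0)
    order = []
    while queue:
        node = queue.popleft()
        order.append(node)
        for dependent in dependents[node]:
            indegree[dependent] -= 1
            if indegree[dependent] == 0:
                queue.append(dependent)
    if len(order) != len(depends_on):
        raise ValueError("Dependency cycle detected")
    return order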
@@ -171,17 +177,19 @@ class TrackDAG:
|
||||
class ExecutionEngine:
|
||||
"""
|
||||
|
||||
A state machine that governs the progression of tasks within a TrackDAG.
|
||||
Handles automatic queueing and manual task approval.
|
||||
|
||||
A state machine that governs the progression of tasks within a TrackDAG.
|
||||
Handles automatic queueing and manual task approval.
|
||||
"""
|
||||
|
||||
def __init__(self, dag: TrackDAG, auto_queue: bool = False) -> None:
|
||||
"""
|
||||
|
||||
Initializes the ExecutionEngine.
|
||||
Args:
|
||||
dag: The TrackDAG instance to manage.
|
||||
auto_queue: If True, ready tasks will automatically move to 'in_progress'.
|
||||
|
||||
Initializes the ExecutionEngine.
|
||||
Args:
|
||||
dag: The TrackDAG instance to manage.
|
||||
auto_queue: If True, ready tasks will automatically move to 'in_progress'.
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self.dag = dag
|
||||
@@ -190,10 +198,11 @@ class ExecutionEngine:
|
||||
def tick(self) -> List[Ticket]:
|
||||
"""
|
||||
|
||||
Evaluates the DAG and returns a list of tasks that are currently 'ready' for execution.
|
||||
If auto_queue is enabled, tasks without 'step_mode' will be marked as 'in_progress'.
|
||||
Returns:
|
||||
A list of ready Ticket objects.
|
||||
|
||||
Evaluates the DAG and returns a list of tasks that are currently 'ready' for execution.
|
||||
If auto_queue is enabled, tasks without 'step_mode' will be marked as 'in_progress'.
|
||||
Returns:
|
||||
A list of ready Ticket objects.
|
||||
[C: src/multi_agent_conductor.py:ConductorEngine.run, tests/test_arch_boundary_phase3.py:TestArchBoundaryPhase3.test_cascade_blocks_multi_hop, tests/test_arch_boundary_phase3.py:TestArchBoundaryPhase3.test_cascade_blocks_simple, tests/test_arch_boundary_phase3.py:TestArchBoundaryPhase3.test_execution_engine_tick_cascades_blocks, tests/test_arch_boundary_phase3.py:TestArchBoundaryPhase3.test_in_progress_not_blocked, tests/test_arch_boundary_phase3.py:TestArchBoundaryPhase3.test_manual_unblock_restores_todo, tests/test_execution_engine.py:test_execution_engine_auto_queue, tests/test_execution_engine.py:test_execution_engine_basic_flow, tests/test_execution_engine.py:test_execution_engine_step_mode]
"""
with get_monitor().scope("dag_tick"):
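A hypothetical driver loop built on the tick/update contract described above; the completion handling is an assumption for illustration only:

def drain(engine) -> None:
    # Keep ticking until nothing is ready, marking each dispatched ticket as done.
    while ready := engine.tick():
        for ticket in ready:
            engine.update_task_status(ticket.id, "completed")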
@@ -204,9 +213,10 @@ class ExecutionEngine:
|
||||
def approve_task(self, task_id: str) -> None:
|
||||
"""
|
||||
|
||||
Manually transitions a task from 'todo' to 'in_progress' if its dependencies are met.
|
||||
Args:
|
||||
task_id: The ID of the task to approve.
|
||||
|
||||
Manually transitions a task from 'todo' to 'in_progress' if its dependencies are met.
|
||||
Args:
|
||||
task_id: The ID of the task to approve.
|
||||
[C: src/multi_agent_conductor.py:ConductorEngine.approve_task, tests/test_execution_engine.py:test_execution_engine_approve_task, tests/test_execution_engine.py:test_execution_engine_step_mode]
|
||||
"""
|
||||
ticket = self.dag.ticket_map.get(task_id)
|
||||
@@ -216,10 +226,11 @@ class ExecutionEngine:
|
||||
def update_task_status(self, task_id: str, status: str) -> None:
|
||||
"""
|
||||
|
||||
Force-updates the status of a specific task.
|
||||
Args:
|
||||
task_id: The ID of the task.
|
||||
status: The new status string (e.g., 'todo', 'in_progress', 'completed', 'blocked').
|
||||
|
||||
Force-updates the status of a specific task.
|
||||
Args:
|
||||
task_id: The ID of the task.
|
||||
status: The new status string (e.g., 'todo', 'in_progress', 'completed', 'blocked').
|
||||
[C: src/multi_agent_conductor.py:ConductorEngine.update_task_status, tests/test_arch_boundary_phase3.py:TestArchBoundaryPhase3.test_manual_unblock_restores_todo, tests/test_execution_engine.py:test_execution_engine_auto_queue, tests/test_execution_engine.py:test_execution_engine_basic_flow, tests/test_execution_engine.py:test_execution_engine_status_persistence, tests/test_execution_engine.py:test_execution_engine_update_nonexistent_task]
|
||||
"""
|
||||
ticket = self.dag.ticket_map.get(task_id)
|
||||
|
||||
+5
-5
@@ -21,7 +21,7 @@ class DiffFile:
|
||||
|
||||
def parse_hunk_header(line: str) -> Optional[tuple[int, int, int, int]]:
|
||||
"""
|
||||
[C: tests/test_diff_viewer.py:test_parse_hunk_header]
|
||||
[C: tests/test_diff_viewer.py:test_parse_hunk_header]
"""
if not line.startswith("@@"):
return None
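The contract here is to pull the four numbers out of a unified-diff hunk header. A regex-based sketch of the same parsing (the shipped implementation may differ):

import re
from typing import Optional, Tuple

HUNK_RE = re.compile(r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@")

def parse_hunk_header_sketch(line: str) -> Optional[Tuple[int, int, int, int]]:
    # "@@ -old_start,old_count +new_start,new_count @@"; a missing count defaults to 1.
    match = HUNK_RE.match(line)
    if not match:
        return None
    old_start, old_count, new_start, new_count = match.groups()
    return int(old_start), int(old_count or 1), int(new_start), int(new_count or 1)

assert parse_hunk_header_sketch("@@ -13,7 +13,13 @@ class Foo:") == (13, 7, 13, 13)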
@@ -45,7 +45,7 @@ def parse_hunk_header(line: str) -> Optional[tuple[int, int, int, int]]:
|
||||
|
||||
def parse_diff(diff_text: str) -> List[DiffFile]:
|
||||
"""
|
||||
[C: src/gui_2.py:App.request_patch_from_tier4, tests/test_diff_viewer.py:test_diff_line_classification, tests/test_diff_viewer.py:test_parse_diff_empty, tests/test_diff_viewer.py:test_parse_diff_none, tests/test_diff_viewer.py:test_parse_diff_with_context, tests/test_diff_viewer.py:test_parse_multiple_files, tests/test_diff_viewer.py:test_parse_simple_diff, tests/test_diff_viewer.py:test_render_diff_text_immediate]
|
||||
[C: src/gui_2.py:App.request_patch_from_tier4, tests/test_diff_viewer.py:test_diff_line_classification, tests/test_diff_viewer.py:test_parse_diff_empty, tests/test_diff_viewer.py:test_parse_diff_none, tests/test_diff_viewer.py:test_parse_diff_with_context, tests/test_diff_viewer.py:test_parse_multiple_files, tests/test_diff_viewer.py:test_parse_simple_diff]
|
||||
"""
|
||||
if not diff_text or not diff_text.strip():
|
||||
return []
|
||||
@@ -113,7 +113,7 @@ def parse_diff(diff_text: str) -> List[DiffFile]:
|
||||
|
||||
def get_line_color(line: str) -> Optional[str]:
|
||||
"""
|
||||
[C: tests/test_diff_viewer.py:test_get_line_color]
|
||||
[C: tests/test_diff_viewer.py:test_get_line_color]
|
||||
"""
|
||||
if line.startswith("+"):
|
||||
return "green"
|
||||
@@ -125,7 +125,7 @@ def get_line_color(line: str) -> Optional[str]:
|
||||
|
||||
def apply_patch_to_file(patch_text: str, base_dir: str = ".") -> Tuple[bool, str]:
|
||||
"""
|
||||
[C: src/gui_2.py:App._apply_pending_patch, tests/test_diff_viewer.py:test_apply_patch_simple, tests/test_diff_viewer.py:test_apply_patch_with_context]
|
||||
[C: src/gui_2.py:App._apply_pending_patch, tests/test_diff_viewer.py:test_apply_patch_simple, tests/test_diff_viewer.py:test_apply_patch_with_context]
|
||||
"""
|
||||
import difflib
|
||||
|
||||
@@ -171,4 +171,4 @@ def apply_patch_to_file(patch_text: str, base_dir: str = ".") -> Tuple[bool, str
|
||||
except Exception as e:
|
||||
return False, f"Error patching {file_path}: {e}"
|
||||
|
||||
return True, "\n".join(results)
|
||||
return True, "\n".join(results)
|
||||
+44
-32
File diff suppressed because one or more lines are too long
@@ -13,13 +13,13 @@ from src.models import ExternalEditorConfig, TextEditorConfig
|
||||
class ExternalEditorLauncher:
|
||||
def __init__(self, config: ExternalEditorConfig):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self.config = config
|
||||
|
||||
def get_editor(self, editor_name: Optional[str] = None) -> Optional[TextEditorConfig]:
|
||||
"""
|
||||
[C: tests/test_external_editor.py:TestExternalEditorLauncher.test_get_editor_by_name, tests/test_external_editor.py:TestExternalEditorLauncher.test_get_editor_returns_default, tests/test_external_editor.py:TestExternalEditorLauncher.test_get_editor_unknown_name]
|
||||
[C: tests/test_external_editor.py:TestExternalEditorLauncher.test_get_editor_by_name, tests/test_external_editor.py:TestExternalEditorLauncher.test_get_editor_returns_default, tests/test_external_editor.py:TestExternalEditorLauncher.test_get_editor_unknown_name]
|
||||
"""
|
||||
if editor_name:
|
||||
return self.config.editors.get(editor_name)
|
||||
@@ -29,7 +29,7 @@ class ExternalEditorLauncher:
|
||||
self, editor: TextEditorConfig, original_path: str, modified_path: str
|
||||
) -> List[str]:
|
||||
"""
|
||||
[C: tests/test_external_editor.py:TestExternalEditorLauncher.test_build_diff_command, tests/test_external_editor_gui.py:test_verify_command_format, tests/test_external_editor_gui.py:test_verify_vscode_command_format]
|
||||
[C: tests/test_external_editor.py:TestExternalEditorLauncher.test_build_diff_command, tests/test_external_editor_gui.py:test_verify_command_format, tests/test_external_editor_gui.py:test_verify_vscode_command_format]
|
||||
"""
cmd = [editor.path] + editor.diff_args + [original_path, modified_path]
return cmd
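With a VS Code-style editor entry this composes to something like `code --diff --wait old new`; the concrete path and diff_args below are illustrative, not taken from the real config:

path = "code"                      # hypothetical editor.path
diff_args = ["--diff", "--wait"]   # hypothetical editor.diff_args
cmd = [path] + diff_args + ["original.py", "modified.py"]
print(cmd)  # ['code', '--diff', '--wait', 'original.py', 'modified.py']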
@@ -38,7 +38,7 @@ class ExternalEditorLauncher:
|
||||
self, editor_name: Optional[str], original_path: str, modified_path: str
|
||||
) -> Optional[subprocess.Popen]:
|
||||
"""
|
||||
[C: src/gui_2.py:App._open_patch_in_external_editor, tests/test_external_editor.py:TestExternalEditorLauncher.test_launch_diff_file_not_found, tests/test_external_editor.py:TestExternalEditorLauncher.test_launch_diff_missing_editor, tests/test_external_editor.py:TestExternalEditorLauncher.test_launch_diff_success]
|
||||
[C: src/gui_2.py:App._open_patch_in_external_editor, tests/test_external_editor.py:TestExternalEditorLauncher.test_launch_diff_file_not_found, tests/test_external_editor.py:TestExternalEditorLauncher.test_launch_diff_missing_editor, tests/test_external_editor.py:TestExternalEditorLauncher.test_launch_diff_success]
|
||||
"""
|
||||
editor = self.get_editor(editor_name)
|
||||
if not editor:
|
||||
@@ -117,7 +117,7 @@ def auto_detect_vscode() -> Optional[TextEditorConfig]:
|
||||
|
||||
def get_default_launcher() -> ExternalEditorLauncher:
|
||||
"""
|
||||
[C: src/gui_2.py:App._open_patch_in_external_editor, src/gui_2.py:App._render_external_editor_panel]
|
||||
[C: src/gui_2.py:App._open_patch_in_external_editor, src/gui_2.py:App._render_external_editor_panel]
|
||||
"""
|
||||
from src import models
|
||||
config = models.load_config()
|
||||
@@ -142,7 +142,7 @@ def get_default_launcher() -> ExternalEditorLauncher:
|
||||
|
||||
def create_temp_modified_file(content: str) -> str:
|
||||
"""
|
||||
[C: src/gui_2.py:App._open_patch_in_external_editor, tests/test_external_editor.py:TestHelperFunctions.test_create_temp_modified_file]
|
||||
[C: src/gui_2.py:App._open_patch_in_external_editor, tests/test_external_editor.py:TestHelperFunctions.test_create_temp_modified_file]
|
||||
"""
|
||||
with tempfile.NamedTemporaryFile(mode="w", suffix="_modified", delete=False, encoding="utf-8") as f:
|
||||
f.write(content)
|
||||
|
||||
+37
-20
@@ -47,13 +47,14 @@ _ast_cache: Dict[str, Tuple[float, tree_sitter.Tree]] = {}
|
||||
class ASTParser:
|
||||
"""
|
||||
|
||||
Parser for extracting AST-based views of source code.
|
||||
Currently supports Python.
|
||||
|
||||
Parser for extracting AST-based views of source code.
|
||||
Currently supports Python.
|
||||
"""
|
||||
#region: Core Operations
|
||||
def __init__(self, language: str) -> None:
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
if language not in ("python", "cpp", "c"):
|
||||
raise ValueError(f"Language '{language}' not supported yet.")
|
||||
@@ -69,7 +70,8 @@ class ASTParser:
|
||||
|
||||
def parse(self, code: str) -> tree_sitter.Tree:
|
||||
"""
|
||||
Parse the given code and return the tree-sitter Tree.
|
||||
|
||||
Parse the given code and return the tree-sitter Tree.
|
||||
[C: src/mcp_client.py:_search_file, src/mcp_client.py:derive_code_path, src/mcp_client.py:py_check_syntax, src/mcp_client.py:py_get_class_summary, src/mcp_client.py:py_get_definition, src/mcp_client.py:py_get_docstring, src/mcp_client.py:py_get_imports, src/mcp_client.py:py_get_signature, src/mcp_client.py:py_get_symbol_info, src/mcp_client.py:py_get_var_declaration, src/mcp_client.py:py_set_signature, src/mcp_client.py:py_set_var_declaration, src/mcp_client.py:py_update_definition, src/mcp_client.py:trace, src/outline_tool.py:CodeOutliner.outline, src/rag_engine.py:RAGEngine._chunk_code, src/summarize.py:_summarise_python, tests/test_ast_parser.py:test_ast_parser_parse, tests/test_tree_sitter_setup.py:test_tree_sitter_python_setup]
|
||||
"""
|
||||
return self.parser.parse(bytes(code, "utf8"))
|
||||
@@ -185,7 +187,8 @@ class ASTParser:
|
||||
def get_skeleton(self, code: str, path: Optional[str] = None) -> str:
|
||||
"""
|
||||
|
||||
Returns a skeleton of a Python file (preserving docstrings, stripping function bodies).
|
||||
|
||||
Returns a skeleton of a Python file (preserving docstrings, stripping function bodies).
|
||||
[C: src/mcp_client.py:py_get_skeleton, src/mcp_client.py:ts_c_get_skeleton, src/mcp_client.py:ts_cpp_get_skeleton, src/multi_agent_conductor.py:run_worker_lifecycle, tests/test_ast_parser.py:test_ast_parser_get_skeleton_c, tests/test_ast_parser.py:test_ast_parser_get_skeleton_cpp, tests/test_ast_parser.py:test_ast_parser_get_skeleton_python, tests/test_context_pruner.py:test_ast_caching, tests/test_context_pruner.py:test_performance_large_file]
|
||||
"""
|
||||
code_bytes = code.encode("utf8")
|
||||
@@ -200,7 +203,7 @@ class ASTParser:
|
||||
|
||||
def walk(node: tree_sitter.Node) -> None:
|
||||
"""
|
||||
[C: src/mcp_client.py:_search_file, src/mcp_client.py:py_find_usages, src/mcp_client.py:py_get_hierarchy, src/mcp_client.py:trace, src/outline_tool.py:CodeOutliner.outline, src/outline_tool.py:CodeOutliner.walk, src/summarize.py:_summarise_python]
|
||||
[C: src/mcp_client.py:_search_file, src/mcp_client.py:py_find_usages, src/mcp_client.py:py_get_hierarchy, src/mcp_client.py:trace, src/outline_tool.py:CodeOutliner.outline, src/outline_tool.py:CodeOutliner.walk, src/summarize.py:_summarise_python]
|
||||
"""
|
||||
if node.type == "function_definition":
|
||||
body = node.child_by_field_name("body")
|
||||
@@ -238,9 +241,10 @@ class ASTParser:
|
||||
def get_curated_view(self, code: str, path: Optional[str] = None) -> str:
|
||||
"""
|
||||
|
||||
Returns a curated skeleton of a Python file.
|
||||
Preserves function bodies if they have @core_logic decorator or # [HOT] comment.
|
||||
Otherwise strips bodies but preserves docstrings.
|
||||
|
||||
Returns a curated skeleton of a Python file.
|
||||
Preserves function bodies if they have @core_logic decorator or # [HOT] comment.
|
||||
Otherwise strips bodies but preserves docstrings.
|
||||
[C: src/multi_agent_conductor.py:run_worker_lifecycle, tests/test_ast_parser.py:test_ast_parser_get_curated_view]
|
||||
"""
|
||||
code_bytes = code.encode("utf8")
|
||||
@@ -279,7 +283,7 @@ class ASTParser:
|
||||
|
||||
def walk(node: tree_sitter.Node) -> None:
|
||||
"""
|
||||
[C: src/mcp_client.py:_search_file, src/mcp_client.py:py_find_usages, src/mcp_client.py:py_get_hierarchy, src/mcp_client.py:trace, src/outline_tool.py:CodeOutliner.outline, src/outline_tool.py:CodeOutliner.walk, src/summarize.py:_summarise_python]
|
||||
[C: src/mcp_client.py:_search_file, src/mcp_client.py:py_find_usages, src/mcp_client.py:py_get_hierarchy, src/mcp_client.py:trace, src/outline_tool.py:CodeOutliner.outline, src/outline_tool.py:CodeOutliner.walk, src/summarize.py:_summarise_python]
|
||||
"""
|
||||
if node.type == "function_definition":
|
||||
body = node.child_by_field_name("body")
|
||||
@@ -317,8 +321,9 @@ class ASTParser:
|
||||
def get_targeted_view(self, code: str, function_names: List[str], path: Optional[str] = None) -> str:
|
||||
"""
|
||||
|
||||
Returns a targeted view of the code including only the specified functions
|
||||
and their dependencies up to depth 2.
|
||||
|
||||
Returns a targeted view of the code including only the specified functions
|
||||
and their dependencies up to depth 2.
|
||||
[C: src/multi_agent_conductor.py:run_worker_lifecycle, tests/test_ast_parser.py:test_ast_parser_get_targeted_view, tests/test_context_pruner.py:test_class_targeted_extraction, tests/test_context_pruner.py:test_targeted_extraction]
|
||||
"""
|
||||
code_bytes = code.encode("utf8")
|
||||
@@ -482,8 +487,9 @@ class ASTParser:
|
||||
#region: Symbol Extraction
|
||||
def get_definition(self, code: str, name: str, path: Optional[str] = None) -> str:
|
||||
"""
|
||||
Returns the full source code for a specific definition by name.
|
||||
Supports 'ClassName::method' or 'method' for C++.
|
||||
|
||||
Returns the full source code for a specific definition by name.
|
||||
Supports 'ClassName::method' or 'method' for C++.
|
||||
[C: src/mcp_client.py:trace, src/mcp_client.py:ts_c_get_definition, src/mcp_client.py:ts_cpp_get_definition, tests/test_ast_parser.py:test_ast_parser_get_definition_c, tests/test_ast_parser.py:test_ast_parser_get_definition_cpp, tests/test_ast_parser.py:test_ast_parser_get_definition_cpp_template]
|
||||
"""
|
||||
code_bytes = code.encode("utf8")
|
||||
@@ -492,6 +498,9 @@ class ASTParser:
|
||||
parts = re.split(r'::|\.', name)
|
||||
|
||||
def walk(node: tree_sitter.Node, target_parts: List[str]) -> Optional[tree_sitter.Node]:
|
||||
"""
|
||||
[C: src/mcp_client.py:_search_file, src/mcp_client.py:py_find_usages, src/mcp_client.py:py_get_hierarchy, src/mcp_client.py:trace, src/outline_tool.py:CodeOutliner.outline, src/outline_tool.py:CodeOutliner.walk, src/summarize.py:_summarise_python]
|
||||
"""
|
||||
if not target_parts:
|
||||
return None
|
||||
target = target_parts[0]
|
||||
@@ -578,8 +587,9 @@ class ASTParser:
|
||||
def get_signature(self, code: str, name: str, path: Optional[str] = None) -> str:
|
||||
"""
|
||||
|
||||
Returns only the signature part of a function or method.
|
||||
For C/C++, this is the code from the start of the definition until the block start '{'.
|
||||
|
||||
Returns only the signature part of a function or method.
|
||||
For C/C++, this is the code from the start of the definition until the block start '{'.
|
||||
[C: src/mcp_client.py:ts_c_get_signature, src/mcp_client.py:ts_cpp_get_signature, tests/test_ast_parser.py:test_ast_parser_get_signature_c, tests/test_ast_parser.py:test_ast_parser_get_signature_cpp]
|
||||
"""
|
||||
code_bytes = code.encode("utf8")
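For the C/C++ case the docstring describes, the signature is simply everything before the body's opening brace; a minimal sketch of that slicing, operating on plain text rather than tree-sitter byte offsets:

    def signature_of(definition_src: str) -> str:
        # Illustration only: cut the definition at the first '{', which is
        # where the block starts in C/C++.
        brace = definition_src.find("{")
        return (definition_src if brace == -1 else definition_src[:brace]).strip()

    assert signature_of("int add(int a, int b) { return a + b; }") == "int add(int a, int b)"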
@@ -588,6 +598,9 @@ class ASTParser:
|
||||
parts = re.split(r'::|\.', name)
|
||||
|
||||
def walk(node: tree_sitter.Node, target_parts: List[str]) -> Optional[tree_sitter.Node]:
|
||||
"""
|
||||
[C: src/mcp_client.py:_search_file, src/mcp_client.py:py_find_usages, src/mcp_client.py:py_get_hierarchy, src/mcp_client.py:trace, src/outline_tool.py:CodeOutliner.outline, src/outline_tool.py:CodeOutliner.walk, src/summarize.py:_summarise_python]
|
||||
"""
|
||||
if not target_parts:
|
||||
return None
|
||||
target = target_parts[0]
|
||||
@@ -687,7 +700,8 @@ class ASTParser:
|
||||
def get_code_outline(self, code: str, path: Optional[str] = None) -> str:
|
||||
"""
|
||||
|
||||
Returns a hierarchical outline of the code (classes, structs, functions, methods).
|
||||
|
||||
Returns a hierarchical outline of the code (classes, structs, functions, methods).
|
||||
[C: src/mcp_client.py:ts_c_get_code_outline, src/mcp_client.py:ts_cpp_get_code_outline, tests/test_ast_parser.py:test_ast_parser_get_code_outline_c, tests/test_ast_parser.py:test_ast_parser_get_code_outline_cpp]
|
||||
"""
|
||||
code_bytes = code.encode("utf8")
|
||||
@@ -696,7 +710,7 @@ class ASTParser:
|
||||
|
||||
def walk(node: tree_sitter.Node, indent: int = 0) -> None:
|
||||
"""
|
||||
[C: src/mcp_client.py:_search_file, src/mcp_client.py:py_find_usages, src/mcp_client.py:py_get_hierarchy, src/mcp_client.py:trace, src/outline_tool.py:CodeOutliner.outline, src/outline_tool.py:CodeOutliner.walk, src/summarize.py:_summarise_python]
|
||||
[C: src/mcp_client.py:_search_file, src/mcp_client.py:py_find_usages, src/mcp_client.py:py_get_hierarchy, src/mcp_client.py:trace, src/outline_tool.py:CodeOutliner.outline, src/outline_tool.py:CodeOutliner.walk, src/summarize.py:_summarise_python]
|
||||
"""
|
||||
ntype = node.type
|
||||
label = ""
@@ -728,7 +742,8 @@ class ASTParser:
|
||||
def update_definition(self, code: str, name: str, new_content: str, path: Optional[str] = None) -> str:
|
||||
"""
|
||||
|
||||
Surgically replace the definition of a class or function by name.
|
||||
|
||||
Surgically replace the definition of a class or function by name.
|
||||
[C: src/mcp_client.py:ts_c_update_definition, src/mcp_client.py:ts_cpp_update_definition, tests/test_ast_parser.py:test_ast_parser_update_definition_cpp]
|
||||
"""
|
||||
code_bytes = code.encode("utf8")
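Once the target node has been located, the "surgical" replacement amounts to splicing new text into the located span; a hedged sketch (string based here, whereas the parser works on encoded bytes):

    def splice(code: str, start: int, end: int, new_content: str) -> str:
        # Hypothetical helper: replace exactly the located span, leave the rest
        # of the file untouched.
        return code[:start] + new_content + code[end:]

    original = "def a(): pass\ndef b(): pass"
    assert splice(original, 0, 13, "def a(): return 1") == "def a(): return 1\ndef b(): pass"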
@@ -737,6 +752,9 @@ class ASTParser:
|
||||
parts = re.split(r'::|\.', name)
|
||||
|
||||
def walk(node: tree_sitter.Node, target_parts: List[str]) -> Optional[tree_sitter.Node]:
|
||||
"""
|
||||
[C: src/mcp_client.py:_search_file, src/mcp_client.py:py_find_usages, src/mcp_client.py:py_get_hierarchy, src/mcp_client.py:trace, src/outline_tool.py:CodeOutliner.outline, src/outline_tool.py:CodeOutliner.walk, src/summarize.py:_summarise_python]
|
||||
"""
|
||||
if not target_parts:
|
||||
return None
|
||||
target = target_parts[0]
|
||||
@@ -830,4 +848,3 @@ def reset_client() -> None:
|
||||
def get_file_id(path: Path) -> Optional[str]:
|
||||
return None
|
||||
#endregion: Module Level Utilities
+8
-2
@@ -16,7 +16,10 @@ class FuzzyAnchor:
|
||||
|
||||
@classmethod
|
||||
def create_slice(cls, text: str, start_line: int, end_line: int) -> dict:
|
||||
"""start_line and end_line are 1-based."""
|
||||
"""
|
||||
start_line and end_line are 1-based.
|
||||
[C: src/gui_2.py:App._populate_auto_slices, src/gui_2.py:App._render_text_viewer_window, tests/test_fuzzy_anchor.py:TestFuzzyAnchor.test_create_slice_basic, tests/test_fuzzy_anchor.py:TestFuzzyAnchor.test_resolve_slice_anchor_mismatch_returns_none, tests/test_fuzzy_anchor.py:TestFuzzyAnchor.test_resolve_slice_exact_match, tests/test_fuzzy_anchor.py:TestFuzzyAnchor.test_resolve_slice_line_deleted_before_returns_none, tests/test_fuzzy_anchor.py:TestFuzzyAnchor.test_resolve_slice_line_inserted_before, tests/test_fuzzy_anchor.py:TestFuzzyAnchor.test_resolve_slice_multiple_lines_changed, tests/test_slice_editor_behavior.py:test_add_slice_with_annotations]
|
||||
"""
|
||||
lines = text.splitlines()
|
||||
s_idx = max(0, start_line - 1)
|
||||
e_idx = min(len(lines), end_line)
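The two clamping lines above are the whole of the 1-based to 0-based conversion; for example:

    lines = "alpha\nbeta\ngamma\ndelta".splitlines()
    start_line, end_line = 2, 3                      # 1-based, inclusive
    s_idx = max(0, start_line - 1)                   # -> 1
    e_idx = min(len(lines), end_line)                # -> 3
    assert lines[s_idx:e_idx] == ["beta", "gamma"]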
@@ -33,6 +36,9 @@ class FuzzyAnchor:
|
||||
|
||||
@classmethod
|
||||
def resolve_slice(cls, text: str, slice_data: dict) -> Optional[Tuple[int, int]]:
|
||||
"""
|
||||
[C: tests/test_fuzzy_anchor.py:TestFuzzyAnchor.test_resolve_slice_anchor_mismatch_returns_none, tests/test_fuzzy_anchor.py:TestFuzzyAnchor.test_resolve_slice_exact_match, tests/test_fuzzy_anchor.py:TestFuzzyAnchor.test_resolve_slice_line_deleted_before_returns_none, tests/test_fuzzy_anchor.py:TestFuzzyAnchor.test_resolve_slice_line_inserted_before, tests/test_fuzzy_anchor.py:TestFuzzyAnchor.test_resolve_slice_multiple_lines_changed]
|
||||
"""
|
||||
lines = text.splitlines()
|
||||
# 1. Try exact match
|
||||
s_idx = slice_data["start_line"] - 1
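The resolution strategy the tests exercise (exact match first, then a search when lines were inserted or deleted above the slice) can be sketched as below; the anchor_lines field and the scan are assumptions made for illustration, not the class's actual fields.

    def resolve_slice_sketch(text: str, slice_data: dict) -> tuple[int, int] | None:
        # Assumed shape: the slice remembers its original 1-based range plus
        # the anchor lines captured at creation time.
        lines = text.splitlines()
        anchor = slice_data["anchor_lines"]              # hypothetical field
        s = slice_data["start_line"] - 1
        if lines[s:s + len(anchor)] == anchor:           # still where it was
            return slice_data["start_line"], slice_data["end_line"]
        for i in range(len(lines) - len(anchor) + 1):    # drifted: scan for it
            if lines[i:i + len(anchor)] == anchor:
                return i + 1, i + len(anchor)
        return None                                      # anchor gone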
@@ -79,4 +85,4 @@ class FuzzyAnchor:
|
||||
if best_e != -1:
|
||||
return (best_s + 1, best_e)
|
||||
|
||||
return None
|
||||
return None
|
||||
@@ -45,11 +45,13 @@ from typing import Optional, Callable, Any
|
||||
class GeminiCliAdapter:
|
||||
"""
|
||||
|
||||
Adapter for the Gemini CLI that parses streaming JSON output.
|
||||
|
||||
Adapter for the Gemini CLI that parses streaming JSON output.
|
||||
"""
|
||||
def __init__(self, binary_path: str = "gemini"):
|
||||
"""
|
||||
Initializes the adapter with the path to the gemini CLI executable.
|
||||
|
||||
Initializes the adapter with the path to the gemini CLI executable.
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self.binary_path = binary_path
|
||||
@@ -61,9 +63,10 @@ class GeminiCliAdapter:
|
||||
model: str | None = None, stream_callback: Optional[Callable[[str], None]] = None) -> dict[str, Any]:
|
||||
"""
|
||||
|
||||
Sends a message to the Gemini CLI and processes the streaming JSON output.
|
||||
Uses non-blocking line-by-line reading to allow stream_callback.
|
||||
[C: simulation/user_agent.py:UserSimAgent.generate_response, src/multi_agent_conductor.py:run_worker_lifecycle, src/native_orchestrator.py:NativeOrchestrator.execute_ticket, src/orchestrator_pm.py:generate_tracks, tests/test_ai_cache_tracking.py:test_gemini_cache_tracking, tests/test_ai_client_cli.py:test_ai_client_send_gemini_cli, tests/test_api_events.py:test_send_emits_events_proper, tests/test_api_events.py:test_send_emits_tool_events, tests/test_deepseek_provider.py:test_deepseek_completion_logic, tests/test_deepseek_provider.py:test_deepseek_payload_verification, tests/test_deepseek_provider.py:test_deepseek_reasoner_payload_verification, tests/test_deepseek_provider.py:test_deepseek_reasoning_logic, tests/test_deepseek_provider.py:test_deepseek_streaming, tests/test_deepseek_provider.py:test_deepseek_tool_calling, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_full_flow_integration, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_captures_usage_metadata, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_handles_tool_use_events, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_parses_jsonl_output, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_starts_subprocess_with_correct_args, tests/test_gemini_cli_adapter_parity.py:TestGeminiCliAdapterParity.test_send_parses_tool_calls_from_streaming_json, tests/test_gemini_cli_adapter_parity.py:TestGeminiCliAdapterParity.test_send_starts_subprocess_with_model, tests/test_gemini_cli_edge_cases.py:test_gemini_cli_context_bleed_prevention, tests/test_gemini_cli_edge_cases.py:test_gemini_cli_loop_termination, tests/test_gemini_cli_integration.py:test_gemini_cli_full_integration, tests/test_gemini_cli_integration.py:test_gemini_cli_rejection_and_history, tests/test_gemini_cli_parity_regression.py:test_get_history_bleed_stats, tests/test_gemini_cli_parity_regression.py:test_send_invokes_adapter_send, tests/test_gui2_mcp.py:test_mcp_tool_call_is_dispatched, tests/test_tier4_interceptor.py:test_ai_client_passes_qa_callback, tests/test_token_usage.py:test_token_usage_tracking, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
|
||||
Sends a message to the Gemini CLI and processes the streaming JSON output.
|
||||
Uses non-blocking line-by-line reading to allow stream_callback.
|
||||
[C: simulation/user_agent.py:UserSimAgent.generate_response, src/multi_agent_conductor.py:run_worker_lifecycle, src/orchestrator_pm.py:generate_tracks, tests/test_ai_cache_tracking.py:test_gemini_cache_tracking, tests/test_ai_client_cli.py:test_ai_client_send_gemini_cli, tests/test_api_events.py:test_send_emits_events_proper, tests/test_api_events.py:test_send_emits_tool_events, tests/test_deepseek_provider.py:test_deepseek_completion_logic, tests/test_deepseek_provider.py:test_deepseek_payload_verification, tests/test_deepseek_provider.py:test_deepseek_reasoner_payload_verification, tests/test_deepseek_provider.py:test_deepseek_reasoning_logic, tests/test_deepseek_provider.py:test_deepseek_streaming, tests/test_deepseek_provider.py:test_deepseek_tool_calling, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_full_flow_integration, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_captures_usage_metadata, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_handles_tool_use_events, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_parses_jsonl_output, tests/test_gemini_cli_adapter.py:TestGeminiCliAdapter.test_send_starts_subprocess_with_correct_args, tests/test_gemini_cli_adapter_parity.py:TestGeminiCliAdapterParity.test_send_parses_tool_calls_from_streaming_json, tests/test_gemini_cli_adapter_parity.py:TestGeminiCliAdapterParity.test_send_starts_subprocess_with_model, tests/test_gemini_cli_edge_cases.py:test_gemini_cli_context_bleed_prevention, tests/test_gemini_cli_edge_cases.py:test_gemini_cli_loop_termination, tests/test_gemini_cli_integration.py:test_gemini_cli_full_integration, tests/test_gemini_cli_integration.py:test_gemini_cli_rejection_and_history, tests/test_gemini_cli_parity_regression.py:test_send_invokes_adapter_send, tests/test_gui2_mcp.py:test_mcp_tool_call_is_dispatched, tests/test_tier4_interceptor.py:test_ai_client_passes_qa_callback, tests/test_token_usage.py:test_token_usage_tracking, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
"""
|
||||
start_time = time.time()
|
||||
command_parts = [self.binary_path]
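The line-by-line reading the docstring mentions boils down to iterating the subprocess's stdout and parsing each JSON record as it arrives; a hedged sketch of that loop (the "text" field name is an assumption, and stderr/timeouts are ignored):

    import json
    import subprocess

    def stream_jsonl(cmd: list[str], on_chunk=None) -> list[dict]:
        # Read one JSON object per line from the CLI and forward text deltas
        # to the callback while the process is still running.
        events: list[dict] = []
        with subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True) as proc:
            for line in proc.stdout:
                line = line.strip()
                if not line:
                    continue
                event = json.loads(line)
                events.append(event)
                if on_chunk and "text" in event:          # assumed field name
                    on_chunk(event["text"])
        return events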
@@ -192,9 +195,10 @@ class GeminiCliAdapter:
|
||||
def count_tokens(self, contents: list[str]) -> int:
|
||||
"""
|
||||
|
||||
Provides a character-based token estimation for the Gemini CLI.
|
||||
Uses 4 chars/token as a conservative average.
|
||||
|
||||
Provides a character-based token estimation for the Gemini CLI.
|
||||
Uses 4 chars/token as a conservative average.
|
||||
[C: tests/test_gemini_cli_adapter_parity.py:TestGeminiCliAdapterParity.test_count_tokens_fallback]
|
||||
"""
|
||||
total_chars = len("\n".join(contents))
|
||||
return total_chars // 4
|
||||
return total_chars // 4
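Worked through the 4 chars/token estimate shown above:

    contents = ["hello world", "short reply"]
    total_chars = len("\n".join(contents))   # 11 + 1 + 11 = 23 characters
    assert total_chars // 4 == 5             # roughly 5 tokens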
+67
-22
@@ -101,7 +101,7 @@ class App:
|
||||
def __init__(self) -> None:
|
||||
# Initialize controller and delegate state
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
# --- Core Dependencies & State ---
|
||||
self.controller = app_controller.AppController()
|
||||
@@ -265,8 +265,9 @@ class App:
|
||||
|
||||
def run(self) -> None:
|
||||
"""
|
||||
Initializes the ImGui runner and starts the main application loop.
|
||||
[C: simulation/sim_base.py:run_sim, src/mcp_client.py:get_git_diff, src/project_manager.py:get_git_commit, src/project_manager.py:get_git_log, src/rag_engine.py:RAGEngine._search_mcp, src/shell_runner.py:run_powershell, tests/conftest.py:kill_process_tree, tests/conftest.py:live_gui, tests/test_conductor_abort_event.py:test_conductor_abort_event_populated, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:get_vscode_processes, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_custom_window.py:test_app_window_is_borderless, tests/test_headless_simulation.py:module, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_mock_gemini_cli.py:run_mock, tests/test_orchestration_logic.py:test_conductor_engine_run, tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_sim_ai_settings.py:test_ai_settings_simulation_run, tests/test_sim_context.py:test_context_simulation_run, tests/test_sim_execution.py:test_execution_simulation_run, tests/test_sim_tools.py:test_tools_simulation_run]
|
||||
|
||||
Initializes the ImGui runner and starts the main application loop.
|
||||
[C: simulation/sim_base.py:run_sim, src/mcp_client.py:get_git_diff, src/project_manager.py:get_git_commit, src/rag_engine.py:RAGEngine._search_mcp, src/shell_runner.py:run_powershell, tests/conftest.py:kill_process_tree, tests/conftest.py:live_gui, tests/test_conductor_abort_event.py:test_conductor_abort_event_populated, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:get_vscode_processes, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_custom_window.py:test_app_window_is_borderless, tests/test_headless_simulation.py:module, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_mock_gemini_cli.py:run_mock, tests/test_orchestration_logic.py:test_conductor_engine_run, tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_sim_ai_settings.py:test_ai_settings_simulation_run, tests/test_sim_context.py:test_context_simulation_run, tests/test_sim_execution.py:test_execution_simulation_run, tests/test_sim_tools.py:test_tools_simulation_run]
|
||||
"""
|
||||
if "--headless" in sys.argv:
|
||||
print("Headless mode active")
|
||||
@@ -544,7 +545,8 @@ class App:
|
||||
|
||||
def shutdown(self) -> None:
|
||||
"""
|
||||
Cleanly shuts down the app's background tasks and saves state.
|
||||
|
||||
Cleanly shuts down the app's background tasks and saves state.
|
||||
[C: tests/conftest.py:app_instance, tests/conftest.py:mock_app]
|
||||
"""
|
||||
try:
|
||||
@@ -556,7 +558,7 @@ class App:
|
||||
|
||||
def save_context_preset(self, name: str) -> None:
|
||||
"""
|
||||
[C: tests/test_context_presets.py:test_save_context_preset]
|
||||
[C: tests/test_context_presets.py:test_save_context_preset]
|
||||
"""
|
||||
sys.stderr.write(f"[DEBUG] save_context_preset called with: {name}\n")
|
||||
sys.stderr.flush()
|
||||
@@ -577,7 +579,7 @@ class App:
|
||||
|
||||
def load_context_preset(self, name: str) -> None:
|
||||
"""
|
||||
[C: tests/test_context_presets.py:test_load_context_preset, tests/test_context_presets.py:test_load_nonexistent_preset]
|
||||
[C: tests/test_context_presets.py:test_load_context_preset, tests/test_context_presets.py:test_load_nonexistent_preset]
|
||||
"""
|
||||
presets = self.controller.project.get('context_presets', {})
|
||||
if name in presets:
|
||||
@@ -588,7 +590,7 @@ class App:
|
||||
|
||||
def delete_context_preset(self, name: str) -> None:
|
||||
"""
|
||||
[C: tests/test_context_presets.py:test_delete_context_preset, tests/test_context_presets.py:test_delete_nonexistent_preset_no_error]
|
||||
[C: tests/test_context_presets.py:test_delete_context_preset, tests/test_context_presets.py:test_delete_nonexistent_preset_no_error]
|
||||
"""
|
||||
if 'context_presets' in self.controller.project:
|
||||
self.controller.project['context_presets'].pop(name, None)
|
||||
@@ -890,7 +892,7 @@ class App:
|
||||
|
||||
def _show_menus(self) -> None:
|
||||
"""
|
||||
[C: tests/test_gui_window_controls.py:test_gui_window_controls_minimize_maximize_close]
|
||||
[C: tests/test_gui_window_controls.py:test_gui_window_controls_minimize_maximize_close]
|
||||
"""
|
||||
with imscope.menu("manual slop") as (active):
|
||||
if active and imgui.menu_item("Quit", "Ctrl+Q", False)[0]:
|
||||
@@ -1026,7 +1028,8 @@ class App:
|
||||
|
||||
def _handle_history_logic(self) -> None:
|
||||
"""
|
||||
Logic for capturing UI state for undo/redo.
|
||||
|
||||
Logic for capturing UI state for undo/redo.
|
||||
"""
|
||||
if self._is_applying_snapshot:
|
||||
return
|
||||
@@ -1209,7 +1212,7 @@ class App:
|
||||
|
||||
def _render_shader_live_editor(self) -> None:
|
||||
"""
|
||||
[C: tests/test_shader_live_editor.py:test_shader_live_editor_renders]
|
||||
[C: tests/test_shader_live_editor.py:test_shader_live_editor_renders]
|
||||
"""
|
||||
if self.show_windows.get('Shader Editor', False):
|
||||
with imscope.window('Shader Editor', self.show_windows['Shader Editor']) as (exp, opened):
|
||||
@@ -1524,7 +1527,7 @@ class App:
|
||||
|
||||
def _render_log_management(self) -> None:
|
||||
"""
|
||||
[C: tests/test_log_management_ui.py:test_render_log_management_logic]
|
||||
[C: tests/test_log_management_ui.py:test_render_log_management_logic]
|
||||
"""
|
||||
if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_log_management")
|
||||
with imscope.window("Log Management", self.show_windows["Log Management"]) as (exp, opened):
|
||||
@@ -1791,7 +1794,7 @@ class App:
|
||||
|
||||
def _save_paths(self) -> None:
|
||||
"""
|
||||
[C: tests/test_gui_paths.py:test_save_paths]
|
||||
[C: tests/test_gui_paths.py:test_save_paths]
|
||||
"""
|
||||
self.config["paths"] = {
|
||||
"logs_dir": self.ui_logs_dir,
|
||||
@@ -2567,6 +2570,9 @@ class App:
|
||||
#region: Context Management
|
||||
|
||||
def _render_files_and_media(self) -> None:
|
||||
"""
|
||||
[C: tests/test_gui_fast_render.py:test_render_files_and_media_fast]
|
||||
"""
|
||||
avail = imgui.get_content_region_avail().y
|
||||
if not hasattr(self, 'files_screenshots_split'): self.files_screenshots_split = 0.65
|
||||
split_y = int(avail * self.files_screenshots_split)
|
||||
@@ -2787,6 +2793,9 @@ class App:
|
||||
if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_screenshots_panel")
|
||||
|
||||
def _render_context_composition_panel(self) -> None:
|
||||
"""
|
||||
[C: tests/test_auto_slices.py:test_add_all_triggers_auto_slices, tests/test_gui_fast_render.py:test_render_context_composition_panel_fast]
|
||||
"""
|
||||
if imgui.collapsing_header("Context Composition##panel"):
|
||||
total_lines, total_ast = self._update_context_file_stats()
|
||||
self._render_context_batch_actions(total_lines, total_ast)
|
||||
@@ -2799,6 +2808,9 @@ class App:
|
||||
self._render_context_presets()
|
||||
|
||||
def _render_ast_inspector_modal(self) -> None:
|
||||
"""
|
||||
[C: tests/test_ast_inspector_extended.py:test_ast_inspector_line_range_parsing]
|
||||
"""
|
||||
if self._show_ast_inspector:
|
||||
imgui.open_popup('AST Inspector')
|
||||
self._show_ast_inspector = False
|
||||
@@ -2933,6 +2945,9 @@ class App:
|
||||
if not opened: self.ui_inspecting_ast_file = None
|
||||
|
||||
def _render_add_context_files_modal(self) -> None:
|
||||
"""
|
||||
[C: tests/test_auto_slices.py:test_add_selected_triggers_auto_slices]
|
||||
"""
|
||||
if imgui.begin_popup_modal("Select Context Files", None, imgui.WindowFlags_.always_auto_resize)[0]:
|
||||
imgui.text("Select files from project to add to context:")
|
||||
if imgui.begin_child("ctx_picker_list", imgui.ImVec2(600, 300), True):
|
||||
@@ -3018,6 +3033,9 @@ class App:
|
||||
if imgui.button(f"Delete##{name}"): self.delete_context_preset(name)
|
||||
|
||||
def _populate_auto_slices(self, f_item: models.FileItem) -> None:
|
||||
"""
|
||||
[C: tests/test_auto_slices.py:test_populate_auto_slices_basic]
|
||||
"""
|
||||
from src import mcp_client
|
||||
import re
|
||||
mcp_client.configure([{"path": f_item.path}])
|
||||
@@ -3415,7 +3433,7 @@ class App:
|
||||
|
||||
def _render_discussion_panel(self) -> None:
|
||||
"""
|
||||
[C: tests/test_discussion_takes_gui.py:test_render_discussion_tabs, tests/test_discussion_takes_gui.py:test_switching_discussion_via_tabs, tests/test_gui_discussion_tabs.py:test_discussion_tabs_rendered, tests/test_gui_phase4.py:test_track_discussion_toggle, tests/test_gui_symbol_navigation.py:test_render_discussion_panel_symbol_lookup]
|
||||
[C: tests/test_discussion_takes_gui.py:test_render_discussion_tabs, tests/test_discussion_takes_gui.py:test_switching_discussion_via_tabs, tests/test_gui_discussion_tabs.py:test_discussion_tabs_rendered, tests/test_gui_fast_render.py:test_render_discussion_panel_fast, tests/test_gui_phase4.py:test_track_discussion_toggle, tests/test_gui_symbol_navigation.py:test_render_discussion_panel_symbol_lookup]
|
||||
"""
|
||||
if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_discussion_panel")
|
||||
self._render_thinking_indicator()
|
||||
@@ -3654,7 +3672,8 @@ class App:
|
||||
|
||||
def _render_synthesis_panel(self) -> None:
|
||||
"""
|
||||
Renders a panel for synthesizing multiple discussion takes.
|
||||
|
||||
Renders a panel for synthesizing multiple discussion takes.
|
||||
[C: tests/test_gui_synthesis.py:test_render_synthesis_panel]
|
||||
"""
|
||||
imgui.text("Select takes to synthesize:")
|
||||
@@ -4358,7 +4377,7 @@ def hello():
|
||||
|
||||
def _reorder_ticket(self, src_idx: int, dst_idx: int) -> None:
|
||||
"""
|
||||
[C: tests/test_ticket_queue.py:TestReorder.test_reorder_ticket_invalid, tests/test_ticket_queue.py:TestReorder.test_reorder_ticket_valid]
|
||||
[C: tests/test_ticket_queue.py:TestReorder.test_reorder_ticket_invalid, tests/test_ticket_queue.py:TestReorder.test_reorder_ticket_valid]
|
||||
"""
|
||||
if src_idx == dst_idx: return
|
||||
new_tickets = list(self.active_tickets)
|
||||
@@ -4380,7 +4399,7 @@ def hello():
|
||||
|
||||
def _render_ticket_queue(self) -> None:
|
||||
"""
|
||||
[C: tests/test_gui_kill_button.py:test_render_ticket_queue_table_columns]
|
||||
[C: tests/test_gui_kill_button.py:test_render_ticket_queue_table_columns]
|
||||
"""
|
||||
imgui.text("Ticket Queue Management")
|
||||
if not self.active_track:
|
||||
@@ -4678,7 +4697,10 @@ def hello():
|
||||
imgui.text_colored(imgui.ImVec4(1, 0, 0, 1), f"Error loading beads: {e}")
|
||||
|
||||
def _render_mma_dashboard(self) -> None:
|
||||
"""Main MMA dashboard interface."""
|
||||
"""
|
||||
Main MMA dashboard interface.
|
||||
[C: tests/test_gui_progress.py:test_render_mma_dashboard_progress, tests/test_mma_approval_indicators.py:TestMMAApprovalIndicators.test_approval_badge_shown_when_ask_dialog_pending, tests/test_mma_approval_indicators.py:TestMMAApprovalIndicators.test_approval_badge_shown_when_mma_approval_pending, tests/test_mma_approval_indicators.py:TestMMAApprovalIndicators.test_approval_badge_shown_when_spawn_pending, tests/test_mma_approval_indicators.py:TestMMAApprovalIndicators.test_no_approval_badge_when_idle]
|
||||
"""
|
||||
if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_mma_dashboard")
|
||||
self._render_mma_focus_selector()
|
||||
imgui.separator()
|
||||
@@ -4708,6 +4730,9 @@ def hello():
|
||||
if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_mma_dashboard")
|
||||
|
||||
def _render_mma_focus_selector(self) -> None:
|
||||
"""
|
||||
[C: tests/test_gui_progress.py:test_render_mma_dashboard_progress]
|
||||
"""
|
||||
imgui.text("Focus Agent:"); imgui.same_line()
|
||||
focus_label = self.ui_focus_agent or "All"
|
||||
if imgui.begin_combo("##focus_agent", focus_label, imgui.ComboFlags_.width_fit_preview):
|
||||
@@ -4795,6 +4820,9 @@ def hello():
|
||||
imgui.end_popup()
|
||||
|
||||
def _render_mma_track_summary(self) -> None:
|
||||
"""
|
||||
[C: tests/test_gui_progress.py:test_render_mma_dashboard_progress]
|
||||
"""
|
||||
is_nerv = theme.is_nerv_active()
|
||||
track_name = self.active_track.description if self.active_track else "None"
|
||||
if getattr(self, "ui_project_execution_mode", "native") == "beads": track_name = "Beads Graph"
|
||||
@@ -4823,15 +4851,24 @@ def hello():
|
||||
imgui.text_colored(C_LBL, "ETA:"); imgui.same_line(); imgui.text_colored(C_VAL, f"~{int(eta_mins)}m ({remaining} tickets remaining)")
|
||||
|
||||
def _render_mma_epic_planner(self) -> None:
|
||||
"""
|
||||
[C: tests/test_gui_progress.py:test_render_mma_dashboard_progress]
|
||||
"""
|
||||
imgui.text_colored(C_LBL, 'Epic Planning (Tier 1)')
|
||||
_, self.ui_epic_input = imgui.input_text_multiline('##epic_input', self.ui_epic_input, imgui.ImVec2(-1, 80))
|
||||
if imgui.button('Plan Epic (Tier 1)', imgui.ImVec2(-1, 0)): self._cb_plan_epic()
|
||||
|
||||
def _render_mma_conductor_setup(self) -> None:
|
||||
"""
|
||||
[C: tests/test_gui_progress.py:test_render_mma_dashboard_progress]
|
||||
"""
|
||||
if imgui.button("Run Setup Scan"): self._cb_run_conductor_setup()
|
||||
if self.ui_conductor_setup_summary: imgui.input_text_multiline("##setup_summary", self.ui_conductor_setup_summary, imgui.ImVec2(-1, 120), imgui.InputTextFlags_.read_only)
|
||||
|
||||
def _render_mma_track_browser(self) -> None:
|
||||
"""
|
||||
[C: tests/test_gui_progress.py:test_render_mma_dashboard_progress]
|
||||
"""
|
||||
imgui.text("Track Browser")
|
||||
if imgui.begin_table("mma_tracks_table", 4, imgui.TableFlags_.borders | imgui.TableFlags_.row_bg | imgui.TableFlags_.resizable):
|
||||
imgui.table_setup_column("Title"); imgui.table_setup_column("Status"); imgui.table_setup_column("Progress"); imgui.table_setup_column("Actions"); imgui.table_headers_row()
|
||||
@@ -4858,6 +4895,9 @@ def hello():
|
||||
self.ui_new_track_name = ""; self.ui_new_track_desc = ""
|
||||
|
||||
def _render_mma_global_controls(self) -> None:
|
||||
"""
|
||||
[C: tests/test_gui_progress.py:test_render_mma_dashboard_progress]
|
||||
"""
|
||||
changed, self.mma_step_mode = imgui.checkbox("Step Mode (HITL)", self.mma_step_mode)
|
||||
imgui.same_line(); imgui.text(f"Status: {self.mma_status.upper()}")
|
||||
if self.controller and hasattr(self.controller, 'engine') and self.controller.engine and hasattr(self.controller.engine, '_pause_event'):
|
||||
@@ -4876,6 +4916,9 @@ def hello():
|
||||
if imgui.button("Go to Approval"): pass
|
||||
|
||||
def _render_mma_usage_section(self) -> None:
|
||||
"""
|
||||
[C: tests/test_gui_progress.py:test_render_mma_dashboard_progress]
|
||||
"""
|
||||
imgui.text("Tier Usage (Tokens & Cost)")
|
||||
if imgui.begin_table("mma_usage", 5, imgui.TableFlags_.borders | imgui.TableFlags_.row_bg):
|
||||
imgui.table_setup_column("Tier"); imgui.table_setup_column("Model"); imgui.table_setup_column("Input"); imgui.table_setup_column("Output"); imgui.table_setup_column("Est. Cost"); imgui.table_headers_row()
|
||||
@@ -4939,6 +4982,9 @@ def hello():
|
||||
if imgui.button(f"Delete##{self.ui_selected_ticket_id}"): self.active_tickets = [t for t in self.active_tickets if str(t.get('id', '')) != self.ui_selected_ticket_id]; self.ui_selected_ticket_id = None; self._push_mma_state_update()
|
||||
|
||||
def _render_mma_agent_streams(self) -> None:
|
||||
"""
|
||||
[C: tests/test_gui_progress.py:test_render_mma_dashboard_progress]
|
||||
"""
|
||||
imgui.text("Agent Streams")
|
||||
if imgui.begin_tab_bar("mma_streams_tabs"):
|
||||
for tier, label, sep_flag_attr in [("Tier 1", "Tier 1", "ui_separate_tier1"), ("Tier 2", "Tier 2 (Tech Lead)", "ui_separate_tier2"), ("Tier 3", None, "ui_separate_tier3"), ("Tier 4", "Tier 4 (QA)", "ui_separate_tier4")]:
|
||||
@@ -4957,7 +5003,7 @@ def hello():
|
||||
|
||||
def _render_tier_stream_panel(self, tier_key: str, stream_key: str | None) -> None:
|
||||
"""
|
||||
[C: tests/test_mma_dashboard_streams.py:TestMMADashboardStreams.test_tier1_renders_stream_content, tests/test_mma_dashboard_streams.py:TestMMADashboardStreams.test_tier3_renders_worker_subheaders]
|
||||
[C: tests/test_mma_dashboard_streams.py:TestMMADashboardStreams.test_tier1_renders_stream_content, tests/test_mma_dashboard_streams.py:TestMMADashboardStreams.test_tier3_renders_worker_subheaders]
|
||||
"""
|
||||
if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_tier_stream_panel")
|
||||
if self.is_viewing_prior_session:
|
||||
@@ -5066,7 +5112,7 @@ def hello():
|
||||
|
||||
def bulk_execute(self) -> None:
|
||||
"""
|
||||
[C: tests/test_ticket_queue.py:TestBulkOperations.test_bulk_execute]
|
||||
[C: tests/test_ticket_queue.py:TestBulkOperations.test_bulk_execute]
|
||||
"""
|
||||
for tid in self.ui_selected_tickets:
|
||||
t = next((t for t in self.active_tickets if str(t.get('id', '')) == tid), None)
|
||||
@@ -5075,7 +5121,7 @@ def hello():
|
||||
|
||||
def bulk_skip(self) -> None:
|
||||
"""
|
||||
[C: tests/test_ticket_queue.py:TestBulkOperations.test_bulk_skip]
|
||||
[C: tests/test_ticket_queue.py:TestBulkOperations.test_bulk_skip]
|
||||
"""
|
||||
for tid in self.ui_selected_tickets:
|
||||
t = next((t for t in self.active_tickets if str(t.get('id', '')) == tid), None)
|
||||
@@ -5084,7 +5130,7 @@ def hello():
|
||||
|
||||
def bulk_block(self) -> None:
|
||||
"""
|
||||
[C: tests/test_ticket_queue.py:TestBulkOperations.test_bulk_block]
|
||||
[C: tests/test_ticket_queue.py:TestBulkOperations.test_bulk_block]
|
||||
"""
|
||||
for tid in self.ui_selected_tickets:
|
||||
t = next((t for t in self.active_tickets if str(t.get('id', '')) == tid), None)
|
||||
@@ -5145,4 +5191,3 @@ def main() -> None:
|
||||
if __name__ == "__main__":
|
||||
main()
+22
-17
@@ -21,7 +21,7 @@ class UISnapshot:
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""
|
||||
[C: src/models.py:ExternalEditorConfig.to_dict, src/models.py:MCPConfiguration.to_dict, src/models.py:RAGConfig.to_dict, src/models.py:ToolPreset.to_dict, src/models.py:Track.to_dict, src/models.py:TrackState.to_dict, src/personas.py:PersonaManager.save_persona, src/presets.py:PresetManager.save_preset, src/project_manager.py:save_project, src/project_manager.py:save_track_state, src/tool_presets.py:ToolPresetManager.save_bias_profile, src/tool_presets.py:ToolPresetManager.save_preset, src/workspace_manager.py:WorkspaceManager.save_profile, tests/test_bias_models.py:test_bias_profile_model, tests/test_bias_models.py:test_tool_model, tests/test_bias_models.py:test_tool_preset_extension, tests/test_event_serialization.py:test_user_request_event_serialization, tests/test_external_editor.py:TestExternalEditorConfig.test_to_dict, tests/test_external_editor.py:TestTextEditorConfig.test_to_dict, tests/test_file_item_model.py:test_file_item_to_dict, tests/test_gui_events_v2.py:test_user_request_event_payload, tests/test_mcp_config.py:test_mcp_configuration_to_from_dict, tests/test_mcp_config.py:test_mcp_server_config_to_from_dict, tests/test_per_ticket_model.py:test_model_override_serialization, tests/test_persona_id.py:test_ticket_persona_id_serialization, tests/test_persona_models.py:test_persona_defaults, tests/test_persona_models.py:test_persona_serialization, tests/test_thinking_gui.py:test_thinking_segment_model_compatibility, tests/test_ticket_queue.py:test_ticket_to_dict_priority, tests/test_tiered_aggregation.py:test_persona_aggregation_strategy, tests/test_track_state_schema.py:test_track_state_to_dict, tests/test_track_state_schema.py:test_track_state_to_dict_with_none, tests/test_ui_summary_only_removal.py:test_file_item_serialization_with_flags]
|
||||
[C: src/models.py:ContextPreset.to_dict, src/models.py:ExternalEditorConfig.to_dict, src/models.py:MCPConfiguration.to_dict, src/models.py:RAGConfig.to_dict, src/models.py:ToolPreset.to_dict, src/models.py:Track.to_dict, src/models.py:TrackState.to_dict, src/personas.py:PersonaManager.save_persona, src/presets.py:PresetManager.save_preset, src/project_manager.py:save_project, src/project_manager.py:save_track_state, src/tool_presets.py:ToolPresetManager.save_bias_profile, src/tool_presets.py:ToolPresetManager.save_preset, src/workspace_manager.py:WorkspaceManager.save_profile, tests/test_bias_models.py:test_bias_profile_model, tests/test_bias_models.py:test_tool_model, tests/test_bias_models.py:test_tool_preset_extension, tests/test_context_presets_models.py:test_context_preset_serialization, tests/test_context_presets_models.py:test_file_view_preset_serialization, tests/test_custom_slices_annotations.py:test_file_item_custom_slices_round_trip_annotations, tests/test_custom_slices_annotations.py:test_file_item_custom_slices_serialization_with_annotations, tests/test_event_serialization.py:test_user_request_event_serialization, tests/test_external_editor.py:TestExternalEditorConfig.test_to_dict, tests/test_external_editor.py:TestTextEditorConfig.test_to_dict, tests/test_file_item_model.py:test_file_item_to_dict, tests/test_gui_events_v2.py:test_user_request_event_payload, tests/test_history_manager.py:TestHistoryManager.test_snapshot_roundtrip, tests/test_mcp_config.py:test_mcp_configuration_to_from_dict, tests/test_mcp_config.py:test_mcp_server_config_to_from_dict, tests/test_per_ticket_model.py:test_model_override_serialization, tests/test_persona_id.py:test_ticket_persona_id_serialization, tests/test_persona_models.py:test_persona_defaults, tests/test_persona_models.py:test_persona_serialization, tests/test_slice_editor_behavior.py:test_add_slice_with_annotations, tests/test_thinking_gui.py:test_thinking_segment_model_compatibility, tests/test_ticket_queue.py:test_ticket_to_dict_priority, tests/test_tiered_aggregation.py:test_persona_aggregation_strategy, tests/test_track_state_schema.py:test_track_state_to_dict, tests/test_track_state_schema.py:test_track_state_to_dict_with_none, tests/test_ui_summary_only_removal.py:test_file_item_serialization_with_flags]
|
||||
"""
|
||||
return {
|
||||
"ai_input": self.ai_input,
|
||||
@@ -42,7 +42,7 @@ class UISnapshot:
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict) -> "UISnapshot":
|
||||
"""
|
||||
[C: src/models.py:ExternalEditorConfig.from_dict, src/models.py:MCPConfiguration.from_dict, src/models.py:RAGConfig.from_dict, src/models.py:ToolPreset.from_dict, src/models.py:Track.from_dict, src/models.py:TrackState.from_dict, src/models.py:load_mcp_config, src/personas.py:PersonaManager.load_all, src/presets.py:PresetManager.load_all, src/project_manager.py:load_project, src/project_manager.py:load_track_state, src/tool_presets.py:ToolPresetManager.load_all_bias_profiles, src/tool_presets.py:ToolPresetManager.load_all_presets, src/workspace_manager.py:WorkspaceManager.load_all_profiles, tests/test_bias_models.py:test_bias_profile_model, tests/test_bias_models.py:test_tool_model, tests/test_bias_models.py:test_tool_preset_extension, tests/test_external_editor.py:TestExternalEditorConfig.test_from_dict_with_dict_editors, tests/test_external_editor.py:TestExternalEditorConfig.test_from_dict_with_string_editors, tests/test_external_editor.py:TestTextEditorConfig.test_from_dict_with_diff_args, tests/test_external_editor.py:TestTextEditorConfig.test_from_dict_without_diff_args, tests/test_file_item_model.py:test_file_item_from_dict, tests/test_file_item_model.py:test_file_item_from_dict_defaults, tests/test_mcp_config.py:test_mcp_configuration_to_from_dict, tests/test_mcp_config.py:test_mcp_server_config_to_from_dict, tests/test_per_ticket_model.py:test_model_override_default_on_deserialize, tests/test_per_ticket_model.py:test_model_override_deserialization, tests/test_persona_id.py:test_ticket_persona_id_deserialization, tests/test_persona_models.py:test_persona_defaults, tests/test_persona_models.py:test_persona_deserialization, tests/test_project_serialization.py:TestProjectSerialization.test_backward_compatibility_strings, tests/test_ticket_queue.py:test_ticket_from_dict_default_priority, tests/test_ticket_queue.py:test_ticket_from_dict_priority, tests/test_tiered_aggregation.py:test_persona_aggregation_strategy, tests/test_track_state_schema.py:test_track_state_from_dict, tests/test_track_state_schema.py:test_track_state_from_dict_empty_and_missing, tests/test_ui_summary_only_removal.py:test_file_item_serialization_with_flags]
|
||||
[C: src/models.py:ContextPreset.from_dict, src/models.py:ExternalEditorConfig.from_dict, src/models.py:MCPConfiguration.from_dict, src/models.py:RAGConfig.from_dict, src/models.py:ToolPreset.from_dict, src/models.py:Track.from_dict, src/models.py:TrackState.from_dict, src/models.py:load_mcp_config, src/personas.py:PersonaManager.load_all, src/presets.py:PresetManager.load_all, src/project_manager.py:load_project, src/project_manager.py:load_track_state, src/tool_presets.py:ToolPresetManager.load_all_bias_profiles, src/tool_presets.py:ToolPresetManager.load_all_presets, src/workspace_manager.py:WorkspaceManager.load_all_profiles, tests/test_bias_models.py:test_bias_profile_model, tests/test_bias_models.py:test_tool_model, tests/test_bias_models.py:test_tool_preset_extension, tests/test_context_presets_models.py:test_context_preset_from_dict_legacy, tests/test_context_presets_models.py:test_context_preset_serialization, tests/test_context_presets_models.py:test_file_view_preset_serialization, tests/test_custom_slices_annotations.py:test_file_item_custom_slices_deserialization_with_annotations, tests/test_custom_slices_annotations.py:test_file_item_custom_slices_round_trip_annotations, tests/test_external_editor.py:TestExternalEditorConfig.test_from_dict_with_dict_editors, tests/test_external_editor.py:TestExternalEditorConfig.test_from_dict_with_string_editors, tests/test_external_editor.py:TestTextEditorConfig.test_from_dict_with_diff_args, tests/test_external_editor.py:TestTextEditorConfig.test_from_dict_without_diff_args, tests/test_file_item_model.py:test_file_item_from_dict, tests/test_file_item_model.py:test_file_item_from_dict_defaults, tests/test_history_manager.py:TestHistoryManager.test_snapshot_roundtrip, tests/test_mcp_config.py:test_mcp_configuration_to_from_dict, tests/test_mcp_config.py:test_mcp_server_config_to_from_dict, tests/test_per_ticket_model.py:test_model_override_default_on_deserialize, tests/test_per_ticket_model.py:test_model_override_deserialization, tests/test_persona_id.py:test_ticket_persona_id_deserialization, tests/test_persona_models.py:test_persona_defaults, tests/test_persona_models.py:test_persona_deserialization, tests/test_project_serialization.py:TestProjectSerialization.test_backward_compatibility_strings, tests/test_slice_editor_behavior.py:test_add_slice_with_annotations, tests/test_ticket_queue.py:test_ticket_from_dict_default_priority, tests/test_ticket_queue.py:test_ticket_from_dict_priority, tests/test_tiered_aggregation.py:test_persona_aggregation_strategy, tests/test_track_state_schema.py:test_track_state_from_dict, tests/test_track_state_schema.py:test_track_state_from_dict_empty_and_missing, tests/test_ui_summary_only_removal.py:test_file_item_serialization_with_flags]
|
||||
"""
|
||||
return cls(
|
||||
ai_input=data.get("ai_input", ""),
|
||||
@@ -69,7 +69,7 @@ class HistoryEntry:
|
||||
class HistoryManager:
|
||||
def __init__(self, max_capacity: int = 100):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self.max_capacity = max_capacity
|
||||
self._undo_stack: typing.List[HistoryEntry] = []
|
||||
@@ -78,9 +78,10 @@ class HistoryManager:
|
||||
def push(self, state: typing.Any, description: str) -> None:
|
||||
"""
|
||||
|
||||
Pushes a new state to the undo stack and clears the redo stack.
|
||||
If the undo stack exceeds max_capacity, the oldest state is removed.
|
||||
[C: tests/test_history.py:test_jump_to_undo, tests/test_history.py:test_max_capacity, tests/test_history.py:test_push_state, tests/test_history.py:test_redo_cleared_on_push, tests/test_history.py:test_undo_redo]
|
||||
|
||||
Pushes a new state to the undo stack and clears the redo stack.
|
||||
If the undo stack exceeds max_capacity, the oldest state is removed.
|
||||
[C: tests/test_history.py:test_jump_to_undo, tests/test_history.py:test_max_capacity, tests/test_history.py:test_push_state, tests/test_history.py:test_redo_cleared_on_push, tests/test_history.py:test_undo_redo, tests/test_history_manager.py:TestHistoryManager.test_get_history_returns_descriptions, tests/test_history_manager.py:TestHistoryManager.test_jump_to_undo, tests/test_history_manager.py:TestHistoryManager.test_push_and_undo, tests/test_history_manager.py:TestHistoryManager.test_push_clears_redo_stack, tests/test_history_manager.py:TestHistoryManager.test_undo_and_redo]
|
||||
"""
|
||||
entry = HistoryEntry(state=state, description=description)
|
||||
self._undo_stack.append(entry)
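The push/undo/redo semantics documented in this class (push clears the redo stack and trims to max_capacity; undo and redo shuttle the current state between the two stacks) reduce to a few lines; a minimal sketch, not the HistoryManager itself:

    class MiniHistory:
        def __init__(self, max_capacity: int = 100):
            self.max_capacity = max_capacity
            self._undo: list = []
            self._redo: list = []

        def push(self, state) -> None:
            self._undo.append(state)
            self._redo.clear()                 # any redo history is invalidated
            if len(self._undo) > self.max_capacity:
                self._undo.pop(0)              # drop the oldest entry

        def undo(self, current):
            if not self._undo:
                return None
            self._redo.append(current)
            return self._undo.pop()

        def redo(self, current):
            if not self._redo:
                return None
            self._undo.append(current)
            return self._redo.pop()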
@@ -91,9 +92,10 @@ class HistoryManager:
|
||||
def undo(self, current_state: typing.Any, current_description: str = "Current State") -> typing.Optional[HistoryEntry]:
|
||||
"""
|
||||
|
||||
Undoes the last action by moving the current_state to the redo stack
|
||||
and returning the top of the undo stack.
|
||||
[C: tests/test_history.py:test_redo_cleared_on_push, tests/test_history.py:test_undo_redo]
|
||||
|
||||
Undoes the last action by moving the current_state to the redo stack
|
||||
and returning the top of the undo stack.
|
||||
[C: tests/test_history.py:test_redo_cleared_on_push, tests/test_history.py:test_undo_redo, tests/test_history_manager.py:TestHistoryManager.test_push_and_undo, tests/test_history_manager.py:TestHistoryManager.test_push_clears_redo_stack, tests/test_history_manager.py:TestHistoryManager.test_undo_and_redo, tests/test_history_manager.py:TestHistoryManager.test_undo_no_history_returns_none]
|
||||
"""
|
||||
if not self._undo_stack:
|
||||
return None
|
||||
@@ -105,9 +107,10 @@ class HistoryManager:
|
||||
def redo(self, current_state: typing.Any, current_description: str = "Current State") -> typing.Optional[HistoryEntry]:
|
||||
"""
|
||||
|
||||
Redoes the last undone action by moving the current_state to the undo stack
|
||||
and returning the top of the redo stack.
|
||||
[C: tests/test_history.py:test_undo_redo]
|
||||
|
||||
Redoes the last undone action by moving the current_state to the undo stack
|
||||
and returning the top of the redo stack.
|
||||
[C: tests/test_history.py:test_undo_redo, tests/test_history_manager.py:TestHistoryManager.test_redo_no_history_returns_none, tests/test_history_manager.py:TestHistoryManager.test_undo_and_redo]
|
||||
"""
|
||||
if not self._redo_stack:
|
||||
return None
|
||||
@@ -126,8 +129,9 @@ class HistoryManager:
|
||||
|
||||
def get_history(self) -> typing.List[typing.Dict[str, typing.Any]]:
|
||||
"""
|
||||
Returns a list of descriptions and timestamps for the undo stack.
|
||||
[C: tests/test_history.py:test_initial_state, tests/test_history.py:test_push_state]
|
||||
|
||||
Returns a list of descriptions and timestamps for the undo stack.
|
||||
[C: tests/test_history.py:test_initial_state, tests/test_history.py:test_push_state, tests/test_history_manager.py:TestHistoryManager.test_get_history_returns_descriptions]
|
||||
"""
|
||||
return [
|
||||
{"description": e.description, "timestamp": e.timestamp}
|
||||
@@ -137,9 +141,10 @@ class HistoryManager:
|
||||
def jump_to_undo(self, index: int, current_state: typing.Any, current_description: str = "Before Jump") -> typing.Optional[HistoryEntry]:
|
||||
"""
|
||||
|
||||
Jumps to a specific state in the undo stack by moving subsequent states
|
||||
and the current_state to the redo stack.
|
||||
[C: tests/test_history.py:test_jump_to_undo]
|
||||
|
||||
Jumps to a specific state in the undo stack by moving subsequent states
|
||||
and the current_state to the redo stack.
|
||||
[C: tests/test_history.py:test_jump_to_undo, tests/test_history_manager.py:TestHistoryManager.test_jump_to_undo]
|
||||
"""
|
||||
if index < 0 or index >= len(self._undo_stack):
|
||||
return None
+46
-1
@@ -5,6 +5,9 @@ from imgui_bundle import imgui_node_editor
|
||||
def child(id_str: str, size_x: float = 0, size_y: float = 0, flags: int = 0): return _ScopeChild(id_str, size_x, size_y, flags)
|
||||
class _ScopeChild:
|
||||
def __init__(self, id_str: str, size_x: float | imgui.ImVec2, size_y: float, flags: int):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self._id = id_str
|
||||
# Check if size_x is likely an ImVec2 without using isinstance (which breaks with mocks)
|
||||
if hasattr(size_x, 'x') and hasattr(size_x, 'y'):
|
||||
@@ -30,6 +33,9 @@ class _ScopeGroup:
|
||||
def id(str_id: str): return _ScopeId(str_id)
|
||||
class _ScopeId:
|
||||
def __init__(self, str_id: str):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self._id = str_id
|
||||
def __enter__(self):
|
||||
imgui.push_id(self._id)
|
||||
@@ -40,6 +46,9 @@ class _ScopeId:
|
||||
def menu(label: str): return _ScopeMenu(label)
|
||||
class _ScopeMenu:
|
||||
def __init__(self, label: str):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self._label = label
|
||||
self._active = False
|
||||
def __enter__(self):
|
||||
@@ -53,6 +62,9 @@ class _ScopeMenu:
|
||||
def menu_bar(): return _ScopeMenuBar()
|
||||
class _ScopeMenuBar:
|
||||
def __init__(self):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self._active = False
|
||||
def __enter__(self):
|
||||
self._active = imgui.begin_menu_bar()
|
||||
@@ -65,6 +77,9 @@ class _ScopeMenuBar:
|
||||
def node(name: str): return _ScopeNode(name)
|
||||
class _ScopeNode:
|
||||
def __init__(self, name: str):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self._name = name
|
||||
def __enter__(self):
|
||||
return imgui_node_editor.begin(self._name)
|
||||
@@ -75,6 +90,9 @@ class _ScopeNode:
|
||||
def popup(id_str: str): return _ScopePopup(id_str)
|
||||
class _ScopePopup:
|
||||
def __init__(self, id_str: str):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self._id = id_str
|
||||
self._active = False
|
||||
def __enter__(self):
|
||||
@@ -88,6 +106,9 @@ class _ScopePopup:
|
||||
def popup_modal(name: str, visible: bool = True, flags: int = 0): return _ScopePopupModal(name, visible, flags)
|
||||
class _ScopePopupModal:
|
||||
def __init__(self, name: str, visible: bool, flags: int):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self._name = name
|
||||
self._visible = visible
|
||||
self._flags = flags
|
||||
@@ -104,6 +125,9 @@ class _ScopePopupModal:
|
||||
def style_color(col: int, val: Any): return _ScopeStyleColor(col, val)
|
||||
class _ScopeStyleColor:
|
||||
def __init__(self, col: int, val: Any):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self._col = col
|
||||
self._val = val
|
||||
def __enter__(self):
|
||||
@@ -115,6 +139,9 @@ class _ScopeStyleColor:
|
||||
def style_var(var: int, val: Any): return _ScopeStyleVar(var, val)
|
||||
class _ScopeStyleVar:
|
||||
def __init__(self, var: int, val: Any):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self._var = var
|
||||
self._val = val
|
||||
def __enter__(self):
|
||||
@@ -126,6 +153,9 @@ class _ScopeStyleVar:
|
||||
def table(name: str, columns: int, flags: int = 0): return _ScopeTable(name, columns, flags)
|
||||
class _ScopeTable:
|
||||
def __init__(self, name: str, columns: int, flags: int):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self._name = name
|
||||
self._columns = columns
|
||||
self._flags = flags
|
||||
@@ -141,6 +171,9 @@ class _ScopeTable:
|
||||
def tab_bar(id_str: str, flags: int = 0): return _ScopeTabBar(id_str, flags)
|
||||
class _ScopeTabBar:
|
||||
def __init__(self, id_str: str, flags: int):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self._id = id_str
|
||||
self._flags = flags
|
||||
self._active = False
|
||||
@@ -155,6 +188,9 @@ class _ScopeTabBar:
|
||||
def tab_item(label: str, flags: int = 0): return _ScopeTabItem(label, flags)
|
||||
class _ScopeTabItem:
|
||||
def __init__(self, label: str, flags: int):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self._label = label
|
||||
self._flags = flags
|
||||
self._expanded = False
|
||||
@@ -170,6 +206,9 @@ class _ScopeTabItem:
|
||||
def text_wrap(wrap_pos: float = 0.0): return _ScopeTextWrap(wrap_pos)
|
||||
class _ScopeTextWrap:
|
||||
def __init__(self, wrap_pos: float):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self._wrap_pos = wrap_pos
|
||||
def __enter__(self):
|
||||
imgui.push_text_wrap_pos(self._wrap_pos)
|
||||
@@ -188,6 +227,9 @@ class _ScopeTooltip:
|
||||
def tree_node_ex(label: str, flags: int = 0): return _ScopeTreeNodeEx(label, flags)
|
||||
class _ScopeTreeNodeEx:
|
||||
def __init__(self, label: str, flags: int):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self._label = label
|
||||
self._flags = flags
|
||||
self._opened = False
|
||||
@@ -202,6 +244,9 @@ class _ScopeTreeNodeEx:
|
||||
def window(name: str, visible: bool = True, flags: int = 0): return _ScopeWindow(name, visible, flags)
|
||||
class _ScopeWindow:
|
||||
def __init__(self, name: str, visible: bool, flags: int):
|
||||
"""
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self._name = name
|
||||
self._visible = visible
|
||||
self._flags = flags
|
||||
@@ -211,4 +256,4 @@ class _ScopeWindow:
|
||||
return self._result
|
||||
def __exit__(self, *args):
|
||||
imgui.end()
|
||||
return False
|
||||
return False
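Every class in this module follows the same scope-guard shape: __enter__ issues the begin_* call and __exit__ the matching end_* call. A generic form of the pattern, for orientation only (some imgui widgets only want end_* when begin succeeded, which the real wrappers handle case by case):

    from contextlib import contextmanager

    @contextmanager
    def scoped(begin, end, end_needs_success: bool = False):
        # Pair a begin_* call with its end_* call so callers can use `with`.
        active = begin()
        try:
            yield active
        finally:
            if active or not end_needs_success:
                end()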
+18
-15
@@ -8,19 +8,21 @@ from src.log_registry import LogRegistry
|
||||
class LogPruner:
|
||||
"""
|
||||
|
||||
Handles the automated deletion of old and insignificant session logs.
|
||||
Ensures that only whitelisted or significant sessions (based on size/content)
|
||||
are preserved long-term.
|
||||
|
||||
Handles the automated deletion of old and insignificant session logs.
|
||||
Ensures that only whitelisted or significant sessions (based on size/content)
|
||||
are preserved long-term.
|
||||
"""
|
||||
|
||||
def __init__(self, log_registry: LogRegistry, logs_dir: str) -> None:
|
||||
"""
|
||||
|
||||
Initializes the LogPruner.
|
||||
|
||||
Args:
|
||||
log_registry: An instance of LogRegistry to check session data.
|
||||
logs_dir: The path to the directory containing session sub-directories.
|
||||
|
||||
Initializes the LogPruner.
|
||||
|
||||
Args:
|
||||
log_registry: An instance of LogRegistry to check session data.
|
||||
logs_dir: The path to the directory containing session sub-directories.
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self.log_registry = log_registry
|
||||
@@ -29,12 +31,13 @@ class LogPruner:
|
||||
def prune(self, max_age_days: int = 1, min_size_kb: int = 2) -> None:
|
||||
"""
|
||||
|
||||
Prunes old and small session directories from the logs directory.
|
||||
|
||||
Deletes session directories that meet the following criteria:
|
||||
1. The session start time is older than max_age_days.
|
||||
2. The session name is NOT in the whitelist provided by the LogRegistry.
|
||||
3. The total size of all files within the session directory is less than min_size_kb.
|
||||
|
||||
Prunes old and small session directories from the logs directory.
|
||||
|
||||
Deletes session directories that meet the following criteria:
|
||||
1. The session start time is older than max_age_days.
|
||||
2. The session name is NOT in the whitelist provided by the LogRegistry.
|
||||
3. The total size of all files within the session directory is less than min_size_kb.
|
||||
[C: tests/test_log_pruner.py:test_prune_old_insignificant_logs, tests/test_log_pruning_heuristic.py:TestLogPruningHeuristic.test_prune_handles_relative_paths_starting_with_logs, tests/test_log_pruning_heuristic.py:TestLogPruningHeuristic.test_prune_removes_empty_sessions_regardless_of_age, tests/test_log_pruning_heuristic.py:TestLogPruningHeuristic.test_prune_removes_sessions_without_metadata_regardless_of_age, tests/test_logging_e2e.py:test_logging_e2e]
|
||||
"""
|
||||
now = datetime.now()
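The three documented criteria combine into a single predicate; a sketch under the assumption that age, whitelist status and size are already known (the real prune() also has to walk the directories to compute them):

    from datetime import datetime, timedelta

    def should_prune(start_time: datetime, whitelisted: bool, size_kb: float,
                     max_age_days: int = 1, min_size_kb: int = 2,
                     now: datetime | None = None) -> bool:
        # Old enough, not protected, and too small to be worth keeping.
        now = now or datetime.now()
        too_old = now - start_time > timedelta(days=max_age_days)
        return too_old and not whitelisted and size_kb < min_size_kb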
@@ -119,4 +122,4 @@ class LogPruner:
|
||||
except OSError as e:
|
||||
sys.stderr.write(f"[LogPruner] Error removing {resolved_path}: {e}\n")
|
||||
|
||||
self.log_registry.save_registry()
|
||||
self.log_registry.save_registry()
|
||||
+56
-48
@@ -48,17 +48,19 @@ from typing import Any
|
||||
class LogRegistry:
|
||||
"""
|
||||
|
||||
Manages a persistent registry of session logs using a TOML file.
|
||||
Tracks session paths, start times, whitelisting status, and metadata.
|
||||
|
||||
Manages a persistent registry of session logs using a TOML file.
|
||||
Tracks session paths, start times, whitelisting status, and metadata.
|
||||
"""
|
||||
|
||||
def __init__(self, registry_path: str) -> None:
|
||||
"""
|
||||
|
||||
Initializes the LogRegistry with a path to the registry file.
|
||||
|
||||
Args:
|
||||
registry_path (str): The file path to the TOML registry.
|
||||
|
||||
Initializes the LogRegistry with a path to the registry file.
|
||||
|
||||
Args:
|
||||
registry_path (str): The file path to the TOML registry.
|
||||
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
|
||||
"""
|
||||
self.registry_path = registry_path
|
||||
@@ -73,8 +75,9 @@ class LogRegistry:
|
||||
def load_registry(self) -> None:
|
||||
"""
|
||||
|
||||
Loads the registry data from the TOML file into memory.
|
||||
Handles date/time conversions from TOML-native formats to strings for consistency.
|
||||
|
||||
Loads the registry data from the TOML file into memory.
|
||||
Handles date/time conversions from TOML-native formats to strings for consistency.
|
||||
"""
|
||||
if os.path.exists(self.registry_path):
|
||||
try:
|
||||
@@ -102,8 +105,9 @@ class LogRegistry:
|
||||
def save_registry(self) -> None:
|
||||
"""
|
||||
|
||||
Serializes and saves the current registry data to the TOML file.
|
||||
Converts internal datetime objects to ISO format strings for compatibility.
|
||||
|
||||
Serializes and saves the current registry data to the TOML file.
|
||||
Converts internal datetime objects to ISO format strings for compatibility.
|
||||
[C: tests/test_logging_e2e.py:test_logging_e2e]
|
||||
"""
|
||||
try:
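The datetime-to-ISO conversion mentioned here can be pictured as a walk over the registry dict before it is written out; a hedged sketch that assumes a flat {session_id: {field: value}} layout:

    from datetime import datetime

    def to_toml_safe(data: dict) -> dict:
        # Convert datetime values to ISO strings so the registry serializes
        # cleanly to TOML and round-trips as plain text.
        return {
            session_id: {
                key: (value.isoformat() if isinstance(value, datetime) else value)
                for key, value in fields.items()
            }
            for session_id, fields in data.items()
        }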
@@ -137,12 +141,13 @@ class LogRegistry:
|
||||
def register_session(self, session_id: str, path: str, start_time: datetime | str) -> None:
|
||||
"""
|
||||
|
||||
Registers a new session in the registry.
|
||||
|
||||
Args:
|
||||
session_id (str): Unique identifier for the session.
|
||||
path (str): File path to the session's log directory.
|
||||
start_time (datetime|str): The timestamp when the session started.
|
||||
|
||||
Registers a new session in the registry.
|
||||
|
||||
Args:
|
||||
session_id (str): Unique identifier for the session.
|
||||
path (str): File path to the session's log directory.
|
||||
start_time (datetime|str): The timestamp when the session started.
|
||||
[C: src/session_logger.py:open_session, tests/test_auto_whitelist.py:test_auto_whitelist_keywords, tests/test_auto_whitelist.py:test_auto_whitelist_large_size, tests/test_auto_whitelist.py:test_auto_whitelist_message_count, tests/test_auto_whitelist.py:test_no_auto_whitelist_insignificant, tests/test_log_pruner.py:test_prune_old_insignificant_logs, tests/test_log_pruning_heuristic.py:TestLogPruningHeuristic.test_get_old_non_whitelisted_sessions_includes_empty_sessions, tests/test_log_pruning_heuristic.py:TestLogPruningHeuristic.test_get_old_non_whitelisted_sessions_includes_sessions_without_metadata, tests/test_log_pruning_heuristic.py:TestLogPruningHeuristic.test_prune_handles_relative_paths_starting_with_logs, tests/test_log_pruning_heuristic.py:TestLogPruningHeuristic.test_prune_removes_empty_sessions_regardless_of_age, tests/test_log_pruning_heuristic.py:TestLogPruningHeuristic.test_prune_removes_sessions_without_metadata_regardless_of_age, tests/test_log_registry.py:TestLogRegistry.test_get_old_non_whitelisted_sessions, tests/test_log_registry.py:TestLogRegistry.test_is_session_whitelisted, tests/test_log_registry.py:TestLogRegistry.test_register_session, tests/test_log_registry.py:TestLogRegistry.test_update_session_metadata, tests/test_logging_e2e.py:test_logging_e2e]
"""
if session_id in self.data:
@@ -163,15 +168,16 @@ class LogRegistry:
def update_session_metadata(self, session_id: str, message_count: int, errors: int, size_kb: int, whitelisted: bool, reason: str) -> None:
"""
Updates metadata fields for an existing session.
Args:
session_id (str): Unique identifier for the session.
message_count (int): Total number of messages in the session.
errors (int): Number of errors identified in logs.
size_kb (int): Total size of the session logs in kilobytes.
whitelisted (bool): Whether the session should be protected from pruning.
reason (str): Explanation for the current whitelisting status.
Updates metadata fields for an existing session.
Args:
session_id (str): Unique identifier for the session.
message_count (int): Total number of messages in the session.
errors (int): Number of errors identified in logs.
size_kb (int): Total size of the session logs in kilobytes.
whitelisted (bool): Whether the session should be protected from pruning.
reason (str): Explanation for the current whitelisting status.
[C: tests/test_auto_whitelist.py:test_auto_whitelist_large_size, tests/test_auto_whitelist.py:test_auto_whitelist_message_count, tests/test_log_pruning_heuristic.py:TestLogPruningHeuristic.test_get_old_non_whitelisted_sessions_includes_empty_sessions, tests/test_log_pruning_heuristic.py:TestLogPruningHeuristic.test_prune_removes_empty_sessions_regardless_of_age, tests/test_log_registry.py:TestLogRegistry.test_get_old_non_whitelisted_sessions, tests/test_log_registry.py:TestLogRegistry.test_is_session_whitelisted, tests/test_log_registry.py:TestLogRegistry.test_update_session_metadata]
"""
if session_id not in self.data:
@@ -197,13 +203,14 @@ class LogRegistry:
def is_session_whitelisted(self, session_id: str) -> bool:
"""
Checks if a specific session is marked as whitelisted.
Args:
session_id (str): Unique identifier for the session.
Returns:
bool: True if whitelisted, False otherwise.
Checks if a specific session is marked as whitelisted.
Args:
session_id (str): Unique identifier for the session.
Returns:
bool: True if whitelisted, False otherwise.
[C: tests/test_auto_whitelist.py:test_auto_whitelist_keywords, tests/test_auto_whitelist.py:test_auto_whitelist_large_size, tests/test_auto_whitelist.py:test_auto_whitelist_message_count, tests/test_auto_whitelist.py:test_no_auto_whitelist_insignificant, tests/test_log_registry.py:TestLogRegistry.test_is_session_whitelisted, tests/test_logging_e2e.py:test_logging_e2e]
"""
session_data = self.data.get(session_id)
@@ -215,12 +222,13 @@ class LogRegistry:
def update_auto_whitelist_status(self, session_id: str) -> None:
"""
Analyzes session logs and updates whitelisting status based on heuristics.
Sessions are automatically whitelisted if they contain error keywords,
have a high message count, or exceed a size threshold.
Args:
session_id (str): Unique identifier for the session to analyze.
Analyzes session logs and updates whitelisting status based on heuristics.
Sessions are automatically whitelisted if they contain error keywords,
have a high message count, or exceed a size threshold.
Args:
session_id (str): Unique identifier for the session to analyze.
[C: src/session_logger.py:close_session]
"""
if session_id not in self.data:
@@ -275,15 +283,16 @@ class LogRegistry:
def get_old_non_whitelisted_sessions(self, cutoff_datetime: datetime) -> list[dict[str, Any]]:
"""
Retrieves a list of sessions that are older than a specific cutoff time
and are not marked as whitelisted.
Also includes non-whitelisted sessions that are empty (message_count=0 or size_kb=0).
Args:
cutoff_datetime (datetime): The threshold time for identifying old sessions.
Returns:
list: A list of dictionaries containing session details (id, path, start_time).
Retrieves a list of sessions that are older than a specific cutoff time
and are not marked as whitelisted.
Also includes non-whitelisted sessions that are empty (message_count=0 or size_kb=0).
Args:
cutoff_datetime (datetime): The threshold time for identifying old sessions.
Returns:
list: A list of dictionaries containing session details (id, path, start_time).
[C: tests/test_log_pruner.py:test_prune_old_insignificant_logs, tests/test_log_pruning_heuristic.py:TestLogPruningHeuristic.test_get_old_non_whitelisted_sessions_includes_empty_sessions, tests/test_log_pruning_heuristic.py:TestLogPruningHeuristic.test_get_old_non_whitelisted_sessions_includes_sessions_without_metadata, tests/test_log_registry.py:TestLogRegistry.test_get_old_non_whitelisted_sessions]
"""
old_sessions = []
@@ -316,4 +325,3 @@ class LogRegistry:
'start_time': start_time_raw
})
return old_sessions
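A minimal usage sketch of the LogRegistry API documented in the hunks above; the import path, registry location, and session values are illustrative assumptions, not taken from this diff:

from datetime import datetime, timedelta
from src.log_registry import LogRegistry  # assumed module path

registry = LogRegistry("logs/registry.toml")  # illustrative registry file
registry.load_registry()

# Record a session, then attach metadata gathered after the run.
registry.register_session("session-001", "logs/sessions/session-001", datetime.now())
registry.update_session_metadata(
    "session-001",
    message_count=42,
    errors=1,
    size_kb=128,
    whitelisted=False,
    reason="below auto-whitelist thresholds",
)

# Prune candidates: non-whitelisted sessions older than 30 days (or empty ones).
if not registry.is_session_whitelisted("session-001"):
    stale = registry.get_old_non_whitelisted_sessions(datetime.now() - timedelta(days=30))

registry.save_registry()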
@@ -10,12 +10,13 @@ from typing import Optional, Dict, Callable
class MarkdownRenderer:
"""
Hybrid Markdown renderer that uses imgui_md for text/headers
and ImGuiColorTextEdit for syntax-highlighted code blocks.
Hybrid Markdown renderer that uses imgui_md for text/headers
and ImGuiColorTextEdit for syntax-highlighted code blocks.
"""
def __init__(self):
"""
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
[C: src/mcp_client.py:_DDGParser.__init__, src/mcp_client.py:_TextExtractor.__init__]
"""
self.options = imgui_md.MarkdownOptions()
# Base path for fonts (Inter family)
@@ -67,8 +68,9 @@ class MarkdownRenderer:
def render(self, text: str, context_id: str = "default") -> None:
"""
Render Markdown text with code block interception.
[C: tests/test_theme_nerv_alert.py:test_alert_pulsing_render_active, tests/test_theme_nerv_alert.py:test_alert_pulsing_render_inactive, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_alert_pulsing_render, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_disabled, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_render]
Render Markdown text with code block interception.
[C: src/theme_2.py:render_post_fx, tests/test_theme_nerv_alert.py:test_alert_pulsing_render_active, tests/test_theme_nerv_alert.py:test_alert_pulsing_render_inactive, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_alert_pulsing_render, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_disabled, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_render]
"""
if not text:
return
@@ -167,7 +169,7 @@ def get_renderer() -> MarkdownRenderer:
def render(text: str, context_id: str = "default") -> None:
"""
[C: tests/test_theme_nerv_alert.py:test_alert_pulsing_render_active, tests/test_theme_nerv_alert.py:test_alert_pulsing_render_inactive, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_alert_pulsing_render, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_disabled, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_render]
[C: src/theme_2.py:render_post_fx, tests/test_theme_nerv_alert.py:test_alert_pulsing_render_active, tests/test_theme_nerv_alert.py:test_alert_pulsing_render_inactive, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_alert_pulsing_render, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_disabled, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_render]
"""
get_renderer().render(text, context_id)
@@ -175,4 +177,4 @@ def render_unindented(text: str) -> None:
get_renderer().render_unindented(text)
def render_code(code: str, lang: str = "", context_id: str = "default", block_idx: int = 0) -> None:
get_renderer().render_code(code, lang, context_id, block_idx)
get_renderer().render_code(code, lang, context_id, block_idx)
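A minimal sketch of the module-level render helpers above, assuming they are exposed by the markdown renderer module and called inside an active imgui frame; the module path and sample text are illustrative:

from src import markdown_renderer  # assumed module path

doc = "# Status\n\nSome *markdown* text."

# Text and headers go through imgui_md; fenced code blocks are intercepted
# and drawn with syntax highlighting.
markdown_renderer.render(doc, context_id="chat-panel")

# Code can also be highlighted directly, bypassing Markdown parsing.
markdown_renderer.render_code("print('hi')", lang="python", context_id="chat-panel", block_idx=0)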
+79 -55
@@ -102,12 +102,13 @@ perf_monitor_callback: Optional[Callable[[], dict[str, Any]]] = None
|
||||
def configure(file_items: list[dict[str, Any]], extra_base_dirs: list[str] | None = None) -> None:
|
||||
"""
|
||||
|
||||
Build the allowlist from aggregate file_items.
|
||||
Called by ai_client before each send so the list reflects the current project.
|
||||
|
||||
file_items : list of dicts from aggregate.build_file_items()
|
||||
extra_base_dirs : additional directory roots to allow traversal of
|
||||
[C: tests/conftest.py:reset_ai_client, tests/test_arch_boundary_phase1.py:TestArchBoundaryPhase1.test_mcp_client_whitelist_enforcement, tests/test_mcp_client_beads.py:test_bd_mcp_tools]
|
||||
|
||||
Build the allowlist from aggregate file_items.
|
||||
Called by ai_client before each send so the list reflects the current project.
|
||||
|
||||
file_items : list of dicts from aggregate.build_file_items()
|
||||
extra_base_dirs : additional directory roots to allow traversal of
|
||||
[C: tests/conftest.py:reset_ai_client, tests/test_arch_boundary_phase1.py:TestArchBoundaryPhase1.test_mcp_client_whitelist_enforcement, tests/test_mcp_client_beads.py:test_bd_mcp_tools, tests/test_py_struct_tools.py:test_mcp_dispatch_errors, tests/test_py_struct_tools.py:test_mcp_dispatch_integration]
|
||||
"""
|
||||
global _allowed_paths, _base_dirs, _primary_base_dir
|
||||
_allowed_paths = set()
|
||||
@@ -131,14 +132,15 @@ def configure(file_items: list[dict[str, Any]], extra_base_dirs: list[str] | Non
|
||||
def _is_allowed(path: Path) -> bool:
|
||||
"""
|
||||
|
||||
Return True if `path` is within the allowlist.
|
||||
A path is allowed if:
|
||||
- it is explicitly in _allowed_paths, OR
|
||||
- it is contained within (or equal to) one of the _base_dirs
|
||||
All paths are resolved (follows symlinks) before comparison to prevent
|
||||
symlink-based path traversal.
|
||||
|
||||
CRITICAL: Blacklisted files (history) are NEVER allowed.
|
||||
|
||||
Return True if `path` is within the allowlist.
|
||||
A path is allowed if:
|
||||
- it is explicitly in _allowed_paths, OR
|
||||
- it is contained within (or equal to) one of the _base_dirs
|
||||
All paths are resolved (follows symlinks) before comparison to prevent
|
||||
symlink-based path traversal.
|
||||
|
||||
CRITICAL: Blacklisted files (history) are NEVER allowed.
|
||||
[C: tests/test_arch_boundary_phase1.py:TestArchBoundaryPhase1.test_mcp_client_whitelist_enforcement, tests/test_history_management.py:test_mcp_blacklist]
|
||||
"""
|
||||
from src.paths import get_config_path
|
||||
@@ -180,8 +182,9 @@ def _is_allowed(path: Path) -> bool:
|
||||
def _resolve_and_check(raw_path: str) -> tuple[Path | None, str]:
|
||||
"""
|
||||
|
||||
Resolve raw_path and verify it passes the allowlist check.
|
||||
Returns (resolved_path, error_string). error_string is empty on success.
|
||||
|
||||
Resolve raw_path and verify it passes the allowlist check.
|
||||
Returns (resolved_path, error_string). error_string is empty on success.
|
||||
"""
|
||||
try:
|
||||
p = Path(raw_path)
|
||||
@@ -243,8 +246,9 @@ def list_directory(path: str) -> str:
|
||||
def search_files(path: str, pattern: str) -> str:
|
||||
"""
|
||||
|
||||
Search for files matching a glob pattern within path.
|
||||
pattern examples: '*.py', '**/*.toml', 'src/**/*.rs'
|
||||
|
||||
Search for files matching a glob pattern within path.
|
||||
pattern examples: '*.py', '**/*.toml', 'src/**/*.rs'
|
||||
"""
|
||||
p, err = _resolve_and_check(path)
|
||||
if err or p is None:
|
||||
@@ -274,9 +278,10 @@ def search_files(path: str, pattern: str) -> str:
|
||||
def get_file_summary(path: str) -> str:
|
||||
"""
|
||||
|
||||
Return the heuristic summary for a file (same as the initial context block).
|
||||
For .py files: imports, classes, methods, functions, constants.
|
||||
For .toml: table keys. For .md: headings. Others: line count + preview.
|
||||
|
||||
Return the heuristic summary for a file (same as the initial context block).
|
||||
For .py files: imports, classes, methods, functions, constants.
|
||||
For .toml: table keys. For .md: headings. Others: line count + preview.
|
||||
"""
|
||||
p, err = _resolve_and_check(path)
|
||||
if err or p is None:
|
||||
@@ -294,7 +299,8 @@ def get_file_summary(path: str) -> str:
|
||||
def py_get_skeleton(path: str) -> str:
|
||||
"""
|
||||
|
||||
Returns a skeleton of a Python file (preserving docstrings, stripping function bodies).
|
||||
|
||||
Returns a skeleton of a Python file (preserving docstrings, stripping function bodies).
|
||||
"""
|
||||
p, err = _resolve_and_check(path)
|
||||
if err:
|
||||
@@ -314,7 +320,8 @@ def py_get_skeleton(path: str) -> str:
|
||||
|
||||
def ts_c_get_skeleton(path: str) -> str:
|
||||
"""
|
||||
Returns a skeleton of a C file.
|
||||
|
||||
Returns a skeleton of a C file.
|
||||
[C: tests/test_ts_c_tools.py:test_ts_c_get_skeleton]
|
||||
"""
|
||||
p, err = _resolve_and_check(path)
|
||||
@@ -331,8 +338,9 @@ def ts_c_get_skeleton(path: str) -> str:
|
||||
|
||||
def ts_cpp_get_skeleton(path: str) -> str:
|
||||
"""
|
||||
Returns a skeleton of a C++ file.
|
||||
[C: tests/test_ts_cpp_tools.py:test_exhaustive_cpp_samples, tests/test_ts_cpp_tools.py:test_exhaustive_gencpp_samples, tests/test_ts_cpp_tools.py:test_ts_cpp_get_skeleton]
|
||||
|
||||
Returns a skeleton of a C++ file.
|
||||
[C: tests/test_gencpp_full_suite.py:test_gencpp_full_suite, tests/test_ts_cpp_tools.py:test_exhaustive_cpp_samples, tests/test_ts_cpp_tools.py:test_exhaustive_gencpp_samples, tests/test_ts_cpp_tools.py:test_ts_cpp_get_skeleton]
|
||||
"""
|
||||
p, err = _resolve_and_check(path)
|
||||
if err: return err
|
||||
@@ -349,7 +357,8 @@ def ts_cpp_get_skeleton(path: str) -> str:
|
||||
def py_get_code_outline(path: str) -> str:
|
||||
"""
|
||||
|
||||
Returns a hierarchical outline of a code file (classes, functions, methods with line ranges).
|
||||
|
||||
Returns a hierarchical outline of a code file (classes, functions, methods with line ranges).
|
||||
"""
|
||||
p, err = _resolve_and_check(path)
|
||||
if err:
|
||||
@@ -367,7 +376,8 @@ def py_get_code_outline(path: str) -> str:
|
||||
|
||||
def ts_c_get_code_outline(path: str) -> str:
|
||||
"""
|
||||
Returns a hierarchical outline of a C file.
|
||||
|
||||
Returns a hierarchical outline of a C file.
|
||||
[C: tests/test_ts_c_tools.py:test_ts_c_get_code_outline]
|
||||
"""
|
||||
p, err = _resolve_and_check(path)
|
||||
@@ -384,8 +394,9 @@ def ts_c_get_code_outline(path: str) -> str:
|
||||
|
||||
def ts_cpp_get_code_outline(path: str) -> str:
|
||||
"""
|
||||
Returns a hierarchical outline of a C++ file.
|
||||
[C: tests/test_ts_cpp_tools.py:test_exhaustive_cpp_samples, tests/test_ts_cpp_tools.py:test_exhaustive_gencpp_samples, tests/test_ts_cpp_tools.py:test_ts_cpp_get_code_outline]
|
||||
|
||||
Returns a hierarchical outline of a C++ file.
|
||||
[C: tests/test_gencpp_full_suite.py:test_gencpp_full_suite, tests/test_ts_cpp_tools.py:test_exhaustive_cpp_samples, tests/test_ts_cpp_tools.py:test_exhaustive_gencpp_samples, tests/test_ts_cpp_tools.py:test_ts_cpp_get_code_outline]
|
||||
"""
|
||||
p, err = _resolve_and_check(path)
|
||||
if err: return err
|
||||
@@ -415,8 +426,9 @@ def ts_c_get_definition(path: str, name: str) -> str:
|
||||
|
||||
def ts_cpp_get_definition(path: str, name: str) -> str:
|
||||
"""
|
||||
Returns the source code for a specific definition in a C++ file.
|
||||
[C: tests/test_ts_cpp_tools.py:test_exhaustive_cpp_samples, tests/test_ts_cpp_tools.py:test_exhaustive_gencpp_samples, tests/test_ts_cpp_tools.py:test_ts_cpp_update_definition, tests/test_ts_cpp_tools.py:test_ts_cpp_update_definition_gencpp]
|
||||
|
||||
Returns the source code for a specific definition in a C++ file.
|
||||
[C: tests/test_ast_masking_core.py:test_ast_masking_gencpp_samples, tests/test_gencpp_full_suite.py:test_gencpp_full_suite, tests/test_ts_cpp_tools.py:test_exhaustive_cpp_samples, tests/test_ts_cpp_tools.py:test_exhaustive_gencpp_samples, tests/test_ts_cpp_tools.py:test_ts_cpp_update_definition, tests/test_ts_cpp_tools.py:test_ts_cpp_update_definition_gencpp]
|
||||
"""
|
||||
p, err = _resolve_and_check(path)
|
||||
if err: return err
|
||||
@@ -478,7 +490,8 @@ def ts_c_update_definition(path: str, name: str, new_content: str) -> str:
|
||||
|
||||
def ts_cpp_update_definition(path: str, name: str, new_content: str) -> str:
|
||||
"""
|
||||
Surgically replace the definition of a class or function in a C++ file.
|
||||
|
||||
Surgically replace the definition of a class or function in a C++ file.
|
||||
[C: tests/test_ts_cpp_tools.py:test_ts_cpp_update_definition, tests/test_ts_cpp_tools.py:test_ts_cpp_update_definition_gencpp]
|
||||
"""
|
||||
p, err = _resolve_and_check(path)
|
||||
@@ -537,8 +550,9 @@ def set_file_slice(path: str, start_line: int, end_line: int, new_content: str)
|
||||
def edit_file(path: str, old_string: str, new_string: str, replace_all: bool = False) -> str:
|
||||
"""
|
||||
|
||||
Replace exact string match in a file. Preserves indentation and line endings.
|
||||
Drop-in replacement for native edit tool that destroys 1-space indentation.
|
||||
|
||||
Replace exact string match in a file. Preserves indentation and line endings.
|
||||
Drop-in replacement for native edit tool that destroys 1-space indentation.
|
||||
"""
|
||||
p, err = _resolve_and_check(path)
|
||||
if err:
|
||||
@@ -595,8 +609,9 @@ def _get_symbol_node(tree: ast.AST, name: str) -> Optional[ast.AST]:
|
||||
def py_get_symbol_info(path: str, name: str) -> tuple[str, int] | str:
|
||||
"""
|
||||
|
||||
Returns (source_code, line_number) for a specific class, function, or method definition.
|
||||
If not found, returns an error string.
|
||||
|
||||
Returns (source_code, line_number) for a specific class, function, or method definition.
|
||||
If not found, returns an error string.
|
||||
"""
|
||||
p, err = _resolve_and_check(path)
|
||||
if err:
|
||||
@@ -622,9 +637,10 @@ def py_get_symbol_info(path: str, name: str) -> tuple[str, int] | str:
|
||||
def py_get_definition(path: str, name: str) -> str:
|
||||
"""
|
||||
|
||||
Returns the source code for a specific class, function, or method definition.
|
||||
path: Path to the code file.
|
||||
name: Name of the definition to retrieve (e.g., 'MyClass', 'my_function', 'MyClass.my_method').
|
||||
|
||||
Returns the source code for a specific class, function, or method definition.
|
||||
path: Path to the code file.
|
||||
name: Name of the definition to retrieve (e.g., 'MyClass', 'my_function', 'MyClass.my_method').
|
||||
"""
|
||||
p, err = _resolve_and_check(path)
|
||||
if err:
|
||||
@@ -797,9 +813,10 @@ def py_set_var_declaration(path: str, name: str, new_declaration: str) -> str:
|
||||
def get_git_diff(path: str, base_rev: str = "HEAD", head_rev: str = "") -> str:
|
||||
"""
|
||||
|
||||
Returns the git diff for a file or directory.
|
||||
base_rev: The base revision (default: HEAD)
|
||||
head_rev: The head revision (optional)
|
||||
|
||||
Returns the git diff for a file or directory.
|
||||
base_rev: The base revision (default: HEAD)
|
||||
head_rev: The head revision (optional)
|
||||
"""
|
||||
p, err = _resolve_and_check(path)
|
||||
if err:
|
||||
@@ -1151,7 +1168,8 @@ def fetch_url(url: str) -> str:
|
||||
|
||||
def get_ui_performance() -> str:
|
||||
"""
|
||||
Returns current UI performance metrics (FPS, Frame Time, CPU, Input Lag).
|
||||
|
||||
Returns current UI performance metrics (FPS, Frame Time, CPU, Input Lag).
|
||||
[C: tests/test_mcp_perf_tool.py:test_mcp_perf_tool_retrieval]
|
||||
"""
|
||||
if perf_monitor_callback is None:
|
||||
@@ -1183,7 +1201,7 @@ class StdioMCPServer:
|
||||
|
||||
async def start(self):
|
||||
"""
|
||||
[C: src/multi_agent_conductor.py:WorkerPool.spawn, src/performance_monitor.py:PerformanceMonitor.__init__, tests/test_ai_client_concurrency.py:test_ai_client_tier_isolation, tests/test_conductor_engine_abort.py:test_kill_worker_sets_abort_and_joins_thread, tests/test_conductor_engine_v2.py:side_effect, tests/test_spawn_interception_v2.py:test_confirm_spawn_pushed_to_queue, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
[C: src/multi_agent_conductor.py:WorkerPool.spawn, src/performance_monitor.py:PerformanceMonitor.__init__, tests/test_ai_client_concurrency.py:test_ai_client_tier_isolation, tests/test_conductor_engine_abort.py:test_kill_worker_sets_abort_and_joins_thread, tests/test_conductor_engine_v2.py:side_effect, tests/test_spawn_interception_v2.py:test_confirm_spawn_pushed_to_queue, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
"""
|
||||
self.status = 'starting'
|
||||
self.proc = await asyncio.create_subprocess_exec(
|
||||
@@ -1199,7 +1217,7 @@ class StdioMCPServer:
|
||||
|
||||
async def stop(self):
|
||||
"""
|
||||
[C: tests/test_performance_monitor.py:test_perf_monitor_basic_timing, tests/test_performance_monitor.py:test_perf_monitor_component_timing, tests/test_performance_monitor.py:test_perf_monitor_extended_metrics, tests/test_performance_monitor.py:test_perf_monitor_scope_context_manager, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
[C: tests/test_performance_monitor.py:test_perf_monitor_basic_timing, tests/test_performance_monitor.py:test_perf_monitor_component_timing, tests/test_performance_monitor.py:test_perf_monitor_extended_metrics, tests/test_performance_monitor.py:test_perf_monitor_scope_context_manager, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
"""
|
||||
if self.proc:
|
||||
try:
|
||||
@@ -1262,7 +1280,8 @@ class ExternalMCPManager:
|
||||
|
||||
async def add_server(self, config: models.MCPServerConfig):
|
||||
"""
|
||||
Add and start a new MCP server from a configuration object.
|
||||
|
||||
Add and start a new MCP server from a configuration object.
|
||||
[C: tests/test_external_mcp.py:test_external_mcp_real_process, tests/test_external_mcp.py:test_get_tool_schemas_includes_external]
|
||||
"""
|
||||
if config.url:
|
||||
@@ -1274,7 +1293,8 @@ class ExternalMCPManager:
|
||||
|
||||
async def stop_all(self):
|
||||
"""
|
||||
Stop all managed MCP servers and clear the registry.
|
||||
|
||||
Stop all managed MCP servers and clear the registry.
|
||||
[C: tests/test_external_mcp.py:test_external_mcp_real_process, tests/test_external_mcp.py:test_get_tool_schemas_includes_external, tests/test_external_mcp_e2e.py:test_external_mcp_e2e_refresh_and_call]
|
||||
"""
|
||||
for server in self.servers.values():
|
||||
@@ -1283,7 +1303,8 @@ class ExternalMCPManager:
|
||||
|
||||
def get_all_tools(self) -> dict:
|
||||
"""
|
||||
Retrieve a dictionary of all tools available across all managed servers.
|
||||
|
||||
Retrieve a dictionary of all tools available across all managed servers.
|
||||
[C: tests/test_external_mcp.py:test_external_mcp_real_process, tests/test_external_mcp_e2e.py:test_external_mcp_e2e_refresh_and_call]
|
||||
"""
|
||||
all_tools = {}
|
||||
@@ -1298,7 +1319,8 @@ class ExternalMCPManager:
|
||||
|
||||
async def async_dispatch(self, tool_name: str, tool_input: dict) -> str:
|
||||
"""
|
||||
Dispatch a tool call to the appropriate external MCP server asynchronously.
|
||||
|
||||
Dispatch a tool call to the appropriate external MCP server asynchronously.
|
||||
[C: src/rag_engine.py:RAGEngine._async_search_mcp, tests/test_external_mcp.py:test_external_mcp_real_process]
|
||||
"""
|
||||
for server in self.servers.values():
|
||||
@@ -1310,7 +1332,8 @@ _external_mcp_manager = ExternalMCPManager()
|
||||
|
||||
def get_external_mcp_manager() -> ExternalMCPManager:
|
||||
"""
|
||||
Retrieve the global ExternalMCPManager instance.
|
||||
|
||||
Retrieve the global ExternalMCPManager instance.
|
||||
[C: tests/test_external_mcp.py:test_get_tool_schemas_includes_external, tests/test_external_mcp_e2e.py:test_external_mcp_e2e_refresh_and_call]
|
||||
"""
|
||||
global _external_mcp_manager
|
||||
@@ -1319,8 +1342,9 @@ def get_external_mcp_manager() -> ExternalMCPManager:
|
||||
def dispatch(tool_name: str, tool_input: dict[str, Any]) -> str:
|
||||
"""
|
||||
|
||||
Dispatch an MCP tool call by name. Returns the result as a string.
|
||||
[C: tests/test_gemini_cli_edge_cases.py:test_gemini_cli_parameter_resilience, tests/test_mcp_client_beads.py:test_bd_mcp_tools, tests/test_mcp_ts_integration.py:test_ts_c_get_code_outline_dispatch, tests/test_mcp_ts_integration.py:test_ts_c_get_definition_dispatch, tests/test_mcp_ts_integration.py:test_ts_c_get_signature_dispatch, tests/test_mcp_ts_integration.py:test_ts_c_get_skeleton_dispatch, tests/test_mcp_ts_integration.py:test_ts_c_update_definition_dispatch, tests/test_mcp_ts_integration.py:test_ts_cpp_get_code_outline_dispatch, tests/test_mcp_ts_integration.py:test_ts_cpp_get_definition_dispatch, tests/test_mcp_ts_integration.py:test_ts_cpp_get_signature_dispatch, tests/test_mcp_ts_integration.py:test_ts_cpp_get_skeleton_dispatch, tests/test_mcp_ts_integration.py:test_ts_cpp_update_definition_dispatch]
|
||||
|
||||
Dispatch an MCP tool call by name. Returns the result as a string.
|
||||
[C: tests/test_gemini_cli_edge_cases.py:test_gemini_cli_parameter_resilience, tests/test_mcp_client_beads.py:test_bd_mcp_tools, tests/test_mcp_ts_integration.py:test_ts_c_get_code_outline_dispatch, tests/test_mcp_ts_integration.py:test_ts_c_get_definition_dispatch, tests/test_mcp_ts_integration.py:test_ts_c_get_signature_dispatch, tests/test_mcp_ts_integration.py:test_ts_c_get_skeleton_dispatch, tests/test_mcp_ts_integration.py:test_ts_c_update_definition_dispatch, tests/test_mcp_ts_integration.py:test_ts_cpp_get_code_outline_dispatch, tests/test_mcp_ts_integration.py:test_ts_cpp_get_definition_dispatch, tests/test_mcp_ts_integration.py:test_ts_cpp_get_signature_dispatch, tests/test_mcp_ts_integration.py:test_ts_cpp_get_skeleton_dispatch, tests/test_mcp_ts_integration.py:test_ts_cpp_update_definition_dispatch, tests/test_py_struct_tools.py:test_mcp_dispatch_errors, tests/test_py_struct_tools.py:test_mcp_dispatch_integration]
|
||||
"""
|
||||
# Handle aliases
|
||||
path = str(tool_input.get("path", tool_input.get("file_path", tool_input.get("dir_path", ""))))
|
||||
@@ -1476,7 +1500,7 @@ def dispatch(tool_name: str, tool_input: dict[str, Any]) -> str:
|
||||
async def async_dispatch(tool_name: str, tool_input: dict[str, Any]) -> str:
|
||||
# Check native tools
|
||||
"""
|
||||
[C: src/rag_engine.py:RAGEngine._async_search_mcp, tests/test_external_mcp.py:test_external_mcp_real_process]
|
||||
[C: src/rag_engine.py:RAGEngine._async_search_mcp, tests/test_external_mcp.py:test_external_mcp_real_process]
|
||||
"""
|
||||
native_names = {t['name'] for t in MCP_TOOL_SPECS}
|
||||
if tool_name in native_names:
|
||||
@@ -1492,7 +1516,7 @@ async def async_dispatch(tool_name: str, tool_input: dict[str, Any]) -> str:
|
||||
|
||||
def get_tool_schemas() -> list[dict[str, Any]]:
|
||||
"""
|
||||
[C: tests/test_arch_boundary_phase2.py:TestArchBoundaryPhase2.test_mcp_client_dispatch_completeness, tests/test_external_mcp.py:test_get_tool_schemas_includes_external, tests/test_mcp_client_beads.py:test_bd_mcp_tools]
|
||||
[C: tests/test_arch_boundary_phase2.py:TestArchBoundaryPhase2.test_mcp_client_dispatch_completeness, tests/test_external_mcp.py:test_get_tool_schemas_includes_external, tests/test_mcp_client_beads.py:test_bd_mcp_tools]
|
||||
"""
|
||||
res = list(MCP_TOOL_SPECS)
|
||||
manager = get_external_mcp_manager()
|
||||
@@ -2283,4 +2307,4 @@ MCP_TOOL_SPECS: list[dict[str, Any]] = [
|
||||
}
|
||||
]
|
||||
|
||||
TOOL_NAMES: set[str] = {t['name'] for t in MCP_TOOL_SPECS}
|
||||
TOOL_NAMES: set[str] = {t['name'] for t in MCP_TOOL_SPECS}
|
||||
+150 -3
File diff suppressed because one or more lines are too long
@@ -48,7 +48,8 @@ from src.dag_engine import TrackDAG, ExecutionEngine
class WorkerPool:
"""
Manages a pool of worker threads with a concurrency limit.
Manages a pool of worker threads with a concurrency limit.
"""
def __init__(self, max_workers: int = 4):
self.max_workers = max_workers
@@ -59,8 +60,9 @@ class WorkerPool:
def spawn(self, ticket_id: str, target: Callable, args: tuple) -> Optional[threading.Thread]:
"""
Spawns a new worker thread if the pool is not full.
Returns the thread object or None if full.
Spawns a new worker thread if the pool is not full.
Returns the thread object or None if full.
[C: tests/test_parallel_execution.py:test_worker_pool_completion_cleanup, tests/test_parallel_execution.py:test_worker_pool_limit, tests/test_parallel_execution.py:test_worker_pool_tracking]
"""
with self._lock:
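A minimal sketch of the WorkerPool API from the hunk above; the import path, worker body, and ticket ids are illustrative assumptions:

import time
from src.multi_agent_conductor import WorkerPool  # assumed module path

pool = WorkerPool(max_workers=2)

def work(ticket_id: str) -> None:
    time.sleep(0.1)  # stand-in for run_worker_lifecycle(...)

t1 = pool.spawn("T-1", target=work, args=("T-1",))
t2 = pool.spawn("T-2", target=work, args=("T-2",))
t3 = pool.spawn("T-3", target=work, args=("T-3",))  # None while the pool is full

pool.join_all(timeout=5.0)
print(pool.get_active_count())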
@@ -83,7 +85,7 @@ class WorkerPool:
|
||||
|
||||
def join_all(self, timeout: float = None) -> None:
|
||||
"""
|
||||
[C: tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_parallel_execution.py:test_worker_pool_limit, tests/test_parallel_execution.py:test_worker_pool_tracking]
|
||||
[C: tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_parallel_execution.py:test_worker_pool_limit, tests/test_parallel_execution.py:test_worker_pool_tracking]
|
||||
"""
|
||||
with self._lock:
|
||||
threads = list(self._active.values())
|
||||
@@ -94,21 +96,22 @@ class WorkerPool:
|
||||
|
||||
def get_active_count(self) -> int:
|
||||
"""
|
||||
[C: tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_parallel_execution.py:test_worker_pool_completion_cleanup, tests/test_parallel_execution.py:test_worker_pool_limit]
|
||||
[C: tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_parallel_execution.py:test_worker_pool_completion_cleanup, tests/test_parallel_execution.py:test_worker_pool_limit]
|
||||
"""
|
||||
with self._lock:
|
||||
return len(self._active)
|
||||
|
||||
def is_full(self) -> bool:
|
||||
"""
|
||||
[C: tests/test_parallel_execution.py:test_worker_pool_limit]
|
||||
[C: tests/test_parallel_execution.py:test_worker_pool_limit]
|
||||
"""
|
||||
return self.get_active_count() >= self.max_workers
|
||||
|
||||
class ConductorEngine:
|
||||
"""
|
||||
|
||||
Orchestrates the execution of tickets within a track.
|
||||
|
||||
Orchestrates the execution of tickets within a track.
|
||||
"""
|
||||
|
||||
def __init__(self, track: Track, event_queue: Optional[events.AsyncEventQueue] = None, auto_queue: bool = False) -> None:
|
||||
@@ -148,21 +151,24 @@ class ConductorEngine:
|
||||
|
||||
def pause(self) -> None:
|
||||
"""
|
||||
Pauses the pipeline execution.
|
||||
|
||||
Pauses the pipeline execution.
|
||||
[C: tests/test_pipeline_pause.py:test_pause_method, tests/test_pipeline_pause.py:test_resume_method]
|
||||
"""
|
||||
self._pause_event.set()
|
||||
|
||||
def resume(self) -> None:
|
||||
"""
|
||||
Resumes the pipeline execution.
|
||||
|
||||
Resumes the pipeline execution.
|
||||
[C: tests/test_pipeline_pause.py:test_resume_method]
|
||||
"""
|
||||
self._pause_event.clear()
|
||||
|
||||
def approve_task(self, task_id: str) -> None:
|
||||
"""
|
||||
Manually transition todo to in_progress and mark engine dirty.
|
||||
|
||||
Manually transition todo to in_progress and mark engine dirty.
|
||||
[C: tests/test_execution_engine.py:test_execution_engine_approve_task, tests/test_execution_engine.py:test_execution_engine_step_mode]
|
||||
"""
|
||||
self.engine.approve_task(task_id)
|
||||
@@ -170,7 +176,8 @@ class ConductorEngine:
|
||||
|
||||
def update_task_status(self, task_id: str, status: str) -> None:
|
||||
"""
|
||||
Force-update ticket status and mark engine dirty.
|
||||
|
||||
Force-update ticket status and mark engine dirty.
|
||||
[C: tests/test_arch_boundary_phase3.py:TestArchBoundaryPhase3.test_manual_unblock_restores_todo, tests/test_execution_engine.py:test_execution_engine_auto_queue, tests/test_execution_engine.py:test_execution_engine_basic_flow, tests/test_execution_engine.py:test_execution_engine_status_persistence, tests/test_execution_engine.py:test_execution_engine_update_nonexistent_task]
|
||||
"""
|
||||
self.engine.update_task_status(task_id, status)
|
||||
@@ -178,7 +185,8 @@ class ConductorEngine:
|
||||
|
||||
def kill_worker(self, ticket_id: str) -> None:
|
||||
"""
|
||||
Sets the abort event for a worker and attempts to join its thread.
|
||||
|
||||
Sets the abort event for a worker and attempts to join its thread.
|
||||
[C: tests/test_conductor_engine_abort.py:test_kill_worker_sets_abort_and_joins_thread]
|
||||
"""
|
||||
if ticket_id in self._abort_events:
|
||||
@@ -213,8 +221,9 @@ class ConductorEngine:
|
||||
def parse_json_tickets(self, json_str: str) -> None:
|
||||
"""
|
||||
|
||||
Parses a JSON string of ticket definitions (Godot ECS Flat List format)
|
||||
and populates the Track's ticket list.
|
||||
|
||||
Parses a JSON string of ticket definitions (Godot ECS Flat List format)
|
||||
and populates the Track's ticket list.
|
||||
[C: tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_orchestration_logic.py:test_conductor_engine_parse_json_tickets]
|
||||
"""
|
||||
try:
|
||||
@@ -244,11 +253,12 @@ class ConductorEngine:
|
||||
def run(self, md_content: str = "", max_ticks: Optional[int] = None) -> None:
|
||||
"""
|
||||
|
||||
Main execution loop using the DAG engine.
|
||||
Args:
|
||||
md_content: The full markdown context (history + files) for AI workers.
|
||||
max_ticks: Optional limit on number of iterations (for testing).
|
||||
[C: simulation/sim_base.py:run_sim, src/project_manager.py:get_git_commit, src/project_manager.py:get_git_log, src/rag_engine.py:RAGEngine._search_mcp, src/shell_runner.py:run_powershell, tests/conftest.py:kill_process_tree, tests/conftest.py:live_gui, tests/test_conductor_abort_event.py:test_conductor_abort_event_populated, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:get_vscode_processes, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_custom_window.py:test_app_window_is_borderless, tests/test_headless_simulation.py:module, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_mock_gemini_cli.py:run_mock, tests/test_orchestration_logic.py:test_conductor_engine_run, tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_sim_ai_settings.py:test_ai_settings_simulation_run, tests/test_sim_context.py:test_context_simulation_run, tests/test_sim_execution.py:test_execution_simulation_run, tests/test_sim_tools.py:test_tools_simulation_run]
|
||||
|
||||
Main execution loop using the DAG engine.
|
||||
Args:
|
||||
md_content: The full markdown context (history + files) for AI workers.
|
||||
max_ticks: Optional limit on number of iterations (for testing).
|
||||
[C: simulation/sim_base.py:run_sim, src/project_manager.py:get_git_commit, src/rag_engine.py:RAGEngine._search_mcp, src/shell_runner.py:run_powershell, tests/conftest.py:kill_process_tree, tests/conftest.py:live_gui, tests/test_conductor_abort_event.py:test_conductor_abort_event_populated, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_extended_sims.py:test_ai_settings_sim_live, tests/test_extended_sims.py:test_context_sim_live, tests/test_extended_sims.py:test_execution_sim_live, tests/test_extended_sims.py:test_tools_sim_live, tests/test_external_editor_gui.py:get_vscode_processes, tests/test_external_editor_gui.py:test_vscode_launches_with_diff_view, tests/test_gui_custom_window.py:test_app_window_is_borderless, tests/test_headless_simulation.py:module, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_mock_gemini_cli.py:run_mock, tests/test_orchestration_logic.py:test_conductor_engine_run, tests/test_parallel_execution.py:test_conductor_engine_pool_integration, tests/test_sim_ai_settings.py:test_ai_settings_simulation_run, tests/test_sim_context.py:test_context_simulation_run, tests/test_sim_execution.py:test_execution_simulation_run, tests/test_sim_tools.py:test_tools_simulation_run]
|
||||
"""
|
||||
tick_count = 0
|
||||
while True:
|
||||
@@ -368,7 +378,8 @@ def _queue_put(event_queue: events.AsyncEventQueue, event_name: str, payload) ->
|
||||
def confirm_execution(payload: str, event_queue: events.AsyncEventQueue, ticket_id: str) -> bool:
|
||||
"""
|
||||
|
||||
Pushes an approval request to the GUI and waits for response.
|
||||
|
||||
Pushes an approval request to the GUI and waits for response.
|
||||
"""
|
||||
dialog_container = [None]
|
||||
task = {
|
||||
@@ -391,8 +402,9 @@ def confirm_execution(payload: str, event_queue: events.AsyncEventQueue, ticket_
|
||||
def confirm_spawn(role: str, prompt: str, context_md: str, event_queue: events.AsyncEventQueue, ticket_id: str) -> Tuple[bool, str, str]:
|
||||
"""
|
||||
|
||||
Pushes a spawn approval request to the GUI and waits for response.
|
||||
Returns (approved, modified_prompt, modified_context)
|
||||
|
||||
Pushes a spawn approval request to the GUI and waits for response.
|
||||
Returns (approved, modified_prompt, modified_context)
|
||||
[C: tests/test_spawn_interception_v2.py:run_confirm]
|
||||
"""
|
||||
dialog_container = [None]
|
||||
@@ -432,15 +444,16 @@ def confirm_spawn(role: str, prompt: str, context_md: str, event_queue: events.A
|
||||
def run_worker_lifecycle(ticket: Ticket, context: WorkerContext, context_files: List[str] | None = None, event_queue: events.AsyncEventQueue | None = None, engine: Optional['ConductorEngine'] = None, md_content: str = "") -> None:
|
||||
"""
|
||||
|
||||
Simulates the lifecycle of a single agent working on a ticket.
|
||||
Calls the AI client and updates the ticket status based on the response.
|
||||
Args:
|
||||
ticket: The ticket to process.
|
||||
context: The worker context.
|
||||
context_files: List of files to include in the context.
|
||||
event_queue: Queue for pushing state updates and receiving approvals.
|
||||
engine: The conductor engine.
|
||||
md_content: The markdown context (history + files) for AI workers.
|
||||
|
||||
Simulates the lifecycle of a single agent working on a ticket.
|
||||
Calls the AI client and updates the ticket status based on the response.
|
||||
Args:
|
||||
ticket: The ticket to process.
|
||||
context: The worker context.
|
||||
context_files: List of files to include in the context.
|
||||
event_queue: Queue for pushing state updates and receiving approvals.
|
||||
engine: The conductor engine.
|
||||
md_content: The markdown context (history + files) for AI workers.
|
||||
[C: tests/test_conductor_engine_v2.py:test_run_worker_lifecycle_calls_ai_client_send, tests/test_conductor_engine_v2.py:test_run_worker_lifecycle_context_injection, tests/test_conductor_engine_v2.py:test_run_worker_lifecycle_handles_blocked_response, tests/test_conductor_engine_v2.py:test_run_worker_lifecycle_pushes_response_via_queue, tests/test_conductor_engine_v2.py:test_run_worker_lifecycle_step_mode_confirmation, tests/test_conductor_engine_v2.py:test_run_worker_lifecycle_step_mode_rejection, tests/test_conductor_engine_v2.py:test_run_worker_lifecycle_token_usage_from_comms_log, tests/test_context_pruner.py:test_token_reduction_logging, tests/test_orchestration_logic.py:test_run_worker_lifecycle_blocked, tests/test_phase6_engine.py:test_worker_streaming_intermediate, tests/test_run_worker_lifecycle_abort.py:TestRunWorkerLifecycleAbort.test_run_worker_lifecycle_returns_early_on_abort, tests/test_spawn_interception_v2.py:test_run_worker_lifecycle_approved, tests/test_spawn_interception_v2.py:test_run_worker_lifecycle_rejected, tests/test_tiered_aggregation.py:test_run_worker_lifecycle_uses_strategy]
|
||||
"""
|
||||
# Enforce Context Amnesia: each ticket starts with a clean slate.
|
||||
@@ -645,4 +658,4 @@ def run_worker_lifecycle(ticket: Ticket, context: WorkerContext, context_files:
|
||||
|
||||
if event_queue:
|
||||
_queue_put(event_queue, "ticket_completed", {"ticket_id": ticket.id, "timestamp": time.time()})
|
||||
return response
|
||||
return response
|
||||
@@ -12,7 +12,8 @@ from src import paths
def get_track_history_summary() -> str:
"""
Scans conductor/archive/ and conductor/tracks/ to build a summary of past work.
Scans conductor/archive/ and conductor/tracks/ to build a summary of past work.
[C: tests/test_orchestrator_pm_history.py:TestOrchestratorPMHistory.test_get_track_history_summary, tests/test_orchestrator_pm_history.py:TestOrchestratorPMHistory.test_get_track_history_summary_missing_files]
"""
summary_parts = []
@@ -59,8 +60,9 @@ def get_track_history_summary() -> str:
def generate_tracks(user_request: str, project_config: dict[str, Any], file_items: list[dict[str, Any]], history_summary: Optional[str] = None) -> list[dict[str, Any]]:
"""
Tier 1 (Strategic PM) call.
Analyzes the project state and user request to generate a list of Tracks.
Tier 1 (Strategic PM) call.
Analyzes the project state and user request to generate a list of Tracks.
[C: tests/test_orchestration_logic.py:test_generate_tracks, tests/test_orchestrator_pm.py:TestOrchestratorPM.test_generate_tracks_malformed_json, tests/test_orchestrator_pm.py:TestOrchestratorPM.test_generate_tracks_markdown_wrapped, tests/test_orchestrator_pm.py:TestOrchestratorPM.test_generate_tracks_success, tests/test_orchestrator_pm_history.py:TestOrchestratorPMHistory.test_generate_tracks_with_history]
"""
# 1. Build Repository Map (Summary View)
@@ -128,4 +130,4 @@ if __name__ == "__main__":
print("Testing Tier 1 Track Generation...")
history = get_track_history_summary()
tracks = generate_tracks("Implement a basic unit test for the ai_client.py module.", flat, file_items, history_summary=history)
print(json.dumps(tracks, indent=2))
print(json.dumps(tracks, indent=2))
+7 -1
@@ -39,6 +39,9 @@ class CodeOutliner:
pass
def outline(self, code: str) -> str:
"""
[C: tests/test_outline_tool.py:test_code_outliner_imgui_scopes, tests/test_outline_tool.py:test_code_outliner_nested_ifs, tests/test_outline_tool.py:test_code_outliner_type_hints]
"""
code = code.lstrip(chr(0xFEFF))
try:
tree = ast.parse(code)
@@ -55,6 +58,9 @@ class CodeOutliner:
count = [0]
def walk(node: ast.AST, indent: int = 0) -> None:
"""
[C: src/summarize.py:_summarise_python]
"""
count[0] += 1
if count[0] > 100000:
raise Exception("Infinite loop detected! " + str(type(node)))
@@ -120,4 +126,4 @@ def get_outline(path: Path, code: str) -> str:
outliner = CodeOutliner()
return outliner.outline(code)
else:
return f"Outlining not supported for {suffix} files yet."
return f"Outlining not supported for {suffix} files yet."
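A minimal sketch of the outline API above; the module path is inferred from tests/test_outline_tool.py and the sample source is illustrative:

from pathlib import Path
from src.outline_tool import CodeOutliner, get_outline  # assumed module path

sample = "class Greeter:\n    def hello(self, name: str) -> str:\n        return f'hi {name}'\n"

outliner = CodeOutliner()
print(outliner.outline(sample))

# get_outline() routes by file suffix and falls back to a 'not supported' message.
print(get_outline(Path("greeter.py"), sample))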
+11 -11
@@ -17,7 +17,7 @@ class PatchModalManager:
|
||||
|
||||
def request_patch_approval(self, patch_text: str, file_paths: List[str], generated_by: str = "Tier 4 QA") -> bool:
|
||||
"""
|
||||
[C: tests/test_patch_modal.py:test_close_modal, tests/test_patch_modal.py:test_reject_patch, tests/test_patch_modal.py:test_request_patch_approval, tests/test_patch_modal.py:test_reset]
|
||||
[C: tests/test_patch_modal.py:test_close_modal, tests/test_patch_modal.py:test_reject_patch, tests/test_patch_modal.py:test_request_patch_approval, tests/test_patch_modal.py:test_reset]
|
||||
"""
|
||||
from time import time
|
||||
self._pending_patch = PendingPatch(
|
||||
@@ -31,31 +31,31 @@ class PatchModalManager:
|
||||
|
||||
def get_pending_patch(self) -> Optional[PendingPatch]:
|
||||
"""
|
||||
[C: tests/test_patch_modal.py:test_patch_modal_manager_init, tests/test_patch_modal.py:test_reject_patch, tests/test_patch_modal.py:test_request_patch_approval, tests/test_patch_modal.py:test_reset]
|
||||
[C: tests/test_patch_modal.py:test_patch_modal_manager_init, tests/test_patch_modal.py:test_reject_patch, tests/test_patch_modal.py:test_request_patch_approval, tests/test_patch_modal.py:test_reset]
|
||||
"""
|
||||
return self._pending_patch
|
||||
|
||||
def is_modal_shown(self) -> bool:
|
||||
"""
|
||||
[C: tests/test_patch_modal.py:test_close_modal, tests/test_patch_modal.py:test_patch_modal_manager_init, tests/test_patch_modal.py:test_reject_patch, tests/test_patch_modal.py:test_request_patch_approval, tests/test_patch_modal.py:test_reset]
|
||||
[C: tests/test_patch_modal.py:test_close_modal, tests/test_patch_modal.py:test_patch_modal_manager_init, tests/test_patch_modal.py:test_reject_patch, tests/test_patch_modal.py:test_request_patch_approval, tests/test_patch_modal.py:test_reset]
|
||||
"""
|
||||
return self._show_modal
|
||||
|
||||
def set_apply_callback(self, callback: Callable[[str], bool]) -> None:
|
||||
"""
|
||||
[C: tests/test_patch_modal.py:test_apply_callback, tests/test_patch_modal.py:test_reset]
|
||||
[C: tests/test_patch_modal.py:test_apply_callback, tests/test_patch_modal.py:test_reset]
|
||||
"""
|
||||
self._on_apply_callback = callback
|
||||
|
||||
def set_reject_callback(self, callback: Callable[[], None]) -> None:
|
||||
"""
|
||||
[C: tests/test_patch_modal.py:test_reject_callback, tests/test_patch_modal.py:test_reset]
|
||||
[C: tests/test_patch_modal.py:test_reject_callback, tests/test_patch_modal.py:test_reset]
|
||||
"""
|
||||
self._on_reject_callback = callback
|
||||
|
||||
def apply_patch(self, patch_text: str) -> bool:
|
||||
"""
|
||||
[C: tests/test_patch_modal.py:test_apply_callback]
|
||||
[C: tests/test_patch_modal.py:test_apply_callback]
|
||||
"""
|
||||
if self._on_apply_callback:
|
||||
return self._on_apply_callback(patch_text)
|
||||
@@ -63,7 +63,7 @@ class PatchModalManager:
|
||||
|
||||
def reject_patch(self) -> None:
|
||||
"""
|
||||
[C: tests/test_patch_modal.py:test_reject_callback, tests/test_patch_modal.py:test_reject_patch]
|
||||
[C: tests/test_patch_modal.py:test_reject_callback, tests/test_patch_modal.py:test_reject_patch]
|
||||
"""
|
||||
self._pending_patch = None
|
||||
self._show_modal = False
|
||||
@@ -72,13 +72,13 @@ class PatchModalManager:
|
||||
|
||||
def close_modal(self) -> None:
|
||||
"""
|
||||
[C: tests/test_patch_modal.py:test_close_modal]
|
||||
[C: tests/test_patch_modal.py:test_close_modal]
|
||||
"""
|
||||
self._show_modal = False
|
||||
|
||||
def reset(self) -> None:
|
||||
"""
|
||||
[C: tests/test_patch_modal.py:test_reset]
|
||||
[C: tests/test_patch_modal.py:test_reset]
|
||||
"""
|
||||
self._pending_patch = None
|
||||
self._show_modal = False
|
||||
@@ -89,7 +89,7 @@ _patch_modal_manager: Optional[PatchModalManager] = None
|
||||
|
||||
def get_patch_modal_manager() -> PatchModalManager:
|
||||
"""
|
||||
[C: tests/test_patch_modal.py:test_get_patch_modal_manager_singleton]
|
||||
[C: tests/test_patch_modal.py:test_get_patch_modal_manager_singleton]
|
||||
"""
|
||||
global _patch_modal_manager
|
||||
if _patch_modal_manager is None:
|
||||
@@ -98,7 +98,7 @@ def get_patch_modal_manager() -> PatchModalManager:
|
||||
|
||||
def reset_patch_modal_manager() -> None:
|
||||
"""
|
||||
[C: tests/test_patch_modal.py:test_get_patch_modal_manager_singleton]
|
||||
[C: tests/test_patch_modal.py:test_get_patch_modal_manager_singleton]
|
||||
"""
|
||||
global _patch_modal_manager
|
||||
if _patch_modal_manager:
|
||||
|
||||
+19 -15
@@ -49,60 +49,60 @@ _RESOLVED: dict[str, Path] = {}
|
||||
|
||||
def get_config_path() -> Path:
|
||||
"""
|
||||
[C: tests/test_paths.py:test_default_paths]
|
||||
[C: tests/test_paths.py:test_default_paths]
|
||||
"""
|
||||
root_dir = Path(__file__).resolve().parent.parent
|
||||
return Path(os.environ.get("SLOP_CONFIG", root_dir / "config.toml"))
|
||||
|
||||
def get_global_presets_path() -> Path:
|
||||
"""
|
||||
[C: src/presets.py:PresetManager.__init__, src/presets.py:PresetManager.delete_preset, src/presets.py:PresetManager.get_preset_scope]
|
||||
[C: src/presets.py:PresetManager.__init__, src/presets.py:PresetManager.delete_preset, src/presets.py:PresetManager.get_preset_scope]
|
||||
"""
|
||||
root_dir = Path(__file__).resolve().parent.parent
|
||||
return Path(os.environ.get("SLOP_GLOBAL_PRESETS", root_dir / "presets.toml"))
|
||||
|
||||
def get_project_presets_path(project_root: Path) -> Path:
|
||||
"""
|
||||
[C: src/presets.py:PresetManager.delete_preset, src/presets.py:PresetManager.get_preset_scope, src/presets.py:PresetManager.project_path]
|
||||
[C: src/presets.py:PresetManager.delete_preset, src/presets.py:PresetManager.get_preset_scope, src/presets.py:PresetManager.project_path]
|
||||
"""
|
||||
return project_root / "project_presets.toml"
|
||||
|
||||
def get_global_tool_presets_path() -> Path:
|
||||
"""
|
||||
[C: src/tool_presets.py:ToolPresetManager._get_path, src/tool_presets.py:ToolPresetManager.load_all_bias_profiles, src/tool_presets.py:ToolPresetManager.load_all_presets]
|
||||
[C: src/tool_presets.py:ToolPresetManager._get_path, src/tool_presets.py:ToolPresetManager.load_all_bias_profiles, src/tool_presets.py:ToolPresetManager.load_all_presets]
|
||||
"""
|
||||
root_dir = Path(__file__).resolve().parent.parent
|
||||
return Path(os.environ.get("SLOP_GLOBAL_TOOL_PRESETS", root_dir / "tool_presets.toml"))
|
||||
|
||||
def get_project_tool_presets_path(project_root: Path) -> Path:
|
||||
"""
|
||||
[C: src/tool_presets.py:ToolPresetManager._get_path, src/tool_presets.py:ToolPresetManager.load_all_bias_profiles, src/tool_presets.py:ToolPresetManager.load_all_presets]
|
||||
[C: src/tool_presets.py:ToolPresetManager._get_path, src/tool_presets.py:ToolPresetManager.load_all_bias_profiles, src/tool_presets.py:ToolPresetManager.load_all_presets]
|
||||
"""
|
||||
return project_root / "project_tool_presets.toml"
|
||||
|
||||
def get_global_personas_path() -> Path:
|
||||
"""
|
||||
[C: src/personas.py:PersonaManager._get_path, src/personas.py:PersonaManager.get_persona_scope, src/personas.py:PersonaManager.load_all]
|
||||
[C: src/personas.py:PersonaManager._get_path, src/personas.py:PersonaManager.get_persona_scope, src/personas.py:PersonaManager.load_all]
|
||||
"""
|
||||
root_dir = Path(__file__).resolve().parent.parent
|
||||
return Path(os.environ.get("SLOP_GLOBAL_PERSONAS", root_dir / "personas.toml"))
|
||||
|
||||
def get_project_personas_path(project_root: Path) -> Path:
|
||||
"""
|
||||
[C: src/personas.py:PersonaManager._get_path, src/personas.py:PersonaManager.get_persona_scope, src/personas.py:PersonaManager.load_all]
|
||||
[C: src/personas.py:PersonaManager._get_path, src/personas.py:PersonaManager.get_persona_scope, src/personas.py:PersonaManager.load_all]
|
||||
"""
|
||||
return project_root / "project_personas.toml"
|
||||
|
||||
def get_global_workspace_profiles_path() -> Path:
|
||||
"""
|
||||
[C: src/workspace_manager.py:WorkspaceManager._get_path, src/workspace_manager.py:WorkspaceManager.load_all_profiles]
|
||||
[C: src/workspace_manager.py:WorkspaceManager._get_path, src/workspace_manager.py:WorkspaceManager.load_all_profiles]
|
||||
"""
|
||||
root_dir = Path(__file__).resolve().parent.parent
|
||||
return Path(os.environ.get("SLOP_GLOBAL_WORKSPACE_PROFILES", root_dir / "workspace_profiles.toml"))
|
||||
|
||||
def get_project_workspace_profiles_path(project_root: Path) -> Path:
|
||||
"""
|
||||
[C: src/workspace_manager.py:WorkspaceManager._get_path, src/workspace_manager.py:WorkspaceManager.load_all_profiles]
|
||||
[C: src/workspace_manager.py:WorkspaceManager._get_path, src/workspace_manager.py:WorkspaceManager.load_all_profiles]
|
||||
"""
|
||||
return project_root / ".ai" / "workspace_profiles.toml"
|
||||
|
||||
@@ -143,7 +143,7 @@ def _get_project_conductor_dir_from_toml(project_root: Path) -> Optional[Path]:
|
||||
|
||||
def get_conductor_dir(project_path: Optional[str] = None) -> Path:
|
||||
"""
|
||||
[C: tests/test_paths.py:test_conductor_dir_project_relative, tests/test_project_paths.py:test_get_conductor_dir_default, tests/test_project_paths.py:test_get_conductor_dir_project_specific_with_toml]
|
||||
[C: tests/test_paths.py:test_conductor_dir_project_relative, tests/test_project_paths.py:test_get_conductor_dir_default, tests/test_project_paths.py:test_get_conductor_dir_project_specific_with_toml]
|
||||
"""
|
||||
if not project_path:
|
||||
# Fallback for legacy/tests, but we should avoid this
|
||||
@@ -157,7 +157,7 @@ def get_conductor_dir(project_path: Optional[str] = None) -> Path:
|
||||
|
||||
def get_logs_dir() -> Path:
|
||||
"""
|
||||
[C: src/session_logger.py:close_session, src/session_logger.py:open_session, tests/test_paths.py:test_config_overrides, tests/test_paths.py:test_default_paths, tests/test_paths.py:test_env_var_overrides, tests/test_paths.py:test_precedence]
|
||||
[C: src/session_logger.py:close_session, src/session_logger.py:open_session, tests/test_paths.py:test_config_overrides, tests/test_paths.py:test_default_paths, tests/test_paths.py:test_env_var_overrides, tests/test_paths.py:test_precedence]
|
||||
"""
|
||||
if "logs_dir" not in _RESOLVED:
|
||||
_RESOLVED["logs_dir"] = _resolve_path("SLOP_LOGS_DIR", "logs_dir", "logs/sessions")
|
||||
@@ -165,7 +165,7 @@ def get_logs_dir() -> Path:
|
||||
|
||||
def get_scripts_dir() -> Path:
|
||||
"""
|
||||
[C: src/session_logger.py:log_tool_call, src/session_logger.py:open_session, tests/test_paths.py:test_config_overrides, tests/test_paths.py:test_default_paths]
|
||||
[C: src/session_logger.py:log_tool_call, src/session_logger.py:open_session, tests/test_paths.py:test_config_overrides, tests/test_paths.py:test_default_paths]
|
||||
"""
|
||||
if "scripts_dir" not in _RESOLVED:
|
||||
_RESOLVED["scripts_dir"] = _resolve_path("SLOP_SCRIPTS_DIR", "scripts_dir", "scripts/generated")
|
||||
@@ -173,17 +173,20 @@ def get_scripts_dir() -> Path:
|
||||
|
||||
def get_tracks_dir(project_path: Optional[str] = None) -> Path:
|
||||
"""
|
||||
[C: src/project_manager.py:get_all_tracks, tests/test_paths.py:test_conductor_dir_project_relative]
|
||||
[C: src/project_manager.py:get_all_tracks, tests/test_paths.py:test_conductor_dir_project_relative]
|
||||
"""
|
||||
return get_conductor_dir(project_path) / "tracks"
|
||||
|
||||
def get_track_state_dir(track_id: str, project_path: Optional[str] = None) -> Path:
|
||||
"""
|
||||
[C: src/project_manager.py:load_track_state, src/project_manager.py:save_track_state, tests/test_paths.py:test_conductor_dir_project_relative]
|
||||
[C: src/project_manager.py:load_track_state, src/project_manager.py:save_track_state, tests/test_paths.py:test_conductor_dir_project_relative]
|
||||
"""
|
||||
return get_tracks_dir(project_path) / track_id
|
||||
|
||||
def get_archive_dir(project_path: Optional[str] = None) -> Path:
|
||||
"""
|
||||
[C: tests/test_paths.py:test_conductor_dir_project_relative]
|
||||
"""
|
||||
return get_conductor_dir(project_path) / "archive"
|
||||
|
||||
def _resolve_path_info(env_var: str, config_key: str, default: str) -> dict[str, Any]:
|
||||
@@ -210,7 +213,8 @@ def get_full_path_info() -> dict[str, dict[str, Any]]:
|
||||
|
||||
def reset_resolved() -> None:
|
||||
"""
|
||||
For testing only - clear cached resolutions.
|
||||
|
||||
For testing only - clear cached resolutions.
|
||||
[C: tests/conftest.py:reset_paths, tests/test_app_controller_offloading.py:tmp_session_dir, tests/test_gui_phase3.py:test_conductor_setup_scan, tests/test_paths.py:reset_paths, tests/test_project_paths.py:test_get_all_tracks_project_specific, tests/test_project_paths.py:test_get_conductor_dir_default, tests/test_project_paths.py:test_get_conductor_dir_project_specific_with_toml]
|
||||
"""
|
||||
_RESOLVED.clear()
|
||||
+17
-13
@@ -77,7 +77,7 @@ class PerformanceScope:
|
||||
|
||||
def get_monitor() -> PerformanceMonitor:
|
||||
"""
|
||||
[C: tests/test_perf_aggregate.py:test_build_tier3_context_scaling, tests/test_perf_dag.py:test_dag_performance]
|
||||
[C: tests/test_perf_aggregate.py:test_build_tier3_context_scaling, tests/test_perf_dag.py:test_dag_performance]
|
||||
"""
|
||||
global _instance
|
||||
if _instance is None:
|
||||
@@ -87,8 +87,9 @@ def get_monitor() -> PerformanceMonitor:
|
||||
class PerformanceMonitor:
|
||||
"""
|
||||
|
||||
Tracks application performance metrics like FPS, frame time, and CPU usage.
|
||||
Supports thread-safe tracking for individual components with efficient moving averages.
|
||||
|
||||
Tracks application performance metrics like FPS, frame time, and CPU usage.
|
||||
Supports thread-safe tracking for individual components with efficient moving averages.
|
||||
"""
|
||||
def __init__(self, history_size: int = 300) -> None:
|
||||
self.enabled: bool = False
|
||||
@@ -160,7 +161,7 @@ class PerformanceMonitor:
|
||||
|
||||
def start_frame(self) -> None:
|
||||
"""
|
||||
[C: tests/test_performance_monitor.py:test_perf_monitor_basic_timing]
|
||||
[C: tests/test_performance_monitor.py:test_perf_monitor_basic_timing]
|
||||
"""
|
||||
now = time.perf_counter()
|
||||
with self._lock:
|
||||
@@ -174,7 +175,7 @@ class PerformanceMonitor:
|
||||
|
||||
def end_frame(self) -> None:
|
||||
"""
|
||||
[C: tests/test_performance_monitor.py:test_perf_monitor_basic_timing]
|
||||
[C: tests/test_performance_monitor.py:test_perf_monitor_basic_timing]
|
||||
"""
|
||||
if self._start_time is None:
|
||||
return
|
||||
@@ -205,7 +206,7 @@ class PerformanceMonitor:
|
||||
|
||||
def start_component(self, name: str) -> None:
|
||||
"""
|
||||
[C: tests/test_performance_monitor.py:test_perf_monitor_component_timing, tests/test_performance_monitor.py:test_perf_monitor_extended_metrics]
|
||||
[C: tests/test_performance_monitor.py:test_perf_monitor_component_timing, tests/test_performance_monitor.py:test_perf_monitor_extended_metrics]
|
||||
"""
|
||||
if not self.enabled: return
|
||||
now = time.perf_counter()
|
||||
@@ -214,7 +215,7 @@ class PerformanceMonitor:
|
||||
|
||||
def end_component(self, name: str) -> None:
|
||||
"""
|
||||
[C: tests/test_performance_monitor.py:test_perf_monitor_component_timing, tests/test_performance_monitor.py:test_perf_monitor_extended_metrics]
|
||||
[C: tests/test_performance_monitor.py:test_perf_monitor_component_timing, tests/test_performance_monitor.py:test_perf_monitor_extended_metrics]
|
||||
"""
|
||||
if not self.enabled: return
|
||||
now = time.perf_counter()
|
||||
@@ -233,7 +234,8 @@ class PerformanceMonitor:
|
||||
|
||||
def get_metrics(self) -> dict[str, float]:
|
||||
"""
|
||||
Returns current metrics and their moving averages. Thread-safe.
|
||||
|
||||
Returns current metrics and their moving averages. Thread-safe.
|
||||
[C: tests/test_perf_aggregate.py:test_build_tier3_context_scaling, tests/test_perf_dag.py:test_dag_performance, tests/test_performance_monitor.py:test_perf_monitor_basic_timing, tests/test_performance_monitor.py:test_perf_monitor_component_timing, tests/test_performance_monitor.py:test_perf_monitor_extended_metrics, tests/test_performance_monitor.py:test_perf_monitor_scope_context_manager]
|
||||
"""
|
||||
with self._lock:
|
||||
@@ -269,8 +271,9 @@ class PerformanceMonitor:
|
||||
|
||||
def get_history(self, key: str) -> List[float]:
|
||||
"""
|
||||
Returns a snapshot of the full history buffer for a specific metric key.
|
||||
[C: tests/test_history.py:test_initial_state, tests/test_history.py:test_push_state]
|
||||
|
||||
Returns a snapshot of the full history buffer for a specific metric key.
|
||||
[C: tests/test_history.py:test_initial_state, tests/test_history.py:test_push_state, tests/test_history_manager.py:TestHistoryManager.test_get_history_returns_descriptions]
|
||||
"""
|
||||
with self._lock:
|
||||
if key in self._history:
|
||||
@@ -281,15 +284,16 @@ class PerformanceMonitor:
|
||||
|
||||
def scope(self, name: str) -> PerformanceScope:
|
||||
"""
|
||||
Returns a context manager for timing a component.
|
||||
|
||||
Returns a context manager for timing a component.
|
||||
[C: tests/test_perf_aggregate.py:test_build_tier3_context_scaling, tests/test_performance_monitor.py:test_perf_monitor_scope_context_manager]
|
||||
"""
|
||||
return PerformanceScope(self, name)
|
||||
|
||||
def stop(self) -> None:
|
||||
"""
|
||||
[C: tests/test_performance_monitor.py:test_perf_monitor_basic_timing, tests/test_performance_monitor.py:test_perf_monitor_component_timing, tests/test_performance_monitor.py:test_perf_monitor_extended_metrics, tests/test_performance_monitor.py:test_perf_monitor_scope_context_manager, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
[C: tests/test_performance_monitor.py:test_perf_monitor_basic_timing, tests/test_performance_monitor.py:test_perf_monitor_component_timing, tests/test_performance_monitor.py:test_perf_monitor_extended_metrics, tests/test_performance_monitor.py:test_perf_monitor_scope_context_manager, tests/test_websocket_server.py:test_websocket_subscription_and_broadcast]
|
||||
"""
|
||||
self._stop_event.set()
|
||||
if self._cpu_thread.is_alive():
|
||||
self._cpu_thread.join(timeout=2.0)
|
||||
self._cpu_thread.join(timeout=2.0)
|
||||
+8
-7
@@ -13,7 +13,7 @@ class PersonaManager:
|
||||
|
||||
def _get_path(self, scope: str) -> Path:
|
||||
"""
|
||||
[C: src/tool_presets.py:ToolPresetManager.delete_bias_profile, src/tool_presets.py:ToolPresetManager.delete_preset, src/tool_presets.py:ToolPresetManager.save_bias_profile, src/tool_presets.py:ToolPresetManager.save_preset, src/workspace_manager.py:WorkspaceManager.delete_profile, src/workspace_manager.py:WorkspaceManager.save_profile]
|
||||
[C: src/tool_presets.py:ToolPresetManager.delete_bias_profile, src/tool_presets.py:ToolPresetManager.delete_preset, src/tool_presets.py:ToolPresetManager.save_bias_profile, src/tool_presets.py:ToolPresetManager.save_preset, src/workspace_manager.py:WorkspaceManager.delete_profile, src/workspace_manager.py:WorkspaceManager.save_profile]
|
||||
"""
|
||||
if scope == "global":
|
||||
return paths.get_global_personas_path()
|
||||
@@ -26,7 +26,8 @@ class PersonaManager:
|
||||
|
||||
def load_all(self) -> Dict[str, Persona]:
|
||||
"""
|
||||
Merges global and project personas into a single dictionary.
|
||||
|
||||
Merges global and project personas into a single dictionary.
|
||||
[C: tests/test_persona_manager.py:test_delete_persona, tests/test_persona_manager.py:test_load_all_merged, tests/test_persona_manager.py:test_save_persona, tests/test_preset_manager.py:test_delete_preset, tests/test_preset_manager.py:test_load_all_merged, tests/test_preset_manager.py:test_save_preset_global, tests/test_preset_manager.py:test_save_preset_project, tests/test_presets.py:TestPresetManager.test_delete_preset, tests/test_presets.py:TestPresetManager.test_project_overwrites_global, tests/test_presets.py:TestPresetManager.test_save_and_load_global, tests/test_presets.py:TestPresetManager.test_save_and_load_project]
|
||||
"""
|
||||
personas = {}
|
||||
@@ -46,7 +47,7 @@ class PersonaManager:
|
||||
|
||||
def save_persona(self, persona: Persona, scope: str = "project") -> None:
|
||||
"""
|
||||
[C: tests/test_persona_manager.py:test_save_persona]
|
||||
[C: tests/test_persona_manager.py:test_save_persona]
|
||||
"""
|
||||
path = self._get_path(scope)
|
||||
data = self._load_file(path)
|
||||
@@ -73,7 +74,7 @@ class PersonaManager:
|
||||
|
||||
def delete_persona(self, name: str, scope: str = "project") -> None:
|
||||
"""
|
||||
[C: tests/test_persona_manager.py:test_delete_persona]
|
||||
[C: tests/test_persona_manager.py:test_delete_persona]
|
||||
"""
|
||||
path = self._get_path(scope)
|
||||
data = self._load_file(path)
|
||||
@@ -83,7 +84,7 @@ class PersonaManager:
|
||||
|
||||
def _load_file(self, path: Path) -> Dict[str, Any]:
|
||||
"""
|
||||
[C: src/presets.py:PresetManager.delete_preset, src/presets.py:PresetManager.get_preset_scope, src/presets.py:PresetManager.load_all, src/presets.py:PresetManager.save_preset, src/workspace_manager.py:WorkspaceManager.delete_profile, src/workspace_manager.py:WorkspaceManager.load_all_profiles, src/workspace_manager.py:WorkspaceManager.save_profile]
|
||||
[C: src/presets.py:PresetManager.delete_preset, src/presets.py:PresetManager.get_preset_scope, src/presets.py:PresetManager.load_all, src/presets.py:PresetManager.save_preset, src/workspace_manager.py:WorkspaceManager.delete_profile, src/workspace_manager.py:WorkspaceManager.load_all_profiles, src/workspace_manager.py:WorkspaceManager.save_profile]
|
||||
"""
|
||||
if not path.exists():
|
||||
return {}
|
||||
@@ -95,8 +96,8 @@ class PersonaManager:
|
||||
|
||||
def _save_file(self, path: Path, data: Dict[str, Any]) -> None:
|
||||
"""
|
||||
[C: src/presets.py:PresetManager.delete_preset, src/presets.py:PresetManager.save_preset, src/workspace_manager.py:WorkspaceManager.delete_profile, src/workspace_manager.py:WorkspaceManager.save_profile]
|
||||
[C: src/presets.py:PresetManager.delete_preset, src/presets.py:PresetManager.save_preset, src/workspace_manager.py:WorkspaceManager.delete_profile, src/workspace_manager.py:WorkspaceManager.save_profile]
|
||||
"""
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
with open(path, "wb") as f:
|
||||
tomli_w.dump(data, f)
|
||||
tomli_w.dump(data, f)
|
||||
+7
-5
@@ -19,7 +19,8 @@ class PresetManager:
|
||||
|
||||
def load_all(self) -> Dict[str, Preset]:
|
||||
"""
|
||||
Merges global and project presets into a single dictionary.
|
||||
|
||||
Merges global and project presets into a single dictionary.
|
||||
[C: tests/test_persona_manager.py:test_delete_persona, tests/test_persona_manager.py:test_load_all_merged, tests/test_persona_manager.py:test_save_persona, tests/test_preset_manager.py:test_delete_preset, tests/test_preset_manager.py:test_load_all_merged, tests/test_preset_manager.py:test_save_preset_global, tests/test_preset_manager.py:test_save_preset_project, tests/test_presets.py:TestPresetManager.test_delete_preset, tests/test_presets.py:TestPresetManager.test_project_overwrites_global, tests/test_presets.py:TestPresetManager.test_save_and_load_global, tests/test_presets.py:TestPresetManager.test_save_and_load_project]
|
||||
"""
|
||||
presets: Dict[str, Preset] = {}
|
||||
@@ -45,7 +46,8 @@ class PresetManager:
|
||||
|
||||
def save_preset(self, preset: Preset, scope: str = "project") -> None:
|
||||
"""
|
||||
Saves a preset to either the global or project-specific TOML file.
|
||||
|
||||
Saves a preset to either the global or project-specific TOML file.
|
||||
[C: tests/test_preset_manager.py:test_save_preset_global, tests/test_preset_manager.py:test_save_preset_project, tests/test_preset_manager.py:test_save_preset_project_no_root, tests/test_presets.py:TestPresetManager.test_delete_preset, tests/test_presets.py:TestPresetManager.test_project_overwrites_global, tests/test_presets.py:TestPresetManager.test_save_and_load_global, tests/test_presets.py:TestPresetManager.test_save_and_load_project]
|
||||
"""
|
||||
path = self.global_path if scope == "global" else self.project_path
|
||||
@@ -63,7 +65,7 @@ class PresetManager:
|
||||
|
||||
def delete_preset(self, name: str, scope: str) -> None:
|
||||
"""
|
||||
[C: tests/test_preset_manager.py:test_delete_preset, tests/test_presets.py:TestPresetManager.test_delete_preset]
|
||||
[C: tests/test_preset_manager.py:test_delete_preset, tests/test_presets.py:TestPresetManager.test_delete_preset]
|
||||
"""
|
||||
if scope == "project" and self.project_root:
|
||||
path = get_project_presets_path(self.project_root)
|
||||
@@ -92,7 +94,7 @@ class PresetManager:
|
||||
|
||||
def _load_file(self, path: Path) -> Dict[str, Any]:
|
||||
"""
|
||||
[C: src/workspace_manager.py:WorkspaceManager.delete_profile, src/workspace_manager.py:WorkspaceManager.load_all_profiles, src/workspace_manager.py:WorkspaceManager.save_profile]
|
||||
[C: src/workspace_manager.py:WorkspaceManager.delete_profile, src/workspace_manager.py:WorkspaceManager.load_all_profiles, src/workspace_manager.py:WorkspaceManager.save_profile]
|
||||
"""
|
||||
if not path.exists():
|
||||
return {"presets": {}}
|
||||
@@ -110,7 +112,7 @@ class PresetManager:
|
||||
|
||||
def _save_file(self, path: Path, data: Dict[str, Any]) -> None:
|
||||
"""
|
||||
[C: src/workspace_manager.py:WorkspaceManager.delete_profile, src/workspace_manager.py:WorkspaceManager.save_profile]
|
||||
[C: src/workspace_manager.py:WorkspaceManager.delete_profile, src/workspace_manager.py:WorkspaceManager.save_profile]
|
||||
"""
|
||||
if path.parent.exists() and path.parent.is_file():
|
||||
raise ValueError(f"Cannot save to {path}: Parent directory {path.parent} is a file.")
|
||||
|
||||
+48
-30
@@ -30,7 +30,8 @@ def parse_ts(s: str) -> Optional[datetime.datetime]:
|
||||
|
||||
def entry_to_str(entry: dict[str, Any]) -> str:
|
||||
"""
|
||||
Serialise a disc entry dict -> stored string.
|
||||
|
||||
Serialise a disc entry dict -> stored string.
|
||||
[C: tests/test_thinking_persistence.py:test_entry_to_str_with_thinking]
|
||||
"""
|
||||
ts = entry.get("ts", "")
|
||||
@@ -50,7 +51,8 @@ def entry_to_str(entry: dict[str, Any]) -> str:
|
||||
|
||||
def str_to_entry(raw: str, roles: list[str]) -> dict[str, Any]:
|
||||
"""
|
||||
Parse a stored string back to a disc entry dict.
|
||||
|
||||
Parse a stored string back to a disc entry dict.
|
||||
[C: tests/test_thinking_persistence.py:test_str_to_entry_with_thinking]
|
||||
"""
|
||||
ts = ""
|
||||
@@ -91,13 +93,13 @@ def get_git_commit(git_dir: str) -> str:
|
||||
|
||||
def default_discussion() -> dict[str, Any]:
|
||||
"""
|
||||
[C: tests/test_discussion_takes.py:TestDiscussionTakes.test_promote_take_renames_discussion]
|
||||
[C: tests/test_discussion_takes.py:TestDiscussionTakes.test_promote_take_renames_discussion]
|
||||
"""
|
||||
return {"git_commit": "", "last_updated": now_ts(), "history": []}
|
||||
|
||||
def default_project(name: str = "unnamed") -> dict[str, Any]:
|
||||
"""
|
||||
[C: tests/test_deepseek_infra.py:test_default_project_includes_reasoning_role, tests/test_discussion_takes.py:TestDiscussionTakes.setUp, tests/test_history_management.py:test_history_persistence_across_turns, tests/test_history_management.py:test_save_separation, tests/test_project_manager_modes.py:test_default_project_execution_mode, tests/test_project_manager_modes.py:test_load_save_execution_mode, tests/test_project_serialization.py:TestProjectSerialization.test_default_roles_include_context, tests/test_project_serialization.py:TestProjectSerialization.test_fileitem_roundtrip]
|
||||
[C: tests/test_deepseek_infra.py:test_default_project_includes_reasoning_role, tests/test_discussion_takes.py:TestDiscussionTakes.setUp, tests/test_history_management.py:test_history_persistence_across_turns, tests/test_history_management.py:test_save_separation, tests/test_project_manager_modes.py:test_default_project_execution_mode, tests/test_project_manager_modes.py:test_load_save_execution_mode, tests/test_project_serialization.py:TestProjectSerialization.test_default_roles_include_context, tests/test_project_serialization.py:TestProjectSerialization.test_fileitem_roundtrip]
|
||||
"""
|
||||
return {
|
||||
"project": {"name": name, "git_dir": "", "system_prompt": "", "execution_mode": "native"},
|
||||
@@ -152,7 +154,8 @@ def default_project(name: str = "unnamed") -> dict[str, Any]:
|
||||
|
||||
def get_history_path(project_path: Union[str, Path]) -> Path:
|
||||
"""
|
||||
Return the Path to the sibling history TOML file for a given project.
|
||||
|
||||
Return the Path to the sibling history TOML file for a given project.
|
||||
[C: tests/test_history_management.py:test_save_separation]
|
||||
"""
|
||||
p = Path(project_path)
|
||||
@@ -161,8 +164,9 @@ def get_history_path(project_path: Union[str, Path]) -> Path:
|
||||
def load_project(path: Union[str, Path]) -> dict[str, Any]:
|
||||
"""
|
||||
|
||||
Load a project TOML file.
|
||||
Automatically migrates legacy 'discussion' keys to a sibling history file.
|
||||
|
||||
Load a project TOML file.
|
||||
Automatically migrates legacy 'discussion' keys to a sibling history file.
|
||||
[C: tests/test_history_management.py:test_history_persistence_across_turns, tests/test_history_management.py:test_migration_on_load, tests/test_project_manager_modes.py:test_load_save_execution_mode, tests/test_project_serialization.py:TestProjectSerialization.test_backward_compatibility_strings, tests/test_project_serialization.py:TestProjectSerialization.test_fileitem_roundtrip]
|
||||
"""
|
||||
with open(path, "rb") as f:
|
||||
@@ -185,7 +189,8 @@ def load_project(path: Union[str, Path]) -> dict[str, Any]:
|
||||
|
||||
def load_history(project_path: Union[str, Path]) -> dict[str, Any]:
|
||||
"""
|
||||
Load the segregated discussion history from its dedicated TOML file.
|
||||
|
||||
Load the segregated discussion history from its dedicated TOML file.
|
||||
[C: tests/test_thinking_persistence.py:test_save_and_load_history_with_thinking_segments]
|
||||
"""
|
||||
hist_path = get_history_path(project_path)
|
||||
@@ -196,7 +201,8 @@ def load_history(project_path: Union[str, Path]) -> dict[str, Any]:
|
||||
|
||||
def clean_nones(data: Any) -> Any:
|
||||
"""
|
||||
Recursively remove None values from a dictionary/list.
|
||||
|
||||
Recursively remove None values from a dictionary/list.
|
||||
[C: tests/test_thinking_persistence.py:test_clean_nones_removes_thinking]
|
||||
"""
|
||||
if isinstance(data, dict):
|
||||
@@ -208,8 +214,9 @@ def clean_nones(data: Any) -> Any:
|
||||
def save_project(proj: dict[str, Any], path: Union[str, Path], disc_data: Optional[dict[str, Any]] = None) -> None:
|
||||
"""
|
||||
|
||||
Save the project TOML.
|
||||
If 'discussion' is present in proj, it is moved to the sibling history file.
|
||||
|
||||
Save the project TOML.
|
||||
If 'discussion' is present in proj, it is moved to the sibling history file.
|
||||
[C: tests/test_history_management.py:test_history_persistence_across_turns, tests/test_history_management.py:test_save_separation, tests/test_project_manager_modes.py:test_load_save_execution_mode, tests/test_project_serialization.py:TestProjectSerialization.test_fileitem_roundtrip, tests/test_thinking_persistence.py:test_save_and_load_history_with_thinking_segments]
|
||||
"""
|
||||
proj = clean_nones(proj)
|
||||
@@ -270,7 +277,8 @@ def flat_config(proj: dict[str, Any], disc_name: Optional[str] = None, track_id:
|
||||
|
||||
def save_context_preset(project_dict: dict, preset_name: str, files: list[str], screenshots: list[str]) -> None:
|
||||
"""
|
||||
Save a named context preset (files + screenshots) into the project dict.
|
||||
|
||||
Save a named context preset (files + screenshots) into the project dict.
|
||||
[C: tests/test_context_presets.py:test_save_context_preset]
|
||||
"""
|
||||
if "context_presets" not in project_dict:
|
||||
@@ -282,7 +290,8 @@ def save_context_preset(project_dict: dict, preset_name: str, files: list[str],
|
||||
|
||||
def load_context_preset(project_dict: dict, preset_name: str) -> dict:
|
||||
"""
|
||||
Return the files and screenshots for a named preset.
|
||||
|
||||
Return the files and screenshots for a named preset.
|
||||
[C: tests/test_context_presets.py:test_load_context_preset, tests/test_context_presets.py:test_load_nonexistent_preset]
|
||||
"""
|
||||
if "context_presets" not in project_dict or preset_name not in project_dict["context_presets"]:
|
||||
@@ -291,7 +300,8 @@ def load_context_preset(project_dict: dict, preset_name: str) -> dict:
|
||||
|
||||
def delete_context_preset(project_dict: dict, preset_name: str) -> None:
|
||||
"""
|
||||
Remove a named preset if it exists.
|
||||
|
||||
Remove a named preset if it exists.
|
||||
[C: tests/test_context_presets.py:test_delete_context_preset, tests/test_context_presets.py:test_delete_nonexistent_preset_no_error]
|
||||
"""
|
||||
if "context_presets" in project_dict:
|
||||
@@ -301,7 +311,8 @@ def delete_context_preset(project_dict: dict, preset_name: str) -> None:
|
||||
def save_track_state(track_id: str, state: 'TrackState', base_dir: Union[str, Path] = ".") -> None:
|
||||
"""
|
||||
|
||||
Saves a TrackState object to conductor/tracks/<track_id>/state.toml.
|
||||
|
||||
Saves a TrackState object to conductor/tracks/<track_id>/state.toml.
|
||||
[C: tests/test_project_manager_tracks.py:test_get_all_tracks_with_state, tests/test_track_state_persistence.py:test_track_state_persistence]
|
||||
"""
|
||||
track_dir = paths.get_track_state_dir(track_id, project_path=str(base_dir))
|
||||
@@ -314,7 +325,8 @@ def save_track_state(track_id: str, state: 'TrackState', base_dir: Union[str, Pa
|
||||
def load_track_state(track_id: str, base_dir: Union[str, Path] = ".") -> Optional['TrackState']:
|
||||
"""
|
||||
|
||||
Loads a TrackState object from conductor/tracks/<track_id>/state.toml.
|
||||
|
||||
Loads a TrackState object from conductor/tracks/<track_id>/state.toml.
|
||||
[C: tests/test_track_state_persistence.py:test_track_state_persistence]
|
||||
"""
|
||||
from src.models import TrackState
|
||||
@@ -328,8 +340,9 @@ def load_track_state(track_id: str, base_dir: Union[str, Path] = ".") -> Optiona
|
||||
def load_track_history(track_id: str, base_dir: Union[str, Path] = ".") -> list[str]:
|
||||
"""
|
||||
|
||||
Loads the discussion history for a specific track from its state.toml.
|
||||
Returns a list of entry strings formatted with @timestamp.
|
||||
|
||||
Loads the discussion history for a specific track from its state.toml.
|
||||
Returns a list of entry strings formatted with @timestamp.
|
||||
"""
|
||||
state = load_track_state(track_id, base_dir)
|
||||
if not state:
|
||||
@@ -346,8 +359,9 @@ def load_track_history(track_id: str, base_dir: Union[str, Path] = ".") -> list[
|
||||
def save_track_history(track_id: str, history: list[str], base_dir: Union[str, Path] = ".") -> None:
|
||||
"""
|
||||
|
||||
Saves the discussion history for a specific track to its state.toml.
|
||||
'history' is expected to be a list of formatted strings.
|
||||
|
||||
Saves the discussion history for a specific track to its state.toml.
|
||||
'history' is expected to be a list of formatted strings.
|
||||
"""
|
||||
state = load_track_state(track_id, base_dir)
|
||||
if not state:
|
||||
@@ -360,11 +374,12 @@ def save_track_history(track_id: str, history: list[str], base_dir: Union[str, P
|
||||
def get_all_tracks(base_dir: Union[str, Path] = ".") -> list[dict[str, Any]]:
|
||||
"""
|
||||
|
||||
Scans the conductor/tracks/ directory and returns a list of dictionaries
|
||||
containing track metadata: 'id', 'title', 'status', 'complete', 'total',
|
||||
and 'progress' (0.0 to 1.0).
|
||||
Handles missing or malformed metadata.json or state.toml by falling back
|
||||
to available info or defaults.
|
||||
|
||||
Scans the conductor/tracks/ directory and returns a list of dictionaries
|
||||
containing track metadata: 'id', 'title', 'status', 'complete', 'total',
|
||||
and 'progress' (0.0 to 1.0).
|
||||
Handles missing or malformed metadata.json or state.toml by falling back
|
||||
to available info or defaults.
|
||||
[C: tests/test_project_manager_tracks.py:test_get_all_tracks_empty, tests/test_project_manager_tracks.py:test_get_all_tracks_malformed, tests/test_project_manager_tracks.py:test_get_all_tracks_with_metadata_json, tests/test_project_manager_tracks.py:test_get_all_tracks_with_state, tests/test_project_paths.py:test_get_all_tracks_project_specific]
|
||||
"""
|
||||
tracks_dir = paths.get_tracks_dir(project_path=str(base_dir))
|
||||
@@ -428,8 +443,9 @@ def get_all_tracks(base_dir: Union[str, Path] = ".") -> list[dict[str, Any]]:
|
||||
def calculate_track_progress(tickets: list) -> dict:
|
||||
"""
|
||||
|
||||
Calculates track progress based on ticket statuses.
|
||||
percentage (float), completed (int), total (int), in_progress (int), blocked (int), todo (int)
|
||||
|
||||
Calculates track progress based on ticket statuses.
|
||||
percentage (float), completed (int), total (int), in_progress (int), blocked (int), todo (int)
|
||||
[C: tests/test_progress_viz.py:test_calculate_track_progress_all_completed, tests/test_progress_viz.py:test_calculate_track_progress_all_todo, tests/test_progress_viz.py:test_calculate_track_progress_empty, tests/test_progress_viz.py:test_calculate_track_progress_mixed]
|
||||
"""
|
||||
total = len(tickets)
|
||||
@@ -463,8 +479,9 @@ def calculate_track_progress(tickets: list) -> dict:
|
||||
def branch_discussion(project_dict: dict, source_id: str, new_id: str, message_index: int) -> None:
|
||||
"""
|
||||
|
||||
Creates a new discussion in project_dict['discussion']['discussions'] by copying
|
||||
the history from source_id up to (and including) message_index, and sets active to new_id.
|
||||
|
||||
Creates a new discussion in project_dict['discussion']['discussions'] by copying
|
||||
the history from source_id up to (and including) message_index, and sets active to new_id.
|
||||
[C: tests/test_discussion_takes.py:TestDiscussionTakes.test_branch_discussion_creates_new_take]
|
||||
"""
|
||||
if "discussion" not in project_dict or "discussions" not in project_dict["discussion"]:
|
||||
@@ -483,7 +500,8 @@ def branch_discussion(project_dict: dict, source_id: str, new_id: str, message_i
|
||||
|
||||
def promote_take(project_dict: dict, take_id: str, new_id: str) -> None:
|
||||
"""
|
||||
Renames a take_id to new_id in the discussions dict.
|
||||
|
||||
Renames a take_id to new_id in the discussions dict.
|
||||
[C: tests/test_discussion_takes.py:TestDiscussionTakes.test_promote_take_renames_discussion]
|
||||
"""
|
||||
if "discussion" not in project_dict or "discussions" not in project_dict["discussion"]:
|
||||
|
||||
+3
-3
@@ -108,7 +108,7 @@ class RAGEngine:
|
||||
|
||||
def add_documents(self, ids: List[str], texts: List[str], metadatas: Optional[List[Dict[str, Any]]] = None):
|
||||
"""
|
||||
[C: tests/test_rag_engine.py:test_rag_engine_chroma]
|
||||
[C: tests/test_rag_engine.py:test_rag_engine_chroma]
|
||||
"""
|
||||
if not self.config.enabled or self.collection == "mock":
|
||||
return
|
||||
@@ -215,7 +215,7 @@ class RAGEngine:
|
||||
|
||||
def search(self, query: str, top_k: int = 5) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
[C: tests/mock_concurrent_mma.py:main, tests/test_rag_engine.py:test_rag_engine_chroma]
|
||||
[C: tests/mock_concurrent_mma.py:main, tests/test_rag_engine.py:test_rag_engine_chroma]
|
||||
"""
|
||||
if not self.config.enabled:
|
||||
return []
|
||||
@@ -243,7 +243,7 @@ class RAGEngine:
|
||||
|
||||
def delete_documents(self, ids: List[str]):
|
||||
"""
|
||||
[C: tests/test_rag_engine.py:test_rag_engine_chroma]
|
||||
[C: tests/test_rag_engine.py:test_rag_engine_chroma]
|
||||
"""
|
||||
if not self.config.enabled or self.collection == "mock":
|
||||
return
|
||||
|
||||
+15
-10
@@ -44,8 +44,9 @@ def _now_ts() -> str:
|
||||
def open_session(label: Optional[str] = None) -> None:
|
||||
"""
|
||||
|
||||
Called once at GUI startup. Creates the log directories if needed and
|
||||
opens the log files for this session within a sub-directory.
|
||||
|
||||
Called once at GUI startup. Creates the log directories if needed and
|
||||
opens the log files for this session within a sub-directory.
|
||||
[C: tests/test_app_controller_offloading.py:tmp_session_dir, tests/test_logging_e2e.py:test_logging_e2e, tests/test_session_logger_optimization.py:test_log_tool_call_saves_in_session_scripts, tests/test_session_logger_optimization.py:test_log_tool_output_saves_in_session_outputs, tests/test_session_logger_optimization.py:test_session_directory_and_subdirectories_creation, tests/test_session_logger_reset.py:test_reset_session, tests/test_session_logging.py:test_open_session_creates_subdir_and_registry]
|
||||
"""
|
||||
global _ts, _session_id, _session_dir, _comms_fh, _tool_fh, _api_fh, _cli_fh, _seq, _output_seq
|
||||
@@ -89,7 +90,8 @@ def open_session(label: Optional[str] = None) -> None:
|
||||
|
||||
def close_session() -> None:
|
||||
"""
|
||||
Flush and close all log files. Called on clean exit.
|
||||
|
||||
Flush and close all log files. Called on clean exit.
|
||||
[C: tests/test_app_controller_offloading.py:tmp_session_dir, tests/test_logging_e2e.py:e2e_setup, tests/test_logging_e2e.py:test_logging_e2e, tests/test_session_logger_optimization.py:temp_session_setup, tests/test_session_logger_reset.py:temp_logs, tests/test_session_logging.py:temp_logs]
|
||||
"""
|
||||
global _comms_fh, _tool_fh, _api_fh, _cli_fh, _session_id
|
||||
@@ -136,8 +138,9 @@ def log_api_hook(method: str, path: str, payload: str) -> None:
|
||||
def log_comms(entry: dict[str, Any]) -> None:
|
||||
"""
|
||||
|
||||
Append one comms entry to the comms log file as a JSON-L line.
|
||||
Thread-safe (GIL + line-buffered file).
|
||||
|
||||
Append one comms entry to the comms log file as a JSON-L line.
|
||||
Thread-safe (GIL + line-buffered file).
|
||||
[C: tests/test_logging_e2e.py:test_logging_e2e]
|
||||
"""
|
||||
if _comms_fh is None:
|
||||
@@ -150,8 +153,9 @@ def log_comms(entry: dict[str, Any]) -> None:
|
||||
def log_tool_call(script: str, result: str, script_path: Optional[str]) -> Optional[str]:
|
||||
"""
|
||||
|
||||
Append a tool-call record to the toolcalls log and write the PS1 script to
|
||||
the session's scripts directory. Returns the path of the written script file.
|
||||
|
||||
Append a tool-call record to the toolcalls log and write the PS1 script to
|
||||
the session's scripts directory. Returns the path of the written script file.
|
||||
[C: tests/test_session_logger_optimization.py:test_log_tool_call_saves_in_session_scripts]
|
||||
"""
|
||||
global _seq
|
||||
@@ -194,8 +198,9 @@ def log_tool_call(script: str, result: str, script_path: Optional[str]) -> Optio
|
||||
def log_tool_output(content: str) -> Optional[str]:
|
||||
"""
|
||||
|
||||
Save tool output content to a unique file in the session's outputs directory.
|
||||
Returns the path of the written file.
|
||||
|
||||
Save tool output content to a unique file in the session's outputs directory.
|
||||
Returns the path of the written file.
|
||||
[C: tests/test_session_logger_optimization.py:test_log_tool_output_returns_none_if_no_session, tests/test_session_logger_optimization.py:test_log_tool_output_saves_in_session_outputs]
|
||||
"""
|
||||
global _output_seq
|
||||
@@ -232,4 +237,4 @@ def log_cli_call(command: str, stdin_content: Optional[str], stdout_content: Opt
|
||||
_cli_fh.write(json.dumps(log_data, ensure_ascii=False, default=str) + "\n")
|
||||
_cli_fh.flush()
|
||||
except Exception:
|
||||
pass
|
||||
pass
|
||||
+4
-3
@@ -3,8 +3,9 @@ from imgui_bundle import imgui
|
||||
def draw_soft_shadow(draw_list: imgui.ImDrawList, p_min: imgui.ImVec2, p_max: imgui.ImVec2, color: imgui.ImVec4, shadow_size: float = 10.0, rounding: float = 0.0) -> None:
|
||||
"""
|
||||
|
||||
Simulates a soft shadow effect by drawing multiple concentric rounded rectangles
|
||||
with decreasing alpha values. This is a faux-shader effect using primitive batching.
|
||||
|
||||
Simulates a soft shadow effect by drawing multiple concentric rounded rectangles
|
||||
with decreasing alpha values. This is a faux-shader effect using primitive batching.
|
||||
"""
|
||||
r, g, b, a = color.x, color.y, color.z, color.w
|
||||
steps = int(shadow_size)
|
||||
@@ -35,4 +36,4 @@ def draw_soft_shadow(draw_list: imgui.ImDrawList, p_min: imgui.ImVec2, p_max: im
|
||||
rounding + expand if rounding > 0 else 0.0,
|
||||
flags=imgui.ImDrawFlags_.round_corners_all if rounding > 0 else imgui.ImDrawFlags_.none,
|
||||
thickness=1.0
|
||||
)
|
||||
)
|
||||
+8
-7
@@ -54,12 +54,13 @@ def _build_subprocess_env() -> dict[str, str]:
|
||||
def run_powershell(script: str, base_dir: str, qa_callback: Optional[Callable[[str], str]] = None, patch_callback: Optional[Callable[[str, str], Optional[str]]] = None) -> str:
|
||||
"""
|
||||
|
||||
Run a PowerShell script with working directory set to base_dir.
|
||||
Returns a string combining stdout, stderr, and exit code.
|
||||
Environment is configured via mcp_env.toml (project root).
|
||||
If qa_callback is provided and the command fails or has stderr,
|
||||
the callback is called with the stderr content and its result is appended.
|
||||
If patch_callback is provided, it receives (error, file_context) and returns patch text.
|
||||
|
||||
Run a PowerShell script with working directory set to base_dir.
|
||||
Returns a string combining stdout, stderr, and exit code.
|
||||
Environment is configured via mcp_env.toml (project root).
|
||||
If qa_callback is provided and the command fails or has stderr,
|
||||
the callback is called with the stderr content and its result is appended.
|
||||
If patch_callback is provided, it receives (error, file_context) and returns patch text.
|
||||
[C: tests/test_tier4_interceptor.py:test_run_powershell_no_qa_callback_on_success, tests/test_tier4_interceptor.py:test_run_powershell_optional_qa_callback, tests/test_tier4_interceptor.py:test_run_powershell_qa_callback_on_failure, tests/test_tier4_interceptor.py:test_run_powershell_qa_callback_on_stderr_only]
|
||||
"""
|
||||
safe_dir: str = str(base_dir).replace("'", "''")
|
||||
@@ -98,4 +99,4 @@ def run_powershell(script: str, base_dir: str, qa_callback: Optional[Callable[[s
|
||||
except Exception as e:
|
||||
if 'process' in locals() and process:
|
||||
subprocess.run(["taskkill", "/F", "/T", "/PID", str(process.pid)], capture_output=True)
|
||||
return f"ERROR: {e}"
|
||||
return f"ERROR: {e}"
|
||||
+10
-7
@@ -154,8 +154,9 @@ _SUMMARISERS: dict[str, Callable[[Path, str], str]] = {
|
||||
def summarise_file(path: Path, content: str) -> str:
|
||||
"""
|
||||
|
||||
Return a compact markdown summary string for a single file.
|
||||
`content` is the already-read file text (or an error string).
|
||||
|
||||
Return a compact markdown summary string for a single file.
|
||||
`content` is the already-read file text (or an error string).
|
||||
[C: tests/test_subagent_summarization.py:test_summarise_file_integration]
|
||||
"""
|
||||
content_hash = get_file_hash(content)
|
||||
@@ -193,8 +194,9 @@ def summarise_file(path: Path, content: str) -> str:
|
||||
def summarise_items(file_items: list[dict[str, Any]]) -> list[dict[str, Any]]:
|
||||
"""
|
||||
|
||||
Given a list of file_item dicts (as returned by aggregate.build_file_items),
|
||||
return a parallel list of dicts with an added `summary` key.
|
||||
|
||||
Given a list of file_item dicts (as returned by aggregate.build_file_items),
|
||||
return a parallel list of dicts with an added `summary` key.
|
||||
"""
|
||||
result = []
|
||||
for item in file_items:
|
||||
@@ -212,8 +214,9 @@ def summarise_items(file_items: list[dict[str, Any]]) -> list[dict[str, Any]]:
|
||||
def build_summary_markdown(file_items: list[dict[str, Any]]) -> str:
|
||||
"""
|
||||
|
||||
Build a compact markdown string of file summaries, suitable for the
|
||||
initial <context> block instead of full file contents.
|
||||
|
||||
Build a compact markdown string of file summaries, suitable for the
|
||||
initial <context> block instead of full file contents.
|
||||
"""
|
||||
summarised = summarise_items(file_items)
|
||||
parts = []
|
||||
@@ -221,4 +224,4 @@ def build_summary_markdown(file_items: list[dict[str, Any]]) -> str:
|
||||
path = item.get("path") or item.get("entry", "unknown")
|
||||
summary = item.get("summary", "")
|
||||
parts.append(f"### `{path}`\n\n{summary}")
|
||||
return "\n\n---\n\n".join(parts)
|
||||
return "\n\n---\n\n".join(parts)
|
||||
+14
-8
@@ -5,7 +5,8 @@ from typing import Optional, Dict
|
||||
|
||||
def get_file_hash(content: str) -> str:
|
||||
"""
|
||||
Returns SHA256 hash of the content.
|
||||
|
||||
Returns SHA256 hash of the content.
|
||||
[C: tests/test_summary_cache.py:test_get_file_hash, tests/test_summary_cache.py:test_summary_cache]
|
||||
"""
|
||||
return hashlib.sha256(content.encode("utf-8")).hexdigest()
|
||||
@@ -13,8 +14,9 @@ def get_file_hash(content: str) -> str:
|
||||
class SummaryCache:
|
||||
"""
|
||||
|
||||
A hash-based cache for file summaries to avoid redundant processing.
|
||||
Invalidates when content hash changes.
|
||||
|
||||
A hash-based cache for file summaries to avoid redundant processing.
|
||||
Invalidates when content hash changes.
|
||||
"""
|
||||
def __init__(self, cache_file: Optional[str] = None, max_entries: int = 1000):
|
||||
if cache_file:
|
||||
@@ -28,8 +30,9 @@ class SummaryCache:
|
||||
|
||||
def load(self) -> None:
|
||||
"""
|
||||
Loads cache from disk.
|
||||
[C: src/tool_presets.py:ToolPresetManager._read_raw, src/workspace_manager.py:WorkspaceManager._load_file, tests/test_gui_phase3.py:test_create_track, tests/test_history_management.py:test_save_separation, tests/test_saved_presets_sim.py:test_preset_manager_modal, tests/test_session_logging.py:test_open_session_creates_subdir_and_registry, tests/test_visual_sim_gui_ux.py:test_gui_track_creation]
|
||||
|
||||
Loads cache from disk.
|
||||
[C: src/tool_presets.py:ToolPresetManager._read_raw, src/workspace_manager.py:WorkspaceManager._load_file, tests/test_gui_phase3.py:test_create_track, tests/test_history_management.py:test_save_separation, tests/test_session_logging.py:test_open_session_creates_subdir_and_registry]
|
||||
"""
|
||||
if self.cache_file.exists():
|
||||
try:
|
||||
@@ -49,7 +52,8 @@ class SummaryCache:
|
||||
|
||||
def get_summary(self, file_path: str, content_hash: str) -> Optional[str]:
|
||||
"""
|
||||
Returns cached summary if hash matches, otherwise None.
|
||||
|
||||
Returns cached summary if hash matches, otherwise None.
|
||||
[C: tests/test_summary_cache.py:test_summary_cache, tests/test_summary_cache.py:test_summary_cache_lru]
|
||||
"""
|
||||
entry = self.cache.get(file_path)
|
||||
@@ -62,7 +66,8 @@ class SummaryCache:
|
||||
|
||||
def set_summary(self, file_path: str, content_hash: str, summary: str) -> None:
|
||||
"""
|
||||
Stores summary in cache and saves to disk.
|
||||
|
||||
Stores summary in cache and saves to disk.
|
||||
[C: tests/test_summary_cache.py:test_summary_cache, tests/test_summary_cache.py:test_summary_cache_lru]
|
||||
"""
|
||||
if file_path in self.cache:
|
||||
@@ -80,7 +85,8 @@ class SummaryCache:
|
||||
|
||||
def clear(self) -> None:
|
||||
"""
|
||||
Clears the cache both in-memory and on disk.
|
||||
|
||||
Clears the cache both in-memory and on disk.
|
||||
[C: tests/conftest.py:reset_ai_client]
|
||||
"""
|
||||
self.cache.clear()
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
def format_takes_diff(takes: dict[str, list[dict]]) -> str:
|
||||
"""
|
||||
[C: tests/test_synthesis_formatter.py:test_format_takes_diff_common_prefix, tests/test_synthesis_formatter.py:test_format_takes_diff_empty, tests/test_synthesis_formatter.py:test_format_takes_diff_no_common_prefix, tests/test_synthesis_formatter.py:test_format_takes_diff_single_take]
|
||||
[C: tests/test_synthesis_formatter.py:test_format_takes_diff_common_prefix, tests/test_synthesis_formatter.py:test_format_takes_diff_empty, tests/test_synthesis_formatter.py:test_format_takes_diff_no_common_prefix, tests/test_synthesis_formatter.py:test_format_takes_diff_single_take]
|
||||
"""
|
||||
if not takes:
|
||||
return ""
|
||||
|
||||
+6
-3
@@ -23,7 +23,8 @@ from src.theme_nerv_fx import CRTFilter, AlertPulsing, StatusFlicker
|
||||
|
||||
def _c(r: int, g: int, b: int, a: int = 255) -> tuple[float, float, float, float]:
|
||||
"""
|
||||
Convert 0-255 RGBA to 0.0-1.0 floats.
|
||||
|
||||
Convert 0-255 RGBA to 0.0-1.0 floats.
|
||||
[C: src/theme_nerv.py:module]
|
||||
"""
|
||||
return (r / 255.0, g / 255.0, b / 255.0, a / 255.0)
|
||||
@@ -283,7 +284,9 @@ def set_child_transparency(val: float) -> None:
|
||||
def apply(palette_name: str) -> None:
|
||||
"""
|
||||
|
||||
Apply a named palette by setting all ImGui style colors and applying global professional styling.
|
||||
|
||||
Apply a named palette by setting all ImGui style colors and applying global professional styling.
|
||||
[C: tests/test_theme.py:test_theme_apply_sets_rounding_and_padding]
|
||||
"""
|
||||
global _current_palette
|
||||
_current_palette = palette_name
|
||||
@@ -420,4 +423,4 @@ def render_post_fx(width: float, height: float, ai_status: str, crt_enabled: boo
|
||||
_alert_pulsing.update(ai_status)
|
||||
_alert_pulsing.render(width, height)
|
||||
_crt_filter.enabled = crt_enabled
|
||||
_crt_filter.render(width, height)
|
||||
_crt_filter.render(width, height)
|
||||
+3
-2
@@ -63,7 +63,8 @@ NERV_PALETTE = {
|
||||
|
||||
def apply_nerv() -> None:
|
||||
"""
|
||||
Apply NERV theme with hard edges and specific palette.
|
||||
|
||||
Apply NERV theme with hard edges and specific palette.
|
||||
[C: tests/test_theme_nerv.py:test_apply_nerv_sets_rounding_and_colors]
|
||||
"""
|
||||
style = imgui.get_style()
|
||||
@@ -84,4 +85,4 @@ def apply_nerv() -> None:
|
||||
style.frame_border_size = 1.0
|
||||
style.popup_border_size = 1.0
|
||||
style.child_border_size = 1.0
|
||||
style.tab_border_size = 1.0
|
||||
style.tab_border_size = 1.0
|
||||
@@ -9,7 +9,7 @@ class CRTFilter:
|
||||
|
||||
def render(self, width: float, height: float):
|
||||
"""
|
||||
[C: tests/test_theme_nerv_alert.py:test_alert_pulsing_render_active, tests/test_theme_nerv_alert.py:test_alert_pulsing_render_inactive, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_alert_pulsing_render, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_disabled, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_render]
|
||||
[C: tests/test_theme_nerv_alert.py:test_alert_pulsing_render_active, tests/test_theme_nerv_alert.py:test_alert_pulsing_render_inactive, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_alert_pulsing_render, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_disabled, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_render]
|
||||
"""
|
||||
if not self.enabled:
|
||||
return
|
||||
@@ -68,7 +68,7 @@ class StatusFlicker:
|
||||
def get_alpha(self) -> float:
|
||||
# Modulate between 0.7 and 1.0 using sin wave
|
||||
"""
|
||||
[C: tests/test_theme_nerv_fx.py:TestThemeNervFx.test_status_flicker_get_alpha]
|
||||
[C: tests/test_theme_nerv_fx.py:TestThemeNervFx.test_status_flicker_get_alpha]
|
||||
"""
|
||||
return 0.85 + 0.15 * math.sin(time.time() * 20.0)
|
||||
|
||||
@@ -78,13 +78,13 @@ class AlertPulsing:
|
||||
|
||||
def update(self, status: str):
|
||||
"""
|
||||
[C: tests/test_spawn_interception_v2.py:MockDialog.wait, tests/test_theme_nerv_alert.py:test_alert_pulsing_update, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_alert_pulsing_update]
|
||||
[C: tests/test_spawn_interception_v2.py:MockDialog.wait, tests/test_theme_nerv_alert.py:test_alert_pulsing_update, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_alert_pulsing_update]
|
||||
"""
|
||||
self.active = status.lower().startswith("error")
|
||||
|
||||
def render(self, width: float, height: float):
|
||||
"""
|
||||
[C: tests/test_theme_nerv_alert.py:test_alert_pulsing_render_active, tests/test_theme_nerv_alert.py:test_alert_pulsing_render_inactive, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_alert_pulsing_render, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_disabled, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_render]
|
||||
[C: tests/test_theme_nerv_alert.py:test_alert_pulsing_render_active, tests/test_theme_nerv_alert.py:test_alert_pulsing_render_inactive, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_alert_pulsing_render, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_disabled, tests/test_theme_nerv_fx.py:TestThemeNervFx.test_crt_filter_render]
|
||||
"""
|
||||
if not self.active:
|
||||
return
|
||||
@@ -94,4 +94,4 @@ class AlertPulsing:
|
||||
# multiply by (0.2 - 0.05) = 0.15 and add 0.05
|
||||
alpha = 0.05 + 0.15 * ((math.sin(time.time() * 4.0) + 1.0) / 2.0)
|
||||
color = imgui.get_color_u32((1.0, 0.0, 0.0, alpha))
|
||||
draw_list.add_rect((0.0, 0.0), (width, height), color, 0.0, 0, 10.0)
|
||||
draw_list.add_rect((0.0, 0.0), (width, height), color, 0.0, 0, 10.0)
|
||||
@@ -5,9 +5,10 @@ from src.models import ThinkingSegment
|
||||
def parse_thinking_trace(text: str) -> Tuple[List[ThinkingSegment], str]:
|
||||
"""
|
||||
|
||||
Parses thinking segments from text and returns (segments, response_content).
|
||||
Support extraction of thinking traces from <thinking>...</thinking>, <thought>...</thought>,
|
||||
and blocks prefixed with Thinking:.
|
||||
|
||||
Parses thinking segments from text and returns (segments, response_content).
|
||||
Support extraction of thinking traces from <thinking>...</thinking>, <thought>...</thought>,
|
||||
and blocks prefixed with Thinking:.
|
||||
[C: tests/test_thinking_trace.py:test_parse_empty_response, tests/test_thinking_trace.py:test_parse_multiple_markers, tests/test_thinking_trace.py:test_parse_no_thinking, tests/test_thinking_trace.py:test_parse_text_thinking_prefix, tests/test_thinking_trace.py:test_parse_thinking_with_empty_response, tests/test_thinking_trace.py:test_parse_xml_thinking_tag, tests/test_thinking_trace.py:test_parse_xml_thought_tag]
|
||||
"""
|
||||
segments = []
|
||||
|
||||
+3
-3
@@ -4,7 +4,7 @@ from src.models import Tool, ToolPreset, BiasProfile
|
||||
class ToolBiasEngine:
|
||||
def apply_semantic_nudges(self, tool_definitions: List[Dict[str, Any]], preset: ToolPreset) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
[C: tests/test_tool_bias.py:test_apply_semantic_nudges, tests/test_tool_bias.py:test_parameter_bias_nudging]
|
||||
[C: tests/test_tool_bias.py:test_apply_semantic_nudges, tests/test_tool_bias.py:test_parameter_bias_nudging]
|
||||
"""
|
||||
weight_map = {
|
||||
5: "[HIGH PRIORITY] ",
|
||||
@@ -40,7 +40,7 @@ class ToolBiasEngine:
|
||||
|
||||
def generate_tooling_strategy(self, preset: ToolPreset, global_bias: BiasProfile) -> str:
|
||||
"""
|
||||
[C: tests/test_tool_bias.py:test_generate_tooling_strategy]
|
||||
[C: tests/test_tool_bias.py:test_generate_tooling_strategy]
|
||||
"""
|
||||
lines = ["### Tooling Strategy"]
|
||||
|
||||
@@ -68,4 +68,4 @@ class ToolBiasEngine:
|
||||
for cat, mult in global_bias.category_multipliers.items():
|
||||
lines.append(f"- {cat}: {mult}x")
|
||||
|
||||
return "\n\n".join(lines)
|
||||
return "\n\n".join(lines)
|
||||
+10
-9
@@ -11,7 +11,7 @@ class ToolPresetManager:
|
||||
|
||||
def _get_path(self, scope: str) -> Path:
|
||||
"""
|
||||
[C: src/workspace_manager.py:WorkspaceManager.delete_profile, src/workspace_manager.py:WorkspaceManager.save_profile]
|
||||
[C: src/workspace_manager.py:WorkspaceManager.delete_profile, src/workspace_manager.py:WorkspaceManager.save_profile]
|
||||
"""
|
||||
if scope == "global":
|
||||
return paths.get_global_tool_presets_path()
|
||||
@@ -38,7 +38,7 @@ class ToolPresetManager:
|
||||
|
||||
def load_all_presets(self) -> Dict[str, ToolPreset]:
|
||||
"""
|
||||
[C: tests/test_tool_preset_manager.py:test_load_all_presets_merged]
|
||||
[C: tests/test_tool_preset_manager.py:test_load_all_presets_merged]
|
||||
"""
|
||||
global_path = paths.get_global_tool_presets_path()
|
||||
global_data = self._read_raw(global_path).get("presets", {})
|
||||
@@ -59,14 +59,15 @@ class ToolPresetManager:
|
||||
|
||||
def load_all(self) -> Dict[str, ToolPreset]:
|
||||
"""
|
||||
Backward compatibility for load_all().
|
||||
|
||||
Backward compatibility for load_all().
|
||||
[C: tests/test_persona_manager.py:test_delete_persona, tests/test_persona_manager.py:test_load_all_merged, tests/test_persona_manager.py:test_save_persona, tests/test_preset_manager.py:test_delete_preset, tests/test_preset_manager.py:test_load_all_merged, tests/test_preset_manager.py:test_save_preset_global, tests/test_preset_manager.py:test_save_preset_project, tests/test_presets.py:TestPresetManager.test_delete_preset, tests/test_presets.py:TestPresetManager.test_project_overwrites_global, tests/test_presets.py:TestPresetManager.test_save_and_load_global, tests/test_presets.py:TestPresetManager.test_save_and_load_project]
|
||||
"""
|
||||
return self.load_all_presets()
|
||||
|
||||
def save_preset(self, preset: ToolPreset, scope: str = "project") -> None:
|
||||
"""
|
||||
[C: tests/test_preset_manager.py:test_save_preset_global, tests/test_preset_manager.py:test_save_preset_project, tests/test_preset_manager.py:test_save_preset_project_no_root, tests/test_presets.py:TestPresetManager.test_delete_preset, tests/test_presets.py:TestPresetManager.test_project_overwrites_global, tests/test_presets.py:TestPresetManager.test_save_and_load_global, tests/test_presets.py:TestPresetManager.test_save_and_load_project]
|
||||
[C: tests/test_preset_manager.py:test_save_preset_global, tests/test_preset_manager.py:test_save_preset_project, tests/test_preset_manager.py:test_save_preset_project_no_root, tests/test_presets.py:TestPresetManager.test_delete_preset, tests/test_presets.py:TestPresetManager.test_project_overwrites_global, tests/test_presets.py:TestPresetManager.test_save_and_load_global, tests/test_presets.py:TestPresetManager.test_save_and_load_project]
|
||||
"""
|
||||
path = self._get_path(scope)
|
||||
data = self._read_raw(path)
|
||||
@@ -77,7 +78,7 @@ class ToolPresetManager:
|
||||
|
||||
def delete_preset(self, name: str, scope: str = "project") -> None:
|
||||
"""
|
||||
[C: tests/test_preset_manager.py:test_delete_preset, tests/test_presets.py:TestPresetManager.test_delete_preset]
|
||||
[C: tests/test_preset_manager.py:test_delete_preset, tests/test_presets.py:TestPresetManager.test_delete_preset]
|
||||
"""
|
||||
path = self._get_path(scope)
|
||||
data = self._read_raw(path)
|
||||
@@ -87,7 +88,7 @@ class ToolPresetManager:
|
||||
|
||||
def load_all_bias_profiles(self) -> Dict[str, BiasProfile]:
|
||||
"""
|
||||
[C: tests/test_tool_preset_manager.py:test_bias_profiles_merged, tests/test_tool_preset_manager.py:test_delete_bias_profile, tests/test_tool_preset_manager.py:test_save_bias_profile]
|
||||
[C: tests/test_tool_preset_manager.py:test_bias_profiles_merged, tests/test_tool_preset_manager.py:test_delete_bias_profile, tests/test_tool_preset_manager.py:test_save_bias_profile]
|
||||
"""
|
||||
global_path = paths.get_global_tool_presets_path()
|
||||
global_data = self._read_raw(global_path).get("bias_profiles", {})
|
||||
@@ -114,7 +115,7 @@ class ToolPresetManager:
|
||||
|
||||
def save_bias_profile(self, profile: BiasProfile, scope: str = "project") -> None:
|
||||
"""
|
||||
[C: tests/test_tool_preset_manager.py:test_save_bias_profile]
|
||||
[C: tests/test_tool_preset_manager.py:test_save_bias_profile]
|
||||
"""
|
||||
path = self._get_path(scope)
|
||||
data = self._read_raw(path)
|
||||
@@ -125,10 +126,10 @@ class ToolPresetManager:
|
||||
|
||||
def delete_bias_profile(self, name: str, scope: str = "project") -> None:
|
||||
"""
|
||||
[C: tests/test_tool_preset_manager.py:test_delete_bias_profile]
|
||||
[C: tests/test_tool_preset_manager.py:test_delete_bias_profile]
|
||||
"""
|
||||
path = self._get_path(scope)
|
||||
data = self._read_raw(path)
|
||||
if "bias_profiles" in data and name in data["bias_profiles"]:
|
||||
del data["bias_profiles"][name]
|
||||
self._write_raw(path, data)
|
||||
self._write_raw(path, data)
|
||||
@@ -26,7 +26,8 @@ class WorkspaceManager:
|
||||
|
||||
def load_all_profiles(self) -> Dict[str, WorkspaceProfile]:
|
||||
"""
|
||||
Merges global and project profiles into a single dictionary.
|
||||
|
||||
Merges global and project profiles into a single dictionary.
|
||||
[C: tests/test_workspace_manager.py:test_delete_profile, tests/test_workspace_manager.py:test_load_all_profiles_merged, tests/test_workspace_manager.py:test_save_profile_global_and_project]
|
||||
"""
|
||||
profiles = {}
|
||||
@@ -46,7 +47,7 @@ class WorkspaceManager:
|
||||
|
||||
def save_profile(self, profile: WorkspaceProfile, scope: str = "project") -> None:
|
||||
"""
|
||||
[C: tests/test_workspace_manager.py:test_delete_profile, tests/test_workspace_manager.py:test_save_profile_global_and_project]
|
||||
[C: tests/test_workspace_manager.py:test_delete_profile, tests/test_workspace_manager.py:test_save_profile_global_and_project]
|
||||
"""
|
||||
path = self._get_path(scope)
|
||||
data = self._load_file(path)
|
||||
@@ -58,7 +59,7 @@ class WorkspaceManager:
|
||||
|
||||
def delete_profile(self, name: str, scope: str = "project") -> None:
|
||||
"""
|
||||
[C: tests/test_workspace_manager.py:test_delete_profile]
|
||||
[C: tests/test_workspace_manager.py:test_delete_profile]
|
||||
"""
|
||||
path = self._get_path(scope)
|
||||
data = self._load_file(path)
|
||||
|
||||
+15
-10
@@ -36,7 +36,7 @@ class VerificationLogger:
|
||||
|
||||
def log_state(self, field: str, before: Any, after: Any) -> None:
|
||||
"""
|
||||
[C: tests/test_ai_style_formatter.py:test_multiple_top_level_definitions, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_conductor_tech_lead.py:test_topological_sort_vlog, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_tier4_interceptor.py:test_run_powershell_qa_callback_on_failure, tests/test_vlogger_availability.py:test_vlogger_available]
|
||||
[C: tests/test_ai_style_formatter.py:test_multiple_top_level_definitions, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_conductor_tech_lead.py:test_topological_sort_vlog, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_tier4_interceptor.py:test_run_powershell_qa_callback_on_failure, tests/test_vlogger_availability.py:test_vlogger_available]
|
||||
"""
|
||||
delta = ""
|
||||
if isinstance(before, (int, float)) and isinstance(after, (int, float)):
|
||||
@@ -51,7 +51,7 @@ class VerificationLogger:
|
||||
|
||||
def finalize(self, title: str, status: str, result_msg: str) -> None:
|
||||
"""
|
||||
[C: tests/test_ai_style_formatter.py:test_multiple_top_level_definitions, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_conductor_tech_lead.py:test_topological_sort_vlog, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_tier4_interceptor.py:test_end_to_end_tier4_integration, tests/test_tier4_interceptor.py:test_run_powershell_qa_callback_on_failure, tests/test_tier4_interceptor.py:test_run_powershell_qa_callback_on_stderr_only, tests/test_vlogger_availability.py:test_vlogger_available]
|
||||
[C: tests/test_ai_style_formatter.py:test_multiple_top_level_definitions, tests/test_conductor_engine_v2.py:test_conductor_engine_dynamic_parsing_and_execution, tests/test_conductor_engine_v2.py:test_conductor_engine_run_executes_tickets_in_order, tests/test_conductor_tech_lead.py:test_topological_sort_vlog, tests/test_headless_verification.py:test_headless_verification_error_and_qa_interceptor, tests/test_headless_verification.py:test_headless_verification_full_run, tests/test_tier4_interceptor.py:test_end_to_end_tier4_integration, tests/test_tier4_interceptor.py:test_run_powershell_qa_callback_on_failure, tests/test_tier4_interceptor.py:test_run_powershell_qa_callback_on_stderr_only, tests/test_vlogger_availability.py:test_vlogger_available]
|
||||
"""
|
||||
round(time.time() - self.start_time, 2)
|
||||
log_file = self.logs_dir / f"{self.script_name}.txt"
|
||||
@@ -71,7 +71,8 @@ class VerificationLogger:
|
||||
def reset_paths() -> Generator[None, None, None]:
|
||||
"""
|
||||
|
||||
Autouse fixture that resets the paths global state before each test.
|
||||
|
||||
Autouse fixture that resets the paths global state before each test.
|
||||
"""
|
||||
from src import paths
|
||||
paths.reset_resolved()
|
||||
@@ -82,8 +83,9 @@ def reset_paths() -> Generator[None, None, None]:
|
||||
def reset_ai_client() -> Generator[None, None, None]:
|
||||
"""
|
||||
|
||||
Autouse fixture that resets the ai_client global state before each test.
|
||||
This is critical for preventing state pollution between tests.
|
||||
|
||||
Autouse fixture that resets the ai_client global state before each test.
|
||||
This is critical for preventing state pollution between tests.
|
||||
"""
|
||||
from src import ai_client
|
||||
from src import mcp_client
|
||||
@@ -131,7 +133,8 @@ def kill_process_tree(pid: int | None) -> None:
|
||||
def mock_app() -> Generator[App, None, None]:
|
||||
"""
|
||||
|
||||
Mock version of the App for simple unit tests that don't need a loop.
|
||||
|
||||
Mock version of the App for simple unit tests that don't need a loop.
|
||||
"""
|
||||
with (
|
||||
patch('src.models.load_config', return_value={
|
||||
@@ -163,8 +166,9 @@ def mock_app() -> Generator[App, None, None]:
|
||||
def app_instance() -> Generator[App, None, None]:
|
||||
"""
|
||||
|
||||
Centralized App instance with all external side effects mocked.
|
||||
Matches the pattern used in test_token_viz.py and test_gui_phase4.py.
|
||||
|
||||
Centralized App instance with all external side effects mocked.
|
||||
Matches the pattern used in test_token_viz.py and test_gui_phase4.py.
|
||||
[C: tests/test_gui2_events.py:test_app_subscribes_to_events]
|
||||
"""
|
||||
with (
|
||||
@@ -199,8 +203,9 @@ def app_instance() -> Generator[App, None, None]:
|
||||
def live_gui() -> Generator[tuple[subprocess.Popen, str], None, None]:
|
||||
"""
|
||||
|
||||
Session-scoped fixture that starts sloppy.py with --enable-test-hooks.
|
||||
Includes high-signal environment telemetry and workspace isolation.
|
||||
|
||||
Session-scoped fixture that starts sloppy.py with --enable-test-hooks.
|
||||
Includes high-signal environment telemetry and workspace isolation.
|
||||
"""
|
||||
gui_script = os.path.abspath("sloppy.py")
|
||||
diag = VerificationLogger("live_gui_startup", "live_gui_diag")
|
||||
|
||||
@@ -10,7 +10,8 @@ from src.api_hook_client import ApiHookClient
|
||||
|
||||
def wait_for_value(client, field, expected, timeout=5):
|
||||
"""
|
||||
Polls the GUI state until a field matches the expected value.
|
||||
|
||||
Polls the GUI state until a field matches the expected value.
|
||||
[C: tests/test_live_workflow.py:test_full_live_workflow]
|
||||
"""
|
||||
start = time.time()
|
||||
|
||||
@@ -3,8 +3,9 @@ from src import ai_client
|
||||
def test_list_models_gemini_cli() -> None:
|
||||
"""
|
||||
|
||||
Verifies that 'ai_client.list_models' correctly returns a list of models
|
||||
for the 'gemini_cli' provider.
|
||||
|
||||
Verifies that 'ai_client.list_models' correctly returns a list of models
|
||||
for the 'gemini_cli' provider.
|
||||
"""
|
||||
models = ai_client.list_models("gemini_cli")
|
||||
assert "gemini-3.1-pro-preview" in models
|
||||
|
||||
@@ -37,7 +37,8 @@ def app_controller(tmp_session_dir):
|
||||
def test_on_comms_entry_tool_result_offloading(app_controller, tmp_session_dir):
|
||||
"""
|
||||
|
||||
Test that _on_comms_entry offloads tool_result output to a separate file.
|
||||
|
||||
Test that _on_comms_entry offloads tool_result output to a separate file.
|
||||
"""
|
||||
output_content = "This is a large tool output that should be offloaded."
|
||||
entry = {
|
||||
@@ -83,7 +84,8 @@ def test_on_comms_entry_tool_result_offloading(app_controller, tmp_session_dir):
|
||||
def test_on_tool_log_offloading(app_controller, tmp_session_dir):
|
||||
"""
|
||||
|
||||
Test that _on_tool_log calls session_logger.log_tool_call and log_tool_output.
|
||||
|
||||
Test that _on_tool_log calls session_logger.log_tool_call and log_tool_output.
|
||||
"""
|
||||
script = "Get-Process"
|
||||
result = "Process list..."
|
||||
|
||||
@@ -323,4 +323,4 @@ public:
|
||||
assert 'void myMethod() {' in updated
|
||||
assert 'int y = 2;' in updated
|
||||
assert 'int x = 1;' not in updated
|
||||
assert 'class MyClass {' in updated
|
||||
assert 'class MyClass {' in updated
|
||||
@@ -9,8 +9,9 @@ from src import mcp_client
|
||||
async def test_execute_tool_calls_concurrently_timing():
|
||||
"""
|
||||
|
||||
Verifies that _execute_tool_calls_concurrently runs tools in parallel.
|
||||
Total time should be approx 0.5s for 3 tools each taking 0.5s.
|
||||
|
||||
Verifies that _execute_tool_calls_concurrently runs tools in parallel.
|
||||
Total time should be approx 0.5s for 3 tools each taking 0.5s.
|
||||
"""
|
||||
# 1. Setup mock tool calls (Gemini style)
|
||||
class MockGeminiCall:
|
||||
@@ -67,8 +68,9 @@ async def test_execute_tool_calls_concurrently_timing():
|
||||
async def test_execute_tool_calls_concurrently_exception_handling():
|
||||
"""
|
||||
|
||||
Verifies that if one tool call fails, it doesn't crash the whole group if caught,
|
||||
but currently gather is used WITHOUT return_exceptions=True, so it should re-raise.
|
||||
|
||||
Verifies that if one tool call fails, it doesn't crash the whole group if caught,
|
||||
but currently gather is used WITHOUT return_exceptions=True, so it should re-raise.
|
||||
"""
|
||||
class MockGeminiCall:
|
||||
def __init__(self, name, args):
|
||||
|
||||
@@ -22,8 +22,9 @@ class TestCliToolBridgeMapping(unittest.TestCase):
|
||||
def test_mapping_from_api_format(self, mock_request: MagicMock, mock_stdout: MagicMock, mock_stdin: MagicMock) -> None:
|
||||
"""
|
||||
|
||||
Verify that bridge correctly maps 'id', 'name', 'input' (Gemini API format)
|
||||
into tool_name and tool_input for the hook client.
|
||||
|
||||
Verify that bridge correctly maps 'id', 'name', 'input' (Gemini API format)
|
||||
into tool_name and tool_input for the hook client.
|
||||
"""
|
||||
api_tool_call = {
|
||||
'id': 'call123',
|
||||
|
||||
@@ -7,7 +7,8 @@ import threading
|
||||
def test_conductor_abort_event_populated():
|
||||
"""
|
||||
|
||||
Test that ConductorEngine populates _abort_events when spawning a worker.
|
||||
|
||||
Test that ConductorEngine populates _abort_events when spawning a worker.
|
||||
"""
|
||||
# 1. Mock WorkerPool.spawn to return a mock thread
|
||||
with patch('src.multi_agent_conductor.WorkerPool.spawn') as mock_spawn:
|
||||
|
||||
@@ -4,7 +4,8 @@ from src.api_hook_client import ApiHookClient
|
||||
def simulate_conductor_phase_completion(client: ApiHookClient, track_id: str, phase_name: str) -> bool:
|
||||
"""
|
||||
|
||||
Simulates the Conductor agent's logic for phase completion using ApiHookClient.
|
||||
|
||||
Simulates the Conductor agent's logic for phase completion using ApiHookClient.
|
||||
"""
|
||||
try:
|
||||
# 1. Poll for state
|
||||
@@ -24,8 +25,9 @@ def simulate_conductor_phase_completion(client: ApiHookClient, track_id: str, ph
|
||||
|
||||
def test_conductor_integrates_api_hook_client_for_verification(live_gui) -> None:
|
||||
"""
|
||||
Verify that Conductor's simulated phase completion logic properly integrates
|
||||
with the ApiHookClient and the live Hook Server.
|
||||
|
||||
Verify that Conductor's simulated phase completion logic properly integrates
|
||||
with the ApiHookClient and the live Hook Server.
|
||||
"""
|
||||
client = ApiHookClient()
|
||||
assert client.wait_for_server(timeout=10)
|
||||
|
||||
@@ -8,7 +8,8 @@ from src.models import Track
|
||||
def test_conductor_engine_initializes_empty_worker_and_abort_dicts() -> None:
|
||||
"""
|
||||
|
||||
Test that ConductorEngine correctly initializes _active_workers and _abort_events as empty dictionaries.
|
||||
|
||||
Test that ConductorEngine correctly initializes _active_workers and _abort_events as empty dictionaries.
|
||||
"""
|
||||
# Mock the track object
|
||||
mock_track = MagicMock(spec=Track)
|
||||
@@ -24,8 +25,9 @@ def test_conductor_engine_initializes_empty_worker_and_abort_dicts() -> None:
|
||||
def test_kill_worker_sets_abort_and_joins_thread() -> None:
|
||||
"""
|
||||
|
||||
Test kill_worker: mock a running thread in _active_workers, call kill_worker,
|
||||
assert abort_event is set and thread is joined.
|
||||
|
||||
Test kill_worker: mock a running thread in _active_workers, call kill_worker,
|
||||
assert abort_event is set and thread is joined.
|
||||
"""
|
||||
mock_track = MagicMock(spec=Track)
|
||||
mock_track.tickets = []
|
||||
@@ -38,7 +40,7 @@ def test_kill_worker_sets_abort_and_joins_thread() -> None:
|
||||
# Create a thread that waits for the abort event
|
||||
def worker():
|
||||
"""
|
||||
[C: tests/test_symbol_parsing.py:test_handle_generate_send_appends_definitions, tests/test_symbol_parsing.py:test_handle_generate_send_no_symbols]
|
||||
[C: tests/test_symbol_parsing.py:test_handle_generate_send_appends_definitions, tests/test_symbol_parsing.py:test_handle_generate_send_no_symbols]
|
||||
"""
|
||||
abort_event.wait(timeout=2.0)
|
||||
|
||||
|
||||
@@ -10,7 +10,8 @@ from src import ai_client
|
||||
def test_conductor_engine_initialization() -> None:
|
||||
"""
|
||||
|
||||
Test that ConductorEngine can be initialized with a Track.
|
||||
|
||||
Test that ConductorEngine can be initialized with a Track.
|
||||
"""
|
||||
track = Track(id="test_track", description="Test Track")
|
||||
from src.multi_agent_conductor import ConductorEngine
|
||||
@@ -20,7 +21,8 @@ def test_conductor_engine_initialization() -> None:
|
||||
def test_conductor_engine_run_executes_tickets_in_order(monkeypatch: pytest.MonkeyPatch, vlogger) -> None:
|
||||
"""
|
||||
|
||||
Test that run iterates through executable tickets and calls the worker lifecycle.
|
||||
|
||||
Test that run iterates through executable tickets and calls the worker lifecycle.
|
||||
"""
|
||||
ticket1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1")
|
||||
ticket2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker2", depends_on=["T1"])
|
||||
@@ -67,7 +69,8 @@ def test_conductor_engine_run_executes_tickets_in_order(monkeypatch: pytest.Monk
|
||||
def test_run_worker_lifecycle_calls_ai_client_send(monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
"""
|
||||
|
||||
Test that run_worker_lifecycle triggers the AI client and updates ticket status on success.
|
||||
|
||||
Test that run_worker_lifecycle triggers the AI client and updates ticket status on success.
|
||||
"""
|
||||
ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1")
|
||||
context = WorkerContext(ticket_id="T1", model_name="test-model", messages=[])
|
||||
@@ -88,7 +91,8 @@ def test_run_worker_lifecycle_calls_ai_client_send(monkeypatch: pytest.MonkeyPat
|
||||
def test_run_worker_lifecycle_context_injection(monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
"""
|
||||
|
||||
Test that run_worker_lifecycle can take a context_files list and injects AST views into the prompt.
|
||||
|
||||
Test that run_worker_lifecycle can take a context_files list and injects AST views into the prompt.
|
||||
"""
|
||||
ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1")
|
||||
context = WorkerContext(ticket_id="T1", model_name="test-model", messages=[])
|
||||
@@ -134,7 +138,8 @@ def test_run_worker_lifecycle_context_injection(monkeypatch: pytest.MonkeyPatch)
|
||||
def test_run_worker_lifecycle_handles_blocked_response(monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
"""
|
||||
|
||||
Test that run_worker_lifecycle marks the ticket as blocked if the AI indicates it cannot proceed.
|
||||
|
||||
Test that run_worker_lifecycle marks the ticket as blocked if the AI indicates it cannot proceed.
|
||||
"""
|
||||
ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1")
|
||||
context = WorkerContext(ticket_id="T1", model_name="test-model", messages=[])
|
||||
@@ -151,9 +156,10 @@ def test_run_worker_lifecycle_handles_blocked_response(monkeypatch: pytest.Monke
|
||||
def test_run_worker_lifecycle_step_mode_confirmation(monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
"""
|
||||
|
||||
Test that run_worker_lifecycle passes confirm_execution to ai_client.send when step_mode is True.
|
||||
Verify that if confirm_execution is called (simulated by mocking ai_client.send to call its callback),
|
||||
the flow works as expected.
|
||||
|
||||
Test that run_worker_lifecycle passes confirm_execution to ai_client.send when step_mode is True.
|
||||
Verify that if confirm_execution is called (simulated by mocking ai_client.send to call its callback),
|
||||
the flow works as expected.
|
||||
"""
|
||||
ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1", step_mode=True)
|
||||
context = WorkerContext(ticket_id="T1", model_name="test-model", messages=[])
|
||||
@@ -188,8 +194,9 @@ def test_run_worker_lifecycle_step_mode_confirmation(monkeypatch: pytest.MonkeyP
|
||||
def test_run_worker_lifecycle_step_mode_rejection(monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
"""
|
||||
|
||||
Verify that if confirm_execution returns False, the logic (in ai_client, which we simulate here)
|
||||
would prevent execution. In run_worker_lifecycle, we just check if it's passed.
|
||||
|
||||
Verify that if confirm_execution returns False, the logic (in ai_client, which we simulate here)
|
||||
would prevent execution. In run_worker_lifecycle, we just check if it's passed.
|
||||
"""
|
||||
ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1", step_mode=True)
|
||||
context = WorkerContext(ticket_id="T1", model_name="test-model", messages=[])
|
||||
@@ -213,7 +220,8 @@ def test_run_worker_lifecycle_step_mode_rejection(monkeypatch: pytest.MonkeyPatc
|
||||
def test_conductor_engine_dynamic_parsing_and_execution(monkeypatch: pytest.MonkeyPatch, vlogger) -> None:
|
||||
"""
|
||||
|
||||
Test that parse_json_tickets correctly populates the track and run executes them in dependency order.
|
||||
|
||||
Test that parse_json_tickets correctly populates the track and run executes them in dependency order.
|
||||
"""
|
||||
import json
|
||||
from src.multi_agent_conductor import ConductorEngine
|
||||
@@ -281,8 +289,9 @@ def test_conductor_engine_dynamic_parsing_and_execution(monkeypatch: pytest.Monk
|
||||
def test_run_worker_lifecycle_pushes_response_via_queue(monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
"""
|
||||
|
||||
Test that run_worker_lifecycle pushes a 'response' event with the correct stream_id
|
||||
via _queue_put when event_queue is provided.
|
||||
|
||||
Test that run_worker_lifecycle pushes a 'response' event with the correct stream_id
|
||||
via _queue_put when event_queue is provided.
|
||||
"""
|
||||
ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1")
|
||||
context = WorkerContext(ticket_id="T1", model_name="test-model", messages=[])
|
||||
@@ -307,8 +316,9 @@ def test_run_worker_lifecycle_pushes_response_via_queue(monkeypatch: pytest.Monk
|
||||
def test_run_worker_lifecycle_token_usage_from_comms_log(monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
"""
|
||||
|
||||
Test that run_worker_lifecycle reads token usage from the comms log and
|
||||
updates engine.tier_usage['Tier 3'] with real input/output token counts.
|
||||
|
||||
Test that run_worker_lifecycle reads token usage from the comms log and
|
||||
updates engine.tier_usage['Tier 3'] with real input/output token counts.
|
||||
"""
|
||||
ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1")
|
||||
context = WorkerContext(ticket_id="T1", model_name="test-model", messages=[])
|
||||
|
||||
+19
-10
@@ -10,7 +10,8 @@ from src.dag_engine import TrackDAG
|
||||
def test_get_ready_tasks_linear():
|
||||
"""
|
||||
|
||||
Verifies ready tasks detection in a simple linear dependency chain.
|
||||
|
||||
Verifies ready tasks detection in a simple linear dependency chain.
|
||||
"""
|
||||
t1 = Ticket(id="T1", description="desc", status="todo", assigned_to="worker1")
|
||||
t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="worker1", depends_on=["T1"])
|
||||
@@ -22,8 +23,9 @@ def test_get_ready_tasks_linear():
|
||||
def test_get_ready_tasks_branching():
|
||||
"""
|
||||
|
||||
Verifies ready tasks detection in a branching dependency graph where multiple tasks
|
||||
are unlocked simultaneously after a prerequisite is met.
|
||||
|
||||
Verifies ready tasks detection in a branching dependency graph where multiple tasks
|
||||
are unlocked simultaneously after a prerequisite is met.
|
||||
"""
|
||||
t1 = Ticket(id="T1", description="desc", status="completed", assigned_to="worker1")
|
||||
t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="worker1", depends_on=["T1"])
|
||||
@@ -38,7 +40,8 @@ def test_get_ready_tasks_branching():
|
||||
def test_has_cycle_no_cycle():
|
||||
"""
|
||||
|
||||
Validates that an acyclic graph is correctly identified as not having cycles.
|
||||
|
||||
Validates that an acyclic graph is correctly identified as not having cycles.
|
||||
"""
|
||||
t1 = Ticket(id="T1", description="desc", status="todo", assigned_to="worker1")
|
||||
t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="worker1", depends_on=["T1"])
|
||||
@@ -48,7 +51,8 @@ def test_has_cycle_no_cycle():
|
||||
def test_has_cycle_direct_cycle():
|
||||
"""
|
||||
|
||||
Validates that a direct cycle (A depends on B, B depends on A) is correctly detected.
|
||||
|
||||
Validates that a direct cycle (A depends on B, B depends on A) is correctly detected.
|
||||
"""
|
||||
t1 = Ticket(id="T1", description="desc", status="todo", assigned_to="worker1", depends_on=["T2"])
|
||||
t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="worker1", depends_on=["T1"])
|
||||
@@ -58,7 +62,8 @@ def test_has_cycle_direct_cycle():
|
||||
def test_has_cycle_indirect_cycle():
|
||||
"""
|
||||
|
||||
Validates that an indirect cycle (A->B->C->A) is correctly detected.
|
||||
|
||||
Validates that an indirect cycle (A->B->C->A) is correctly detected.
|
||||
"""
|
||||
t1 = Ticket(id="T1", description="desc", status="todo", assigned_to="worker1", depends_on=["T3"])
|
||||
t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="worker1", depends_on=["T1"])
|
||||
@@ -69,7 +74,8 @@ def test_has_cycle_indirect_cycle():
|
||||
def test_has_cycle_complex_no_cycle():
|
||||
"""
|
||||
|
||||
Validates cycle detection in a complex graph that merges branches but remains acyclic.
|
||||
|
||||
Validates cycle detection in a complex graph that merges branches but remains acyclic.
|
||||
"""
|
||||
t1 = Ticket(id="T1", description="desc", status="todo", assigned_to="worker1")
|
||||
t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="worker1", depends_on=["T1"])
|
||||
@@ -81,7 +87,8 @@ def test_has_cycle_complex_no_cycle():
|
||||
def test_get_ready_tasks_multiple_deps():
|
||||
"""
|
||||
|
||||
Validates that a task is not marked ready until ALL of its dependencies are completed.
|
||||
|
||||
Validates that a task is not marked ready until ALL of its dependencies are completed.
|
||||
"""
|
||||
t1 = Ticket(id="T1", description="desc", status="completed", assigned_to="worker1")
|
||||
t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="worker1")
|
||||
@@ -95,7 +102,8 @@ def test_get_ready_tasks_multiple_deps():
|
||||
def test_topological_sort():
|
||||
"""
|
||||
|
||||
Verifies that tasks are correctly ordered by dependencies regardless of input order.
|
||||
|
||||
Verifies that tasks are correctly ordered by dependencies regardless of input order.
|
||||
"""
|
||||
t1 = Ticket(id="T1", description="desc", status="todo", assigned_to="worker1")
|
||||
t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="worker1", depends_on=["T1"])
|
||||
@@ -107,7 +115,8 @@ def test_topological_sort():
|
||||
def test_topological_sort_cycle():
|
||||
"""
|
||||
|
||||
Verifies that topological sorting safely aborts and raises ValueError when a cycle is present.
|
||||
|
||||
Verifies that topological sorting safely aborts and raises ValueError when a cycle is present.
|
||||
"""
|
||||
t1 = Ticket(id="T1", description="desc", status="todo", assigned_to="worker1", depends_on=["T2"])
|
||||
t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="worker1", depends_on=["T1"])
|
||||
|
||||
@@ -13,8 +13,9 @@ from src import project_manager
|
||||
def test_credentials_error_mentions_deepseek(monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
"""
|
||||
|
||||
Verify that the error message shown when credentials.toml is missing
|
||||
includes deepseek instructions.
|
||||
|
||||
Verify that the error message shown when credentials.toml is missing
|
||||
includes deepseek instructions.
|
||||
"""
|
||||
# Monkeypatch SLOP_CREDENTIALS to a non-existent file
|
||||
monkeypatch.setenv("SLOP_CREDENTIALS", "non_existent_credentials_file.toml")
|
||||
@@ -27,8 +28,9 @@ def test_credentials_error_mentions_deepseek(monkeypatch: pytest.MonkeyPatch) ->
|
||||
def test_default_project_includes_reasoning_role() -> None:
|
||||
"""
|
||||
|
||||
Verify that 'Reasoning' is included in the default discussion roles
|
||||
to support DeepSeek-R1 reasoning traces.
|
||||
|
||||
Verify that 'Reasoning' is included in the default discussion roles
|
||||
to support DeepSeek-R1 reasoning traces.
|
||||
"""
|
||||
proj = project_manager.default_project("test")
|
||||
roles = proj["discussion"]["roles"]
|
||||
@@ -37,7 +39,8 @@ def test_default_project_includes_reasoning_role() -> None:
|
||||
def test_gui_providers_list() -> None:
|
||||
"""
|
||||
|
||||
Check if 'deepseek' is in the GUI's provider list.
|
||||
|
||||
Check if 'deepseek' is in the GUI's provider list.
|
||||
"""
|
||||
from src.models import PROVIDERS
|
||||
assert "deepseek" in PROVIDERS
|
||||
@@ -45,7 +48,8 @@ def test_gui_providers_list() -> None:
|
||||
def test_deepseek_model_listing() -> None:
|
||||
"""
|
||||
|
||||
Verify that list_models for deepseek returns expected models.
|
||||
|
||||
Verify that list_models for deepseek returns expected models.
|
||||
"""
|
||||
models = ai_client.list_models("deepseek")
|
||||
assert "deepseek-chat" in models
|
||||
@@ -54,7 +58,8 @@ def test_deepseek_model_listing() -> None:
|
||||
def test_gui_provider_list_via_hooks(live_gui: Any) -> None:
|
||||
"""
|
||||
|
||||
Verify 'deepseek' is present in the GUI provider list using API hooks.
|
||||
|
||||
Verify 'deepseek' is present in the GUI provider list using API hooks.
|
||||
"""
|
||||
from api_hook_client import ApiHookClient
|
||||
import time
|
||||
|
||||
@@ -5,7 +5,8 @@ from src import ai_client
|
||||
def test_deepseek_model_selection() -> None:
|
||||
"""
|
||||
|
||||
Verifies that ai_client.set_provider('deepseek', 'deepseek-chat') correctly updates the internal state.
|
||||
|
||||
Verifies that ai_client.set_provider('deepseek', 'deepseek-chat') correctly updates the internal state.
|
||||
"""
|
||||
ai_client.set_provider("deepseek", "deepseek-chat")
|
||||
assert ai_client._provider == "deepseek"
|
||||
@@ -15,7 +16,8 @@ def test_deepseek_model_selection() -> None:
|
||||
def test_deepseek_completion_logic(mock_post: MagicMock) -> None:
|
||||
"""
|
||||
|
||||
Verifies that ai_client.send() correctly calls the DeepSeek API and returns content.
|
||||
|
||||
Verifies that ai_client.send() correctly calls the DeepSeek API and returns content.
|
||||
"""
|
||||
ai_client.set_provider("deepseek", "deepseek-chat")
|
||||
with patch("src.ai_client._load_credentials", return_value={"deepseek": {"api_key": "test-key"}}):
|
||||
@@ -34,7 +36,8 @@ def test_deepseek_completion_logic(mock_post: MagicMock) -> None:
|
||||
def test_deepseek_reasoning_logic(mock_post: MagicMock) -> None:
|
||||
"""
|
||||
|
||||
Verifies that reasoning_content is captured and wrapped in <thinking> tags.
|
||||
|
||||
Verifies that reasoning_content is captured and wrapped in <thinking> tags.
|
||||
"""
|
||||
ai_client.set_provider("deepseek", "deepseek-reasoner")
|
||||
with patch("src.ai_client._load_credentials", return_value={"deepseek": {"api_key": "test-key"}}):
|
||||
@@ -56,7 +59,8 @@ def test_deepseek_reasoning_logic(mock_post: MagicMock) -> None:
|
||||
def test_deepseek_tool_calling(mock_post: MagicMock) -> None:
|
||||
"""
|
||||
|
||||
Verifies that DeepSeek provider correctly identifies and executes tool calls.
|
||||
|
||||
Verifies that DeepSeek provider correctly identifies and executes tool calls.
|
||||
"""
|
||||
ai_client.set_provider("deepseek", "deepseek-chat")
|
||||
with patch("src.ai_client._load_credentials", return_value={"deepseek": {"api_key": "test-key"}}), \
|
||||
@@ -98,7 +102,8 @@ def test_deepseek_tool_calling(mock_post: MagicMock) -> None:
|
||||
def test_deepseek_streaming(mock_post: MagicMock) -> None:
|
||||
"""
|
||||
|
||||
Verifies that DeepSeek provider correctly aggregates streaming chunks.
|
||||
|
||||
Verifies that DeepSeek provider correctly aggregates streaming chunks.
|
||||
"""
|
||||
ai_client.set_provider("deepseek", "deepseek-chat")
|
||||
with patch("src.ai_client._load_credentials", return_value={"deepseek": {"api_key": "test-key"}}):
|
||||
@@ -121,7 +126,8 @@ def test_deepseek_streaming(mock_post: MagicMock) -> None:
|
||||
def test_deepseek_payload_verification(mock_post: MagicMock) -> None:
|
||||
"""
|
||||
|
||||
Verifies that the correct JSON payload (tools, history, params) is sent to DeepSeek.
|
||||
|
||||
Verifies that the correct JSON payload (tools, history, params) is sent to DeepSeek.
|
||||
"""
|
||||
ai_client.set_provider("deepseek", "deepseek-chat")
|
||||
ai_client.reset_session()
|
||||
@@ -149,7 +155,8 @@ def test_deepseek_payload_verification(mock_post: MagicMock) -> None:
|
||||
def test_deepseek_reasoner_payload_verification(mock_post: MagicMock) -> None:
|
||||
"""
|
||||
|
||||
Verifies that deepseek-reasoner payload excludes tools and temperature.
|
||||
|
||||
Verifies that deepseek-reasoner payload excludes tools and temperature.
|
||||
"""
|
||||
ai_client.set_provider("deepseek", "deepseek-reasoner")
|
||||
ai_client.reset_session()
|
||||
|
||||
@@ -69,4 +69,4 @@ def test_execution_sim_live(live_gui: Any) -> None:
|
||||
client.set_value('auto_add_history', True)
|
||||
sim.run()
|
||||
time.sleep(2)
|
||||
sim.teardown()
|
||||
sim.teardown()
|
||||
@@ -55,4 +55,4 @@ def test_file_item_from_dict_defaults():
|
||||
assert item.view_mode == "full"
|
||||
assert item.ast_mask == {}
|
||||
assert item.custom_slices == []
|
||||
assert item.injected_at is None
|
||||
assert item.injected_at is None
|
||||
@@ -12,8 +12,9 @@ from src.ai_client import get_gemini_cache_stats, reset_session
|
||||
def test_get_gemini_cache_stats_with_mock_client() -> None:
|
||||
"""
|
||||
|
||||
Test that get_gemini_cache_stats correctly processes cache lists
|
||||
from a mocked client instance.
|
||||
|
||||
Test that get_gemini_cache_stats correctly processes cache lists
|
||||
from a mocked client instance.
|
||||
"""
|
||||
# Ensure a clean state before the test by resetting the session
|
||||
reset_session()
|
||||
|
||||
@@ -12,8 +12,9 @@ def app_instance(monkeypatch: pytest.MonkeyPatch) -> type[App]:
|
||||
def test_app_subscribes_to_events(app_instance: type[App]) -> None:
|
||||
"""
|
||||
|
||||
This test checks that the App's __init__ method subscribes the necessary
|
||||
event handlers to the ai_client.events emitter.
|
||||
|
||||
This test checks that the App's __init__ method subscribes the necessary
|
||||
event handlers to the ai_client.events emitter.
|
||||
"""
|
||||
with patch.object(ai_client.events, 'on') as mock_on:
|
||||
app = app_instance()
|
||||
|
||||
@@ -3,8 +3,9 @@ from src.gui_2 import App
|
||||
def test_gui2_hubs_exist_in_show_windows(app_instance: App) -> None:
|
||||
"""
|
||||
|
||||
Verifies that the new consolidated Hub windows are defined in the App's show_windows.
|
||||
This ensures they will be available in the 'Windows' menu.
|
||||
|
||||
Verifies that the new consolidated Hub windows are defined in the App's show_windows.
|
||||
This ensures they will be available in the 'Windows' menu.
|
||||
"""
|
||||
expected_hubs = [
|
||||
"Project Settings",
|
||||
@@ -20,8 +21,9 @@ def test_gui2_hubs_exist_in_show_windows(app_instance: App) -> None:
|
||||
def test_gui2_old_windows_removed_from_show_windows(app_instance: App) -> None:
|
||||
"""
|
||||
|
||||
Verifies that the old fragmented windows are removed from show_windows.
|
||||
Note: Message, Response, and Tool Calls are kept as they are now optional standalone windows.
|
||||
|
||||
Verifies that the old fragmented windows are removed from show_windows.
|
||||
Note: Message, Response, and Tool Calls are kept as they are now optional standalone windows.
|
||||
"""
|
||||
old_windows = [
|
||||
"Projects", "Files", "Screenshots",
|
||||
|
||||
@@ -6,9 +6,10 @@ from src import ai_client
|
||||
def test_mcp_tool_call_is_dispatched(app_instance: App) -> None:
|
||||
"""
|
||||
|
||||
This test verifies that when the AI returns a tool call for an MCP function,
|
||||
the ai_client correctly dispatches it to mcp_client.
|
||||
This will fail until mcp_client is properly integrated.
|
||||
|
||||
This test verifies that when the AI returns a tool call for an MCP function,
|
||||
the ai_client correctly dispatches it to mcp_client.
|
||||
This will fail until mcp_client is properly integrated.
|
||||
"""
|
||||
# 1. Define the mock tool call from the AI
|
||||
mock_fc = MagicMock()
|
||||
|
||||
@@ -26,7 +26,8 @@ def cleanup_callback_file() -> None:
|
||||
def test_gui2_set_value_hook_works(live_gui: Any) -> None:
|
||||
"""
|
||||
|
||||
Tests that the 'set_value' GUI hook is correctly implemented.
|
||||
|
||||
Tests that the 'set_value' GUI hook is correctly implemented.
|
||||
"""
|
||||
client = ApiHookClient()
|
||||
assert client.wait_for_server(timeout=10)
|
||||
@@ -42,7 +43,8 @@ def test_gui2_set_value_hook_works(live_gui: Any) -> None:
|
||||
def test_gui2_click_hook_works(live_gui: Any) -> None:
|
||||
"""
|
||||
|
||||
Tests that the 'click' GUI hook for the 'Reset' button is implemented.
|
||||
|
||||
Tests that the 'click' GUI hook for the 'Reset' button is implemented.
|
||||
"""
|
||||
client = ApiHookClient()
|
||||
assert client.wait_for_server(timeout=10)
|
||||
@@ -60,7 +62,8 @@ def test_gui2_click_hook_works(live_gui: Any) -> None:
|
||||
def test_gui2_custom_callback_hook_works(live_gui: Any) -> None:
|
||||
"""
|
||||
|
||||
Tests that the 'custom_callback' GUI hook is correctly implemented.
|
||||
|
||||
Tests that the 'custom_callback' GUI hook is correctly implemented.
|
||||
"""
|
||||
client = ApiHookClient()
|
||||
assert client.wait_for_server(timeout=10)
|
||||
@@ -78,4 +81,4 @@ def test_gui2_custom_callback_hook_works(live_gui: Any) -> None:
|
||||
assert temp_workspace_file.exists(), f"Custom callback was NOT executed, or file path is wrong! Expected: {temp_workspace_file}"
|
||||
with open(temp_workspace_file, "r") as f:
|
||||
content = f.read()
|
||||
assert content == test_data, "Callback executed, but file content is incorrect."
|
||||
assert content == test_data, "Callback executed, but file content is incorrect."
|
||||
@@ -20,8 +20,9 @@ _shared_metrics = {}
|
||||
def test_performance_benchmarking(live_gui: tuple) -> None:
|
||||
"""
|
||||
|
||||
Collects performance metrics for the current GUI script over a 5-second window.
|
||||
Ensures the application does not lock up and can report its internal state.
|
||||
|
||||
Collects performance metrics for the current GUI script over a 5-second window.
|
||||
Ensures the application does not lock up and can report its internal state.
|
||||
"""
|
||||
process, gui_script = live_gui
|
||||
client = ApiHookClient()
|
||||
@@ -67,8 +68,9 @@ def test_performance_benchmarking(live_gui: tuple) -> None:
|
||||
def test_performance_baseline_check() -> None:
|
||||
"""
|
||||
|
||||
Verifies that we have successfully collected performance metrics for sloppy.py
|
||||
and that they meet the minimum 30 FPS baseline.
|
||||
|
||||
Verifies that we have successfully collected performance metrics for sloppy.py
|
||||
and that they meet the minimum 30 FPS baseline.
|
||||
"""
|
||||
# Key is full path, find it by basename
|
||||
gui_key = next((k for k in _shared_metrics if "sloppy.py" in k), None)
|
||||
|
||||
@@ -15,9 +15,10 @@ def test_diagnostics_panel_initialization(app_instance: Any) -> None:
|
||||
def test_diagnostics_history_updates(app_instance: Any) -> None:
|
||||
"""
|
||||
|
||||
Verifies that the internal performance history is updated correctly.
|
||||
This logic is inside the render loop in gui_2.py, but we can test
|
||||
the data structure and initialization.
|
||||
|
||||
Verifies that the internal performance history is updated correctly.
|
||||
This logic is inside the render loop in gui_2.py, but we can test
|
||||
the data structure and initialization.
|
||||
"""
|
||||
assert "fps" in app_instance.perf_history
|
||||
assert len(app_instance.perf_history["fps"]) == 100
|
||||
@@ -60,4 +60,4 @@ def test_render_files_and_media_fast(app_instance: App):
|
||||
try:
|
||||
app_instance._render_files_and_media()
|
||||
except Exception as e:
|
||||
pytest.fail(f"_render_files_and_media raised an exception: {e}")
|
||||
pytest.fail(f"_render_files_and_media raised an exception: {e}")
|
||||
@@ -13,7 +13,7 @@ class MockApp:
|
||||
|
||||
def init_state(self):
|
||||
"""
|
||||
[C: tests/test_system_prompt_exposure.py:TestSystemPromptExposure.test_app_controller_init_state_loads_prompts]
|
||||
[C: tests/test_system_prompt_exposure.py:TestSystemPromptExposure.test_app_controller_init_state_loads_prompts]
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
@@ -11,7 +11,8 @@ from api_hook_client import ApiHookClient
|
||||
def test_idle_performance_requirements(live_gui) -> None:
|
||||
"""
|
||||
|
||||
Requirement: GUI must maintain stable performance on idle.
|
||||
|
||||
Requirement: GUI must maintain stable performance on idle.
|
||||
"""
|
||||
# Warmup to ensure GUI is ready
|
||||
time.sleep(5.0)
|
||||
|
||||
@@ -12,8 +12,9 @@ from src import paths
|
||||
def test_track_proposal_editing(app_instance):
|
||||
"""
|
||||
|
||||
Verifies the structural integrity of track proposal items.
|
||||
Ensures that track proposals can be edited and removed from the active list.
|
||||
|
||||
Verifies the structural integrity of track proposal items.
|
||||
Ensures that track proposals can be edited and removed from the active list.
|
||||
"""
|
||||
app_instance.proposed_tracks = [
|
||||
{"title": "Old Title", "goal": "Old Goal"},
|
||||
@@ -35,8 +36,9 @@ def test_track_proposal_editing(app_instance):
|
||||
def test_conductor_setup_scan(app_instance, tmp_path, monkeypatch):
|
||||
"""
|
||||
|
||||
Verifies that the conductor setup scan properly iterates through the conductor directory,
|
||||
counts files and lines, and identifies active tracks.
|
||||
|
||||
Verifies that the conductor setup scan properly iterates through the conductor directory,
|
||||
counts files and lines, and identifies active tracks.
|
||||
"""
|
||||
old_cwd = os.getcwd()
|
||||
os.chdir(tmp_path)
|
||||
@@ -63,8 +65,9 @@ def test_conductor_setup_scan(app_instance, tmp_path, monkeypatch):
|
||||
def test_create_track(app_instance, tmp_path):
|
||||
"""
|
||||
|
||||
Verifies that _cb_create_track properly creates the track folder
|
||||
and populates the necessary boilerplate files (spec.md, plan.md, metadata.json).
|
||||
|
||||
Verifies that _cb_create_track properly creates the track folder
|
||||
and populates the necessary boilerplate files (spec.md, plan.md, metadata.json).
|
||||
"""
|
||||
old_cwd = os.getcwd()
|
||||
os.chdir(tmp_path)
|
||||
@@ -91,4 +94,4 @@ def test_create_track(app_instance, tmp_path):
|
||||
assert data['type'] == "feature"
|
||||
assert data['id'] == track_dir.name
|
||||
finally:
|
||||
os.chdir(old_cwd)
|
||||
os.chdir(old_cwd)
|
||||
@@ -3,7 +3,8 @@ import time
|
||||
def test_gui_startup_smoke(live_gui):
|
||||
"""
|
||||
|
||||
Smoke test to ensure the GUI starts and remains running.
|
||||
|
||||
Smoke test to ensure the GUI starts and remains running.
|
||||
"""
|
||||
proc, _ = live_gui
|
||||
|
||||
|
||||
@@ -5,7 +5,8 @@ from src.api_hook_client import ApiHookClient
|
||||
def test_text_viewer_state_update(live_gui) -> None:
|
||||
"""
|
||||
|
||||
Verifies that we can set text viewer state and it is reflected in GUI state.
|
||||
|
||||
Verifies that we can set text viewer state and it is reflected in GUI state.
|
||||
"""
|
||||
client = ApiHookClient()
|
||||
client.click("btn_reset")
|
||||
@@ -32,4 +33,4 @@ def test_text_viewer_state_update(live_gui) -> None:
|
||||
assert state is not None
|
||||
assert state.get('show_text_viewer') == True
|
||||
assert state.get('text_viewer_title') == label
|
||||
assert state.get('text_viewer_type') == text_type
|
||||
assert state.get('text_viewer_type') == text_type
|
||||
@@ -17,9 +17,10 @@ from src.gui_2 import App
|
||||
def test_telemetry_data_updates_correctly(app_instance: Any) -> None:
|
||||
"""
|
||||
|
||||
Tests that the _refresh_api_metrics method correctly updates
|
||||
the internal state for display by querying the ai_client.
|
||||
Verifies the boundary between GUI state and API state.
|
||||
|
||||
Tests that the _refresh_api_metrics method correctly updates
|
||||
the internal state for display by querying the ai_client.
|
||||
Verifies the boundary between GUI state and API state.
|
||||
"""
|
||||
# 1. Set the provider to anthropic
|
||||
app_instance._current_provider = "anthropic"
|
||||
@@ -43,9 +44,10 @@ def test_telemetry_data_updates_correctly(app_instance: Any) -> None:
|
||||
def test_performance_history_updates(app_instance: Any) -> None:
|
||||
"""
|
||||
|
||||
Verify the data structure that feeds the sparkline.
|
||||
This ensures that the rolling buffer for performance telemetry maintains
|
||||
the correct size and default initialization to prevent GUI rendering crashes.
|
||||
|
||||
Verify the data structure that feeds the sparkline.
|
||||
This ensures that the rolling buffer for performance telemetry maintains
|
||||
the correct size and default initialization to prevent GUI rendering crashes.
|
||||
"""
|
||||
# ANTI-SIMPLIFICATION: Verifying exactly 100 elements ensures the sparkline won't overflow
|
||||
assert len(app_instance.perf_history["frame_time"]) == 100
|
||||
@@ -54,9 +56,10 @@ def test_performance_history_updates(app_instance: Any) -> None:
|
||||
def test_gui_updates_on_event(app_instance: App) -> None:
|
||||
"""
|
||||
|
||||
Verifies that when an API event is received (e.g. from ai_client),
|
||||
the _on_api_event handler correctly updates internal metrics and
|
||||
queues the update to be processed by the GUI event loop.
|
||||
|
||||
Verifies that when an API event is received (e.g. from ai_client),
|
||||
the _on_api_event handler correctly updates internal metrics and
|
||||
queues the update to be processed by the GUI event loop.
|
||||
"""
|
||||
mock_stats = {"percentage": 50.0, "current": 500, "limit": 1000}
|
||||
app_instance.last_md = "mock_md"
|
||||
|
||||
@@ -6,10 +6,11 @@ from src.api_hook_client import ApiHookClient
|
||||
async def test_mma_track_lifecycle_simulation():
|
||||
"""
|
||||
|
||||
This test simulates the sequence of API calls an external orchestrator
|
||||
would make to manage an MMA track lifecycle via the Hook API.
|
||||
It verifies that ApiHookClient correctly routes requests to the
|
||||
corresponding endpoints in src/api_hooks.py.
|
||||
|
||||
This test simulates the sequence of API calls an external orchestrator
|
||||
would make to manage an MMA track lifecycle via the Hook API.
|
||||
It verifies that ApiHookClient correctly routes requests to the
|
||||
corresponding endpoints in src/api_hooks.py.
|
||||
"""
|
||||
|
||||
client = ApiHookClient("http://localhost:8999")
|
||||
|
||||
@@ -10,10 +10,11 @@ from src import ai_client
|
||||
async def test_headless_verification_full_run(vlogger) -> None:
|
||||
"""
|
||||
|
||||
1. Initialize a ConductorEngine with a Track containing multiple dependent Tickets.
|
||||
2. Simulate a full execution run using engine.run().
|
||||
3. Mock ai_client.send to simulate successful tool calls and final responses.
|
||||
4. Specifically verify that 'Context Amnesia' is maintained.
|
||||
|
||||
1. Initialize a ConductorEngine with a Track containing multiple dependent Tickets.
|
||||
2. Simulate a full execution run using engine.run().
|
||||
3. Mock ai_client.send to simulate successful tool calls and final responses.
|
||||
4. Specifically verify that 'Context Amnesia' is maintained.
|
||||
"""
|
||||
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1")
|
||||
t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker1", depends_on=["T1"])
|
||||
@@ -49,8 +50,9 @@ async def test_headless_verification_full_run(vlogger) -> None:
|
||||
async def test_headless_verification_error_and_qa_interceptor(vlogger) -> None:
|
||||
"""
|
||||
|
||||
5. Simulate a shell error and verify that the Tier 4 QA interceptor is triggered
|
||||
and its summary is injected into the worker's history for the next retry.
|
||||
|
||||
5. Simulate a shell error and verify that the Tier 4 QA interceptor is triggered
|
||||
and its summary is injected into the worker's history for the next retry.
|
||||
"""
|
||||
t1 = Ticket(id="T1", description="Task with error", status="todo", assigned_to="worker1")
|
||||
track = Track(id="track_error", description="Error Track", tickets=[t1])
|
||||
|
||||
@@ -12,8 +12,9 @@ from src.gui_2 import App
|
||||
def test_new_hubs_defined_in_show_windows(mock_app: App) -> None:
|
||||
"""
|
||||
|
||||
Verifies that the new consolidated Hub windows are defined in the App's show_windows.
|
||||
This ensures they will be available in the 'Windows' menu.
|
||||
|
||||
Verifies that the new consolidated Hub windows are defined in the App's show_windows.
|
||||
This ensures they will be available in the 'Windows' menu.
|
||||
"""
|
||||
expected_hubs = [
|
||||
"Project Settings",
|
||||
@@ -27,7 +28,8 @@ def test_new_hubs_defined_in_show_windows(mock_app: App) -> None:
|
||||
def test_old_windows_removed_from_gui2(app_instance_simple: Any) -> None:
|
||||
"""
|
||||
|
||||
Verifies that the old fragmented windows are removed or renamed.
|
||||
|
||||
Verifies that the old fragmented windows are removed or renamed.
|
||||
"""
|
||||
old_tags = [
|
||||
"win_projects", "win_files", "win_screenshots",
|
||||
@@ -54,7 +56,8 @@ def app_instance_simple() -> Any:
|
||||
def test_hub_windows_exist_in_gui2(app_instance_simple: Any) -> None:
|
||||
"""
|
||||
|
||||
Verifies that the new Hub windows are present in the show_windows dictionary.
|
||||
|
||||
Verifies that the new Hub windows are present in the show_windows dictionary.
|
||||
"""
|
||||
hubs = ["Project Settings", "AI Settings", "Discussion Hub", "Operations Hub"]
|
||||
for hub in hubs:
|
||||
@@ -63,7 +66,8 @@ def test_hub_windows_exist_in_gui2(app_instance_simple: Any) -> None:
|
||||
def test_indicators_logic_exists(app_instance_simple: Any) -> None:
|
||||
"""
|
||||
|
||||
Verifies that the status indicators logic exists in the App.
|
||||
|
||||
Verifies that the status indicators logic exists in the App.
|
||||
"""
|
||||
assert hasattr(app_instance_simple, 'ai_status')
|
||||
assert hasattr(app_instance_simple, 'mma_status')
|
||||
@@ -14,11 +14,12 @@ from src.api_hook_client import ApiHookClient
|
||||
def test_user_request_integration_flow(mock_app: App) -> None:
|
||||
"""
|
||||
|
||||
Verifies that pushing a UserRequestEvent to the event_queue:
|
||||
1. Triggers ai_client.send
|
||||
2. Results in a 'response' event back to the queue
|
||||
3. Eventually updates the UI state (ai_response, ai_status) after processing GUI tasks.
|
||||
ANTI-SIMPLIFICATION: This verifies the full cross-thread boundary.
|
||||
|
||||
Verifies that pushing a UserRequestEvent to the event_queue:
|
||||
1. Triggers ai_client.send
|
||||
2. Results in a 'response' event back to the queue
|
||||
3. Eventually updates the UI state (ai_response, ai_status) after processing GUI tasks.
|
||||
ANTI-SIMPLIFICATION: This verifies the full cross-thread boundary.
|
||||
"""
|
||||
app = mock_app
|
||||
# Mock all ai_client methods called during _handle_request_event
|
||||
@@ -76,7 +77,8 @@ def test_user_request_integration_flow(mock_app: App) -> None:
|
||||
def test_user_request_error_handling(mock_app: App) -> None:
|
||||
"""
|
||||
|
||||
Verifies that if ai_client.send raises an exception, the UI is updated with the error state.
|
||||
|
||||
Verifies that if ai_client.send raises an exception, the UI is updated with the error state.
|
||||
"""
|
||||
app = mock_app
|
||||
with (
|
||||
|
||||
@@ -16,7 +16,8 @@ from src.api_hook_client import ApiHookClient
|
||||
def wait_for_value(client, field, expected, timeout=10):
|
||||
"""
|
||||
|
||||
Helper to poll the GUI state until a field matches the expected value.
|
||||
|
||||
Helper to poll the GUI state until a field matches the expected value.
|
||||
"""
|
||||
start = time.time()
|
||||
while time.time() - start < timeout:
|
||||
@@ -31,9 +32,10 @@ def wait_for_value(client, field, expected, timeout=10):
|
||||
def test_full_live_workflow(live_gui) -> None:
|
||||
"""
|
||||
|
||||
Integration test that drives the GUI through a full workflow.
|
||||
ANTI-SIMPLIFICATION: Asserts exact AI behavior, thinking state tracking,
|
||||
and response logging in discussion history.
|
||||
|
||||
Integration test that drives the GUI through a full workflow.
|
||||
ANTI-SIMPLIFICATION: Asserts exact AI behavior, thinking state tracking,
|
||||
and response logging in discussion history.
|
||||
"""
|
||||
client = ApiHookClient()
|
||||
assert client.wait_for_server(timeout=10)
|
||||
|
||||
@@ -6,7 +6,7 @@ from src.gui_2 import App
|
||||
|
||||
def _make_app(**kwargs):
|
||||
"""
|
||||
[C: tests/test_mma_dashboard_streams.py:TestMMADashboardStreams.test_tier1_renders_stream_content, tests/test_mma_dashboard_streams.py:TestMMADashboardStreams.test_tier3_renders_worker_subheaders]
|
||||
[C: tests/test_mma_dashboard_streams.py:TestMMADashboardStreams.test_tier1_renders_stream_content, tests/test_mma_dashboard_streams.py:TestMMADashboardStreams.test_tier3_renders_worker_subheaders]
|
||||
"""
|
||||
app = MagicMock()
|
||||
app.mma_streams = kwargs.get("mma_streams", {})
|
||||
@@ -65,7 +65,7 @@ def _make_app(**kwargs):
|
||||
|
||||
def _make_imgui_mock():
|
||||
"""
|
||||
[C: tests/test_mma_dashboard_streams.py:TestMMADashboardStreams.test_tier1_renders_stream_content, tests/test_mma_dashboard_streams.py:TestMMADashboardStreams.test_tier3_renders_worker_subheaders]
|
||||
[C: tests/test_mma_dashboard_streams.py:TestMMADashboardStreams.test_tier1_renders_stream_content, tests/test_mma_dashboard_streams.py:TestMMADashboardStreams.test_tier3_renders_worker_subheaders]
|
||||
"""
|
||||
m = MagicMock()
|
||||
m.begin_table.return_value = False
|
||||
|
||||
@@ -11,7 +11,8 @@ from src import api_hook_client
|
||||
|
||||
def _poll_mma_status(client, timeout, condition, label):
|
||||
"""
|
||||
Poll get_mma_status() until condition(status) is True or timeout.
|
||||
|
||||
Poll get_mma_status() until condition(status) is True or timeout.
|
||||
[C: tests/test_mma_step_mode_sim.py:test_mma_step_mode_approval_flow]
|
||||
"""
|
||||
last_status = {}
|
||||
@@ -28,9 +29,10 @@ def _poll_mma_status(client, timeout, condition, label):
|
||||
def test_mma_concurrent_tracks_execution(live_gui) -> None:
|
||||
"""
|
||||
|
||||
Stress test for concurrent MMA track execution.
|
||||
Verifies that starting multiple tracks simultaneously doesn't cause crashes
|
||||
and that workers from both tracks are processed.
|
||||
|
||||
Stress test for concurrent MMA track execution.
|
||||
Verifies that starting multiple tracks simultaneously doesn't cause crashes
|
||||
and that workers from both tracks are processed.
|
||||
"""
|
||||
client = api_hook_client.ApiHookClient()
|
||||
assert client.wait_for_server(timeout=15), "Hook server did not start"
|
||||
|
||||
@@ -25,8 +25,9 @@ def _poll_mma_workers(client: api_hook_client.ApiHookClient, timeout: int, condi
|
||||
def test_mma_concurrent_tracks_stress(live_gui) -> None:
|
||||
"""
|
||||
|
||||
Stress test: Start two tracks concurrently and verify they both progress
|
||||
without crashing the GUI or losing state.
|
||||
|
||||
Stress test: Start two tracks concurrently and verify they both progress
|
||||
without crashing the GUI or losing state.
|
||||
"""
|
||||
client = api_hook_client.ApiHookClient()
|
||||
assert client.wait_for_server(timeout=15), "Hook server did not start"
|
||||
|
||||
+26
-17
@@ -3,8 +3,9 @@ from src.models import Ticket, Track, WorkerContext
|
||||
def test_ticket_instantiation() -> None:
|
||||
"""
|
||||
|
||||
Verifies that a Ticket can be instantiated with its required fields:
|
||||
id, description, status, assigned_to.
|
||||
|
||||
Verifies that a Ticket can be instantiated with its required fields:
|
||||
id, description, status, assigned_to.
|
||||
"""
|
||||
ticket_id = "T1"
|
||||
description = "Implement surgical code changes"
|
||||
@@ -25,7 +26,8 @@ def test_ticket_instantiation() -> None:
|
||||
def test_ticket_with_dependencies() -> None:
|
||||
"""
|
||||
|
||||
Verifies that a Ticket can store dependencies.
|
||||
|
||||
Verifies that a Ticket can store dependencies.
|
||||
"""
|
||||
ticket = Ticket(
|
||||
id="T2",
|
||||
@@ -39,8 +41,9 @@ def test_ticket_with_dependencies() -> None:
|
||||
def test_track_instantiation() -> None:
|
||||
"""
|
||||
|
||||
Verifies that a Track can be instantiated with its required fields:
|
||||
id, description, and a list of Tickets.
|
||||
|
||||
Verifies that a Track can be instantiated with its required fields:
|
||||
id, description, and a list of Tickets.
|
||||
"""
|
||||
ticket1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="a")
|
||||
ticket2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="b")
|
||||
@@ -61,7 +64,8 @@ def test_track_instantiation() -> None:
|
||||
def test_track_can_handle_empty_tickets() -> None:
|
||||
"""
|
||||
|
||||
Verifies that a Track can be instantiated with an empty list of tickets.
|
||||
|
||||
Verifies that a Track can be instantiated with an empty list of tickets.
|
||||
"""
|
||||
track = Track(id="TRACK-2", description="Empty Track", tickets=[])
|
||||
assert track.tickets == []
|
||||
@@ -69,8 +73,9 @@ def test_track_can_handle_empty_tickets() -> None:
|
||||
def test_worker_context_instantiation() -> None:
|
||||
"""
|
||||
|
||||
Verifies that a WorkerContext can be instantiated with ticket_id,
|
||||
model_name, and messages.
|
||||
|
||||
Verifies that a WorkerContext can be instantiated with ticket_id,
|
||||
model_name, and messages.
|
||||
"""
|
||||
ticket_id = "T1"
|
||||
model_name = "gemini-2.0-flash-lite"
|
||||
@@ -90,8 +95,9 @@ def test_worker_context_instantiation() -> None:
|
||||
def test_ticket_mark_blocked() -> None:
|
||||
"""
|
||||
|
||||
Verifies that ticket.mark_blocked(reason) sets the status to 'blocked'.
|
||||
Note: The reason field might need to be added to the Ticket class.
|
||||
|
||||
Verifies that ticket.mark_blocked(reason) sets the status to 'blocked'.
|
||||
Note: The reason field might need to be added to the Ticket class.
|
||||
"""
|
||||
ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="a")
|
||||
ticket.mark_blocked("Waiting for API key")
|
||||
@@ -100,7 +106,8 @@ def test_ticket_mark_blocked() -> None:
|
||||
def test_ticket_mark_complete() -> None:
|
||||
"""
|
||||
|
||||
Verifies that ticket.mark_complete() sets the status to 'completed'.
|
||||
|
||||
Verifies that ticket.mark_complete() sets the status to 'completed'.
|
||||
"""
|
||||
ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="a")
|
||||
ticket.mark_complete()
|
||||
@@ -109,8 +116,9 @@ def test_ticket_mark_complete() -> None:
|
||||
def test_track_get_executable_tickets() -> None:
|
||||
"""
|
||||
|
||||
Verifies that track.get_executable_tickets() returns only 'todo' tickets
|
||||
whose dependencies are all 'completed'.
|
||||
|
||||
Verifies that track.get_executable_tickets() returns only 'todo' tickets
|
||||
whose dependencies are all 'completed'.
|
||||
"""
|
||||
# T1: todo, no deps -> executable
|
||||
t1 = Ticket(id="T1", description="T1", status="todo", assigned_to="a")
|
||||
@@ -134,10 +142,11 @@ def test_track_get_executable_tickets() -> None:
|
||||
def test_track_get_executable_tickets_complex() -> None:
|
||||
"""
|
||||
|
||||
Verifies executable tickets with complex dependency chains.
|
||||
Chain: T1 (comp) -> T2 (todo) -> T3 (todo)
|
||||
T4 (comp) -> T3
|
||||
T5 (todo) -> T3
|
||||
|
||||
Verifies executable tickets with complex dependency chains.
|
||||
Chain: T1 (comp) -> T2 (todo) -> T3 (todo)
|
||||
T4 (comp) -> T3
|
||||
T5 (todo) -> T3
|
||||
"""
|
||||
t1 = Ticket(id="T1", description="T1", status="completed", assigned_to="a")
|
||||
t2 = Ticket(id="T2", description="T2", status="todo", assigned_to="a", depends_on=["T1"])
|
||||
|
||||
@@ -24,7 +24,8 @@ def _poll_mma_status(client: api_hook_client.ApiHookClient, timeout: int, condit
|
||||
def test_mma_step_mode_approval_flow(live_gui) -> None:
|
||||
"""
|
||||
|
||||
Verify that we can manually approve a ticket in Step Mode and it proceeds.
|
||||
|
||||
Verify that we can manually approve a ticket in Step Mode and it proceeds.
|
||||
"""
|
||||
client = api_hook_client.ApiHookClient()
|
||||
assert client.wait_for_server(timeout=15), "Hook server did not start"
|
||||
|
||||
@@ -13,8 +13,9 @@ from src import api_hook_client
|
||||
def test_patch_modal_appears_on_trigger(live_gui) -> None:
|
||||
"""
|
||||
|
||||
Test that triggering a patch shows the modal in the GUI.
|
||||
Uses live_gui fixture to start the GUI with test hooks enabled.
|
||||
|
||||
Test that triggering a patch shows the modal in the GUI.
|
||||
Uses live_gui fixture to start the GUI with test hooks enabled.
|
||||
"""
|
||||
proc, _ = live_gui
|
||||
client = api_hook_client.ApiHookClient()
|
||||
@@ -51,7 +52,8 @@ def test_patch_modal_appears_on_trigger(live_gui) -> None:
|
||||
def test_patch_apply_modal_workflow(live_gui) -> None:
|
||||
"""
|
||||
|
||||
Test the full patch apply workflow: trigger -> apply -> verify modal closes.
|
||||
|
||||
Test the full patch apply workflow: trigger -> apply -> verify modal closes.
|
||||
"""
|
||||
proc, _ = live_gui
|
||||
client = api_hook_client.ApiHookClient()
|
||||
|
||||
@@ -65,4 +65,4 @@ def test_process_pending_gui_tasks_right_click(app_instance: App) -> None:
|
||||
{"action": "right_click", "item": "item_id"}
|
||||
]
|
||||
app_instance.controller._process_pending_gui_tasks()
|
||||
mock_callback.assert_called_once()
|
||||
mock_callback.assert_called_once()
|
||||
@@ -25,7 +25,8 @@ def mock_project():
|
||||
def test_rag_integration(mock_project):
|
||||
"""
|
||||
|
||||
Integration test verifying the flow from AppController through RAGEngine to ai_client.
|
||||
|
||||
Integration test verifying the flow from AppController through RAGEngine to ai_client.
|
||||
"""
|
||||
# 1. Initializes a mock project and AppController.
|
||||
# We patch several components to avoid side effects during initialization.
|
||||
|
||||
@@ -10,8 +10,9 @@ class TestRunWorkerLifecycleAbort(unittest.TestCase):
|
||||
def test_run_worker_lifecycle_returns_early_on_abort(self):
|
||||
"""
|
||||
|
||||
Test that run_worker_lifecycle returns early and marks ticket as 'killed'
|
||||
if the abort event is set for the ticket.
|
||||
|
||||
Test that run_worker_lifecycle returns early and marks ticket as 'killed'
|
||||
if the abort event is set for the ticket.
|
||||
"""
|
||||
# Mock ai_client.send
|
||||
with patch('src.ai_client.send') as mock_send:
|
||||
|
||||
@@ -13,8 +13,9 @@ from src.gui_2 import App
|
||||
def test_selectable_label_stability(live_gui) -> None:
|
||||
"""
|
||||
|
||||
Verifies that the application starts correctly with --enable-test-hooks
|
||||
and that the selectable label infrastructure is present and stable.
|
||||
|
||||
Verifies that the application starts correctly with --enable-test-hooks
|
||||
and that the selectable label infrastructure is present and stable.
|
||||
"""
|
||||
client = ApiHookClient()
|
||||
assert client.wait_for_server(timeout=20), "Hook server failed to start"
|
||||
|
||||
@@ -16,8 +16,9 @@ from simulation.sim_ai_settings import AISettingsSimulation
|
||||
def test_ai_settings_simulation_run() -> None:
|
||||
"""
|
||||
|
||||
Verifies that AISettingsSimulation correctly cycles through models
|
||||
to test the settings UI components.
|
||||
|
||||
Verifies that AISettingsSimulation correctly cycles through models
|
||||
to test the settings UI components.
|
||||
"""
|
||||
mock_client = MagicMock()
|
||||
mock_client.wait_for_server.return_value = True
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user