chore(conductor): Mark track 'Curate Provider Registries' as complete. Includes critical fixes for RecursionError, NoneType Hook API responses, and plurality mismatches.
@@ -77,6 +77,7 @@
 ## Architectural Patterns
 
+- **Centralized Registry Management:** Consolidation of critical application constants (e.g., `PROVIDERS`, `AGENT_TOOL_NAMES`) into `src/models.py` as a single source of truth, eliminating redundant list definitions across the UI and Controller.
 - **Event-Driven Metrics:** Uses a custom `EventEmitter` to decouple API lifecycle events from UI rendering, improving performance and responsiveness.
 - **Synchronous Event Queue:** Employs a `SyncEventQueue` based on `queue.Queue` to manage communication between the UI and backend agents, maintaining responsiveness through a threaded execution model.
 - **Synchronous IPC Approval Flow:** A specialized bridge mechanism that allows headless AI providers (like Gemini CLI) to synchronously request and receive human approval for tool calls and manual ticket transitions (Step Mode) via the GUI's REST API hooks.
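For context, the "Synchronous Event Queue" bullet amounts to a blocking handoff over `queue.Queue` between the UI thread and a backend worker. A minimal sketch of the pattern, with illustrative names rather than the project's actual `SyncEventQueue` API:

```python
import queue
import threading

class SyncEventQueue:
    """Illustrative stand-in: a thin wrapper over queue.Queue carrying (name, payload) pairs."""

    def __init__(self) -> None:
        self._q: "queue.Queue[tuple[str, dict]]" = queue.Queue()

    def put(self, name: str, payload: dict) -> None:
        self._q.put((name, payload))

    def get(self) -> "tuple[str, dict]":
        return self._q.get()  # blocks until an event arrives


def backend_loop(events: SyncEventQueue) -> None:
    # Backend agent: block on the queue, exit on "shutdown".
    while True:
        name, payload = events.get()
        if name == "shutdown":
            break
        print(f"handling {name}: {payload}")


events = SyncEventQueue()
worker = threading.Thread(target=backend_loop, args=(events,), daemon=True)
worker.start()
events.put("user_request", {"prompt": "hello"})  # the UI thread returns immediately
events.put("shutdown", {})
worker.join()
```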
@@ -22,7 +22,7 @@ This file tracks all major tracks for the project. Each track has its own detail
 *Link: [./tracks/source_wide_redundancy_audit_20260507/](./tracks/source_wide_redundancy_audit_20260507/)*
 *Goal: Deep file-by-file audit to identify unused methods, duplicate logic, and dead code.*
 
-4. [ ] **Track: Curate Provider Registries**
+4. [x] **Track: Curate Provider Registries**
 *Link: [./tracks/curate_provider_registries_20260507/](./tracks/curate_provider_registries_20260507/)*
 *Goal: Move the PROVIDERS list to models.py and update all references to use this single source of truth.*
@@ -1 +1,8 @@
-# Implementation Plan: Curate Provider Registries\n\n## Phase 1: Execution\n- [ ] Task: Define PROVIDERS in models.py\n- [ ] Task: Remove PROVIDERS list from AppController and App\n- [ ] Task: Update all provider loop references in gui_2.py and app_controller.py\n- [ ] Task: Run full test suite\n- [ ] Conductor - User Manual Verification (Protocol in workflow.md)\n
+# Implementation Plan: Curate Provider Registries
+
+## Phase 1: Execution
+- [x] Task: Define PROVIDERS in models.py
+- [x] Task: Remove PROVIDERS list from AppController and App
+- [x] Task: Update all provider loop references in gui_2.py and app_controller.py
+- [x] Task: Run full test suite
+- [x] Conductor - User Manual Verification (Protocol in workflow.md)
@@ -119,11 +119,15 @@ def set_current_tier(tier: Optional[str]) -> None:
     _local_storage.current_tier = tier
 
 def get_comms_log_callback() -> Optional[Callable[[dict[str, Any]], None]]:
-    """Returns the comms log callback from thread-local storage."""
-    return getattr(_local_storage, "comms_log_callback", None)
+    """Returns the comms log callback (thread-local with global fallback)."""
+    tl_cb = getattr(_local_storage, "comms_log_callback", None)
+    if tl_cb: return tl_cb
+    return comms_log_callback
 
 def set_comms_log_callback(cb: Optional[Callable[[dict[str, Any]], None]]) -> None:
-    """Sets the comms log callback in thread-local storage."""
+    """Sets the comms log callback (both global and thread-local)."""
+    global comms_log_callback
+    comms_log_callback = cb
     _local_storage.comms_log_callback = cb
 
 # Increased to allow thorough code exploration before forcing a summary
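The hunk above switches the getter from thread-local-only to thread-local-with-global-fallback. The sketch below (illustrative names, standard `threading.local` semantics) shows why the fallback matters: a slot set on the main thread is invisible to worker threads, so a thread-local-only getter hands back None there, which is one plausible source of the NoneType Hook API responses named in the commit message.

```python
import threading

_local = threading.local()
_global_cb = None  # module-level fallback, mirroring the fix above


def set_cb(cb) -> None:
    global _global_cb
    _global_cb = cb   # visible to every thread
    _local.cb = cb    # visible only to the thread that called set_cb


def get_cb():
    tl_cb = getattr(_local, "cb", None)
    return tl_cb if tl_cb else _global_cb  # fall back when this thread never set one


set_cb(lambda entry: print("log:", entry))


def worker() -> None:
    # Without the global fallback, get_cb() would return None on this thread.
    cb = get_cb()
    cb({"event": "from worker thread"})


t = threading.Thread(target=worker)
t.start()
t.join()
```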
@@ -1186,8 +1190,6 @@ def _send_gemini_cli(md_content: str, user_message: str, base_dir: str,
                      stream_callback: Optional[Callable[[str], None]] = None,
                      patch_callback: Optional[Callable[[str, str], Optional[str]]] = None) -> str:
     global _gemini_cli_adapter
-    sys.stderr.write(f"[DEBUG] _send_gemini_cli running in module {__name__}, adapter is {_gemini_cli_adapter}\n")
-    sys.stderr.flush()
     try:
         if _gemini_cli_adapter is None:
             _gemini_cli_adapter = GeminiCliAdapter(binary_path="gemini")
@@ -2321,8 +2323,7 @@ if os.environ.get("SLOP_TOOL_PRESET"):
     try:
         set_tool_preset(os.environ["SLOP_TOOL_PRESET"])
     except Exception as _e:
-        sys.stderr.write(f"[DEBUG] Failed to auto-set tool preset from env: {_e}\n")
-        sys.stderr.flush()
+        pass
 
 def get_history_bleed_stats(md_content: Optional[str] = None) -> dict[str, Any]:
     if _provider == "anthropic":
@@ -134,7 +134,6 @@ class AppController:
     The headless controller for the Manual Slop application.
     Owns the application state and manages background services.
     """
-    PROVIDERS: list[str] = ["gemini", "anthropic", "gemini_cli", "deepseek", "minimax"]
 
     def __init__(self):
        # Initialize locks first to avoid initialization order issues
@@ -646,8 +645,6 @@ class AppController:
         }
 
     def _update_gcli_adapter(self, path: str) -> None:
-        sys.stderr.write(f"[DEBUG] _update_gcli_adapter called with: {path}\n")
-        sys.stderr.flush()
         if not ai_client._gemini_cli_adapter:
             ai_client._gemini_cli_adapter = ai_client.GeminiCliAdapter(binary_path=str(path))
         else:
@@ -725,16 +722,12 @@ class AppController:
 
         if not self._pending_gui_tasks:
             return
-        sys.stderr.write(f"[DEBUG] _process_pending_gui_tasks: processing {len(self._pending_gui_tasks)} tasks\n")
-        sys.stderr.flush()
         with self._pending_gui_tasks_lock:
             tasks = self._pending_gui_tasks[:]
             self._pending_gui_tasks.clear()
         for task in tasks:
             try:
                 action = task.get("action")
-                sys.stderr.write(f"[DEBUG] Processing GUI task: action={action}\n")
-                sys.stderr.flush()
                 if action:
                     session_logger.log_api_hook("PROCESS_TASK", action, str(task))
                 # ...
@@ -746,8 +739,6 @@ class AppController:
                     self._tool_log_dirty = True
                 elif action == "set_ai_status":
                     self.ai_status = task.get("payload", "")
-                    sys.stderr.write(f"[DEBUG] Updated ai_status via task to: {self.ai_status}\n")
-                    sys.stderr.flush()
                 elif action == "set_mma_status":
                     self.mma_status = task.get("payload", "")
                 elif action == "handle_ai_response":
@@ -775,8 +766,6 @@ class AppController:
                     else:
                         self.ai_response = text
                     self.ai_status = payload.get("status", "done")
-                    sys.stderr.write(f"[DEBUG] Updated ai_status to: {self.ai_status}\n")
-                    sys.stderr.flush()
                     self._trigger_blink = True
                     if not stream_id:
                         self._token_stats_dirty = True
@@ -799,9 +788,6 @@ class AppController:
                     if not isinstance(p, dict):
                         p = task  # Fallback to task itself if payload is missing or wrong type
 
-                    sys.stderr.write(f"[DEBUG] mma_state_update: status={p.get('status')} active_tier={p.get('active_tier')}\n")
-                    sys.stderr.flush()
-
                     track_data = p.get("track")
                     is_active_track = False
                     if track_data and self.active_track and track_data.get("id") == self.active_track.id:
@@ -853,20 +839,14 @@ class AppController:
                 elif action == "set_value":
                     item = task.get("item")
                     value = task.get("value")
-                    sys.stderr.write(f"[DEBUG] Processing set_value: {item}={value}\n")
-                    sys.stderr.flush()
                     if item in self._settable_fields:
                         attr_name = self._settable_fields[item]
                         setattr(self, attr_name, value)
-                        sys.stderr.write(f"[DEBUG] Set {attr_name} to {value}\n")
-                        sys.stderr.flush()
                     if item == "gcli_path":
                         self._update_gcli_adapter(str(value))
                 elif action == "click":
                     item = task.get("item")
                     user_data = task.get("user_data")
-                    sys.stderr.write(f"[DEBUG] Processing click: {item} (user_data={user_data})\n")
-                    sys.stderr.flush()
                     if item == "btn_project_new_automated":
                         self._cb_new_project_automated(user_data)
                     elif item == "btn_mma_load_track":
@@ -1492,7 +1472,7 @@ class AppController:
 
         def do_fetch() -> None:
             try:
-                for p in self.PROVIDERS:
+                for p in models.PROVIDERS:
                     try:
                         self.all_available_models[p] = ai_client.list_models(p)
                     except Exception as e:
@@ -1512,14 +1492,10 @@ class AppController:
 
     def start_services(self, app: Any = None):
         """Starts background threads."""
-        sys.stderr.write("[DEBUG] AppController.start_services called\n")
-        sys.stderr.flush()
         self._prune_old_logs()
         self._init_ai_and_hooks(app)
         self._loop_thread = threading.Thread(target=self._run_event_loop, daemon=True)
         self._loop_thread.start()
-        sys.stderr.write(f"[DEBUG] _loop_thread started: {self._loop_thread.ident}\n")
-        sys.stderr.flush()
 
     def shutdown(self) -> None:
         """Stops background threads and cleans up resources."""
@@ -1568,13 +1544,8 @@ class AppController:
 
     def _process_event_queue(self) -> None:
         """Listens for and processes events from the SyncEventQueue."""
-        sys.stderr.write("[DEBUG] _process_event_queue entered\n")
-        sys.stderr.flush()
-
         while True:
             event_name, payload = self.event_queue.get()
-            sys.stderr.write(f"[DEBUG] _process_event_queue got event: {event_name} with payload: {str(payload)[:100]}\n")
-            sys.stderr.flush()
             if event_name == "shutdown":
                 break
             if event_name == "user_request":
@@ -1641,8 +1612,6 @@ class AppController:
         ai_client.set_model_params(self.temperature, self.max_tokens, self.history_trunc_limit, self.top_p)
         ai_client.set_agent_tools(self.ui_agent_tools)  # Force update adapter path right before send to bypass potential duplication issues
         self._update_gcli_adapter(self.ui_gemini_cli_path)
-        sys.stderr.write(f"[DEBUG] Calling ai_client.send with provider={ai_client.get_provider()}, model={self.current_model}, gcli_path={self.ui_gemini_cli_path}\n")
-        sys.stderr.flush()
         try:
             resp = ai_client.send(
                 event.stable_md,
@@ -1659,12 +1628,8 @@ class AppController:
             )
             self.event_queue.put("response", {"text": resp, "status": "done", "role": "AI"})
         except ai_client.ProviderError as e:
-            sys.stderr.write(f"[DEBUG] _handle_request_event ai_client.ProviderError: {e.ui_message()}\n")
-            sys.stderr.flush()
             self.event_queue.put("response", {"text": e.ui_message(), "status": "error", "role": "Vendor API"})
         except Exception as e:
-            sys.stderr.write(f"[DEBUG] _handle_request_event ERROR: {e}\n{traceback.format_exc()}\n")
-            sys.stderr.flush()
             self.event_queue.put("response", {"text": f"ERROR: {e}", "status": "error", "role": "System"})
 
     def _offload_entry_payload(self, entry: Dict[str, Any]) -> Dict[str, Any]:
@@ -1806,17 +1771,11 @@ class AppController:
         })
 
     def _confirm_and_run(self, script: str, base_dir: str, qa_callback: Optional[Callable[[str], str]] = None, patch_callback: Optional[Callable[[str, str], Optional[str]]] = None) -> Optional[str]:
-        sys.stderr.write(f"[DEBUG] _confirm_and_run called. test_hooks={self.test_hooks_enabled}, manual_approve={getattr(self, 'ui_manual_approve', False)}\n")
-        sys.stderr.flush()
         if self.test_hooks_enabled and not getattr(self, "ui_manual_approve", False):
-            sys.stderr.write("[DEBUG] Auto-approving script.\n")
-            sys.stderr.flush()
             self._set_status("running powershell...")
             output = shell_runner.run_powershell(script, base_dir, qa_callback=qa_callback, patch_callback=patch_callback)
             self._set_status("powershell done, awaiting AI...")
             return output
-        sys.stderr.write("[DEBUG] Creating ConfirmDialog.\n")
-        sys.stderr.flush()
         dialog = ConfirmDialog(script, base_dir)
         is_headless = "--headless" in sys.argv
         if is_headless:
@@ -1834,13 +1793,7 @@ class AppController:
             "base_dir": str(base_dir),
             "ts": time.time()
         })
-        sys.stderr.write(f"[DEBUG] Appended script_confirmation_required to _api_event_queue. ID={dialog._uid}\n")
-        sys.stderr.flush()
-        sys.stderr.write(f"[DEBUG] Waiting for dialog ID={dialog._uid}...\n")
-        sys.stderr.flush()
         approved, final_script = dialog.wait()
-        sys.stderr.write(f"[DEBUG] Dialog ID={dialog._uid} finished wait. approved={approved}\n")
-        sys.stderr.flush()
         if is_headless:
             with self._pending_dialog_lock:
                 if dialog._uid in self._pending_actions:
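`_confirm_and_run` is the "Synchronous IPC Approval Flow" in practice: the backend thread parks on `dialog.wait()` while the GUI/REST hook side resolves the dialog. A self-contained sketch of that bridge, assuming a `threading.Event` core rather than the project's real `ConfirmDialog`:

```python
import threading
import uuid
from typing import Optional, Tuple


class ApprovalBridge:
    """Illustrative stand-in for ConfirmDialog: one thread blocks until another resolves it."""

    def __init__(self, script: str) -> None:
        self._uid = uuid.uuid4().hex
        self.script = script
        self._done = threading.Event()
        self._approved = False

    def resolve(self, approved: bool, final_script: Optional[str] = None) -> None:
        # Called from the GUI/REST hook thread once the human decides.
        self._approved = approved
        if final_script is not None:
            self.script = final_script
        self._done.set()

    def wait(self, timeout: Optional[float] = None) -> Tuple[bool, str]:
        # Called from the backend thread; blocks until resolve() fires.
        self._done.wait(timeout)
        return self._approved, self.script


bridge = ApprovalBridge("echo hello")
threading.Timer(0.1, bridge.resolve, args=(True,)).start()  # simulate approval via the API hook
approved, final_script = bridge.wait()
print(approved, final_script)  # True echo hello
```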
@@ -1912,6 +1865,14 @@ class AppController:
             return True
         return False
 
+    @property
+    def _pending_mma_spawn(self) -> Optional[Dict[str, Any]]:
+        return self._pending_mma_spawns[0] if self._pending_mma_spawns else None
+
+    @property
+    def _pending_mma_approval(self) -> Optional[Dict[str, Any]]:
+        return self._pending_mma_approvals[0] if self._pending_mma_approvals else None
+
     @property
     def current_provider(self) -> str:
         return self._current_provider
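The two new properties address the "plurality mismatches" from the commit message: callers that still read the singular `_pending_mma_spawn` / `_pending_mma_approval` names now see the head of the underlying plural list. A minimal sketch of the accessor pattern (the `pending_spawn` names here are hypothetical):

```python
from typing import Any, Dict, List, Optional


class Controller:
    def __init__(self) -> None:
        self._pending_spawns: List[Dict[str, Any]] = []

    @property
    def pending_spawn(self) -> Optional[Dict[str, Any]]:
        # Singular, read-only view: first queued item, or None when empty.
        return self._pending_spawns[0] if self._pending_spawns else None


c = Controller()
assert c.pending_spawn is None
c._pending_spawns.append({"tier": "T1"})
assert c.pending_spawn == {"tier": "T1"}
```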
@@ -31,6 +31,7 @@ Thread Safety:
 """
 import queue
 from typing import Callable, Any, Dict, List, Tuple, Optional
+from pathlib import Path
 
 class EventEmitter:
     """
@@ -142,20 +143,23 @@ class UserRequestEvent:
         self.base_dir = base_dir
 
     def to_dict(self) -> Dict[str, Any]:
-        # Ensure all file items and base_dir are JSON serializable
-        serializable_files = []
-        for f in self.file_items:
-            if hasattr(f, 'to_dict'):
-                serializable_files.append(f.to_dict())
-            elif isinstance(f, (str, dict, list, int, float, bool, type(None))):
-                serializable_files.append(f)
-            else:
-                serializable_files.append(str(f))
+        def _make_serializable(obj: Any) -> Any:
+            if isinstance(obj, dict):
+                return {k: _make_serializable(v) for k, v in obj.items()}
+            if isinstance(obj, list):
+                return [_make_serializable(x) for x in obj]
+            if isinstance(obj, Path):
+                return str(obj)
+            if hasattr(obj, 'to_dict'):
+                return obj.to_dict()
+            if not isinstance(obj, (str, int, float, bool, type(None))):
+                return str(obj)
+            return obj
 
         return {
             "prompt": self.prompt,
             "stable_md": self.stable_md,
-            "file_items": serializable_files,
+            "file_items": _make_serializable(self.file_items),
             "disc_text": self.disc_text,
             "base_dir": str(self.base_dir)
         }
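A quick check of the recursion above (the `FileItem` class here is hypothetical): unlike the old flat loop, `Path` objects and `to_dict`-capable objects nested inside dicts or lists now serialize cleanly.

```python
import json
from pathlib import Path
from typing import Any


def _make_serializable(obj: Any) -> Any:
    # Same logic as the hunk above.
    if isinstance(obj, dict):
        return {k: _make_serializable(v) for k, v in obj.items()}
    if isinstance(obj, list):
        return [_make_serializable(x) for x in obj]
    if isinstance(obj, Path):
        return str(obj)
    if hasattr(obj, 'to_dict'):
        return obj.to_dict()
    if not isinstance(obj, (str, int, float, bool, type(None))):
        return str(obj)
    return obj


class FileItem:
    def to_dict(self) -> dict:
        return {"path": "a.py"}


nested = [{"file": FileItem(), "dir": Path("/tmp")}, [Path("b.txt")]]
print(json.dumps(_make_serializable(nested)))
# [{"file": {"path": "a.py"}, "dir": "/tmp"}, ["b.txt"]]
```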
@@ -46,7 +46,6 @@ else:
 from pydantic import BaseModel
 from imgui_bundle import imgui, hello_imgui, immapp, imgui_node_editor as ed, imgui_color_text_edit as ced
 
-PROVIDERS: list[str] = ["gemini", "anthropic", "gemini_cli", "deepseek", "minimax"]
 COMMS_CLAMP_CHARS: int = 300
 
 def hide_tk_root() -> Tk:
@@ -114,9 +113,6 @@ class App:
         self._pending_snapshot: bool = False
         self._is_applying_snapshot: bool = False
 
-        # Restore legacy PROVIDERS to controller if needed (it already has it via delegation if set on class level, but let's be explicit)
-        if not hasattr(self.controller, 'PROVIDERS'):
-            self.controller.PROVIDERS = PROVIDERS
         self.controller.init_state()
         self.workspace_manager = workspace_manager.WorkspaceManager(project_root=self.active_project_root)
         self.workspace_profiles = self.workspace_manager.load_all_profiles()
@@ -273,17 +269,33 @@ class App:
         self._handle_mma_respond(approved=True)
 
     def __getattr__(self, name: str) -> Any:
-        if name != 'controller' and hasattr(self, 'controller') and hasattr(self.controller, name):
-            return getattr(self.controller, name)
+        if name == 'controller':
+            raise AttributeError(name)
+        try:
+            # Use object.__getattribute__ to avoid recursion if 'controller' isn't initialized yet
+            ctrl = object.__getattribute__(self, 'controller')
+        except AttributeError:
+            raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
+
+        if ctrl is not None and hasattr(ctrl, name):
+            return getattr(ctrl, name)
+        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
 
     def __setattr__(self, name: str, value: Any) -> None:
         if name == 'controller':
-            super().__setattr__(name, value)
-        elif hasattr(self, 'controller') and hasattr(self.controller, name):
-            setattr(self.controller, name, value)
+            object.__setattr__(self, name, value)
+            return
+
+        try:
+            # Use object.__getattribute__ to avoid recursion
+            ctrl = object.__getattribute__(self, 'controller')
+        except AttributeError:
+            ctrl = None
+
+        if ctrl is not None and hasattr(ctrl, name):
+            setattr(ctrl, name, value)
         else:
-            super().__setattr__(name, value)
+            object.__setattr__(self, name, value)
 
     @property
     def current_provider(self) -> str:
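This rewrite targets the RecursionError named in the commit message: both old dunders probed `hasattr(self, 'controller')`, which re-enters the attribute machinery while `controller` may not exist yet. The new versions look the attribute up once via `object.__getattribute__` and always raise `AttributeError` on a miss; as shown, the old `__getattr__` could also fall through and return None implicitly, a likely source of the NoneType responses. A standalone sketch of the safe delegation pattern (simplified, read side only):

```python
from typing import Any


class Facade:
    """Delegates unknown attribute reads to self.controller without re-entering __getattr__."""

    def __init__(self, controller: Any) -> None:
        self.controller = controller

    def __getattr__(self, name: str) -> Any:
        # Called only when normal lookup fails; never probe via hasattr(self, ...) here.
        try:
            ctrl = object.__getattribute__(self, 'controller')
        except AttributeError:
            raise AttributeError(name)  # controller not set yet: fail fast, do not recurse
        if ctrl is not None and hasattr(ctrl, name):
            return getattr(ctrl, name)
        raise AttributeError(name)  # always raise; an implicit None return masks typos


class Ctrl:
    provider = "gemini"


f = Facade(Ctrl())
print(f.provider)  # "gemini", via delegation
try:
    f.missing
except AttributeError:
    print("raises instead of silently returning None")
```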
@@ -1831,7 +1843,7 @@
         if self._persona_models_open:
             if imgui.begin_child("pref_models_scroll", imgui.ImVec2(0, h1), True):
                 to_remove = []
-                providers = self.controller.PROVIDERS
+                providers = models.PROVIDERS
                 if not hasattr(self, '_persona_pref_models_expanded'): self._persona_pref_models_expanded = {}
                 for i, entry in enumerate(self._editing_persona_preferred_models_list):
                     imgui.push_id(f"pref_model_{i}")
@@ -3287,7 +3299,7 @@ def hello():
         if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_provider_panel")
         imgui.text("Provider")
         if imgui.begin_combo("##prov", self.current_provider):
-            for p in PROVIDERS:
+            for p in models.PROVIDERS:
                 if imgui.selectable(p, p == self.current_provider)[0]:
                     self.current_provider = p
             imgui.end_combo()
@@ -4391,7 +4403,7 @@ def hello():
         # Provider selection
         imgui.push_item_width(80)
         if imgui.begin_combo("##prov", current_provider):
-            for p in PROVIDERS:
+            for p in models.PROVIDERS:
                 if imgui.selectable(p, p == current_provider)[0]:
                     self.mma_tier_usage[tier]["provider"] = p
                     # Reset model to default for provider
@@ -46,6 +46,8 @@ from typing import List, Optional, Dict, Any, Union
 from pathlib import Path
 from src.paths import get_config_path
 
+PROVIDERS: List[str] = ["gemini", "anthropic", "gemini_cli", "deepseek", "minimax"]
+
 CONFIG_PATH = get_config_path()
 
 def _clean_nones(data: Any) -> Any:
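With the list now defined once in `src/models.py`, every consumer imports the same registry; a usage sketch (import path as used in the tests below):

```python
import src.models as models

# UI combo boxes, the controller's model-fetch loop, and the tests
# all iterate one shared registry.
for provider in models.PROVIDERS:
    print(provider)  # gemini, anthropic, gemini_cli, deepseek, minimax
```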
@@ -36,8 +36,8 @@ def test_gui_providers_list() -> None:
     """
     Check if 'deepseek' is in the GUI's provider list.
     """
-    import gui_2
-    assert "deepseek" in gui_2.PROVIDERS
+    from src.models import PROVIDERS
+    assert "deepseek" in PROVIDERS
 
 def test_deepseek_model_listing() -> None:
     """
@@ -23,6 +23,7 @@ def test_context_sim_live(live_gui: Any) -> None:
     sim.setup("LiveContextSim")
     client.set_value('current_provider', 'gemini_cli')
     client.set_value('gcli_path', f'"{sys.executable}" "{os.path.abspath("tests/mock_gemini_cli.py")}"')
+    client.set_value('auto_add_history', True)
     sim.run()  # Ensure history is updated via the async queue
     time.sleep(2)
     sim.teardown()
@@ -36,6 +37,7 @@ def test_ai_settings_sim_live(live_gui: Any) -> None:
     sim.setup("LiveAISettingsSim")
     client.set_value('current_provider', 'gemini_cli')
     client.set_value('gcli_path', f'"{sys.executable}" "{os.path.abspath("tests/mock_gemini_cli.py")}"')  # Expect gemini_cli as the provider
+    client.set_value('auto_add_history', True)
     assert client.get_value('current_provider') == 'gemini_cli'
     sim.run()
     sim.teardown()
@@ -49,6 +51,7 @@ def test_tools_sim_live(live_gui: Any) -> None:
     sim.setup("LiveToolsSim")
     client.set_value('current_provider', 'gemini_cli')
    client.set_value('gcli_path', f'"{sys.executable}" "{os.path.abspath("tests/mock_gemini_cli.py")}"')
+    client.set_value('auto_add_history', True)
     sim.run()  # Ensure history is updated via the async queue
     time.sleep(2)
     sim.teardown()
@@ -63,6 +66,7 @@ def test_execution_sim_live(live_gui: Any) -> None:
     client.set_value('manual_approve', True)
     client.set_value('current_provider', 'gemini_cli')
     client.set_value('gcli_path', f'"{sys.executable}" "{os.path.abspath("tests/mock_gemini_cli.py")}"')
+    client.set_value('auto_add_history', True)
     sim.run()
     time.sleep(2)
     sim.teardown()
@@ -26,12 +26,12 @@ def test_minimax_history_bleed_stats() -> None:
     assert stats["limit"] == 204800
 
 def test_minimax_in_providers_list() -> None:
-    from src.gui_2 import PROVIDERS
+    from src.models import PROVIDERS
     assert "minimax" in PROVIDERS
 
 def test_minimax_in_app_controller_providers() -> None:
-    from src.app_controller import AppController
-    assert "minimax" in AppController.PROVIDERS
+    from src.models import PROVIDERS
+    assert "minimax" in PROVIDERS
 
 def test_minimax_credentials_template() -> None:
     try:
@@ -20,8 +20,16 @@ def _make_app(**kwargs):
     app.mma_status = kwargs.get("mma_status", "idle")
     app.active_tier = kwargs.get("active_tier", None)
     app.mma_step_mode = kwargs.get("mma_step_mode", False)
-    app._pending_mma_spawn = kwargs.get("_pending_mma_spawn", None)
-    app._pending_mma_approval = kwargs.get("_pending_mma_approval", None)
+    app._pending_mma_spawns = []
+    app._pending_mma_approvals = []
+    spawn = kwargs.get("_pending_mma_spawn", None)
+    app._pending_mma_spawn = spawn
+    if spawn:
+        app._pending_mma_spawns.append(spawn)
+    approval = kwargs.get("_pending_mma_approval", None)
+    app._pending_mma_approval = approval
+    if approval:
+        app._pending_mma_approvals.append(approval)
     app._pending_ask_dialog = kwargs.get("_pending_ask_dialog", False)
     app.perf_profiling_enabled = False
     app.ui_new_track_name = ""
@@ -0,0 +1,8 @@
+import src.models as models
+import src.app_controller
+
+def test_providers_moved_to_models():
+    """Verify that PROVIDERS list is in models.py and removed from AppController."""
+    expected_providers = ['gemini', 'anthropic', 'gemini_cli', 'deepseek', 'minimax']
+    assert models.PROVIDERS == expected_providers
+    assert not hasattr(src.app_controller.AppController, 'PROVIDERS')
@@ -31,6 +31,7 @@ def test_rag_large_codebase_verification_sim(live_gui):
     client.set_value('rag_enabled', True)
     client.set_value('rag_source', 'chroma')
     client.set_value('rag_emb_provider', 'local')
+    client.set_value('auto_add_history', True)
 
     # 3. Trigger Initial Indexing
     print("[SIM] Triggering initial indexing of 50 files...")
@@ -79,7 +80,6 @@ def test_rag_large_codebase_verification_sim(live_gui):
     # 6. Verify retrieval of modified content
     client.set_value('current_provider', 'gemini_cli')
     client.set_value('gcli_path', os.path.abspath(os.path.join(os.path.dirname(__file__), "mock_gcli.bat")))
-    client.set_value('auto_add_history', True)
     client.set_value('ai_input', "What is the modified content?")
     client.click('btn_gen_send')