7 Commits

18 changed files with 742 additions and 31 deletions
+1 -1
View File
@@ -10,7 +10,7 @@ This file tracks all major tracks for the project. Each track has its own detail
### Architecture & Backend ### Architecture & Backend
1. [ ] **Track: External MCP Server Support** 1. [x] **Track: External MCP Server Support**
*Link: [./tracks/external_mcp_support_20260308/](./tracks/external_mcp_support_20260308/)* *Link: [./tracks/external_mcp_support_20260308/](./tracks/external_mcp_support_20260308/)*
*Goal: Add support for external MCP servers (Local Stdio and Remote SSE/WS) with flexible configuration and lifecycle management (including auto-start on project load).* *Goal: Add support for external MCP servers (Local Stdio and Remote SSE/WS) with flexible configuration and lifecycle management (including auto-start on project load).*
@@ -1,12 +1,12 @@
# Implementation Plan: External MCP Server Support # Implementation Plan: External MCP Server Support
## Phase 1: Configuration & Data Modeling ## Phase 1: Configuration & Data Modeling
- [ ] Task: Define the schema for external MCP server configuration. - [x] Task: Define the schema for external MCP server configuration. [1c863f0]
- [ ] Update `src/models.py` to include `MCPServerConfig` and `MCPConfiguration` classes. - [x] Update `src/models.py` to include `MCPServerConfig` and `MCPConfiguration` classes.
- [ ] Implement logic to load `mcp_config.json` from global and project-specific paths. - [x] Implement logic to load `mcp_config.json` from global and project-specific paths.
- [ ] Task: Integrate configuration loading into `AppController`. - [x] Task: Integrate configuration loading into `AppController`. [c09e0f5]
- [ ] Ensure the MCP config path is correctly resolved from `config.toml` and `manual_slop.toml`. - [x] Ensure the MCP config path is correctly resolved from `config.toml` and `manual_slop.toml`.
- [ ] Task: Write unit tests for configuration loading and validation. - [x] Task: Write unit tests for configuration loading and validation. [c09e0f5]
- [ ] Task: Conductor - User Manual Verification 'Phase 1: Configuration & Data Modeling' (Protocol in workflow.md) - [ ] Task: Conductor - User Manual Verification 'Phase 1: Configuration & Data Modeling' (Protocol in workflow.md)
## Phase 2: MCP Client Extension ## Phase 2: MCP Client Extension
+10 -10
View File
@@ -1,12 +1,12 @@
[ai] [ai]
provider = "minimax" provider = "gemini_cli"
model = "MiniMax-M2.5" model = "gemini-2.5-flash-lite"
temperature = 0.0 temperature = 0.85
top_p = 1.0 top_p = 1.0
max_tokens = 32000 max_tokens = 1024
history_trunc_limit = 900000 history_trunc_limit = 900000
active_preset = "Default" active_preset = ""
system_prompt = "" system_prompt = "Overridden Prompt"
[projects] [projects]
paths = [ paths = [
@@ -17,7 +17,7 @@ paths = [
"C:\\projects\\manual_slop\\tests\\artifacts\\temp_liveexecutionsim.toml", "C:\\projects\\manual_slop\\tests\\artifacts\\temp_liveexecutionsim.toml",
"C:\\projects\\manual_slop\\tests\\artifacts\\temp_project.toml", "C:\\projects\\manual_slop\\tests\\artifacts\\temp_project.toml",
] ]
active = "C:/projects/gencpp/gencpp_sloppy.toml" active = "C:\\projects\\manual_slop\\tests\\artifacts\\live_gui_workspace\\manual_slop.toml"
[gui] [gui]
separate_message_panel = false separate_message_panel = false
@@ -37,8 +37,8 @@ separate_tier4 = false
"Files & Media" = true "Files & Media" = true
"AI Settings" = true "AI Settings" = true
"MMA Dashboard" = true "MMA Dashboard" = true
"Task DAG" = false "Task DAG" = true
"Usage Analytics" = false "Usage Analytics" = true
"Tier 1" = false "Tier 1" = false
"Tier 2" = false "Tier 2" = false
"Tier 3" = false "Tier 3" = false
@@ -51,7 +51,7 @@ separate_tier4 = false
"Operations Hub" = true "Operations Hub" = true
Message = false Message = false
Response = false Response = false
"Tool Calls" = true "Tool Calls" = false
Theme = true Theme = true
"Log Management" = true "Log Management" = true
Diagnostics = false Diagnostics = false
+46
View File
@@ -2370,3 +2370,49 @@ PROMPT:
role: tool role: tool
Here are the results: {"content": "done"} Here are the results: {"content": "done"}
------------------ ------------------
--- MOCK INVOKED ---
ARGS: ['tests/mock_gemini_cli.py']
PROMPT:
PATH: Epic Initialization — please produce tracks
------------------
--- MOCK INVOKED ---
ARGS: ['tests/mock_gemini_cli.py']
PROMPT:
Please generate the implementation tickets for this track.
------------------
--- MOCK INVOKED ---
ARGS: ['tests/mock_gemini_cli.py']
PROMPT:
Please read test.txt
You are assigned to Ticket T1.
Task Description: do something
------------------
--- MOCK INVOKED ---
ARGS: ['tests/mock_gemini_cli.py']
PROMPT:
role: tool
Here are the results: {"content": "done"}
------------------
--- MOCK INVOKED ---
ARGS: ['tests/mock_gemini_cli.py']
PROMPT:
PATH: Epic Initialization — please produce tracks
------------------
--- MOCK INVOKED ---
ARGS: ['tests/mock_gemini_cli.py']
PROMPT:
Please generate the implementation tickets for this track.
------------------
--- MOCK INVOKED ---
ARGS: ['tests/mock_gemini_cli.py']
PROMPT:
Please read test.txt
You are assigned to Ticket T1.
Task Description: do something
------------------
--- MOCK INVOKED ---
ARGS: ['tests/mock_gemini_cli.py']
PROMPT:
role: tool
Here are the results: {"content": "done"}
------------------
+3
View File
@@ -1,2 +1,5 @@
[presets.Default] [presets.Default]
system_prompt = "" system_prompt = ""
[presets.ModalPreset]
system_prompt = "Modal Content"
+1 -1
View File
@@ -9,5 +9,5 @@ active = "main"
[discussions.main] [discussions.main]
git_commit = "" git_commit = ""
last_updated = "2026-03-10T21:01:58" last_updated = "2026-03-11T23:45:09"
history = [] history = []
+47
View File
@@ -0,0 +1,47 @@
import sys
import json
def main():
    """Minimal mock MCP server: answer JSON-RPC requests line-by-line on stdio.

    Supports exactly two methods — 'tools/list' (advertises a single 'echo'
    tool) and 'tools/call' for that tool. Anything else gets a JSON-RPC error.
    Exits when stdin closes (parent process went away).
    """
    while True:
        raw = sys.stdin.readline()
        if not raw:
            break  # stdin closed -> stop serving
        try:
            message = json.loads(raw)
            method = message.get("method")
            msg_id = message.get("id")
            if method == "tools/list":
                reply = {
                    "jsonrpc": "2.0",
                    "id": msg_id,
                    "result": {
                        "tools": [
                            {"name": "echo", "description": "Echo input", "inputSchema": {"type": "object"}}
                        ]
                    },
                }
            elif method == "tools/call":
                params = message["params"]
                tool = params.get("name")
                tool_args = params.get("arguments", {})
                if tool == "echo":
                    reply = {
                        "jsonrpc": "2.0",
                        "id": msg_id,
                        "result": {
                            "content": [{"type": "text", "text": f"ECHO: {tool_args}"}]
                        },
                    }
                else:
                    reply = {"jsonrpc": "2.0", "id": msg_id, "error": {"message": "Unknown tool"}}
            else:
                reply = {"jsonrpc": "2.0", "id": msg_id, "error": {"message": "Unknown method"}}
            sys.stdout.write(json.dumps(reply) + "\n")
            sys.stdout.flush()
        except Exception as e:
            # Malformed input: report on stderr, keep serving.
            sys.stderr.write(f"Error: {e}\n")
            sys.stderr.flush()


if __name__ == "__main__":
    main()
+11 -6
View File
@@ -535,7 +535,7 @@ def get_bias_profile() -> Optional[str]:
def _build_anthropic_tools() -> list[dict[str, Any]]: def _build_anthropic_tools() -> list[dict[str, Any]]:
raw_tools: list[dict[str, Any]] = [] raw_tools: list[dict[str, Any]] = []
for spec in mcp_client.MCP_TOOL_SPECS: for spec in mcp_client.get_tool_schemas():
if _agent_tools.get(spec["name"], True): if _agent_tools.get(spec["name"], True):
raw_tools.append({ raw_tools.append({
"name": spec["name"], "name": spec["name"],
@@ -579,7 +579,7 @@ def _get_anthropic_tools() -> list[dict[str, Any]]:
def _gemini_tool_declaration() -> Optional[types.Tool]: def _gemini_tool_declaration() -> Optional[types.Tool]:
raw_tools: list[dict[str, Any]] = [] raw_tools: list[dict[str, Any]] = []
for spec in mcp_client.MCP_TOOL_SPECS: for spec in mcp_client.get_tool_schemas():
if _agent_tools.get(spec["name"], True): if _agent_tools.get(spec["name"], True):
raw_tools.append({ raw_tools.append({
"name": spec["name"], "name": spec["name"],
@@ -715,10 +715,15 @@ async def _execute_single_tool_call_async(
tool_executed = True tool_executed = True
if not tool_executed: if not tool_executed:
if name and name in mcp_client.TOOL_NAMES: is_native = name in mcp_client.TOOL_NAMES
ext_tools = mcp_client.get_external_mcp_manager().get_all_tools()
is_external = name in ext_tools
if name and (is_native or is_external):
_append_comms("OUT", "tool_call", {"name": name, "id": call_id, "args": args}) _append_comms("OUT", "tool_call", {"name": name, "id": call_id, "args": args})
if name in mcp_client.MUTATING_TOOLS and approval_mode != "auto" and pre_tool_callback: should_approve = (name in mcp_client.MUTATING_TOOLS or is_external) and approval_mode != "auto" and pre_tool_callback
desc = f"# MCP MUTATING TOOL: {name}\n" + "\n".join(f"# {k}: {repr(v)}" for k, v in args.items()) if should_approve:
label = "MCP MUTATING" if is_native else "EXTERNAL MCP"
desc = f"# {label} TOOL: {name}\n" + "\n".join(f"# {k}: {repr(v)}" for k, v in args.items())
_res = await asyncio.to_thread(pre_tool_callback, desc, base_dir, qa_callback) _res = await asyncio.to_thread(pre_tool_callback, desc, base_dir, qa_callback)
out = "USER REJECTED: tool execution cancelled" if _res is None else await mcp_client.async_dispatch(name, args) out = "USER REJECTED: tool execution cancelled" if _res is None else await mcp_client.async_dispatch(name, args)
else: else:
@@ -816,7 +821,7 @@ def _build_file_diff_text(changed_items: list[dict[str, Any]]) -> str:
def _build_deepseek_tools() -> list[dict[str, Any]]: def _build_deepseek_tools() -> list[dict[str, Any]]:
raw_tools: list[dict[str, Any]] = [] raw_tools: list[dict[str, Any]] = []
for spec in mcp_client.MCP_TOOL_SPECS: for spec in mcp_client.get_tool_schemas():
if _agent_tools.get(spec["name"], True): if _agent_tools.get(spec["name"], True):
raw_tools.append({ raw_tools.append({
"name": spec["name"], "name": spec["name"],
+25
View File
@@ -197,6 +197,7 @@ class AppController:
self._pending_dialog_open: bool = False self._pending_dialog_open: bool = False
self._pending_actions: Dict[str, ConfirmDialog] = {} self._pending_actions: Dict[str, ConfirmDialog] = {}
self._pending_ask_dialog: bool = False self._pending_ask_dialog: bool = False
self.mcp_config: models.MCPConfiguration = models.MCPConfiguration()
# AI settings state # AI settings state
self._current_provider: str = "gemini" self._current_provider: str = "gemini"
self._current_model: str = "gemini-2.5-flash-lite" self._current_model: str = "gemini-2.5-flash-lite"
@@ -894,6 +895,18 @@ class AppController:
self.tool_presets = self.tool_preset_manager.load_all_presets() self.tool_presets = self.tool_preset_manager.load_all_presets()
self.bias_profiles = self.tool_preset_manager.load_all_bias_profiles() self.bias_profiles = self.tool_preset_manager.load_all_bias_profiles()
mcp_path = self.project.get('project', {}).get('mcp_config_path') or self.config.get('ai', {}).get('mcp_config_path')
if mcp_path:
mcp_p = Path(mcp_path)
if not mcp_p.is_absolute() and self.active_project_path:
mcp_p = Path(self.active_project_path).parent / mcp_path
if mcp_p.exists():
self.mcp_config = models.load_mcp_config(str(mcp_p))
else:
self.mcp_config = models.MCPConfiguration()
else:
self.mcp_config = models.MCPConfiguration()
from src.personas import PersonaManager from src.personas import PersonaManager
self.persona_manager = PersonaManager(Path(self.active_project_path).parent if self.active_project_path else None) self.persona_manager = PersonaManager(Path(self.active_project_path).parent if self.active_project_path else None)
self.personas = self.persona_manager.load_all() self.personas = self.persona_manager.load_all()
@@ -940,6 +953,15 @@ class AppController:
self.ui_agent_tools = {t: agent_tools_cfg.get(t, True) for t in models.AGENT_TOOL_NAMES} self.ui_agent_tools = {t: agent_tools_cfg.get(t, True) for t in models.AGENT_TOOL_NAMES}
label = self.project.get("project", {}).get("name", "") label = self.project.get("project", {}).get("name", "")
session_logger.open_session(label=label) session_logger.open_session(label=label)
# Trigger auto-start of MCP servers
self.event_queue.put('refresh_external_mcps', None)
async def refresh_external_mcps(self):
    """Stop all running external MCP servers, then restart the auto-start ones.

    Reads `self.mcp_config.mcpServers` (populated by init_state) and starts
    every server whose config has auto_start=True.
    """
    # Hoist the singleton lookup instead of fetching it per server.
    manager = mcp_client.get_external_mcp_manager()
    await manager.stop_all()
    # Start servers with auto_start=True (iterate values: the name is unused).
    for cfg in self.mcp_config.mcpServers.values():
        if cfg.auto_start:
            await manager.add_server(cfg)
def cb_load_prior_log(self, path: Optional[str] = None) -> None: def cb_load_prior_log(self, path: Optional[str] = None) -> None:
root = hide_tk_root() root = hide_tk_root()
@@ -1253,6 +1275,9 @@ class AppController:
"action": "ticket_completed", "action": "ticket_completed",
"payload": payload "payload": payload
}) })
elif event_name == "refresh_external_mcps":
import asyncio
asyncio.run(self.refresh_external_mcps())
def _handle_request_event(self, event: events.UserRequestEvent) -> None: def _handle_request_event(self, event: events.UserRequestEvent) -> None:
"""Processes a UserRequestEvent by calling the AI client.""" """Processes a UserRequestEvent by calling the AI client."""
+51
View File
@@ -608,6 +608,9 @@ class App:
if imgui.begin_tab_item("Usage Analytics")[0]: if imgui.begin_tab_item("Usage Analytics")[0]:
self._render_usage_analytics_panel() self._render_usage_analytics_panel()
imgui.end_tab_item() imgui.end_tab_item()
if imgui.begin_tab_item("External Tools")[0]:
self._render_external_tools_panel()
imgui.end_tab_item()
imgui.end_tab_bar() imgui.end_tab_bar()
imgui.end() imgui.end()
@@ -2573,6 +2576,54 @@ def hello():
imgui.pop_style_color(2) imgui.pop_style_color(2)
if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_response_panel") if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_response_panel")
def _render_external_tools_panel(self) -> None:
    """Render the 'External Tools' tab: refresh button, server status chips,
    and a table of all tools discovered on external MCP servers."""
    if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_external_tools_panel")
    if imgui.button("Refresh External MCPs"):
        self.event_queue.put("refresh_external_mcps", None)
    imgui.separator()
    # Server status indicators
    manager = mcp_client.get_external_mcp_manager()
    statuses = manager.get_servers_status()
    if statuses:
        imgui.text("Servers:")
        # Green for running, Yellow for starting, Red for error, Gray for idle
        status_colors = {
            'running': (0.0, 1.0, 0.0, 1.0),
            'starting': (1.0, 1.0, 0.0, 1.0),
            'error': (1.0, 0.0, 0.0, 1.0),
        }
        for sname, status in statuses.items():
            imgui.same_line()
            col = status_colors.get(status, (0.5, 0.5, 0.5, 1.0))
            imgui.color_button(f"##status_{sname}", col)
            imgui.same_line()
            imgui.text(sname)
        imgui.separator()
    tools = manager.get_all_tools()
    if not tools:
        imgui.text_disabled("No external tools found.")
    elif imgui.begin_table("external_tools_table", 3, imgui.TableFlags_.borders | imgui.TableFlags_.row_bg | imgui.TableFlags_.resizable):
        imgui.table_setup_column("Name")
        imgui.table_setup_column("Server")
        imgui.table_setup_column("Description")
        imgui.table_headers_row()
        for tname, tinfo in tools.items():
            imgui.table_next_row()
            imgui.table_next_column()
            imgui.text(tname)
            imgui.table_next_column()
            imgui.text(tinfo.get('server', 'unknown'))
            imgui.table_next_column()
            imgui.text(tinfo.get('description', ''))
        imgui.end_table()
    if self.perf_profiling_enabled: self.perf_monitor.end_component("_render_external_tools_panel")
def _render_comms_history_panel(self) -> None: def _render_comms_history_panel(self) -> None:
if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_comms_history_panel") if self.perf_profiling_enabled: self.perf_monitor.start_component("_render_comms_history_panel")
st_col = vec4(200, 220, 160) st_col = vec4(200, 220, 160)
+141 -7
View File
@@ -53,6 +53,8 @@ See Also:
from __future__ import annotations from __future__ import annotations
import asyncio import asyncio
import json
from src import models
from pathlib import Path from pathlib import Path
from typing import Optional, Callable, Any, cast from typing import Optional, Callable, Any, cast
import os import os
@@ -915,6 +917,126 @@ def get_ui_performance() -> str:
return f"ERROR: Failed to retrieve UI performance: {str(e)}" return f"ERROR: Failed to retrieve UI performance: {str(e)}"
# ------------------------------------------------------------------ tool dispatch # ------------------------------------------------------------------ tool dispatch
class StdioMCPServer:
    """One external MCP server spoken to over stdio JSON-RPC.

    Lifecycle: construct with a models.MCPServerConfig, `await start()` to
    spawn the subprocess and discover its tools, `await stop()` to tear it
    down. `status` is 'idle' until started, 'starting' during startup, then
    'running'.
    """

    def __init__(self, config: models.MCPServerConfig):
        self.config = config
        self.name = config.name
        self.proc = None  # asyncio.subprocess.Process once started
        self.tools = {}   # tool name -> spec dict from tools/list
        self._id_counter = 0
        self._pending_requests = {}  # reserved for a future id-keyed read loop
        self.status = 'idle'

    def _get_id(self):
        """Return the next monotonically increasing JSON-RPC request id."""
        self._id_counter += 1
        return self._id_counter

    async def start(self):
        """Spawn the configured subprocess and populate `self.tools`."""
        self.status = 'starting'
        self.proc = await asyncio.create_subprocess_exec(
            self.config.command,
            *self.config.args,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        asyncio.create_task(self._read_stderr())
        await self.list_tools()
        self.status = 'running'

    async def stop(self):
        """Best-effort shutdown: close stdin, terminate, reap, go idle."""
        if self.proc:
            try:
                if self.proc.stdin:
                    self.proc.stdin.close()
                    await self.proc.stdin.wait_closed()
            except Exception:
                pass
            try:
                self.proc.terminate()
                await self.proc.wait()
            except Exception:
                pass
            self.proc = None
        self.status = 'idle'

    async def _read_stderr(self):
        """Mirror the child's stderr to our stdout for diagnostics.

        Fix: the loop previously tested `self.proc.stdout.at_eof()` while
        reading from *stderr*, so it could spin forever after the child
        closed stderr but kept stdout open. Test the stream we read, and
        break on the empty line readline() returns at EOF.
        """
        while self.proc and not self.proc.stderr.at_eof():
            line = await self.proc.stderr.readline()
            if not line:
                break  # EOF on stderr
            print(f'[MCP:{self.name}:err] {line.decode().strip()}')

    async def _send_request(self, method: str, params: Optional[dict] = None):
        """Send one JSON-RPC request and return its 'result' field (or None)."""
        req_id = self._get_id()
        request = {
            'jsonrpc': '2.0',
            'id': req_id,
            'method': method,
            'params': params or {}
        }
        self.proc.stdin.write(json.dumps(request).encode() + b'\n')
        await self.proc.stdin.drain()
        # Simplistic wait for response - in real use, we'd need a read loop
        # keyed on response ids. For now, read one line and assume it answers
        # this request (fragile, but for MVP).
        line = await self.proc.stdout.readline()
        if line:
            resp = json.loads(line.decode())
            return resp.get('result')
        return None

    async def list_tools(self):
        """Query the server via tools/list and cache the specs in `self.tools`."""
        result = await self._send_request('tools/list')
        if result and 'tools' in result:
            for t in result['tools']:
                self.tools[t['name']] = t
        return self.tools

    async def call_tool(self, name: str, arguments: dict):
        """Invoke a tool via tools/call; join text content parts, else repr the result."""
        result = await self._send_request('tools/call', {'name': name, 'arguments': arguments})
        if result and 'content' in result:
            return '\n'.join([c.get('text', '') for c in result['content'] if c.get('type') == 'text'])
        return str(result)
class ExternalMCPManager:
    """Registry of running external MCP servers and router for their tools."""

    def __init__(self):
        # server name -> StdioMCPServer
        self.servers = {}

    async def add_server(self, config: models.MCPServerConfig):
        """Start a server described by `config` and register it once up."""
        if config.url:
            # RemoteMCPServer placeholder
            return
        srv = StdioMCPServer(config)
        await srv.start()
        self.servers[config.name] = srv

    async def stop_all(self):
        """Stop every registered server, then clear the registry."""
        for srv in self.servers.values():
            await srv.stop()
        self.servers = {}

    def get_all_tools(self) -> dict:
        """Flatten all servers' tools into one dict, tagging each entry with
        the owning server's name and current status."""
        merged = {}
        for sname, srv in self.servers.items():
            for tname, spec in srv.tools.items():
                entry = dict(spec)
                entry['server'] = sname
                entry['server_status'] = srv.status
                merged[tname] = entry
        return merged

    def get_servers_status(self) -> dict[str, str]:
        """Map each registered server name to its status string."""
        return {sname: srv.status for sname, srv in self.servers.items()}

    async def async_dispatch(self, tool_name: str, tool_input: dict) -> str:
        """Route a tool call to whichever server advertises `tool_name`."""
        for srv in self.servers.values():
            if tool_name in srv.tools:
                return await srv.call_tool(tool_name, tool_input)
        return f'Error: External tool {tool_name} not found.'
# Process-wide singleton: one manager owns all external MCP subprocesses.
_external_mcp_manager = ExternalMCPManager()


def get_external_mcp_manager() -> ExternalMCPManager:
    """Return the process-wide ExternalMCPManager singleton."""
    # Read-only access to a module-level name: no `global` declaration needed.
    return _external_mcp_manager
TOOL_NAMES: set[str] = {"read_file", "list_directory", "search_files", "get_file_summary", "py_get_skeleton", "py_get_code_outline", "py_get_definition", "get_git_diff", "web_search", "fetch_url", "get_ui_performance", "get_file_slice", "set_file_slice", "edit_file", "py_update_definition", "py_get_signature", "py_set_signature", "py_get_class_summary", "py_get_var_declaration", "py_set_var_declaration", "py_find_usages", "py_get_imports", "py_check_syntax", "py_get_hierarchy", "py_get_docstring", "get_tree"} TOOL_NAMES: set[str] = {"read_file", "list_directory", "search_files", "get_file_summary", "py_get_skeleton", "py_get_code_outline", "py_get_definition", "get_git_diff", "web_search", "fetch_url", "get_ui_performance", "get_file_slice", "set_file_slice", "edit_file", "py_update_definition", "py_get_signature", "py_set_signature", "py_get_class_summary", "py_get_var_declaration", "py_set_var_declaration", "py_find_usages", "py_get_imports", "py_check_syntax", "py_get_hierarchy", "py_get_docstring", "get_tree"}
def dispatch(tool_name: str, tool_input: dict[str, Any]) -> str: def dispatch(tool_name: str, tool_input: dict[str, Any]) -> str:
@@ -987,17 +1109,29 @@ def dispatch(tool_name: str, tool_input: dict[str, Any]) -> str:
return f"ERROR: unknown MCP tool '{tool_name}'" return f"ERROR: unknown MCP tool '{tool_name}'"
async def async_dispatch(tool_name: str, tool_input: dict[str, Any]) -> str:
    """
    Dispatch an MCP tool call by name asynchronously. Returns the result as a string.

    Native tools run in a worker thread (so asyncio.gather can run blocking
    I/O-bound tools in parallel); unknown names fall through to tools exposed
    by external MCP servers.
    """
    # Check native tools
    native_names = {t['name'] for t in MCP_TOOL_SPECS}
    if tool_name in native_names:
        return await asyncio.to_thread(dispatch, tool_name, tool_input)
    # Check external tools (hoist the manager lookup — it was fetched twice)
    manager = get_external_mcp_manager()
    if tool_name in manager.get_all_tools():
        return await manager.async_dispatch(tool_name, tool_input)
    # Quote the name to match the error emitted by the synchronous dispatch().
    return f"ERROR: unknown MCP tool '{tool_name}'"
def get_tool_schemas() -> list[dict[str, Any]]:
    """Returns the list of tool specifications for the AI: the native
    MCP_TOOL_SPECS plus one entry per tool on running external MCP servers."""
    res = list(MCP_TOOL_SPECS)
    manager = get_external_mcp_manager()
    for tname, tinfo in manager.get_all_tools().items():
        res.append({
            'name': tname,
            'description': tinfo.get('description', ''),
            # External servers may omit inputSchema; default to an open object.
            'parameters': tinfo.get('inputSchema', {'type': 'object', 'properties': {}}),
        })
    return res
# ------------------------------------------------------------------ tool schema helpers # ------------------------------------------------------------------ tool schema helpers
+55
View File
@@ -37,6 +37,8 @@ See Also:
- src/project_manager.py for persistence layer - src/project_manager.py for persistence layer
""" """
from __future__ import annotations from __future__ import annotations
import json
import os
import tomllib import tomllib
import datetime import datetime
from dataclasses import dataclass, field from dataclasses import dataclass, field
@@ -515,3 +517,56 @@ class Persona:
bias_profile=data.get("bias_profile"), bias_profile=data.get("bias_profile"),
) )
@dataclass
class MCPServerConfig:
    """Configuration for one external MCP server entry in mcp_config.json."""
    name: str
    command: Optional[str] = None            # executable for local stdio servers
    args: List[str] = field(default_factory=list)
    url: Optional[str] = None                # endpoint for remote servers
    auto_start: bool = False                 # launch automatically on project load

    def to_dict(self) -> Dict[str, Any]:
        """Serialize back to the JSON entry shape; empty fields are omitted."""
        payload: Dict[str, Any] = {'auto_start': self.auto_start}
        for key, value in (('command', self.command), ('args', self.args), ('url', self.url)):
            if value:
                payload[key] = value
        return payload

    @classmethod
    def from_dict(cls, name: str, data: Dict[str, Any]) -> 'MCPServerConfig':
        """Build a config from its JSON entry; `name` is the mcpServers key."""
        return cls(
            name=name,
            command=data.get('command'),
            args=data.get('args', []),
            url=data.get('url'),
            auto_start=data.get('auto_start', False),
        )
@dataclass
class MCPConfiguration:
    """Top-level MCP config: maps server name -> MCPServerConfig."""
    mcpServers: Dict[str, MCPServerConfig] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to the mcp_config.json document shape."""
        servers = {name: cfg.to_dict() for name, cfg in self.mcpServers.items()}
        return {'mcpServers': servers}

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'MCPConfiguration':
        """Parse a mcp_config.json document; a missing 'mcpServers' key yields
        an empty configuration."""
        parsed = {}
        for name, entry in data.get('mcpServers', {}).items():
            parsed[name] = MCPServerConfig.from_dict(name, entry)
        return cls(mcpServers=parsed)
def load_mcp_config(path: str) -> MCPConfiguration:
    """Load an MCPConfiguration from a JSON file at `path`.

    Best-effort: a missing file or malformed JSON yields an empty
    configuration instead of raising, so a bad config cannot break startup.
    """
    if not os.path.exists(path):
        return MCPConfiguration()
    with open(path, 'r', encoding='utf-8') as f:
        try:
            return MCPConfiguration.from_dict(json.load(f))
        except Exception:
            return MCPConfiguration()
+2
View File
@@ -0,0 +1,2 @@
[presets.ModalPreset]
system_prompt = "Modal Content"
+106
View File
@@ -0,0 +1,106 @@
import os
import json
import pytest
from pathlib import Path
from src.app_controller import AppController
from src import models
@pytest.fixture
def controller(tmp_path):
    """Provide a bare AppController.

    NOTE(review): an earlier draft built config/project/mcp dictionaries here
    (config_data, project_data, mcp_data, project_mcp_data) but never wrote
    them to disk or passed them anywhere — AppController.init_state reads
    models.CONFIG_PATH directly — so that dead setup was removed. No test in
    this module currently requests this fixture; tests monkeypatch
    models.CONFIG_PATH themselves instead.
    """
    return AppController()
def test_app_controller_mcp_loading(tmp_path, monkeypatch):
    """The global [ai].mcp_config_path from config.toml populates ctrl.mcp_config."""
    # Mock CONFIG_PATH to point to our temp config
    config_file = tmp_path / "config.toml"
    monkeypatch.setattr(models, "CONFIG_PATH", str(config_file))
    mcp_global_file = tmp_path / "mcp_global.json"
    mcp_global_file.write_text(json.dumps({"mcpServers": {"global": {"command": "echo"}}}))
    config_file.write_text(f"""
[ai]
mcp_config_path = "{mcp_global_file.as_posix()}"
[projects]
paths = []
active = ""
""")
    ctrl = AppController()
    # Mock _load_active_project to not do anything for now
    monkeypatch.setattr(ctrl, "_load_active_project", lambda: None)
    ctrl.project = {}
    ctrl.init_state()
    assert "global" in ctrl.mcp_config.mcpServers
    assert ctrl.mcp_config.mcpServers["global"].command == "echo"
def test_app_controller_mcp_project_override(tmp_path, monkeypatch):
    """A project-level mcp_config_path (relative to the project file) wins
    over the global one from config.toml."""
    config_file = tmp_path / "config.toml"
    monkeypatch.setattr(models, "CONFIG_PATH", str(config_file))
    project_file = tmp_path / "project.toml"
    mcp_project_file = tmp_path / "mcp_project.json"
    mcp_project_file.write_text(json.dumps({"mcpServers": {"project": {"command": "echo"}}}))
    config_file.write_text(f"""
[ai]
mcp_config_path = "non-existent.json"
[projects]
paths = ["{project_file.as_posix()}"]
active = "{project_file.as_posix()}"
""")
    ctrl = AppController()
    ctrl.active_project_path = str(project_file)
    ctrl.project = {"project": {"mcp_config_path": "mcp_project.json"}}
    # Mock _load_active_project to keep our manual project dict
    monkeypatch.setattr(ctrl, "_load_active_project", lambda: None)
    ctrl.init_state()
    assert "project" in ctrl.mcp_config.mcpServers
    assert "non-existent" not in ctrl.mcp_config.mcpServers
+55
View File
@@ -0,0 +1,55 @@
import asyncio
import json
import sys
import pytest
from src import mcp_client
from src import models
@pytest.mark.asyncio
async def test_external_mcp_real_process():
    """Spawn the real mock stdio MCP server and round-trip a tool call."""
    manager = mcp_client.ExternalMCPManager()
    # Use our mock script; sys.executable instead of the bare "python" string,
    # which is not on PATH on every platform/venv.
    config = models.MCPServerConfig(
        name="real-mock",
        command=sys.executable,
        args=["scripts/mock_mcp_server.py"],
    )
    await manager.add_server(config)
    try:
        tools = manager.get_all_tools()
        assert "echo" in tools
        assert tools["echo"]["server"] == "real-mock"
        result = await manager.async_dispatch("echo", {"hello": "world"})
        assert "ECHO: {'hello': 'world'}" in result
    finally:
        await manager.stop_all()
@pytest.mark.asyncio
async def test_get_tool_schemas_includes_external():
    """get_tool_schemas() must merge tools from running external servers."""
    manager = mcp_client.get_external_mcp_manager()
    # Reset the shared singleton so earlier tests can't leak servers in.
    await manager.stop_all()
    # sys.executable instead of the bare "python" string for portability.
    config = models.MCPServerConfig(
        name="test-server",
        command=sys.executable,
        args=["scripts/mock_mcp_server.py"],
    )
    await manager.add_server(config)
    try:
        schemas = mcp_client.get_tool_schemas()
        echo_schema = next((s for s in schemas if s["name"] == "echo"), None)
        assert echo_schema is not None
        assert echo_schema["description"] == "Echo input"
        assert echo_schema["parameters"] == {"type": "object"}
    finally:
        await manager.stop_all()
+67
View File
@@ -0,0 +1,67 @@
import asyncio
import json
import os
from pathlib import Path
import pytest
from src.app_controller import AppController
from src import mcp_client
from src import ai_client
from src import models
@pytest.mark.asyncio
async def test_external_mcp_e2e_refresh_and_call(tmp_path, monkeypatch):
    """E2E: config file -> AppController.init_state -> refresh -> tool call."""
    import sys  # for sys.executable: the bare "python" string is not portable
    # 1. Setup mock config and mock server script
    config_file = tmp_path / "config.toml"
    monkeypatch.setattr(models, "CONFIG_PATH", str(config_file))
    mock_script = Path("scripts/mock_mcp_server.py").absolute()
    mcp_config_file = tmp_path / "mcp_config.json"
    mcp_config_file.write_text(json.dumps({
        "mcpServers": {
            "e2e-server": {
                "command": sys.executable,
                "args": [str(mock_script)],
                "auto_start": True,
            }
        }
    }))
    config_file.write_text(f"""
[ai]
mcp_config_path = "{mcp_config_file.as_posix()}"
[projects]
paths = []
active = ""
""")
    # 2. Initialize AppController
    ctrl = AppController()
    monkeypatch.setattr(ctrl, "_load_active_project", lambda: None)
    ctrl.project = {}
    ctrl.init_state()
    manager = mcp_client.get_external_mcp_manager()
    try:
        # Trigger refresh directly: the background event thread is not
        # running in a unit test.
        await ctrl.refresh_external_mcps()
        # 3. Verify tools are discovered
        assert "echo" in manager.get_all_tools()

        # 4. Auto-approving HITL callback (a def, not a lambda bound to a name).
        def approve(desc, base, qa):
            return "Approved"

        # 5. Call the tool through ai_client's execution path.
        name, cid, out, orig = await ai_client._execute_single_tool_call_async(
            "echo", {"message": "hello"}, "id1", ".", approve, None, 0
        )
        assert "ECHO: {'message': 'hello'}" in out
    finally:
        # Guaranteed cleanup, consistent with the other external-MCP tests.
        await manager.stop_all()
+62
View File
@@ -0,0 +1,62 @@
import asyncio
import json
import pytest
from unittest.mock import MagicMock, patch, AsyncMock
from src import ai_client
from src import mcp_client
from src import models
@pytest.mark.asyncio
async def test_external_mcp_hitl_approval():
    """External tools must go through the HITL approval callback, and the
    approval prompt must identify them as EXTERNAL MCP tools."""
    # 1. Stub manager owning one external tool whose call succeeds.
    mock_manager = mcp_client.ExternalMCPManager()
    mock_server = AsyncMock()
    mock_server.name = "test-server"
    mock_server.tools = {"ext_tool": {"name": "ext_tool", "description": "desc"}}
    mock_server.call_tool.return_value = "Success"
    mock_manager.servers["test-server"] = mock_server
    with patch("src.mcp_client.get_external_mcp_manager", return_value=mock_manager):
        # The approval callback is passed per call below. (A previous version
        # also assigned it to ai_client.confirm_and_run_callback — a module
        # global mutated with no teardown, leaking into other tests; removed.)
        mock_pre_tool = MagicMock(return_value="Approved")
        name, cid, out, orig_name = await ai_client._execute_single_tool_call_async(
            "ext_tool", {"arg1": "val1"}, "call_123", ".", mock_pre_tool, None, 0
        )
        # 2. Assertions
        assert out == "Success"
        mock_pre_tool.assert_called_once()
        # Check description contains EXTERNAL MCP
        call_args = mock_pre_tool.call_args[0]
        assert "EXTERNAL MCP TOOL: ext_tool" in call_args[0]
        assert "arg1: 'val1'" in call_args[0]
@pytest.mark.asyncio
async def test_external_mcp_hitl_rejection():
    """A rejected HITL prompt cancels the external tool call entirely."""
    manager_stub = mcp_client.ExternalMCPManager()
    server_stub = AsyncMock()
    server_stub.name = "test-server"
    server_stub.tools = {"ext_tool": {"name": "ext_tool"}}
    manager_stub.servers["test-server"] = server_stub
    with patch("src.mcp_client.get_external_mcp_manager", return_value=manager_stub):
        reject = MagicMock(return_value=None)  # None signals user rejection
        name, cid, out, orig_name = await ai_client._execute_single_tool_call_async(
            "ext_tool", {"arg1": "val1"}, "id", ".", reject, None, 0
        )
        assert out == "USER REJECTED: tool execution cancelled"
        server_stub.call_tool.assert_not_called()
+53
View File
@@ -0,0 +1,53 @@
import os
import json
import pytest
from src import models
def test_mcp_server_config_to_from_dict():
    """MCPServerConfig round-trips through from_dict/to_dict."""
    payload = {
        "command": "node",
        "args": ["server.js"],
        "auto_start": True,
    }
    cfg = models.MCPServerConfig.from_dict("test-server", payload)
    assert cfg.name == "test-server"
    assert cfg.command == "node"
    assert cfg.args == ["server.js"]
    assert cfg.auto_start is True
    assert cfg.to_dict() == payload
def test_mcp_configuration_to_from_dict():
    """MCPConfiguration round-trips both stdio and remote server entries."""
    document = {
        "mcpServers": {
            "server1": {
                "command": "python",
                "args": ["-m", "mcp_server"],
                "auto_start": False,
            },
            "server2": {
                "url": "http://localhost:8080/sse",
                "auto_start": True,
            },
        }
    }
    cfg = models.MCPConfiguration.from_dict(document)
    assert len(cfg.mcpServers) == 2
    assert cfg.mcpServers["server1"].command == "python"
    assert cfg.mcpServers["server2"].url == "http://localhost:8080/sse"
    assert cfg.to_dict() == document
def test_load_mcp_config(tmp_path):
    """models.load_mcp_config parses a JSON config from an explicit path."""
    # (Removed stale scaffolding comments that speculated about needing a
    # load-from-path helper — models.load_mcp_config exists and is under test.)
    config_file = tmp_path / "mcp_config.json"
    config_file.write_text(json.dumps({
        "mcpServers": {
            "test": {"command": "echo", "args": ["hello"]}
        }
    }))
    cfg = models.load_mcp_config(str(config_file))
    assert "test" in cfg.mcpServers
    assert cfg.mcpServers["test"].command == "echo"