fix(conductor): Apply review suggestions for track 'live_gui_testing_20260223'

This commit is contained in:
2026-02-23 16:49:36 -05:00
parent 2da1ef38af
commit 075d760721
3 changed files with 18 additions and 18 deletions

View File

@@ -8,16 +8,15 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
 
 from ai_client import set_agent_tools, _build_anthropic_tools
 
-def test_set_agent_tools_gemini():
-    with patch('ai_client._ensure_gemini_client'):
-        set_agent_tools('gemini', ['read_file', 'list_directory'])
-    # Implementation details check would go here
+def test_set_agent_tools():
+    # Correct usage: pass a dict
+    agent_tools = {"read_file": True, "list_directory": False}
+    set_agent_tools(agent_tools)
 
 def test_build_anthropic_tools_conversion():
-    # Test that MCP tools are correctly formatted for Anthropic
-    mcp_tools = [
-        {"name": "test_tool", "description": "desc", "input_schema": {"type": "object", "properties": {}}}
-    ]
-    anthropic_tools = _build_anthropic_tools(mcp_tools)
-    assert len(anthropic_tools) == 1
-    assert anthropic_tools[0]['name'] == 'test_tool'
+    # _build_anthropic_tools takes no arguments and uses the global _agent_tools
+    # We set a tool to True and check if it appears in the output
+    set_agent_tools({"read_file": True})
+    anthropic_tools = _build_anthropic_tools()
+    tool_names = [t["name"] for t in anthropic_tools]
+    assert "read_file" in tool_names

View File

@@ -22,4 +22,5 @@ def test_get_history_bleed_stats_basic():
     stats = ai_client.get_history_bleed_stats()
     assert 'current' in stats
     assert 'limit' in stats
-    assert stats['limit'] == 1000
+    # ai_client.py hardcodes Gemini limit to 900_000
+    assert stats['limit'] == 900000

View File

@@ -10,10 +10,10 @@ import mcp_client
 
 def test_mcp_perf_tool_retrieval():
     # Test that the MCP tool can call performance_monitor metrics
-    mock_app = MagicMock()
-    mock_app.perf_monitor.get_metrics.return_value = {"fps": 60}
-    # Simulate tool call
-    with patch('mcp_client.get_app_instance', return_value=mock_app):
-        # We assume there's a tool named 'get_performance_metrics' in the MCP client
-        pass
+    mock_metrics = {"fps": 60, "last_frame_time_ms": 16.6}
+    # Simulate tool call by patching the callback
+    with patch('mcp_client.perf_monitor_callback', return_value=mock_metrics):
+        result = mcp_client.get_ui_performance()
+        assert "60" in result
+        assert "16.6" in result