fix(conductor): Apply review suggestions for track 'live_gui_testing_20260223'

2026-02-23 16:49:36 -05:00
parent 2da1ef38af
commit 075d760721
3 changed files with 18 additions and 18 deletions

View File

@@ -8,16 +8,15 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
 from ai_client import set_agent_tools, _build_anthropic_tools
-def test_set_agent_tools_gemini():
-    with patch('ai_client._ensure_gemini_client'):
-        set_agent_tools('gemini', ['read_file', 'list_directory'])
-    # Implementation details check would go here
+def test_set_agent_tools():
+    # Correct usage: pass a dict
+    agent_tools = {"read_file": True, "list_directory": False}
+    set_agent_tools(agent_tools)
 def test_build_anthropic_tools_conversion():
-    # Test that MCP tools are correctly formatted for Anthropic
-    mcp_tools = [
-        {"name": "test_tool", "description": "desc", "input_schema": {"type": "object", "properties": {}}}
-    ]
-    anthropic_tools = _build_anthropic_tools(mcp_tools)
-    assert len(anthropic_tools) == 1
-    assert anthropic_tools[0]['name'] == 'test_tool'
+    # _build_anthropic_tools takes no arguments and uses the global _agent_tools
+    # We set a tool to True and check if it appears in the output
+    set_agent_tools({"read_file": True})
+    anthropic_tools = _build_anthropic_tools()
+    tool_names = [t["name"] for t in anthropic_tools]
+    assert "read_file" in tool_names

View File

@@ -22,4 +22,5 @@ def test_get_history_bleed_stats_basic():
     stats = ai_client.get_history_bleed_stats()
     assert 'current' in stats
     assert 'limit' in stats
-    assert stats['limit'] == 1000
+    # ai_client.py hardcodes Gemini limit to 900_000
+    assert stats['limit'] == 900000
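
For reference, the updated assertion expects a stats dict shaped roughly like the sketch below. The constant name and the current-value tracking are hypothetical; only the 900_000 figure comes from the comment above.

GEMINI_HISTORY_LIMIT = 900_000  # assumed constant; mirrors the hardcoded Gemini limit

def get_history_bleed_stats() -> dict:
    current = 0  # placeholder; the real value tracks accumulated history size
    return {"current": current, "limit": GEMINI_HISTORY_LIMIT}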

View File

@@ -10,10 +10,10 @@ import mcp_client
 def test_mcp_perf_tool_retrieval():
     # Test that the MCP tool can call performance_monitor metrics
-    mock_app = MagicMock()
-    mock_app.perf_monitor.get_metrics.return_value = {"fps": 60}
-    # Simulate tool call
-    with patch('mcp_client.get_app_instance', return_value=mock_app):
-        # We assume there's a tool named 'get_performance_metrics' in the MCP client
-        pass
+    mock_metrics = {"fps": 60, "last_frame_time_ms": 16.6}
+    # Simulate tool call by patching the callback
+    with patch('mcp_client.perf_monitor_callback', return_value=mock_metrics):
+        result = mcp_client.get_ui_performance()
+        assert "60" in result
+        assert "16.6" in result