manual_slop/tests/test_mma_exec.py

import pytest
from unittest.mock import patch, MagicMock
from scripts.mma_exec import (
    create_parser,
    get_role_documents,
    execute_agent,
    get_model_for_role,
    get_dependencies,
)


def test_parser_role_choices():
    """Test that the parser accepts valid roles and the prompt argument."""
    parser = create_parser()
    valid_roles = ['tier1', 'tier2', 'tier3', 'tier4']
    test_prompt = "Analyze the codebase for bottlenecks."
    for role in valid_roles:
        args = parser.parse_args(['--role', role, test_prompt])
        assert args.role == role
        assert args.prompt == test_prompt


def test_parser_invalid_role():
    """Test that the parser rejects roles outside the specified choices."""
    parser = create_parser()
    with pytest.raises(SystemExit):
        parser.parse_args(['--role', 'tier5', 'Some prompt'])


def test_parser_prompt_required():
    """Test that the prompt argument is mandatory."""
    parser = create_parser()
    with pytest.raises(SystemExit):
        parser.parse_args(['--role', 'tier3'])


def test_parser_help():
    """Test that the help flag exits via SystemExit with code 0."""
    parser = create_parser()
    with pytest.raises(SystemExit) as excinfo:
        parser.parse_args(['--help'])
    assert excinfo.value.code == 0


def test_get_role_documents():
    """Test that get_role_documents returns the correct documentation paths for each tier."""
    assert get_role_documents('tier1') == ['conductor/product.md', 'conductor/product-guidelines.md']
    assert get_role_documents('tier2') == ['conductor/tech-stack.md', 'conductor/workflow.md']
    assert get_role_documents('tier3') == ['conductor/workflow.md']
    assert get_role_documents('tier4') == []


def test_get_model_for_role():
    """Test that get_model_for_role returns the correct model for each role."""
    assert get_model_for_role('tier1-orchestrator') == 'gemini-3.1-pro-preview'
    assert get_model_for_role('tier2-tech-lead') == 'gemini-3-flash-preview'
    assert get_model_for_role('tier3-worker') == 'gemini-2.5-flash-lite'
    assert get_model_for_role('tier4-qa') == 'gemini-2.5-flash-lite'


def test_execute_agent():
    """
    Test that execute_agent calls subprocess.run with the correct gemini CLI arguments
    including the model specified for the role.
    """
    role = "tier3-worker"
    prompt = "Write a unit test."
    docs = ["file1.py", "docs/spec.md"]
    expected_gemini_arg = "Use the mma-tier3-worker skill. Write a unit test. @file1.py @docs/spec.md"
    expected_model = "gemini-2.5-flash-lite"
    mock_stdout = "Mocked AI Response"

    with patch("subprocess.run") as mock_run:
        mock_process = MagicMock()
        mock_process.stdout = mock_stdout
        mock_process.returncode = 0
        mock_run.return_value = mock_process

        result = execute_agent(role, prompt, docs)

        mock_run.assert_called_once()
        args, kwargs = mock_run.call_args
        cmd_list = args[0]
        assert cmd_list[0] == "gemini"
        assert cmd_list[1] == "-p"
        assert cmd_list[2] == expected_gemini_arg
        # Verify the correct model is passed via the --model flag
        assert "--model" in cmd_list
        model_idx = cmd_list.index("--model")
        assert cmd_list[model_idx + 1] == expected_model
        assert kwargs.get("capture_output") is True
        assert kwargs.get("text") is True
        assert result == mock_stdout


def test_get_dependencies(tmp_path):
    """Test that get_dependencies extracts imported module names from a Python file."""
    content = (
        "import os\n"
        "import sys\n"
        "import file_cache\n"
        "from mcp_client import something\n"
    )
    filepath = tmp_path / "mock_script.py"
    filepath.write_text(content)

    dependencies = get_dependencies(filepath)

    assert dependencies == ['os', 'sys', 'file_cache', 'mcp_client']
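
For reference, below is a minimal sketch of a scripts/mma_exec.py module that would satisfy these tests. It is not the actual implementation: the role/document and role/model tables, the argparse choices, the prompt format, and the regex-based get_dependencies are all inferred from the assertions above.

# Hypothetical sketch of scripts/mma_exec.py, reverse-engineered from the tests.
import argparse
import re
import subprocess

# Assumed mappings; the real module may build these differently.
ROLE_DOCS = {
    'tier1': ['conductor/product.md', 'conductor/product-guidelines.md'],
    'tier2': ['conductor/tech-stack.md', 'conductor/workflow.md'],
    'tier3': ['conductor/workflow.md'],
    'tier4': [],
}

ROLE_MODELS = {
    'tier1-orchestrator': 'gemini-3.1-pro-preview',
    'tier2-tech-lead': 'gemini-3-flash-preview',
    'tier3-worker': 'gemini-2.5-flash-lite',
    'tier4-qa': 'gemini-2.5-flash-lite',
}


def create_parser():
    parser = argparse.ArgumentParser(description="Run an MMA agent for a given role.")
    parser.add_argument('--role', choices=['tier1', 'tier2', 'tier3', 'tier4'], required=True)
    parser.add_argument('prompt')
    return parser


def get_role_documents(role):
    return ROLE_DOCS[role]


def get_model_for_role(role):
    return ROLE_MODELS[role]


def execute_agent(role, prompt, docs):
    # Build the prompt string the tests expect: skill prefix, user prompt,
    # then @-prefixed file references separated by spaces.
    gemini_arg = f"Use the mma-{role} skill. {prompt} " + " ".join(f"@{doc}" for doc in docs)
    result = subprocess.run(
        ["gemini", "-p", gemini_arg, "--model", get_model_for_role(role)],
        capture_output=True,
        text=True,
    )
    return result.stdout


def get_dependencies(filepath):
    # Collect top-level module names from "import X" and "from X import Y"
    # lines, preserving file order.
    pattern = re.compile(r'^\s*(?:import|from)\s+([A-Za-z_][\w.]*)', re.MULTILINE)
    return pattern.findall(filepath.read_text())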