fix(mma): Assign dedicated models per tier in execute_agent

This commit is contained in:
2026-02-25 19:51:00 -05:00
parent 00fbf5c44e
commit be2a77cc79
2 changed files with 35 additions and 11 deletions

View File

@@ -2,26 +2,36 @@ import argparse
import subprocess
import json
def get_model_for_role(role: str) -> str:
    """Return the gemini model name assigned to a tier role.

    Tier 1 (orchestrator) is given the strongest model and tier 2
    (tech lead) a mid-tier one; every other role falls back to the
    lightweight default.
    """
    model_by_role = {
        'tier1-orchestrator': 'gemini-3.1-pro-preview',
        'tier1': 'gemini-3.1-pro-preview',
        'tier2-tech-lead': 'gemini-3.0-flash-preview',
        'tier2': 'gemini-3.0-flash-preview',
    }
    # Unlisted roles (tier3/tier4 workers, QA, unknown) use the default model.
    return model_by_role.get(role, 'gemini-2.5-flash-lite')
def get_role_documents(role: str) -> list[str]:
    """Return the context documents to attach for a given tier role.

    Accepts both the short role names ('tier1', 'tier2', 'tier3') and the
    descriptive ones ('tier1-orchestrator', 'tier2-tech-lead',
    'tier3-worker'). Unknown roles (e.g. 'tier4-qa') get no documents.
    """
    # The rendered diff left both the pre- and post-change condition lines
    # in place, which is a syntax error; this is the post-change version.
    if role in ('tier1-orchestrator', 'tier1'):
        return ['conductor/product.md', 'conductor/product-guidelines.md']
    if role in ('tier2-tech-lead', 'tier2'):
        return ['conductor/tech-stack.md', 'conductor/workflow.md']
    if role in ('tier3-worker', 'tier3'):
        return ['conductor/workflow.md']
    return []
def execute_agent(role: str, prompt: str, docs: list[str]) -> str:
    """Invoke the gemini CLI for the given tier role and return its output.

    Builds a prompt that activates the role's mma skill, appends each
    document as an @-reference, and runs the CLI with the model selected
    for the role. (Tail of this function is elided by the diff view.)
    """
    # Model is chosen per tier; see get_model_for_role.
    model = get_model_for_role(role)
    command_text = f"Activate the mma-{role} skill. {prompt}"
    for doc in docs:
        command_text += f" @{doc}"
    # NOTE(review): the next line is stale pre-change diff residue — its
    # hard-coded model is immediately overwritten by the line after it.
    cmd = ['gemini', '-p', command_text, '--output-format', 'json', '--model', 'gemini-2.5-flash-lite']
    cmd = ['gemini', '-p', command_text, '--output-format', 'json', '--model', model]
    try:
        # NOTE(review): shell=True combined with a list argv is
        # platform-dependent; with a list, shell=False is the safe call —
        # confirm this is intentional.
        process = subprocess.run(cmd, capture_output=True, text=True, shell=True)
        # Surface stderr only when there is no stdout at all (best-effort
        # error reporting rather than raising).
        if not process.stdout and process.stderr:
            return f"Error: {process.stderr}"
        stdout = process.stdout
        # Find the start of the JSON payload; the CLI may prepend noise.
        start_index = stdout.find('{')
        if start_index != -1:
@@ -39,7 +49,7 @@ def create_parser():
parser = argparse.ArgumentParser(description="MMA Execution Script")
parser.add_argument(
"--role",
choices=['tier1', 'tier2', 'tier3', 'tier4'],
choices=['tier1', 'tier2', 'tier3', 'tier4', 'tier1-orchestrator', 'tier2-tech-lead', 'tier3-worker', 'tier4-qa'],
required=True,
help="The tier role to execute"
)

View File

@@ -1,6 +1,6 @@
import pytest
from unittest.mock import patch, MagicMock
from scripts.mma_exec import create_parser, get_role_documents, execute_agent
from scripts.mma_exec import create_parser, get_role_documents, execute_agent, get_model_for_role
def test_parser_role_choices():
"""Test that the parser accepts valid roles and the prompt argument."""
@@ -39,16 +39,24 @@ def test_get_role_documents():
assert get_role_documents('tier3') == ['conductor/workflow.md']
assert get_role_documents('tier4') == []
def test_get_model_for_role():
    """Each tier role must resolve to its dedicated model."""
    expected = {
        'tier1-orchestrator': 'gemini-3.1-pro-preview',
        'tier2-tech-lead': 'gemini-3.0-flash-preview',
        'tier3-worker': 'gemini-2.5-flash-lite',
        'tier4-qa': 'gemini-2.5-flash-lite',
    }
    for role, model in expected.items():
        assert get_model_for_role(role) == model
def test_execute_agent():
"""
Test that execute_agent calls subprocess.run with the correct gemini CLI arguments
for context amnesia.
including the model specified for the role.
"""
role = "tier3"
role = "tier3-worker"
prompt = "Write a unit test."
docs = ["file1.py", "docs/spec.md"]
expected_gemini_arg = "Activate the mma-tier3 skill. Write a unit test. @file1.py @docs/spec.md"
expected_gemini_arg = "Activate the mma-tier3-worker skill. Write a unit test. @file1.py @docs/spec.md"
expected_model = "gemini-2.5-flash-lite"
mock_stdout = "Mocked AI Response"
with patch("subprocess.run") as mock_run:
@@ -66,7 +74,13 @@ def test_execute_agent():
assert cmd_list[0] == "gemini"
assert cmd_list[1] == "-p"
assert cmd_list[2] == expected_gemini_arg
# Verify the correct model is passed via --model flag
assert "--model" in cmd_list
model_idx = cmd_list.index("--model")
assert cmd_list[model_idx + 1] == expected_model
assert kwargs.get("capture_output") is True
assert kwargs.get("text") is True
assert result == mock_stdout