feat(tier4): Add patch generation for auto-patching

- Add TIER4_PATCH_PROMPT to mma_prompts.py with unified diff format
- Add run_tier4_patch_generation function to ai_client.py
- Import mma_prompts in ai_client.py
- Add unit tests for patch generation
This commit is contained in:
2026-03-07 00:10:35 -05:00
parent 5277b11279
commit c8e8cb3bf3
5 changed files with 123 additions and 3 deletions

View File

@@ -5,8 +5,8 @@
 ## Phase 1: Patch Generation
 Focus: Generate unified diff on test failure
-- [ ] Task 1.1: Initialize MMA Environment
+- [x] Task 1.1: Initialize MMA Environment
-- [ ] Task 1.2: Extend Tier 4 prompt for patch generation
+- [~] Task 1.2: Extend Tier 4 prompt for patch generation
 - WHERE: `src/mma_prompts.py` or inline in `ai_client.py`
 - WHAT: Prompt to generate unified diff
 - HOW:

View File

@@ -16,7 +16,7 @@ paths = [
 "C:\\projects\\manual_slop\\tests\\artifacts\\temp_liveexecutionsim.toml",
 "C:\\projects\\manual_slop\\tests\\artifacts\\temp_simproject.toml",
 ]
-active = "C:\\projects\\manual_slop\\tests\\artifacts\\temp_project.toml"
+active = "C:\\projects\\manual_slop\\tests\\artifacts\\temp_livetoolssim.toml"
 [gui]
 separate_message_panel = false

View File

@@ -28,6 +28,7 @@ from pathlib import Path
 from src import project_manager
 from src import file_cache
 from src import mcp_client
+from src import mma_prompts
 import anthropic
 from src.gemini_cli_adapter import GeminiCliAdapter as GeminiCliAdapter
 from google import genai
@@ -1960,6 +1961,33 @@ def run_tier4_analysis(stderr: str) -> str:
     except Exception as e:
         return f"[QA ANALYSIS FAILED] {e}"
def run_tier4_patch_generation(error: str, file_context: str) -> str:
    """Ask the Tier 4 QA model for a unified-diff patch fixing *error*.

    Args:
        error: The stderr / failure output to analyze. Blank input short-circuits.
        file_context: Source text of the file(s) relevant to the failure.

    Returns:
        A unified diff string, "" when there is nothing to do (blank error or
        no Gemini client available), or a "[PATCH GENERATION FAILED] ..."
        marker string when the request raises.
    """
    if not error or not error.strip():
        return ""
    try:
        _ensure_gemini_client()
        if not _gemini_client:
            return ""
        prompt = (
            f"{mma_prompts.TIER4_PATCH_PROMPT}\n\n"
            f"Error:\n```\n{error}\n```\n\n"
            f"File Context:\n```\n{file_context}\n```\n"
        )
        model_name = "gemini-2.5-flash-lite"
        resp = _gemini_client.models.generate_content(
            model=model_name,
            contents=prompt,
            # temperature 0 for deterministic, minimal patches
            config=types.GenerateContentConfig(
                temperature=0.0,
                max_output_tokens=2048,
            )
        )
        patch = resp.text.strip() if resp.text else ""
        # The prompt forbids markdown fences (rule 6), but models frequently
        # add them anyway. Strip a leading ```/```diff line and a trailing ```
        # so callers receive a directly applicable unified diff.
        if patch.startswith("```"):
            newline_at = patch.find("\n")
            patch = patch[newline_at + 1:] if newline_at != -1 else ""
            if patch.endswith("```"):
                patch = patch[:-3].rstrip()
        return patch
    except Exception as e:
        # Best-effort: surface the failure as a marker string rather than raise,
        # mirroring run_tier4_analysis's "[QA ANALYSIS FAILED]" convention.
        return f"[PATCH GENERATION FAILED] {e}"
def get_token_stats(md_content: str) -> dict[str, Any]:
    global _provider, _gemini_client, _model, _CHARS_PER_TOKEN
    total_tokens = 0

View File

@@ -153,3 +153,28 @@ PROMPTS: Dict[str, str] = {
     "tier2_track_finalization": TIER2_TRACK_FINALIZATION,
     "tier2_contract_first": TIER2_CONTRACT_FIRST,
 }
# Preamble for the Tier 4 QA agent's auto-patching mode: instructs the model
# to emit a bare unified diff (no prose, no markdown fences). The error text
# and file context are appended after this preamble by
# run_tier4_patch_generation in ai_client.py.
# NOTE(review): defined below the PROMPTS registry and not registered in it —
# confirm whether it should also be exposed via PROMPTS.
TIER4_PATCH_PROMPT: str = """You are a Tier 4 QA Agent specializing in error analysis and patch generation.
When a test or command fails, analyze the error and generate a unified diff patch to fix it.
OUTPUT FORMAT: Unified diff format (diff -u)
```
--- a/path/to/file.py
+++ b/path/to/file.py
@@ -start,count +start,count @@
context line
-removed line
+added line
context line
```
RULES:
1. Use "--- a/" and "+++ b/" prefixes with the actual file path
2. Include sufficient context lines (3 above/below minimum)
3. Use @@ -X,Y +X,Y @@ format for hunk headers
4. Only include changes necessary to fix the error
5. If multiple files need changes, include each in the patch
6. Output ONLY the unified diff - no explanations, no markdown code blocks
Analyze this error and generate the patch:
"""

View File

@@ -0,0 +1,67 @@
from unittest.mock import MagicMock, patch
import pytest
from src import ai_client
from src import mma_prompts
def test_tier4_patch_prompt_exists() -> None:
    """Verify mma_prompts exposes TIER4_PATCH_PROMPT with unified-diff markers."""
    assert hasattr(mma_prompts, "TIER4_PATCH_PROMPT")
    prompt_text = mma_prompts.TIER4_PATCH_PROMPT
    lowered = prompt_text.lower()
    assert ("unified diff" in lowered) or ("diff -u" in lowered)
    for marker in ("---", "+++", "@@"):
        assert marker in prompt_text
def test_tier4_patch_prompt_format_instructions() -> None:
    """Test that the patch prompt includes format instructions for unified diff."""
    prompt = mma_prompts.TIER4_PATCH_PROMPT
    # The original disjunctions ('"--- a/" in prompt or "---" in prompt') were
    # tautological — the weaker arm subsumed the stronger one, so the test
    # could pass without the a/-b/ prefixes it is meant to check. Assert the
    # specific prefixes the prompt's rule 1 mandates.
    assert "--- a/" in prompt
    assert "+++ b/" in prompt
def test_run_tier4_patch_generation_exists() -> None:
    """Verify ai_client exposes a callable named run_tier4_patch_generation."""
    fn = getattr(ai_client, "run_tier4_patch_generation", None)
    assert fn is not None
    assert callable(fn)
def test_run_tier4_patch_generation_empty_error() -> None:
    """Test that run_tier4_patch_generation returns empty string on empty error."""
    with patch("src.ai_client._ensure_gemini_client"), \
            patch("src.ai_client._gemini_client") as fake_client:
        canned = MagicMock()
        canned.text = ""
        fake_client.models.generate_content.return_value = canned
        # Blank error must short-circuit to "" without consulting the model.
        assert ai_client.run_tier4_patch_generation("", "file context") == ""
def test_run_tier4_patch_generation_calls_ai() -> None:
    """Test that run_tier4_patch_generation calls the AI with the correct prompt."""
    with patch("src.ai_client._ensure_gemini_client"), \
            patch("src.ai_client._gemini_client", create=True) as mock_client, \
            patch("src.ai_client.types") as mock_types:
        mock_resp = MagicMock()
        mock_resp.text = "--- a/test.py\n+++ b/test.py\n@@ -1 +1 @@\n-old\n+new"
        mock_client.models.generate_content.return_value = mock_resp
        mock_types.GenerateContentConfig = MagicMock()
        error = "TypeError: unsupported operand"
        file_context = "def foo():\n pass"
        result = ai_client.run_tier4_patch_generation(error, file_context)
        mock_client.models.generate_content.assert_called_once()
        # The original test never inspected the prompt (despite its docstring)
        # and discarded `result`; verify the prompt carries both the error and
        # the file context, and that the model's diff is returned unchanged.
        _, call_kwargs = mock_client.models.generate_content.call_args
        sent_prompt = call_kwargs["contents"]
        assert error in sent_prompt
        assert file_context in sent_prompt
        assert result == mock_resp.text
def test_run_tier4_patch_generation_returns_diff() -> None:
    """Test that run_tier4_patch_generation returns diff text."""
    with patch("src.ai_client._ensure_gemini_client"), \
            patch("src.ai_client._gemini_client", create=True) as fake_client, \
            patch("src.ai_client.types") as fake_types:
        fake_types.GenerateContentConfig = MagicMock()
        canned = MagicMock()
        canned.text = "--- a/src/test.py\n+++ b/src/test.py\n@@ -10,5 +10,6 @@\n def test_func():\n- old_value = 1\n+ old_value = 1\n+ new_value = 2"
        fake_client.models.generate_content.return_value = canned
        diff_text = ai_client.run_tier4_patch_generation("error", "context")
        for marker in ("---", "+++", "@@"):
            assert marker in diff_text