feat(tier4): Add patch generation for auto-patching

- Add TIER4_PATCH_PROMPT to mma_prompts.py with unified diff format
- Add run_tier4_patch_generation function to ai_client.py
- Import mma_prompts in ai_client.py
- Add unit tests for patch generation
This commit is contained in:
2026-03-07 00:10:35 -05:00
parent 5277b11279
commit c8e8cb3bf3
5 changed files with 123 additions and 3 deletions

View File

@@ -28,6 +28,7 @@ from pathlib import Path
from src import project_manager
from src import file_cache
from src import mcp_client
from src import mma_prompts
import anthropic
from src.gemini_cli_adapter import GeminiCliAdapter as GeminiCliAdapter
from google import genai
@@ -1960,6 +1961,33 @@ def run_tier4_analysis(stderr: str) -> str:
except Exception as e:
return f"[QA ANALYSIS FAILED] {e}"
def run_tier4_patch_generation(error: str, file_context: str) -> str:
    """Ask the Gemini client to generate a unified-diff patch for *error*.

    Builds a prompt from ``mma_prompts.TIER4_PATCH_PROMPT`` plus the error
    text and surrounding file context, then sends it to the
    ``gemini-2.5-flash-lite`` model with deterministic settings.

    Returns the model's patch text, an empty string when the error is
    blank or no client could be initialised, or a
    ``[PATCH GENERATION FAILED]`` marker string when the API call raises.
    """
    # Nothing to patch without an actual error message.
    if not (error and error.strip()):
        return ""
    try:
        _ensure_gemini_client()
        if not _gemini_client:
            # Client unavailable; callers treat "" as "no patch produced".
            return ""
        # Prompt layout: instructions first, then the error and the file
        # context, each fenced in its own code block.
        prompt = (
            f"{mma_prompts.TIER4_PATCH_PROMPT}\n\n"
            f"Error:\n```\n{error}\n```\n\n"
            f"File Context:\n```\n{file_context}\n```\n"
        )
        response = _gemini_client.models.generate_content(
            model="gemini-2.5-flash-lite",
            contents=prompt,
            config=types.GenerateContentConfig(
                temperature=0.0,  # deterministic output for patch text
                max_output_tokens=2048,
            ),
        )
        text = response.text
        return text.strip() if text else ""
    except Exception as e:
        # Mirror run_tier4_analysis: never raise to the caller, report inline.
        return f"[PATCH GENERATION FAILED] {e}"
def get_token_stats(md_content: str) -> dict[str, Any]:
global _provider, _gemini_client, _model, _CHARS_PER_TOKEN
total_tokens = 0