feat(mma): Implement run_tier4_analysis in ai_client.py
This commit is contained in:
37
ai_client.py
37
ai_client.py
@@ -1764,6 +1764,43 @@ def _send_deepseek(md_content: str, user_message: str, base_dir: str,
|
||||
raise _classify_deepseek_error(e) from e
|
||||
|
||||
|
||||
def run_tier4_analysis(stderr: str) -> str:
    """
    Stateless Tier 4 QA analysis of an error message.

    Uses gemini-2.5-flash-lite to summarize the error and suggest a fix.

    Args:
        stderr: Raw stderr text captured from a failed PowerShell command.

    Returns:
        A concise analysis string from the model; "" when *stderr* is empty
        or whitespace-only (or when the model returns no text); or a string
        prefixed with "[QA ANALYSIS FAILED]" describing the exception if the
        model call fails. This function never raises.
    """
    if not stderr or not stderr.strip():
        return ""

    try:
        _ensure_gemini_client()
        prompt = (
            f"You are a Tier 4 QA Agent specializing in error analysis.\n"
            f"Analyze the following stderr output from a PowerShell command:\n\n"
            f"```\n{stderr}\n```\n\n"
            f"Provide a concise summary of the failure and suggest a fix in approximately 20 words."
        )

        # Use flash-lite for cost-effective stateless analysis
        model_name = "gemini-2.5-flash-lite"

        # We don't use the chat session here to keep it stateless
        resp = _gemini_client.models.generate_content(
            model=model_name,
            contents=prompt,
            config=types.GenerateContentConfig(
                temperature=0.0,
                max_output_tokens=150,
            )
        )

        # resp.text can be None (e.g. safety-blocked or empty candidate);
        # guard before .strip() so that case yields "" instead of an
        # AttributeError being reported as a QA-analysis failure.
        return (resp.text or "").strip()
    except Exception as e:
        # We don't want to crash the main loop if QA analysis fails
        return f"[QA ANALYSIS FAILED] {e}"
|
||||
|
||||
|
||||
# ------------------------------------------------------------------ unified send
|
||||
|
||||
def send(
|
||||
|
||||
Reference in New Issue
Block a user