import json
from contextlib import suppress
from pathlib import Path

import aggregate
import ai_client
import mma_prompts
import summarize
# Root of the on-disk track state; expected to contain "archive" (past work)
# and "tracks" (current work) subdirectories, relative to the working directory.
CONDUCTOR_PATH = Path("conductor")
|
|
|
|
def get_track_history_summary() -> str:
|
|
"""
|
|
Scans conductor/archive/ and conductor/tracks/ to build a summary of past work.
|
|
"""
|
|
summary_parts = []
|
|
|
|
archive_path = CONDUCTOR_PATH / "archive"
|
|
tracks_path = CONDUCTOR_PATH / "tracks"
|
|
|
|
paths_to_scan = []
|
|
if archive_path.exists():
|
|
paths_to_scan.extend(list(archive_path.iterdir()))
|
|
if tracks_path.exists():
|
|
paths_to_scan.extend(list(tracks_path.iterdir()))
|
|
|
|
for track_dir in paths_to_scan:
|
|
if not track_dir.is_dir():
|
|
continue
|
|
|
|
metadata_file = track_dir / "metadata.json"
|
|
spec_file = track_dir / "spec.md"
|
|
|
|
title = track_dir.name
|
|
status = "unknown"
|
|
overview = "No overview available."
|
|
|
|
if metadata_file.exists():
|
|
try:
|
|
with open(metadata_file, "r", encoding="utf-8") as f:
|
|
meta = json.load(f)
|
|
title = meta.get("title", title)
|
|
status = meta.get("status", status)
|
|
except Exception:
|
|
pass
|
|
|
|
if spec_file.exists():
|
|
try:
|
|
with open(spec_file, "r", encoding="utf-8") as f:
|
|
content = f.read()
|
|
# Basic extraction of Overview section if it exists
|
|
if "## Overview" in content:
|
|
overview = content.split("## Overview")[1].split("##")[0].strip()
|
|
else:
|
|
# Just take a snippet of the beginning
|
|
overview = content[:200] + "..."
|
|
except Exception:
|
|
pass
|
|
|
|
summary_parts.append(f"Track: {title}\nStatus: {status}\nOverview: {overview}\n---")
|
|
|
|
if not summary_parts:
|
|
return "No previous tracks found."
|
|
|
|
return "\n".join(summary_parts)
|
|
|
|
def _extract_json_payload(response: str) -> str:
    """Return the JSON text inside *response*, stripping markdown fences if present."""
    payload = response.strip()
    # The model may wrap its JSON array in ```json ... ``` (or bare ```) fences.
    if "```json" in payload:
        payload = payload.split("```json")[1].split("```")[0].strip()
    elif "```" in payload:
        payload = payload.split("```")[1].split("```")[0].strip()
    return payload


def generate_tracks(user_request: str, project_config: dict, file_items: list[dict], history_summary: str | None = None) -> list[dict]:
    """
    Tier 1 (Strategic PM) call.

    Analyzes the project state and user request to generate a list of Tracks.

    Args:
        user_request: The feature/change requested by the user.
        project_config: Flattened project configuration (currently unused in
            this function; kept for interface stability).
        file_items: File entries used to build the repository summary map.
        history_summary: Optional summary of past tracks for model context.

    Returns:
        A list of track dicts, each guaranteed to carry a "title" key, or an
        empty list when the model response cannot be parsed as JSON.
    """
    # 1. Build Repository Map (Summary View)
    repo_map = summarize.build_summary_markdown(file_items)

    # 2. Construct Prompt
    system_prompt = mma_prompts.PROMPTS.get("tier1_epic_init")

    user_message_parts = [
        f"### USER REQUEST:\n{user_request}\n",
        f"### REPOSITORY MAP:\n{repo_map}\n",
    ]

    if history_summary:
        user_message_parts.append(f"### TRACK HISTORY:\n{history_summary}\n")

    user_message_parts.append("Please generate the implementation tracks for this request.")

    user_message = "\n".join(user_message_parts)

    # Set custom system prompt for this call.
    # NOTE(review): this reaches into ai_client's private attribute to save the
    # previous prompt; ai_client should ideally expose a getter or a context
    # manager for this save/restore pattern.
    old_system_prompt = ai_client._custom_system_prompt
    ai_client.set_custom_system_prompt(system_prompt)

    try:
        # 3. Call Tier 1 Model (Strategic - Pro)
        # Note: We use gemini-1.5-pro or similar high-reasoning model for Tier 1
        response = ai_client.send(
            md_content="",  # We pass everything in user_message for clarity
            user_message=user_message,
        )

        # 4. Parse JSON Output
        try:
            tracks = json.loads(_extract_json_payload(response))
            # Ensure each track has a 'title' for the GUI
            for t in tracks:
                if "title" not in t:
                    t["title"] = t.get("goal", "Untitled Track")[:50]
            return tracks
        except Exception as e:
            # Best-effort: a malformed model response yields an empty track
            # list (with diagnostics on stdout) rather than crashing callers.
            print(f"Error parsing Tier 1 response: {e}")
            print(f"Raw response: {response}")
            return []
    finally:
        # Restore old system prompt
        ai_client.set_custom_system_prompt(old_system_prompt)
|
if __name__ == "__main__":
    # Quick CLI test
    import project_manager

    project = project_manager.load_project("manual_slop.toml")
    config = project_manager.flat_config(project)
    items = aggregate.build_file_items(
        Path("."),
        config.get("files", {}).get("paths", []),
    )

    print("Testing Tier 1 Track Generation...")
    track_history = get_track_history_summary()
    generated = generate_tracks(
        "Implement a basic unit test for the ai_client.py module.",
        config,
        items,
        history_summary=track_history,
    )
    print(json.dumps(generated, indent=2))