From f05fa3d34044e025ad4526f444c2a47939dde228 Mon Sep 17 00:00:00 2001
From: Ed_
Date: Thu, 26 Feb 2026 22:06:18 -0500
Subject: [PATCH] checkpoint

---
 conductor/tracks.md             |  4 ++--
 orchestrator_pm.py              | 59 ++++++++++++++++++++++++++++++++++++++++
 tests/temp_project_history.toml |  2 +-
 3 files changed, 62 insertions(+), 3 deletions(-)
 create mode 100644 orchestrator_pm.py

diff --git a/conductor/tracks.md b/conductor/tracks.md
index 64509bb..12c6075 100644
--- a/conductor/tracks.md
+++ b/conductor/tracks.md
@@ -20,8 +20,8 @@ This file tracks all major tracks for the project. Each track has its own detail
 
 ---
 
-- [ ] **Track: Update ./docs/* & ./Readme.md, review ./MainContext.md significance (should we keep it..).**
-*Link: [./tracks/documentation_refresh_20260224/](./tracks/documentation_refresh_20260224/)*
+- [~] **Track: MMA Orchestrator Integration**
+*Link: [./tracks/mma_orchestrator_integration_20260226/](./tracks/mma_orchestrator_integration_20260226/)*
 
 ---
 
diff --git a/orchestrator_pm.py b/orchestrator_pm.py
new file mode 100644
index 0000000..3a9203e
--- /dev/null
+++ b/orchestrator_pm.py
@@ -0,0 +1,59 @@
+
+import json
+import ai_client
+import mma_prompts
+import aggregate
+import summarize
+from pathlib import Path
+
+def generate_tracks(user_request: str, project_config: dict, file_items: list[dict]) -> list[dict]:
+    """
+    Tier 1 (Strategic PM) call.
+    Analyzes the project state and user request to generate a list of Tracks.
+    """
+    # 1. Build Repository Map (Summary View)
+    repo_map = summarize.build_summary_markdown(file_items)
+
+    # 2. Construct Prompt
+    system_prompt = mma_prompts.PROMPTS.get("tier1_epic_init")
+    user_message = (
+        f"### USER REQUEST:\n{user_request}\n\n"
+        f"### REPOSITORY MAP:\n{repo_map}\n\n"
+        "Please generate the implementation tracks for this request."
+    )
+
+    # 3. Call Tier 1 Model (Strategic - Pro)
+    # Note: We use gemini-1.5-pro or similar high-reasoning model for Tier 1
+    response = ai_client.send(
+        md_content="",  # We pass everything in user_message for clarity
+        user_message=user_message,
+        system_prompt=system_prompt,
+        model_name="gemini-1.5-pro"  # Strategic Tier
+    )
+
+    # 4. Parse JSON Output
+    try:
+        # The prompt asks for a JSON array. We need to extract it if the AI added markdown blocks.
+        json_match = response.strip()
+        if "```json" in json_match:
+            json_match = json_match.split("```json")[1].split("```")[0].strip()
+        elif "```" in json_match:
+            json_match = json_match.split("```")[1].split("```")[0].strip()
+
+        tracks = json.loads(json_match)
+        return tracks
+    except Exception as e:
+        print(f"Error parsing Tier 1 response: {e}")
+        print(f"Raw response: {response}")
+        return []
+
+if __name__ == "__main__":
+    # Quick CLI test
+    import project_manager
+    proj = project_manager.load_project("manual_slop.toml")
+    flat = project_manager.flat_config(proj)
+    file_items = aggregate.build_file_items(Path("."), flat.get("files", {}).get("paths", []))
+
+    print("Testing Tier 1 Track Generation...")
+    tracks = generate_tracks("Implement a basic unit test for the ai_client.py module.", flat, file_items)
+    print(json.dumps(tracks, indent=2))
diff --git a/tests/temp_project_history.toml b/tests/temp_project_history.toml
index a79fbbf..dc5096b 100644
--- a/tests/temp_project_history.toml
+++ b/tests/temp_project_history.toml
@@ -17,5 +17,5 @@ history = [
 
 [discussions."mma_human veriffication"]
 git_commit = ""
-last_updated = "2026-02-26T22:02:01"
+last_updated = "2026-02-26T22:06:01"
 history = []