phase 2 checkpoint
This commit is contained in:
@@ -113,7 +113,7 @@ This file tracks all major tracks for the project. Each track has its own detail
|
||||
*Link: [./tracks/external_editor_integration_20260308/](./tracks/external_editor_integration_20260308/)*
|
||||
*Goal: Add support to open files modified by agents in external editors (10xNotepad/VSCode) for native diffing and manual editing during the tool approval flow.*
|
||||
|
||||
4. [ ] **Track: Agent Personas: Unified Profiles & Tool Presets**
|
||||
4. [~] **Track: Agent Personas: Unified Profiles & Tool Presets**
|
||||
*Link: [./tracks/agent_personas_20260309/](./tracks/agent_personas_20260309/)*
|
||||
*Goal: Consolidate model settings, prompts, and tool presets into a unified "Persona" model with granular MMA assignment.*
|
||||
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
# Implementation Plan: Agent Personas - Unified Profiles
|
||||
|
||||
## Phase 1: Core Model and Migration
|
||||
- [ ] Task: Audit `src/models.py` and `src/app_controller.py` for all existing AI settings.
|
||||
- [ ] Task: Write Tests: Verify the `Persona` dataclass can be serialized/deserialized to TOML.
|
||||
- [ ] Task: Implement: Create the `Persona` model in `src/models.py` and implement the `PersonaManager` in `src/personas.py` (inheriting logic from `PresetManager`).
|
||||
- [ ] Task: Implement: Create a migration utility to convert existing `active_preset` and system prompts into an "Initial Legacy" Persona.
|
||||
- [ ] Task: Conductor - User Manual Verification 'Phase 1: Core Model and Migration' (Protocol in workflow.md)
|
||||
- [x] Task: Audit `src/models.py` and `src/app_controller.py` for all existing AI settings.
|
||||
- [x] Task: Write Tests: Verify the `Persona` dataclass can be serialized/deserialized to TOML.
|
||||
- [x] Task: Implement: Create the `Persona` model in `src/models.py` and implement the `PersonaManager` in `src/personas.py` (inheriting logic from `PresetManager`).
|
||||
- [x] Task: Implement: Create a migration utility to convert existing `active_preset` and system prompts into an "Initial Legacy" Persona.
|
||||
- [x] Task: Conductor - User Manual Verification 'Phase 1: Core Model and Migration' (Protocol in workflow.md)
|
||||
|
||||
## Phase 2: Granular MMA Integration
|
||||
- [ ] Task: Write Tests: Verify that a `Ticket` or `Track` can hold a `persona_id` override.
|
||||
|
||||
66
scripts/migrate_personas.py
Normal file
66
scripts/migrate_personas.py
Normal file
@@ -0,0 +1,66 @@
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from src import models
|
||||
from src.paths import get_config_path, get_global_presets_path, get_project_presets_path
|
||||
from src.presets import PresetManager
|
||||
from src.personas import PersonaManager
|
||||
|
||||
def migrate():
    """One-shot migration of legacy AI settings into Persona profiles.

    Reads the main TOML config and the global presets file, writes an
    equivalent Persona for each global preset, and finally creates an
    "Initial Legacy" persona from the raw config for the active preset
    if it was never stored as a preset.
    """
    # Hoisted out of the `with` block below: the module is needed to parse
    # the config regardless of how the file handle is managed, and importing
    # inside the `with` body was misleading.
    import tomllib

    print("Starting Persona Migration...")

    config_path = get_config_path()
    try:
        with open(config_path, "rb") as f:
            config = tomllib.load(f)
    except Exception as e:
        # Best-effort tool: report and bail rather than crash with a traceback.
        print(f"Could not load config: {e}")
        return

    ai_cfg = config.get("ai", {})
    provider = ai_cfg.get("provider")
    model = ai_cfg.get("model")

    global_presets_path = get_global_presets_path()
    preset_manager = PresetManager()

    persona_manager = PersonaManager()

    # Migrate every global preset into a persona of the same name, carrying
    # the provider/model from the [ai] config section.
    if global_presets_path.exists():
        global_data = preset_manager._load_file(global_presets_path)
        for name, data in global_data.get("presets", {}).items():
            preset = models.Preset.from_dict(name, data)
            persona = models.Persona(
                name=name,
                provider=provider,
                model=model,
                preferred_models=[model] if model else [],
                system_prompt=preset.system_prompt,
                temperature=preset.temperature,
                top_p=preset.top_p,
                max_output_tokens=preset.max_output_tokens,
            )
            persona_manager.save_persona(persona, scope="global")
            print(f"Migrated global preset to persona: {name}")

    # Create an "Initial Legacy" persona straight from config if the active
    # preset was not already covered by the presets file above.
    active_preset = ai_cfg.get("active_preset")
    if active_preset and active_preset not in persona_manager.load_all():
        persona = models.Persona(
            name=active_preset,
            provider=provider,
            model=model,
            preferred_models=[model] if model else [],
            system_prompt=ai_cfg.get("system_prompt", ""),
            temperature=ai_cfg.get("temperature"),
            max_output_tokens=ai_cfg.get("max_tokens"),
        )
        persona_manager.save_persona(persona, scope="global")
        print(f"Created Initial Legacy persona from active_preset: {active_preset}")

    print("Migration complete.")
|
||||
|
||||
# Allow running this file directly as a one-shot migration script.
if __name__ == "__main__":
    migrate()
|
||||
@@ -435,3 +435,53 @@ class BiasProfile:
|
||||
tool_weights=data.get("tool_weights", {}),
|
||||
category_multipliers=data.get("category_multipliers", {}),
|
||||
)
|
||||
|
||||
@dataclass
class Persona:
    """A named bundle of AI settings: provider/model selection, system
    prompt, sampling parameters, and optional tool/bias preset references."""

    name: str
    provider: Optional[str] = None
    model: Optional[str] = None
    preferred_models: List[str] = field(default_factory=list)
    system_prompt: str = ''
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    max_output_tokens: Optional[int] = None
    tool_preset: Optional[str] = None
    bias_profile: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, omitting unset optional fields.

        The persona's name is not included — it is stored as the table key.
        """
        payload: Dict[str, Any] = {"system_prompt": self.system_prompt}
        scalar_fields = (
            ("provider", self.provider),
            ("model", self.model),
            ("temperature", self.temperature),
            ("top_p", self.top_p),
            ("max_output_tokens", self.max_output_tokens),
            ("tool_preset", self.tool_preset),
            ("bias_profile", self.bias_profile),
        )
        for key, value in scalar_fields:
            if value is not None:
                payload[key] = value
        # An empty preferred-models list is treated the same as "not set".
        if self.preferred_models:
            payload["preferred_models"] = self.preferred_models
        return payload

    @classmethod
    def from_dict(cls, name: str, data: Dict[str, Any]) -> "Persona":
        """Build a Persona from a table entry keyed by *name*."""
        return cls(
            name=name,
            provider=data.get("provider"),
            model=data.get("model"),
            preferred_models=data.get("preferred_models", []),
            system_prompt=data.get("system_prompt", ""),
            temperature=data.get("temperature"),
            top_p=data.get("top_p"),
            max_output_tokens=data.get("max_output_tokens"),
            tool_preset=data.get("tool_preset"),
            bias_profile=data.get("bias_profile"),
        )
|
||||
|
||||
@@ -64,6 +64,13 @@ def get_global_tool_presets_path() -> Path:
|
||||
def get_project_tool_presets_path(project_root: Path) -> Path:
    """Per-project tool presets file, stored at the project root."""
    return project_root.joinpath("project_tool_presets.toml")
|
||||
|
||||
def get_global_personas_path() -> Path:
    """Global personas file: SLOP_GLOBAL_PERSONAS env override, else the
    repository root (two levels above this module)."""
    fallback = Path(__file__).resolve().parent.parent / "personas.toml"
    return Path(os.environ.get("SLOP_GLOBAL_PERSONAS", fallback))
|
||||
|
||||
def get_project_personas_path(project_root: Path) -> Path:
    """Per-project personas file, stored at the project root."""
    return project_root.joinpath("project_personas.toml")
|
||||
|
||||
def _resolve_path(env_var: str, config_key: str, default: str) -> Path:
|
||||
if env_var in os.environ:
|
||||
return Path(os.environ[env_var])
|
||||
|
||||
69
src/personas.py
Normal file
69
src/personas.py
Normal file
@@ -0,0 +1,69 @@
|
||||
import tomllib
|
||||
import tomli_w
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, Optional
|
||||
from src.models import Persona
|
||||
from src import paths
|
||||
|
||||
class PersonaManager:
    """Loads, saves, and deletes Persona profiles stored in TOML files,
    merging the global file with an optional project-level file."""

    def __init__(self, project_root: Optional[Path] = None):
        # When None, only the global scope can be resolved.
        self.project_root = project_root

    def _get_path(self, scope: str) -> Path:
        """Resolve the TOML file backing *scope* ('global' or 'project')."""
        if scope == "global":
            return paths.get_global_personas_path()
        if scope == "project":
            if not self.project_root:
                raise ValueError("Project root is not set, cannot resolve project scope.")
            return paths.get_project_personas_path(self.project_root)
        raise ValueError("Invalid scope, must be 'global' or 'project'")

    def load_all(self) -> Dict[str, Persona]:
        """Merges global and project personas into a single dictionary."""
        merged: Dict[str, Persona] = {}

        sources = [paths.get_global_personas_path()]
        if self.project_root:
            sources.append(paths.get_project_personas_path(self.project_root))

        # Later files win: project entries override same-named global ones.
        for file_path in sources:
            raw = self._load_file(file_path)
            for name, entry in raw.get("personas", {}).items():
                merged[name] = Persona.from_dict(name, entry)

        return merged

    def save_persona(self, persona: Persona, scope: str = "project") -> None:
        """Insert or overwrite *persona* in the file for *scope*."""
        target = self._get_path(scope)
        contents = self._load_file(target)
        contents.setdefault("personas", {})[persona.name] = persona.to_dict()
        self._save_file(target, contents)

    def delete_persona(self, name: str, scope: str = "project") -> None:
        """Remove *name* from the file for *scope*; no-op if absent."""
        target = self._get_path(scope)
        contents = self._load_file(target)
        table = contents.get("personas", {})
        if name in table:
            del table[name]
            self._save_file(target, contents)

    def _load_file(self, path: Path) -> Dict[str, Any]:
        """Parse a TOML file; a missing or unreadable file yields {}."""
        if not path.exists():
            return {}
        try:
            with open(path, "rb") as f:
                return tomllib.load(f)
        except Exception:
            # Best effort: a corrupt file is treated as empty rather than fatal.
            return {}

    def _save_file(self, path: Path, data: Dict[str, Any]) -> None:
        """Write *data* to *path*, creating parent directories as needed."""
        path.parent.mkdir(parents=True, exist_ok=True)
        with open(path, "wb") as f:
            tomli_w.dump(data, f)
|
||||
81
tests/test_persona_manager.py
Normal file
81
tests/test_persona_manager.py
Normal file
@@ -0,0 +1,81 @@
|
||||
import pytest
|
||||
import tomli_w
|
||||
from pathlib import Path
|
||||
from src.models import Persona
|
||||
from src.personas import PersonaManager
|
||||
from src import paths
|
||||
|
||||
@pytest.fixture
def temp_paths(tmp_path, monkeypatch):
    """Redirect global/project persona path helpers into a temp directory."""
    global_dir = tmp_path / "global"
    project_dir = tmp_path / "project"
    for directory in (global_dir, project_dir):
        directory.mkdir()

    global_path = global_dir / "personas.toml"
    project_path = project_dir / "project_personas.toml"

    monkeypatch.setattr(paths, "get_global_personas_path", lambda: global_path)
    monkeypatch.setattr(paths, "get_project_personas_path", lambda _: project_path)

    return {
        "global": global_path,
        "project": project_path,
        "project_root": project_dir,
    }
|
||||
|
||||
def test_load_all_merged(temp_paths):
    """Project personas override same-named global ones when merged."""
    with open(temp_paths["global"], "wb") as f:
        tomli_w.dump(
            {
                "personas": {
                    "default": {
                        "provider": "anthropic",
                        "model": "claude-3",
                        "system_prompt": "Global prompt",
                    },
                    "global_only": {
                        "provider": "gemini",
                        "system_prompt": "Gemini prompt",
                    },
                }
            },
            f,
        )

    with open(temp_paths["project"], "wb") as f:
        tomli_w.dump(
            {
                "personas": {
                    "default": {
                        "provider": "anthropic",
                        "model": "claude-3.5",  # overrides the global entry
                        "system_prompt": "Project prompt",
                    }
                }
            },
            f,
        )

    merged = PersonaManager(project_root=temp_paths["project_root"]).load_all()

    assert "global_only" in merged
    assert "default" in merged
    assert merged["default"].model == "claude-3.5"
    assert merged["default"].system_prompt == "Project prompt"
|
||||
|
||||
def test_save_persona(temp_paths):
    """A persona saved at project scope shows up in the merged view."""
    manager = PersonaManager(project_root=temp_paths["project_root"])
    fresh = Persona(name="New", provider="gemini", system_prompt="Test")

    manager.save_persona(fresh, scope="project")

    personas = manager.load_all()
    assert "New" in personas
    assert personas["New"].provider == "gemini"
|
||||
|
||||
def test_delete_persona(temp_paths):
    """Deleting a project-scope persona removes it from the merged view."""
    seed = {"personas": {"to_delete": {"provider": "gemini", "system_prompt": "Del"}}}
    with open(temp_paths["project"], "wb") as f:
        tomli_w.dump(seed, f)

    manager = PersonaManager(project_root=temp_paths["project_root"])
    manager.delete_persona("to_delete", scope="project")

    assert "to_delete" not in manager.load_all()
|
||||
70
tests/test_persona_models.py
Normal file
70
tests/test_persona_models.py
Normal file
@@ -0,0 +1,70 @@
|
||||
import pytest
|
||||
from src.models import Persona
|
||||
|
||||
def test_persona_serialization():
    """to_dict emits every populated field under its TOML key."""
    persona = Persona(
        name="SecuritySpecialist",
        provider="anthropic",
        model="claude-3-7-sonnet-20250219",
        preferred_models=["claude-3-7-sonnet-20250219", "claude-3-5-sonnet-20241022"],
        system_prompt="You are a security expert.",
        temperature=0.2,
        top_p=0.9,
        max_output_tokens=4000,
        tool_preset="SecurityTools",
        bias_profile="Execution-Focused",
    )

    data = persona.to_dict()

    expected = {
        "provider": "anthropic",
        "model": "claude-3-7-sonnet-20250219",
        "system_prompt": "You are a security expert.",
        "temperature": 0.2,
        "top_p": 0.9,
        "max_output_tokens": 4000,
        "tool_preset": "SecurityTools",
        "bias_profile": "Execution-Focused",
    }
    for key, value in expected.items():
        assert data[key] == value
    assert "claude-3-5-sonnet-20241022" in data["preferred_models"]
|
||||
|
||||
def test_persona_deserialization():
    """from_dict restores every field and takes the name from the key."""
    raw = {
        "provider": "gemini",
        "model": "gemini-2.5-flash",
        "preferred_models": ["gemini-2.5-flash"],
        "system_prompt": "You are a helpful assistant.",
        "temperature": 0.5,
        "top_p": 1.0,
        "max_output_tokens": 8192,
        "tool_preset": "Default",
        "bias_profile": "Balanced",
    }

    persona = Persona.from_dict("Assistant", raw)

    assert persona.name == "Assistant"
    for attr in ("provider", "model", "preferred_models", "system_prompt",
                 "temperature", "top_p", "max_output_tokens", "tool_preset",
                 "bias_profile"):
        assert getattr(persona, attr) == raw[attr]
|
||||
|
||||
def test_persona_defaults():
    """Unset optional fields default to None/[] and stay out of to_dict."""
    persona = Persona(name="Minimal", system_prompt="Just the basics")

    for attr in ("provider", "model", "temperature", "tool_preset"):
        assert getattr(persona, attr) is None
    assert persona.preferred_models == []

    data = persona.to_dict()
    for key in ("provider", "preferred_models", "temperature"):
        assert key not in data

    assert Persona.from_dict("Minimal", data).preferred_models == []
|
||||
Reference in New Issue
Block a user