refactor(gui): redesign persona editor UI and replace popup modals with standard windows
@@ -300,7 +300,9 @@ class AppController:
         self._inject_mode: str = "skeleton"
         self._inject_preview: str = ""
         self._show_inject_modal: bool = False
-        self.show_preset_manager_modal: bool = False
+        self.show_preset_manager_window: bool = False
+        self.show_tool_preset_manager_window: bool = False
+        self.show_persona_editor_window: bool = False
         self._editing_preset_name: str = ""
         self._editing_preset_content: str = ""
         self._editing_preset_temperature: float = 0.0
@@ -342,7 +344,9 @@ class AppController:
             'ui_active_tool_preset': 'ui_active_tool_preset',
             'temperature': 'temperature',
             'max_tokens': 'max_tokens',
-            'show_preset_manager_modal': 'show_preset_manager_modal',
+            'show_preset_manager_window': 'show_preset_manager_window',
+            'show_tool_preset_manager_window': 'show_tool_preset_manager_window',
+            'show_persona_editor_window': 'show_persona_editor_window',
             '_editing_preset_name': '_editing_preset_name',
             '_editing_preset_content': '_editing_preset_content',
             '_editing_preset_temperature': '_editing_preset_temperature',
@@ -390,7 +394,9 @@ class AppController:
             'ui_active_tool_preset': 'ui_active_tool_preset',
             'temperature': 'temperature',
             'max_tokens': 'max_tokens',
-            'show_preset_manager_modal': 'show_preset_manager_modal',
+            'show_preset_manager_window': 'show_preset_manager_window',
+            'show_tool_preset_manager_window': 'show_tool_preset_manager_window',
+            'show_persona_editor_window': 'show_persona_editor_window',
             '_editing_preset_name': '_editing_preset_name',
             '_editing_preset_content': '_editing_preset_content',
             '_editing_preset_temperature': '_editing_preset_temperature',
@@ -2567,3 +2573,4 @@ class AppController:
             tasks=self.active_track.tickets
         )
+        project_manager.save_track_state(self.active_track.id, state, self.ui_files_base_dir)
src/gui_2.py (1063 changes): file diff suppressed because it is too large.
@@ -434,32 +434,48 @@ class BiasProfile:
 @dataclass
 class Persona:
     name: str
-    provider: Optional[str] = None
-    model: Optional[str] = None
-    preferred_models: List[str] = field(default_factory=list)
+    preferred_models: List[Dict[str, Any]] = field(default_factory=list)
     system_prompt: str = ''
-    temperature: Optional[float] = None
-    top_p: Optional[float] = None
-    max_output_tokens: Optional[int] = None
     tool_preset: Optional[str] = None
     bias_profile: Optional[str] = None
 
+    @property
+    def provider(self) -> Optional[str]:
+        if not self.preferred_models: return None
+        return self.preferred_models[0].get("provider")
+
+    @property
+    def model(self) -> Optional[str]:
+        if not self.preferred_models: return None
+        return self.preferred_models[0].get("model")
+
+    @property
+    def temperature(self) -> Optional[float]:
+        if not self.preferred_models: return None
+        return self.preferred_models[0].get("temperature")
+
+    @property
+    def top_p(self) -> Optional[float]:
+        if not self.preferred_models: return None
+        return self.preferred_models[0].get("top_p")
+
+    @property
+    def max_output_tokens(self) -> Optional[int]:
+        if not self.preferred_models: return None
+        return self.preferred_models[0].get("max_output_tokens")
+
     def to_dict(self) -> Dict[str, Any]:
         res = {
             "system_prompt": self.system_prompt,
         }
-        if self.provider is not None:
-            res["provider"] = self.provider
-        if self.model is not None:
-            res["model"] = self.model
-        if self.preferred_models:
-            res["preferred_models"] = self.preferred_models
-        if self.temperature is not None:
-            res["temperature"] = self.temperature
-        if self.top_p is not None:
-            res["top_p"] = self.top_p
-        if self.max_output_tokens is not None:
-            res["max_output_tokens"] = self.max_output_tokens
+        processed = []
+        for m in self.preferred_models:
+            if isinstance(m, str):
+                processed.append({"model": m})
+            else:
+                processed.append(m)
+        res["preferred_models"] = processed
         if self.tool_preset is not None:
            res["tool_preset"] = self.tool_preset
         if self.bias_profile is not None:
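Note: the hunk above replaces the flat provider/model/sampling fields with a single preferred_models list of dicts and re-exposes the old attributes as read-only properties over its first entry. A minimal usage sketch, not part of the commit; the persona name and values below are made up, and the import path assumes the repo layout used by the tests:

from src.models import Persona

p = Persona(
    name="Reviewer",  # illustrative name, not from the commit
    preferred_models=[
        {"provider": "anthropic", "model": "claude-3-7-sonnet-20250219", "temperature": 0.1},
        "gemini-2.5-flash",  # plain strings are still accepted and normalized on serialization
    ],
    system_prompt="Review diffs for regressions.",
)

# The legacy attributes now read from the first preferred_models entry.
p.provider       # -> "anthropic"
p.temperature    # -> 0.1
p.top_p          # -> None (not set on the first entry)

# to_dict() normalizes string entries and no longer emits top-level provider/model keys.
p.to_dict()["preferred_models"][1]   # -> {"model": "gemini-2.5-flash"}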
@@ -468,15 +484,34 @@ class Persona:
 
     @classmethod
     def from_dict(cls, name: str, data: Dict[str, Any]) -> "Persona":
+        raw_models = data.get("preferred_models", [])
+        parsed_models = []
+        for m in raw_models:
+            if isinstance(m, str):
+                parsed_models.append({"model": m})
+            else:
+                parsed_models.append(m)
+
+        # Migration logic: merge legacy fields if they exist
+        legacy = {}
+        for k in ["provider", "model", "temperature", "top_p", "max_output_tokens"]:
+            if data.get(k) is not None:
+                legacy[k] = data[k]
+
+        if legacy:
+            if not parsed_models:
+                parsed_models.append(legacy)
+            else:
+                # Merge into first item if it's missing these specific legacy fields
+                for k, v in legacy.items():
+                    if k not in parsed_models[0] or parsed_models[0][k] is None:
+                        parsed_models[0][k] = v
+
         return cls(
             name=name,
-            provider=data.get("provider"),
-            model=data.get("model"),
-            preferred_models=data.get("preferred_models", []),
+            preferred_models=parsed_models,
             system_prompt=data.get("system_prompt", ""),
-            temperature=data.get("temperature"),
-            top_p=data.get("top_p"),
-            max_output_tokens=data.get("max_output_tokens"),
             tool_preset=data.get("tool_preset"),
             bias_profile=data.get("bias_profile"),
         )
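Note: with the migration logic above, a legacy flat config should still load. A small sketch of that path, assuming a legacy config shaped like the one in test_persona_deserialization further down:

from src.models import Persona

legacy_cfg = {
    "provider": "gemini",
    "model": "gemini-2.5-flash",
    "temperature": 0.5,
    "system_prompt": "You are a helpful assistant.",
}

p = Persona.from_dict("Assistant", legacy_cfg)

# No preferred_models in the data, so the legacy keys become the first (and only) entry.
assert p.preferred_models == [{"provider": "gemini", "model": "gemini-2.5-flash", "temperature": 0.5}]
assert p.provider == "gemini"
assert p.temperature == 0.5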
@@ -59,7 +59,7 @@ def test_load_all_merged(temp_paths):
 
 def test_save_persona(temp_paths):
     manager = PersonaManager(project_root=temp_paths["project_root"])
-    persona = Persona(name="New", provider="gemini", system_prompt="Test")
+    persona = Persona(name="New", preferred_models=[{"provider": "gemini"}], system_prompt="Test")
 
     manager.save_persona(persona, scope="project")
     loaded = manager.load_all()
@@ -4,30 +4,38 @@ from src.models import Persona
 def test_persona_serialization():
     persona = Persona(
         name="SecuritySpecialist",
-        provider="anthropic",
-        model="claude-3-7-sonnet-20250219",
-        preferred_models=["claude-3-7-sonnet-20250219", "claude-3-5-sonnet-20241022"],
+        preferred_models=[
+            {"provider": "anthropic", "model": "claude-3-7-sonnet-20250219", "temperature": 0.2, "top_p": 0.9, "max_output_tokens": 4000},
+            "claude-3-5-sonnet-20241022"
+        ],
         system_prompt="You are a security expert.",
-        temperature=0.2,
-        top_p=0.9,
-        max_output_tokens=4000,
         tool_preset="SecurityTools",
         bias_profile="Execution-Focused"
     )
+
+    assert persona.provider == "anthropic"
+    assert persona.model == "claude-3-7-sonnet-20250219"
+    assert persona.temperature == 0.2
+    assert persona.top_p == 0.9
+    assert persona.max_output_tokens == 4000
 
     data = persona.to_dict()
 
-    assert data["provider"] == "anthropic"
-    assert data["model"] == "claude-3-7-sonnet-20250219"
-    assert "claude-3-5-sonnet-20241022" in data["preferred_models"]
+    # data should NOT have top-level provider/model anymore, it's in preferred_models
+    assert "provider" not in data
+    assert "model" not in data
+    assert data["preferred_models"][0]["provider"] == "anthropic"
+    assert data["preferred_models"][0]["model"] == "claude-3-7-sonnet-20250219"
+    assert data["preferred_models"][1] == {"model": "claude-3-5-sonnet-20241022"}
     assert data["system_prompt"] == "You are a security expert."
-    assert data["temperature"] == 0.2
-    assert data["top_p"] == 0.9
-    assert data["max_output_tokens"] == 4000
+    assert data["preferred_models"][0]["temperature"] == 0.2
+    assert data["preferred_models"][0]["top_p"] == 0.9
+    assert data["preferred_models"][0]["max_output_tokens"] == 4000
     assert data["tool_preset"] == "SecurityTools"
     assert data["bias_profile"] == "Execution-Focused"
 
 def test_persona_deserialization():
     # Old config format (legacy)
     data = {
         "provider": "gemini",
         "model": "gemini-2.5-flash",
@@ -45,7 +53,9 @@ def test_persona_deserialization():
     assert persona.name == "Assistant"
     assert persona.provider == "gemini"
     assert persona.model == "gemini-2.5-flash"
-    assert persona.preferred_models == ["gemini-2.5-flash"]
+    # Migration logic should have put legacy fields into preferred_models since it only had a string
+    assert persona.preferred_models[0]["provider"] == "gemini"
+    assert persona.preferred_models[0]["model"] == "gemini-2.5-flash"
     assert persona.system_prompt == "You are a helpful assistant."
     assert persona.temperature == 0.5
     assert persona.top_p == 1.0
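Note: taken together, to_dict() and from_dict() should round-trip the new schema. A quick sketch under that assumption; the values are illustrative and this check is not part of the committed tests:

from src.models import Persona

original = Persona(
    name="SecuritySpecialist",
    preferred_models=[{"provider": "anthropic", "model": "claude-3-7-sonnet-20250219"}],
    system_prompt="You are a security expert.",
    tool_preset="SecurityTools",
)

restored = Persona.from_dict(original.name, original.to_dict())

assert restored.preferred_models == original.preferred_models
assert restored.provider == "anthropic"
assert restored.tool_preset == "SecurityTools"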