fix(gui): Resolve AppController state regressions
+16 -9
@@ -233,6 +233,8 @@ class AppController:
         self.ui_auto_add_history: bool = False
         self.ui_active_tool_preset: str | None = None
         self.ui_global_system_prompt: str = ""
+        self.ui_global_tool_instructions: str = ""
+        self.ui_project_context_marker: str = ""
         self.ui_agent_tools: Dict[str, bool] = {}
         self.available_models: List[str] = []
         self.all_available_models: Dict[str, List[str]] = {}  # provider -> list of models
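Two related registries are initialized here: `available_models` appears to hold the flat list for the currently selected provider, while `all_available_models` keys each provider to its own list (per the inline comment). A hypothetical shape, with illustrative names only:

    # Hypothetical contents -- provider and model names are illustrative, not from the codebase.
    available_models = ["model-a", "model-b"]        # active provider's models
    all_available_models = {                         # provider -> list of models
        "provider-x": ["model-a", "model-b"],
        "provider-y": ["model-c"],
    }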
@@ -913,6 +915,8 @@ class AppController:
         self.ui_word_wrap = proj_meta.get("word_wrap", True)
         self.ui_auto_add_history = disc_sec.get("auto_add", False)
         self.ui_global_system_prompt = self.config.get("ai", {}).get("system_prompt", "")
+        self.ui_global_tool_instructions = self.config.get("ai", {}).get("tool_instructions", "")
+        self.ui_project_context_marker = proj_meta.get("context_marker", "")
 
         self.preset_manager = presets.PresetManager(Path(self.active_project_path).parent if self.active_project_path else None)
         self.presets = self.preset_manager.load_all()
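The preset store is rooted at the parent directory of the active project path, or None when no project is open. A minimal sketch of just the path derivation (the path value is hypothetical; how PresetManager treats None is the app's own contract):

    from pathlib import Path

    active_project_path = "/home/user/projects/demo/project.json"  # hypothetical
    base = Path(active_project_path).parent if active_project_path else None
    print(base)  # /home/user/projects/demo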
@@ -1404,9 +1408,10 @@ class AppController:
         self.ai_response = ""
         csp = filter(bool, [self.ui_global_system_prompt.strip(), self.ui_project_system_prompt.strip()])
         ai_client.set_custom_system_prompt("\n\n".join(csp))
+        ai_client.set_global_tool_instructions(self.ui_global_tool_instructions)
+        ai_client.set_project_context_marker(self.ui_project_context_marker)
         ai_client.set_model_params(self.temperature, self.max_tokens, self.history_trunc_limit, self.top_p)
-        ai_client.set_agent_tools(self.ui_agent_tools)
-        # Force update adapter path right before send to bypass potential duplication issues
+        ai_client.set_agent_tools(self.ui_agent_tools)  # Force update adapter path right before send to bypass potential duplication issues
         self._update_gcli_adapter(self.ui_gemini_cli_path)
         sys.stderr.write(f"[DEBUG] Calling ai_client.send with provider={ai_client.get_provider()}, model={self.current_model}, gcli_path={self.ui_gemini_cli_path}\n")
         sys.stderr.flush()
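The `csp` pattern above composes the global and project system prompts while skipping whichever is empty: `filter(bool, ...)` drops falsy items (here, strings that are empty after `strip()`), so the `"\n\n".join(...)` never emits a stray separator. A standalone illustration with hypothetical prompt strings:

    global_prompt = "Always answer in English."
    project_prompt = "   "  # blank after strip()

    csp = filter(bool, [global_prompt.strip(), project_prompt.strip()])
    combined = "\n\n".join(csp)
    print(repr(combined))  # 'Always answer in English.' -- no stray separator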
@@ -1465,11 +1470,11 @@ class AppController:
 
         if kind == "response" and "usage" in payload:
             u = payload["usage"]
-            inp = u.get("input_tokens", u.get("prompt_tokens", 0))
-            out = u.get("output_tokens", u.get("completion_tokens", 0))
-            cache_read = u.get("cache_read_input_tokens", 0)
-            cache_create = u.get("cache_creation_input_tokens", 0)
-            total = u.get("total_tokens", 0)
+            inp = u.get("input_tokens") or u.get("prompt_tokens") or 0
+            out = u.get("output_tokens") or u.get("completion_tokens") or 0
+            cache_read = u.get("cache_read_input_tokens") or 0
+            cache_create = u.get("cache_creation_input_tokens") or 0
+            total = u.get("total_tokens") or 0
 
             # Store normalized usage back in payload for history rendering
             u["input_tokens"] = inp
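This hunk is the heart of the regression fix: `dict.get(key, default)` only falls back when the key is absent, not when a provider reports an explicit `null` (decoded as `None`). Chaining with `or` covers both cases. The trade-off is that `or` also treats a legitimate `0` as falsy, which is harmless here since the fallback is another counter or `0`. A minimal reproduction with a hypothetical usage payload:

    u = {"input_tokens": None, "prompt_tokens": 17}

    u.get("input_tokens", u.get("prompt_tokens", 0))      # -> None: key exists, default ignored
    u.get("input_tokens") or u.get("prompt_tokens") or 0  # -> 17: None falls through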
@@ -1481,8 +1486,8 @@ class AppController:
             self.session_usage["cache_read_input_tokens"] += cache_read
             self.session_usage["cache_creation_input_tokens"] += cache_create
             self.session_usage["total_tokens"] += total
-            input_t = u.get("input_tokens", 0)
-            output_t = u.get("output_tokens", 0)
+            input_t = u.get("input_tokens") or 0
+            output_t = u.get("output_tokens") or 0
             model = payload.get("model", "unknown")
             self._token_history.append({
                 "time": time.time(),
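The same `or 0` guard matters for the accumulators in this hunk: once a `None` slips through, the `+=` raises rather than merely miscounting. A two-line demonstration:

    total = None                  # e.g. a provider omitted total_tokens
    session_total = 0
    # session_total += total      # TypeError: unsupported operand type(s) for +=
    session_total += total or 0   # safe: counts missing/None as 0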
@@ -1823,6 +1828,8 @@ class AppController:
         base_dir = self.active_project_root
         csp = filter(bool, [self.ui_global_system_prompt.strip(), self.ui_project_system_prompt.strip()])
         ai_client.set_custom_system_prompt("\n\n".join(csp))
+        ai_client.set_global_tool_instructions(self.ui_global_tool_instructions)
+        ai_client.set_project_context_marker(self.ui_project_context_marker)
         temp = req.temperature if req.temperature is not None else self.temperature
         top_p = req.top_p if req.top_p is not None else self.top_p
         tokens = req.max_tokens if req.max_tokens is not None else self.max_tokens
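Note the deliberate contrast with the `or` chains earlier: the request overrides use explicit `is not None` checks because `0` and `0.0` are meaningful values here (a caller may legitimately ask for `temperature=0.0`), and an `or`-based fallback would silently replace them with the instance defaults. Illustrated with hypothetical values:

    default_temperature = 0.7
    req_temperature = 0.0  # caller explicitly requests greedy decoding

    temp = req_temperature if req_temperature is not None else default_temperature
    assert temp == 0.0     # "req_temperature or default_temperature" would give 0.7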