add history truncation
This commit is contained in:
gui.py (14 lines changed)
@@ -3,10 +3,9 @@
 Note(Gemini):
 The main DearPyGui interface orchestrator.
 This is not a simple UI wrapper; it's a complex state machine that:
-1. Handles multi-viewport docking (allowing panels to act as OS windows).
-2. Manages background daemon threads for AI requests so the UI doesn't block.
-3. Implements lock-protected comms queues for safe main-thread rendering.
-4. Pauses AI execution to prompt the human for destructive PowerShell script approval.
+1. Manages background daemon threads for AI requests so the UI doesn't block.
+2. Implements lock-protected comms queues for safe main-thread rendering.
+3. Pauses AI execution to prompt the human for destructive PowerShell script approval.
 """
 # gui.py
 import dearpygui.dearpygui as dpg
||||
@@ -377,6 +376,7 @@ class App:
         self.current_model: str = ai_cfg.get("model", "gemini-2.5-flash")
         self.temperature: float = ai_cfg.get("temperature", 0.0)
         self.max_tokens: int = ai_cfg.get("max_tokens", 8192)
+        self.history_trunc_limit: int = ai_cfg.get("history_trunc_limit", 8000)
         self.available_models: list[str] = []

     # ---- project management ----
||||
@@ -845,6 +845,7 @@ class App:
             "model": self.current_model,
             "temperature": dpg.get_value("ai_temperature") if dpg.does_item_exist("ai_temperature") else self.temperature,
             "max_tokens": dpg.get_value("ai_max_tokens") if dpg.does_item_exist("ai_max_tokens") else self.max_tokens,
+            "history_trunc_limit": dpg.get_value("ai_history_trunc") if dpg.does_item_exist("ai_history_trunc") else self.history_trunc_limit,
         }
         if dpg.does_item_exist("global_system_prompt"):
             self.config["ai"]["system_prompt"] = dpg.get_value("global_system_prompt")
||||
@@ -1153,7 +1154,8 @@ class App:
         ai_client.set_custom_system_prompt("\n\n".join(combined_sp))
         temp = dpg.get_value("ai_temperature") if dpg.does_item_exist("ai_temperature") else 0.0
         max_tok = dpg.get_value("ai_max_tokens") if dpg.does_item_exist("ai_max_tokens") else 8192
-        ai_client.set_model_params(temp, max_tok)
+        trunc = dpg.get_value("ai_history_trunc") if dpg.does_item_exist("ai_history_trunc") else 8000
+        ai_client.set_model_params(temp, max_tok, trunc)

         def do_send():
             auto_add = dpg.get_value("auto_add_history") if dpg.does_item_exist("auto_add_history") else False
||||
@@ -1785,6 +1787,7 @@ class App:
             dpg.add_text("Parameters")
             dpg.add_input_float(tag="ai_temperature", label="Temperature", default_value=self.temperature, min_value=0.0, max_value=2.0)
             dpg.add_input_int(tag="ai_max_tokens", label="Max Tokens (Output)", default_value=self.max_tokens, step=1024)
+            dpg.add_input_int(tag="ai_history_trunc", label="History Truncation Limit", default_value=self.history_trunc_limit, step=1024)

         # ---- Message panel ----
         with dpg.window(
@@ -2106,6 +2109,7 @@ class App:

         dpg.save_init_file("dpg_layout.ini")
         session_logger.close_session()
         ai_client.cleanup()  # Destroy active API caches to stop billing
         dpg.destroy_context()
|
||||
|
||||
|
||||
Reference in New Issue
Block a user