feat(ui): AI Settings Overhaul - added dual sliders for model params including top_p

This commit is contained in:
2026-03-11 20:22:06 -04:00
parent 55475b80e7
commit 09902701b4
3 changed files with 55 additions and 8 deletions

View File

@@ -42,6 +42,7 @@ from src.events import EventEmitter
# --- Module-level model configuration, read by the provider send functions ---
_provider: str = "gemini"  # active LLM provider id
_model: str = "gemini-2.5-flash-lite"  # provider-specific model identifier
_temperature: float = 0.0  # sampling temperature forwarded to the provider API
_top_p: float = 1.0  # nucleus-sampling cutoff forwarded to the provider API
_max_tokens: int = 8192  # maximum output tokens requested per completion
_history_trunc_limit: int = 8000  # history-truncation threshold (units not shown here — see get_history_trunc_limit)
@@ -49,11 +50,12 @@ _history_trunc_limit: int = 8000
# Global event emitter for API lifecycle events (module-level singleton)
events: EventEmitter = EventEmitter()
def set_model_params(temp: float, max_tok: int, trunc_limit: int = 8000) -> None:
global _temperature, _max_tokens, _history_trunc_limit
def set_model_params(temp: float, max_tok: int, trunc_limit: int = 8000, top_p: float = 1.0) -> None:
    """Update the module-level sampling/limit settings used on subsequent API calls.

    Args:
        temp: Sampling temperature forwarded to the provider API.
        max_tok: Maximum number of output tokens to request per completion.
        trunc_limit: History-truncation threshold stored in
            ``_history_trunc_limit`` (read back via ``get_history_trunc_limit``).
        top_p: Nucleus-sampling cutoff forwarded to the provider API.
            Defaults to 1.0 (disabled) so existing callers are unaffected.
    """
    # The body indentation was lost in the rendered diff; restored here so the
    # function is valid Python. Behavior matches the diff's added lines exactly.
    global _temperature, _max_tokens, _history_trunc_limit, _top_p
    _temperature = temp
    _max_tokens = max_tok
    _history_trunc_limit = trunc_limit
    _top_p = top_p
def get_history_trunc_limit() -> int:
    """Return the current history-truncation threshold (``_history_trunc_limit``)."""
    # Indentation of the return statement was lost in the rendered diff;
    # restored here so the function is valid Python.
    return _history_trunc_limit
@@ -939,6 +941,7 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str,
system_instruction=sys_instr,
tools=cast(Any, tools_decl),
temperature=_temperature,
top_p=_top_p,
max_output_tokens=_max_tokens,
safety_settings=[types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold=types.HarmBlockThreshold.BLOCK_ONLY_HIGH)]
)
@@ -1010,6 +1013,7 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str,
config = types.GenerateContentConfig(
tools=[td] if td else [],
temperature=_temperature,
top_p=_top_p,
max_output_tokens=_max_tokens,
)
@@ -1455,6 +1459,7 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
model=_model,
max_tokens=_max_tokens,
temperature=_temperature,
top_p=_top_p,
system=cast(Iterable[anthropic.types.TextBlockParam], system_blocks),
tools=cast(Iterable[anthropic.types.ToolParam], _get_anthropic_tools()),
messages=cast(Iterable[anthropic.types.MessageParam], _strip_private_keys(_anthropic_history)),
@@ -1468,6 +1473,7 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
model=_model,
max_tokens=_max_tokens,
temperature=_temperature,
top_p=_top_p,
system=cast(Iterable[anthropic.types.TextBlockParam], system_blocks),
tools=cast(Iterable[anthropic.types.ToolParam], _get_anthropic_tools()),
messages=cast(Iterable[anthropic.types.MessageParam], _strip_private_keys(_anthropic_history)),
@@ -1696,6 +1702,7 @@ def _send_deepseek(md_content: str, user_message: str, base_dir: str,
if not is_reasoner:
request_payload["temperature"] = _temperature
request_payload["top_p"] = _top_p
# DeepSeek max_tokens is for the output, clamp to 8192 which is their hard limit for V3/Chat
request_payload["max_tokens"] = min(_max_tokens, 8192)
tools = _get_deepseek_tools()
@@ -1927,6 +1934,7 @@ def _send_minimax(md_content: str, user_message: str, base_dir: str,
request_payload["stream_options"] = {"include_usage": True}
request_payload["temperature"] = 1.0
request_payload["top_p"] = _top_p
request_payload["max_tokens"] = min(_max_tokens, 8192)
tools = _get_deepseek_tools()