refactor(ai): Audit and cleanup ai_client.py and gemini_cli_adapter.py
@@ -51,6 +51,7 @@ _history_trunc_limit: int = 8000
 events: EventEmitter = EventEmitter()
 
 def set_model_params(temp: float, max_tok: int, trunc_limit: int = 8000, top_p: float = 1.0) -> None:
+    """Sets global generation parameters like temperature and max tokens."""
     global _temperature, _max_tokens, _history_trunc_limit, _top_p
     _temperature = temp
     _max_tokens = max_tok
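
A minimal usage sketch of the module-level configuration this function exposes (the import path `ai_client` follows the file name in the commit title; the parameter values are illustrative, not taken from this commit):

    import ai_client

    # Configure generation before starting a session: sampling temperature,
    # response token cap, history truncation limit, and nucleus-sampling top_p.
    ai_client.set_model_params(temp=0.7, max_tok=4096, trunc_limit=8000, top_p=0.95)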
@@ -146,6 +147,7 @@ _global_tool_instructions: str = ""
 _project_context_marker: str = ""
 
 def set_custom_system_prompt(prompt: str) -> None:
+    """Sets a custom system prompt to be combined with the default instructions."""
     global _custom_system_prompt
     _custom_system_prompt = prompt
 
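
The new docstring says the custom prompt is "combined with the default instructions". A plausible composition, for illustration only (the helper name `_compose_system_prompt` and the concatenation order are assumptions, not shown in this diff):

    def _compose_system_prompt(default_instructions: str) -> str:
        # Append the user-supplied prompt, if any, after the built-in instructions.
        if _custom_system_prompt:
            return default_instructions + "\n\n" + _custom_system_prompt
        return default_instructions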
@@ -355,6 +357,7 @@ def _classify_minimax_error(exc: Exception) -> ProviderError:
     return ProviderError("unknown", "minimax", Exception(body))
 
 def set_provider(provider: str, model: str) -> None:
+    """Updates the active LLM provider and model name."""
     global _provider, _model
     _provider = provider
     if provider == "gemini_cli":
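
Typical call sites, as a sketch (the provider strings "gemini_cli" and the model names are illustrative; the body of the `gemini_cli` branch is not shown in this hunk):

    ai_client.set_provider("gemini_cli", "gemini-2.5-pro")  # takes the special-cased branch above
    ai_client.set_provider("anthropic", "claude-sonnet-4")  # hypothetical provider/model pair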
@@ -373,9 +376,11 @@ def set_provider(provider: str, model: str) -> None:
     _model = model
 
 def get_provider() -> str:
+    """Returns the current active provider name."""
     return _provider
 
 def cleanup() -> None:
+    """Performs cleanup operations like deleting server-side Gemini caches."""
     global _gemini_client, _gemini_cache, _gemini_cached_file_paths
     if _gemini_client and _gemini_cache:
         try:
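
Since cleanup() deletes server-side Gemini caches, one way for a caller to guarantee it runs is to register it at interpreter exit. A sketch (using atexit here is an assumption about the caller, not something this commit adds):

    import atexit
    import ai_client

    # Ensure server-side caches are deleted even if the caller forgets to
    # invoke cleanup() explicitly on shutdown.
    atexit.register(ai_client.cleanup)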
@@ -385,6 +390,7 @@ def cleanup() -> None:
     _gemini_cached_file_paths = []
 
 def reset_session() -> None:
+    """Clears conversation history and resets provider-specific session state."""
     global _gemini_client, _gemini_chat, _gemini_cache
     global _gemini_cache_md_hash, _gemini_cache_created_at, _gemini_cached_file_paths
     global _anthropic_client, _anthropic_history
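
A sketch of the intended call pattern: invoking reset_session() between independent conversations so history and cached provider state from one task cannot leak into the next (the loop and `run_conversation` are hypothetical caller code):

    for task in tasks:                    # `tasks`: stand-in for the caller's work items
        ai_client.reset_session()         # drop history and provider session state
        result = run_conversation(task)   # hypothetical driver built on ai_client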
@@ -494,6 +500,7 @@ TOOL_NAME: str = "run_powershell"
 _agent_tools: dict[str, bool] = {}
 
 def set_agent_tools(tools: dict[str, bool]) -> None:
+    """Configures which tools are enabled for the AI agent."""
     global _agent_tools, _CACHED_ANTHROPIC_TOOLS
     _agent_tools = tools
     _CACHED_ANTHROPIC_TOOLS = None
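
set_agent_tools() takes a name-to-enabled map and drops the cached Anthropic tool list so it is rebuilt on the next request. A sketch (the tool names other than run_powershell, which appears above as TOOL_NAME, are illustrative):

    ai_client.set_agent_tools({
        "run_powershell": True,   # TOOL_NAME from this module
        "read_file": True,        # illustrative
        "web_search": False,      # illustrative
    })
    # _CACHED_ANTHROPIC_TOOLS is now None, so _build_anthropic_tools()
    # regenerates the tool schema list on the next call.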
@@ -529,6 +536,7 @@ def set_tool_preset(preset_name: Optional[str]) -> None:
     _CACHED_ANTHROPIC_TOOLS = None
 
 def set_bias_profile(profile_name: Optional[str]) -> None:
+    """Sets the active tool bias profile for tuning model behavior."""
     global _active_bias_profile
     if not profile_name or profile_name == "None":
         _active_bias_profile = None
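
Note that set_bias_profile() treats both None and the literal string "None" as "no profile" (the string form presumably arrives from a string-valued settings source, an assumption about the caller):

    ai_client.set_bias_profile("None")      # clears the profile, same as None
    ai_client.set_bias_profile(None)        # clears the profile
    ai_client.set_bias_profile("concise")   # illustrative profile name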
@@ -544,6 +552,7 @@ def set_bias_profile(profile_name: Optional[str]) -> None:
         sys.stderr.flush()
 
 def get_bias_profile() -> Optional[str]:
+    """Returns the name of the currently active bias profile."""
     return _active_bias_profile.name if _active_bias_profile else None
 
 def _build_anthropic_tools() -> list[dict[str, Any]]: