{ "C:\\projects\\manual_slop\\src\\ai_client.py": { "hash": "db4b3aad82599499d7796860757e229d2d412c5ccb3e821ddaee68ca0d3ad5d3", "summary": "This Python module serves as a unified client interface for multiple Large Language Model (LLM) providers, abstracting away provider-specific differences in tool handling, history management, and caching. It includes specialized logic for Anthropic to manage token limits and for Gemini to inject initial context efficiently.\n\n* **Multi-Provider Abstraction:** Provides a single interface for interacting with LLMs from Anthropic, Gemini, DeepSeek, and Minimax.\n* **Provider-Specific Optimization:** Implements tailored strategies for managing token limits (Anthropic) and context injection (Gemini).\n* **Tooling and Bias Management:** Supports setting agent tools, tool presets, and bias profiles to influence LLM behavior.\n* **Communication Logging:** Tracks and logs communication events with LLM providers.\n* **Configuration and State Management:** Manages global generation parameters, credentials, and session state.\n\n**Outline:**\n**Python** \u2014 2501 lines\nimports: __future__, anthropic, asyncio, collections, datetime, difflib, google, hashlib, json, openai, os, pathlib, requests, src, sys, threading, time, tomllib, typing\nconstants: _GEMINI_CACHE_TTL, _BIAS_ENGINE, MAX_TOOL_ROUNDS, _MAX_TOOL_OUTPUT_BYTES, _ANTHROPIC_CHUNK_SIZE, _SYSTEM_PROMPT, COMMS_CLAMP_CHARS, TOOL_NAME, _CACHED_ANTHROPIC_TOOLS, _DIFF_LINE_THRESHOLD, _CACHED_DEEPSEEK_TOOLS, _CHARS_PER_TOKEN, _ANTHROPIC_MAX_PROMPT_TOKENS, _GEMINI_MAX_INPUT_TOKENS, _FILE_REFRESH_MARKER\nclass ProviderError: __init__, ui_message\nfunctions: set_model_params, get_history_trunc_limit, set_history_trunc_limit, get_current_tier, set_current_tier, set_custom_system_prompt, set_base_system_prompt, set_use_default_base_prompt, set_project_context_marker, _get_context_marker, _get_combined_system_prompt, get_combined_system_prompt, _append_comms, get_comms_log, clear_comms_log, get_credentials_path, _load_credentials, _classify_anthropic_error, _classify_gemini_error, _classify_deepseek_error, _classify_minimax_error, set_provider, get_provider, cleanup, reset_session, get_gemini_cache_stats, list_models, _list_gemini_cli_models, _list_gemini_models, _list_anthropic_models, _list_deepseek_models, _list_minimax_models, set_agent_tools, set_tool_preset, set_bias_profile, get_bias_profile, _build_anthropic_tools, _get_anthropic_tools, _gemini_tool_declaration, _execute_tool_calls_concurrently, _execute_single_tool_call_async, _run_script, _truncate_tool_output, _reread_file_items, _build_file_context_text, _build_file_diff_text, _build_deepseek_tools, _get_deepseek_tools, _content_block_to_dict, _ensure_gemini_client, _get_gemini_history_list, _send_gemini, _send_gemini_cli, _estimate_message_tokens, _invalidate_token_estimate, _estimate_prompt_tokens, _strip_stale_file_refreshes, _trim_anthropic_history, _ensure_anthropic_client, _chunk_text, _build_chunked_context_blocks, _strip_cache_controls, _add_history_cache_breakpoint, _repair_anthropic_history, _send_anthropic, _ensure_deepseek_client, _ensure_minimax_client, _repair_deepseek_history, _send_deepseek, _send_minimax, run_tier4_analysis, run_tier4_patch_callback, run_tier4_patch_generation, get_token_stats, send, _add_bleed_derived, get_history_bleed_stats, run_subagent_summarization" } }