feat(bias): implement ToolBiasEngine and integrate into ai_client orchestration loop

This commit is contained in:
2026-03-10 09:53:59 -04:00
parent ddc148ca4e
commit cad04bfbfc
4 changed files with 282 additions and 85 deletions

View File

@@ -31,6 +31,8 @@ from src import project_manager
from src import file_cache
from src import mcp_client
from src import mma_prompts
from src.tool_bias import ToolBiasEngine
from src.models import ToolPreset, BiasProfile, Tool
import anthropic
from src.gemini_cli_adapter import GeminiCliAdapter as GeminiCliAdapter
from google import genai
@@ -85,6 +87,10 @@ _minimax_history_lock: threading.Lock = threading.Lock()
_send_lock: threading.Lock = threading.Lock()
_BIAS_ENGINE = ToolBiasEngine()
_active_tool_preset: Optional[ToolPreset] = None
_active_bias_profile: Optional[BiasProfile] = None
_gemini_cli_adapter: Optional[GeminiCliAdapter] = None
# Injected by gui.py - called when AI wants to run a command.
@@ -139,10 +145,17 @@ def set_custom_system_prompt(prompt: str) -> None:
global _custom_system_prompt
_custom_system_prompt = prompt
def _get_combined_system_prompt() -> str:
def _get_combined_system_prompt(preset: Optional[ToolPreset] = None, bias: Optional[BiasProfile] = None) -> str:
if preset is None: preset = _active_tool_preset
if bias is None: bias = _active_bias_profile
base = _SYSTEM_PROMPT
if _custom_system_prompt.strip():
return f"{_SYSTEM_PROMPT}\n\n[USER SYSTEM PROMPT]\n{_custom_system_prompt}"
return _SYSTEM_PROMPT
base = f"{_SYSTEM_PROMPT}\n\n[USER SYSTEM PROMPT]\n{_custom_system_prompt}"
if preset and bias:
strategy = _BIAS_ENGINE.generate_tooling_strategy(preset, bias)
if strategy:
base += f"\n\n{strategy}"
return base
from collections import deque
@@ -472,12 +485,13 @@ def set_agent_tools(tools: dict[str, bool]) -> None:
def set_tool_preset(preset_name: Optional[str]) -> None:
"""Loads a tool preset and applies it via set_agent_tools."""
global _agent_tools, _CACHED_ANTHROPIC_TOOLS, _tool_approval_modes
global _agent_tools, _CACHED_ANTHROPIC_TOOLS, _tool_approval_modes, _active_tool_preset
_tool_approval_modes = {}
if not preset_name or preset_name == "None":
# Enable all tools if no preset
_agent_tools = {name: True for name in mcp_client.TOOL_NAMES}
_agent_tools[TOOL_NAME] = True
_active_tool_preset = None
else:
try:
from src.tool_presets import ToolPresetManager
@@ -485,32 +499,49 @@ def set_tool_preset(preset_name: Optional[str]) -> None:
presets = manager.load_all()
if preset_name in presets:
preset = presets[preset_name]
_active_tool_preset = preset
new_tools = {name: False for name in mcp_client.TOOL_NAMES}
new_tools[TOOL_NAME] = False
for cat in preset.categories.values():
for tool_entry in cat:
if isinstance(tool_entry, dict) and "name" in tool_entry:
name = tool_entry["name"]
new_tools[name] = True
_tool_approval_modes[name] = tool_entry.get("mode", "ask")
for tool in cat:
name = tool.name
new_tools[name] = True
_tool_approval_modes[name] = tool.approval
_agent_tools = new_tools
except Exception as e:
sys.stderr.write(f"[ERROR] Failed to set tool preset '{preset_name}': {e}\n")
sys.stderr.flush()
_CACHED_ANTHROPIC_TOOLS = None
def set_bias_profile(profile_name: Optional[str]) -> None:
    """Select the active global bias profile by name.

    Passing None or the literal string "None" clears the active profile.
    Load failures are logged to stderr and swallowed (best-effort, matching
    set_tool_preset); an unknown name keeps the previous profile but warns.
    """
    global _active_bias_profile
    if not profile_name or profile_name == "None":
        _active_bias_profile = None
        return
    try:
        from src.tool_presets import ToolPresetManager
        manager = ToolPresetManager()
        profiles = manager.load_all_bias_profiles()
        if profile_name in profiles:
            _active_bias_profile = profiles[profile_name]
        else:
            # Previously this failed silently, leaving a stale profile active
            # with no indication why the requested one never took effect.
            sys.stderr.write(f"[WARN] Bias profile '{profile_name}' not found; keeping previous profile.\n")
            sys.stderr.flush()
    except Exception as e:
        sys.stderr.write(f"[ERROR] Failed to set bias profile '{profile_name}': {e}\n")
        sys.stderr.flush()
def get_bias_profile() -> Optional[str]:
    """Return the name of the active bias profile, or None when unset."""
    profile = _active_bias_profile
    return profile.name if profile else None
def _build_anthropic_tools() -> list[dict[str, Any]]:
mcp_tools: list[dict[str, Any]] = []
raw_tools: list[dict[str, Any]] = []
for spec in mcp_client.MCP_TOOL_SPECS:
if _agent_tools.get(spec["name"], True):
mcp_tools.append({
"name": spec["name"],
raw_tools.append({
"name": spec["name"],
"description": spec["description"],
"input_schema": spec["parameters"],
})
tools_list = mcp_tools
if _agent_tools.get(TOOL_NAME, True):
powershell_tool: dict[str, Any] = {
raw_tools.append({
"name": TOOL_NAME,
"description": (
"Run a PowerShell script within the project base_dir. "
@@ -528,13 +559,13 @@ def _build_anthropic_tools() -> list[dict[str, Any]]:
}
},
"required": ["script"]
},
"cache_control": {"type": "ephemeral"},
}
tools_list.append(powershell_tool)
elif tools_list:
tools_list[-1]["cache_control"] = {"type": "ephemeral"}
return tools_list
}
})
if _active_tool_preset:
_BIAS_ENGINE.apply_semantic_nudges(raw_tools, _active_tool_preset)
if raw_tools:
raw_tools[-1]["cache_control"] = {"type": "ephemeral"}
return raw_tools
_CACHED_ANTHROPIC_TOOLS: Optional[list[dict[str, Any]]] = None
@@ -545,12 +576,42 @@ def _get_anthropic_tools() -> list[dict[str, Any]]:
return _CACHED_ANTHROPIC_TOOLS
def _gemini_tool_declaration() -> Optional[types.Tool]:
declarations: list[types.FunctionDeclaration] = []
raw_tools: list[dict[str, Any]] = []
for spec in mcp_client.MCP_TOOL_SPECS:
if not _agent_tools.get(spec["name"], True):
continue
if _agent_tools.get(spec["name"], True):
raw_tools.append({
"name": spec["name"],
"description": spec["description"],
"parameters": spec["parameters"]
})
if _agent_tools.get(TOOL_NAME, True):
raw_tools.append({
"name": TOOL_NAME,
"description": (
"Run a PowerShell script within the project base_dir. "
"Use this to create, edit, rename, or delete files and directories. "
"The working directory is set to base_dir automatically. "
"Always prefer targeted edits over full rewrites where possible. "
"stdout and stderr are returned to you as the result."
),
"parameters": {
"type": "object",
"properties": {
"script": {
"type": "string",
"description": "The PowerShell script to execute."
}
},
"required": ["script"]
}
})
if _active_tool_preset:
_BIAS_ENGINE.apply_semantic_nudges(raw_tools, _active_tool_preset)
declarations: list[types.FunctionDeclaration] = []
for tool_def in raw_tools:
props = {}
for pname, pdef in spec["parameters"].get("properties", {}).items():
params = tool_def.get("parameters", {})
for pname, pdef in params.get("properties", {}).items():
ptype_str = pdef.get("type", "string").upper()
ptype = getattr(types.Type, ptype_str, types.Type.STRING)
props[pname] = types.Schema(
@@ -558,34 +619,14 @@ def _gemini_tool_declaration() -> Optional[types.Tool]:
description=pdef.get("description", ""),
)
declarations.append(types.FunctionDeclaration(
name=spec["name"],
description=spec["description"],
parameters=types.Schema(
type=types.Type.OBJECT,
properties=props,
required=spec["parameters"].get("required", []),
),
))
if _agent_tools.get(TOOL_NAME, True):
declarations.append(types.FunctionDeclaration(
name=TOOL_NAME,
description=(
"Run a PowerShell script within the project base_dir. "
"Use this to create, edit, rename, or delete files and directories. "
"The working directory is set to base_dir automatically. "
"stdout and stderr are returned to you as the result."
),
parameters=types.Schema(
type=types.Type.OBJECT,
properties={
"script": types.Schema(
type=types.Type.STRING,
description="The PowerShell script to execute."
)
},
required=["script"]
),
))
name=tool_def["name"],
description=tool_def["description"],
parameters=types.Schema(
type=types.Type.OBJECT,
properties=props,
required=params.get("required", []),
),
))
return types.Tool(function_declarations=declarations) if declarations else None
async def _execute_tool_calls_concurrently(
@@ -772,43 +813,47 @@ def _build_file_diff_text(changed_items: list[dict[str, Any]]) -> str:
return "\n\n---\n\n".join(parts)
def _build_deepseek_tools() -> list[dict[str, Any]]:
mcp_tools: list[dict[str, Any]] = []
raw_tools: list[dict[str, Any]] = []
for spec in mcp_client.MCP_TOOL_SPECS:
if _agent_tools.get(spec["name"], True):
mcp_tools.append({
"type": "function",
"function": {
"name": spec["name"],
"description": spec["description"],
"parameters": spec["parameters"],
}
})
tools_list = mcp_tools
raw_tools.append({
"name": spec["name"],
"description": spec["description"],
"parameters": spec["parameters"]
})
if _agent_tools.get(TOOL_NAME, True):
powershell_tool: dict[str, Any] = {
raw_tools.append({
"name": TOOL_NAME,
"description": (
"Run a PowerShell script within the project base_dir. "
"Use this to create, edit, rename, or delete files and directories. "
"The working directory is set to base_dir automatically. "
"Always prefer targeted edits over full rewrites where possible. "
"stdout and stderr are returned to you as the result."
),
"parameters": {
"type": "object",
"properties": {
"script": {
"type": "string",
"description": "The PowerShell script to execute."
}
},
"required": ["script"]
}
})
if _active_tool_preset:
_BIAS_ENGINE.apply_semantic_nudges(raw_tools, _active_tool_preset)
tools_list: list[dict[str, Any]] = []
for tool_def in raw_tools:
tools_list.append({
"type": "function",
"function": {
"name": TOOL_NAME,
"description": (
"Run a PowerShell script within the project base_dir. "
"Use this to create, edit, rename, or delete files and directories. "
"The working directory is set to base_dir automatically. "
"Always prefer targeted edits over full rewrites where possible. "
"stdout and stderr are returned to you as the result."
),
"parameters": {
"type": "object",
"properties": {
"script": {
"type": "string",
"description": "The PowerShell script to execute."
}
},
"required": ["script"]
}
"name": tool_def["name"],
"description": tool_def["description"],
"parameters": tool_def["parameters"],
}
}
tools_list.append(powershell_tool)
})
return tools_list
_CACHED_DEEPSEEK_TOOLS: Optional[list[dict[str, Any]]] = None
@@ -2154,7 +2199,7 @@ def send(
) -> str:
monitor = performance_monitor.get_monitor()
if monitor.enabled: monitor.start_component("ai_client.send")
_append_comms("OUT", "request", {"message": user_message, "system": _get_combined_system_prompt()})
_append_comms("OUT", "request", {"message": user_message, "system": _get_combined_system_prompt(_active_tool_preset, _active_bias_profile)})
with _send_lock:
if _provider == "gemini":
res = _send_gemini(

55
src/tool_bias.py Normal file
View File

@@ -0,0 +1,55 @@
from typing import List, Dict, Any, Optional
from src.models import Tool, ToolPreset, BiasProfile
class ToolBiasEngine:
    """Biases the model's tool selection in two ways: rewriting tool/parameter
    descriptions with priority prefixes, and emitting a system-prompt section
    summarizing the active tooling strategy."""

    # Weight 3 is the neutral default and intentionally has no prefix.
    _WEIGHT_PREFIXES: Dict[int, str] = {
        5: "[HIGH PRIORITY] ",
        4: "[PREFERRED] ",
        2: "[NOT RECOMMENDED] ",
        1: "[LOW PRIORITY] ",
    }

    def apply_semantic_nudges(self, tool_definitions: List[Dict[str, Any]], preset: ToolPreset) -> List[Dict[str, Any]]:
        """Annotate tool definitions with the preset's weights and parameter biases.

        Mutates and returns *tool_definitions*: tool descriptions get a weight
        prefix, and biased parameters get a "[bias]" prefix on their
        description. Definitions not covered by the preset are left untouched.
        """
        from copy import deepcopy

        preset_tools: Dict[str, Tool] = {}
        for cat_tools in preset.categories.values():
            for t in cat_tools:
                if isinstance(t, Tool):
                    preset_tools[t.name] = t

        for defn in tool_definitions:
            tool = preset_tools.get(defn.get("name"))
            if tool is None:
                continue
            prefix = self._WEIGHT_PREFIXES.get(tool.weight, "")
            if prefix:
                defn["description"] = prefix + defn.get("description", "")
            if not tool.parameter_bias:
                continue
            # Anthropic definitions use "input_schema"; Gemini/DeepSeek use "parameters".
            schema_key = None
            if defn.get("parameters"):
                schema_key = "parameters"
            elif defn.get("input_schema"):
                schema_key = "input_schema"
            if schema_key is None:
                continue
            params = defn[schema_key]
            if "properties" not in params:
                continue
            # The schema dict is shared by reference with the global tool specs
            # (callers pass spec["parameters"] straight through), so deep-copy
            # before mutating — otherwise "[bias]" prefixes stack onto the
            # shared specs every time the tool list is rebuilt.
            params = deepcopy(params)
            defn[schema_key] = params
            props = params["properties"]
            for p_name, bias in tool.parameter_bias.items():
                if p_name in props:
                    p_desc = props[p_name].get("description", "")
                    props[p_name]["description"] = f"[{bias}] {p_desc}".strip()
        return tool_definitions

    def generate_tooling_strategy(self, preset: ToolPreset, global_bias: BiasProfile) -> str:
        """Build the "### Tooling Strategy" system-prompt section.

        Returns "" when there is nothing to report (no preferred tools and no
        category multipliers) so callers' truthiness checks suppress an empty
        section instead of injecting a bare header.
        """
        sections: List[str] = []

        preferred = [
            t.name
            for cat_tools in preset.categories.values()
            for t in cat_tools
            if isinstance(t, Tool) and t.weight >= 4
        ]
        if preferred:
            sections.append(f"Preferred tools: {', '.join(preferred)}.")

        if global_bias.category_multipliers:
            # Bullets joined with "\n" (not "\n\n") so the list reads as one block.
            bullet_lines = ["Category focus multipliers:"]
            bullet_lines.extend(
                f"- {cat}: {mult}x"
                for cat, mult in global_bias.category_multipliers.items()
            )
            sections.append("\n".join(bullet_lines))

        if not sections:
            return ""
        return "\n\n".join(["### Tooling Strategy"] + sections)