Add better Gemini support

2026-02-21 21:53:57 -05:00
parent 2182bfbd4a
commit 9a23941e59
3 changed files with 50 additions and 14 deletions


@@ -33,7 +33,7 @@ MAX_TOOL_ROUNDS = 5
 # Kept well under the ~200k token API limit.
 _ANTHROPIC_CHUNK_SIZE = 180_000
-_ANTHROPIC_SYSTEM = (
+_SYSTEM_PROMPT = (
     "You are a helpful coding assistant with access to a PowerShell tool and MCP file tools (read_file, list_directory, search_files, get_file_summary). "
     "When asked to create or edit files, prefer targeted edits over full rewrites. "
     "Always explain what you are doing before invoking the tool.\n\n"
@@ -403,6 +403,7 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
     _gemini_chat = _gemini_client.chats.create(
         model=_model,
         config=types.GenerateContentConfig(
+            system_instruction=_SYSTEM_PROMPT,
             tools=[_gemini_tool_declaration()]
         )
     )
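
In the google-genai SDK the system prompt is attached once per chat via GenerateContentConfig rather than sent with every message, so setting system_instruction at chats.create covers all subsequent rounds. A minimal self-contained sketch of that pattern (the model name and placeholder prompt are assumptions, not taken from this commit):

from google import genai
from google.genai import types

client = genai.Client()  # reads GEMINI_API_KEY from the environment
chat = client.chats.create(
    model="gemini-2.0-flash",  # assumed model; the code above passes its own _model
    config=types.GenerateContentConfig(
        system_instruction="You are a helpful coding assistant.",  # stands in for _SYSTEM_PROMPT
    ),
)
print(chat.send_message("hello").text)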
@@ -429,16 +430,34 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
             if hasattr(part, "function_call") and part.function_call is not None
         ]
-        _append_comms("IN", "response", {
-            "round": round_idx,
-            "text": "\n".join(text_parts_raw),
-            "tool_calls": [{"name": fc.name, "args": dict(fc.args)} for fc in tool_calls],
+        usage_dict = {}
+        if hasattr(response, "usage_metadata") and response.usage_metadata:
+            meta = response.usage_metadata
+            if hasattr(meta, "prompt_token_count") and meta.prompt_token_count is not None:
+                usage_dict["input_tokens"] = meta.prompt_token_count
+            if hasattr(meta, "candidates_token_count") and meta.candidates_token_count is not None:
+                usage_dict["output_tokens"] = meta.candidates_token_count
+            if hasattr(meta, "cached_content_token_count") and meta.cached_content_token_count:
+                usage_dict["cache_read_input_tokens"] = meta.cached_content_token_count
+        stop_reason = ""
+        if response.candidates and hasattr(response.candidates[0], "finish_reason"):
+            fr = response.candidates[0].finish_reason
+            stop_reason = str(fr.name) if hasattr(fr, "name") else str(fr)
+        _append_comms("IN", "response", {
+            "round": round_idx,
+            "stop_reason": stop_reason,
+            "text": "\n".join(text_parts_raw),
+            "tool_calls": [{"name": fc.name, "args": dict(fc.args)} for fc in tool_calls],
+            "usage": usage_dict,
         })
         if not tool_calls:
             break
         function_responses = []
+        sent_results_log = []
         for fc in tool_calls:
             fc_name = fc.name
             fc_args = dict(fc.args)
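
The new block maps Gemini's usage_metadata field names onto the Anthropic-style keys the comms log already uses (input_tokens, output_tokens, cache_read_input_tokens), so both providers log a uniform shape. Factored out, the same logic would read roughly as below; the helper name is hypothetical, the commit keeps it inline:

def _usage_from_gemini(response) -> dict:
    """Normalize Gemini usage_metadata to Anthropic-style keys (hypothetical helper)."""
    usage = {}
    meta = getattr(response, "usage_metadata", None)
    if not meta:
        return usage
    if getattr(meta, "prompt_token_count", None) is not None:
        usage["input_tokens"] = meta.prompt_token_count
    if getattr(meta, "candidates_token_count", None) is not None:
        usage["output_tokens"] = meta.candidates_token_count
    if getattr(meta, "cached_content_token_count", None):
        usage["cache_read_input_tokens"] = meta.cached_content_token_count
    return usage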
@@ -452,6 +471,7 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
                         response={"output": output}
                     )
                 )
+                sent_results_log.append({"tool_use_id": fc_name, "content": output})
             elif fc_name == TOOL_NAME:
                 script = fc_args.get("script", "")
                 _append_comms("OUT", "tool_call", {
@@ -469,13 +489,29 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
                         response={"output": output}
                     )
                 )
+                sent_results_log.append({"tool_use_id": TOOL_NAME, "content": output})
         if not function_responses:
             break
-        # Refresh file context after tool calls
+        # Refresh file context after tool calls and inject as a text part
         if file_items:
             file_items = _reread_file_items(file_items)
+            refreshed_ctx = _build_file_context_text(file_items)
+            if refreshed_ctx:
+                refreshed_text = (
+                    "[FILES UPDATED — current contents below. "
+                    "Do NOT re-read these files with PowerShell.]\n\n"
+                    + refreshed_ctx
+                )
+                if hasattr(types.Part, "from_text"):
+                    function_responses.append(types.Part.from_text(text=refreshed_text))
+                else:
+                    function_responses.append(types.Part(text=refreshed_text))
+        _append_comms("OUT", "tool_result_send", {
+            "results": sent_results_log
+        })
         response = _gemini_chat.send_message(function_responses)
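
chat.send_message accepts a list that mixes Part kinds, which is what lets the refreshed file context travel in the same turn as the tool results. A sketch of the payload shape (the tool name and output strings are made up for illustration; the code above uses TOOL_NAME and real tool output):

from google.genai import types

parts = [
    types.Part.from_function_response(
        name="run_powershell",  # assumed tool name
        response={"output": "Get-ChildItem output here"},
    ),
    types.Part.from_text(text="[FILES UPDATED]\n\n# main.py\n..."),
]
response = chat.send_message(parts)  # one turn: tool results plus refreshed context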
@@ -599,7 +635,7 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
         system=[
             {
                 "type": "text",
-                "text": _ANTHROPIC_SYSTEM,
+                "text": _SYSTEM_PROMPT,
                 "cache_control": {"type": "ephemeral"},
             }
         ],
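
After the rename, the single _SYSTEM_PROMPT constant feeds both providers; the Anthropic call keeps its cache_control wrapper, so the shared prompt is served from the prompt cache on repeat requests. A self-contained sketch of that call shape (model name and placeholder prompt are assumptions, not from this commit):

import anthropic

client = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment
response = client.messages.create(
    model="claude-sonnet-4-20250514",  # assumed model
    max_tokens=1024,
    system=[
        {
            "type": "text",
            "text": "You are a helpful coding assistant.",  # stands in for _SYSTEM_PROMPT
            "cache_control": {"type": "ephemeral"},  # cache the shared prompt
        }
    ],
    messages=[{"role": "user", "content": "hello"}],
)
print(response.content[0].text)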