Add better Gemini support
ai_client.py (42 changed lines)
@@ -33,7 +33,7 @@ MAX_TOOL_ROUNDS = 5
 # Kept well under the ~200k token API limit.
 _ANTHROPIC_CHUNK_SIZE = 180_000
 
-_ANTHROPIC_SYSTEM = (
+_SYSTEM_PROMPT = (
     "You are a helpful coding assistant with access to a PowerShell tool and MCP file tools (read_file, list_directory, search_files, get_file_summary). "
     "When asked to create or edit files, prefer targeted edits over full rewrites. "
     "Always explain what you are doing before invoking the tool.\n\n"
@@ -403,6 +403,7 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
     _gemini_chat = _gemini_client.chats.create(
         model=_model,
         config=types.GenerateContentConfig(
+            system_instruction=_SYSTEM_PROMPT,
             tools=[_gemini_tool_declaration()]
         )
     )
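
For reference, a minimal standalone sketch (not part of the commit) of the pattern this hunk adopts: supplying the shared system prompt to a Gemini chat session through GenerateContentConfig. It assumes the google-genai SDK with an API key in the environment; the prompt text is trimmed and the model name mirrors the config change further down.

    from google import genai
    from google.genai import types

    _SYSTEM_PROMPT = "You are a helpful coding assistant."  # trimmed stand-in

    client = genai.Client()  # assumes GEMINI_API_KEY is set in the environment
    chat = client.chats.create(
        model="gemini-3-flash-preview",
        config=types.GenerateContentConfig(
            system_instruction=_SYSTEM_PROMPT,
        ),
    )
    print(chat.send_message("Say hi").text)
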
@@ -429,16 +430,34 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
             if hasattr(part, "function_call") and part.function_call is not None
         ]
 
+        usage_dict = {}
+        if hasattr(response, "usage_metadata") and response.usage_metadata:
+            meta = response.usage_metadata
+            if hasattr(meta, "prompt_token_count") and meta.prompt_token_count is not None:
+                usage_dict["input_tokens"] = meta.prompt_token_count
+            if hasattr(meta, "candidates_token_count") and meta.candidates_token_count is not None:
+                usage_dict["output_tokens"] = meta.candidates_token_count
+            if hasattr(meta, "cached_content_token_count") and meta.cached_content_token_count:
+                usage_dict["cache_read_input_tokens"] = meta.cached_content_token_count
+
+        stop_reason = ""
+        if response.candidates and hasattr(response.candidates[0], "finish_reason"):
+            fr = response.candidates[0].finish_reason
+            stop_reason = str(fr.name) if hasattr(fr, "name") else str(fr)
+
         _append_comms("IN", "response", {
             "round": round_idx,
+            "stop_reason": stop_reason,
             "text": "\n".join(text_parts_raw),
             "tool_calls": [{"name": fc.name, "args": dict(fc.args)} for fc in tool_calls],
+            "usage": usage_dict,
         })
 
         if not tool_calls:
             break
 
         function_responses = []
+        sent_results_log = []
         for fc in tool_calls:
             fc_name = fc.name
             fc_args = dict(fc.args)
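
The hasattr checks above keep the logger resilient to SDK variations; the same Gemini-to-Anthropic usage mapping could also be factored into a small helper. A sketch under the assumption that response follows the google-genai GenerateContentResponse shape (the helper name is hypothetical, not in the commit):

    def _gemini_usage(response) -> dict:
        # Map Gemini usage_metadata onto the Anthropic-style keys the comms log uses.
        usage = {}
        meta = getattr(response, "usage_metadata", None)
        if meta is None:
            return usage
        if getattr(meta, "prompt_token_count", None) is not None:
            usage["input_tokens"] = meta.prompt_token_count
        if getattr(meta, "candidates_token_count", None) is not None:
            usage["output_tokens"] = meta.candidates_token_count
        if getattr(meta, "cached_content_token_count", None):
            usage["cache_read_input_tokens"] = meta.cached_content_token_count
        return usage
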
@@ -452,6 +471,7 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
                         response={"output": output}
                     )
                 )
+                sent_results_log.append({"tool_use_id": fc_name, "content": output})
             elif fc_name == TOOL_NAME:
                 script = fc_args.get("script", "")
                 _append_comms("OUT", "tool_call", {
@@ -469,13 +489,29 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
                         response={"output": output}
                     )
                 )
+                sent_results_log.append({"tool_use_id": TOOL_NAME, "content": output})
 
         if not function_responses:
             break
 
-        # Refresh file context after tool calls
+        # Refresh file context after tool calls and inject as a text part
         if file_items:
             file_items = _reread_file_items(file_items)
+            refreshed_ctx = _build_file_context_text(file_items)
+            if refreshed_ctx:
+                refreshed_text = (
+                    "[FILES UPDATED — current contents below. "
+                    "Do NOT re-read these files with PowerShell.]\n\n"
+                    + refreshed_ctx
+                )
+                if hasattr(types.Part, "from_text"):
+                    function_responses.append(types.Part.from_text(text=refreshed_text))
+                else:
+                    function_responses.append(types.Part(text=refreshed_text))
 
+        _append_comms("OUT", "tool_result_send", {
+            "results": sent_results_log
+        })
+
         response = _gemini_chat.send_message(function_responses)
 
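
For context, a minimal sketch (not part of the commit) of the round trip this hunk extends: answering a pending Gemini function call and attaching an extra text part in the same send_message payload. It assumes the google-genai types module; the tool name, output, and refresh text are placeholders.

    from google.genai import types

    parts = [
        types.Part(
            function_response=types.FunctionResponse(
                name="run_powershell",                           # placeholder tool name
                response={"output": "Get-ChildItem completed"},  # placeholder output
            )
        ),
        types.Part(text="[FILES UPDATED] refreshed file context goes here"),
    ]
    # response = chat.send_message(parts)  # 'chat' created as in the earlier sketch
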
@@ -599,7 +635,7 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
         system=[
             {
                 "type": "text",
-                "text": _ANTHROPIC_SYSTEM,
+                "text": _SYSTEM_PROMPT,
                 "cache_control": {"type": "ephemeral"},
             }
         ],
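
Since both providers now read the renamed _SYSTEM_PROMPT, here is a brief sketch (not part of the commit) of the Anthropic side of the same idea: the shared prompt sent as a cached system block via the Messages API. The model name and token limit are placeholders.

    import anthropic

    client = anthropic.Anthropic()  # assumes ANTHROPIC_API_KEY is set
    message = client.messages.create(
        model="claude-sonnet-4-20250514",  # placeholder model
        max_tokens=1024,
        system=[
            {
                "type": "text",
                "text": "You are a helpful coding assistant.",  # stand-in for _SYSTEM_PROMPT
                "cache_control": {"type": "ephemeral"},
            }
        ],
        messages=[{"role": "user", "content": "Say hi"}],
    )
    print(message.content[0].text)
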
@@ -1,6 +1,6 @@
 [ai]
 provider = "gemini"
-model = "gemini-3.1-pro-preview"
+model = "gemini-3-flash-preview"
 
 [theme]
 palette = "10x Dark"
@@ -28,10 +28,7 @@ paths = [
 
 [screenshots]
 base_dir = "C:/Users/Ed/scoop/apps/sharex/current/ShareX/Screenshots/2026-02"
-paths = [
-    "C:/Users/Ed/scoop/apps/sharex/current/ShareX/Screenshots/2026-02/2026-02-21_21-34-14.png",
-    "C:/Users/Ed/scoop/apps/sharex/current/ShareX/Screenshots/2026-02/Fork_2026-02-21_21-34-27.png",
-]
+paths = []
 
 [discussion]
 roles = [
@@ -87,5 +84,8 @@ history = [
 
 [discussion.discussions."Support for system prompt"]
 git_commit = "f258fc5765d647eb821716bce3d2000fb2ba2857"
-last_updated = "2026-02-21T21:49:59"
-history = []
+last_updated = "2026-02-21T21:53:43"
+history = [
+    "@2026-02-21T21:52:16\nUser:\ngemini testing 123 you get this message, first time using you proper in this custom frontend.",
+    "@2026-02-21T21:52:17\nAI:\nLoud and clear! I've received your message through your **Manual Slop** frontend. It looks like a very capable environment—I can see you've implemented:\n\n1. **Multi-provider support** (Anthropic and Gemini).\n2. **Tool use/MCP-like capabilities** (file reading, directory listing, searching, etc.).\n3. **Project & Discussion management** via TOML files.\n4. **Rich GUI** using `dearpygui`.\n\nSince this is our first \"proper\" session in this interface, I'm ready to help you develop, debug, or refine the project. I have access to your local files through the tools you've provided, so I can see the codebase and even run PowerShell scripts if needed.\n\nIs there a specific feature you'd like to work on next, or any bugs in the Gemini integration you've noticed during your \"123\" test?",
+]