fixes and possible wip gui_2/theme_2 for multi-viewport support
168	ai_client.py
@@ -405,137 +405,55 @@ def _ensure_gemini_client():
 def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items: list[dict] | None = None) -> str:
     global _gemini_chat
     from google import genai
     from google.genai import types
 
     try:
-        _ensure_gemini_client()
-        mcp_client.configure(file_items or [], [base_dir])
-
-        system_text = _get_combined_system_prompt() + f"\n\n<context>\n{md_content}\n</context>"
-        if _gemini_chat is None:
-            _gemini_chat = _gemini_client.chats.create(
-                model=_model,
-                config=types.GenerateContentConfig(
-                    system_instruction=system_text,
-                    tools=[_gemini_tool_declaration()]
-                )
-            )
-        else:
-            _gemini_chat = _gemini_client.chats.create(
-                model=_model,
-                config=types.GenerateContentConfig(
-                    system_instruction=system_text,
-                    tools=[_gemini_tool_declaration()]
-                ),
-                history=_gemini_chat.get_history()
-            )
-
-        payload_to_send = user_message
-
-        _append_comms("OUT", "request", {
-            "message": f"[context {len(md_content)} chars + user message {len(user_message)} chars]",
-        })
-
-        all_text_parts = []
-
-        # We allow MAX_TOOL_ROUNDS, plus 1 final loop to get the text synthesis
-        for round_idx in range(MAX_TOOL_ROUNDS + 2):
-            response = _gemini_chat.send_message(payload_to_send)
-
-            text_parts_raw = [
-                part.text
-                for candidate in response.candidates
-                for part in candidate.content.parts
-                if hasattr(part, "text") and part.text
-            ]
-            if text_parts_raw:
-                all_text_parts.append("\n".join(text_parts_raw))
-
-            tool_calls = [
-                part.function_call
-                for candidate in response.candidates
-                for part in candidate.content.parts
-                if hasattr(part, "function_call") and part.function_call is not None
-            ]
-
-            usage_dict = {}
-            if hasattr(response, "usage_metadata") and response.usage_metadata:
-                meta = response.usage_metadata
-                if hasattr(meta, "prompt_token_count") and meta.prompt_token_count is not None:
-                    usage_dict["input_tokens"] = meta.prompt_token_count
-                if hasattr(meta, "candidates_token_count") and meta.candidates_token_count is not None:
-                    usage_dict["output_tokens"] = meta.candidates_token_count
-                if hasattr(meta, "cached_content_token_count") and meta.cached_content_token_count:
-                    usage_dict["cache_read_input_tokens"] = meta.cached_content_token_count
-
-            stop_reason = ""
-            if response.candidates and hasattr(response.candidates[0], "finish_reason"):
-                fr = response.candidates[0].finish_reason
-                stop_reason = str(fr.name) if hasattr(fr, "name") else str(fr)
-
-            _append_comms("IN", "response", {
-                "round": round_idx,
-                "stop_reason": stop_reason,
-                "text": "\n".join(text_parts_raw),
-                "tool_calls": [{"name": fc.name, "args": dict(fc.args)} for fc in tool_calls],
-                "usage": usage_dict,
-            })
-
-            if not tool_calls:
-                break
-
-            if round_idx > MAX_TOOL_ROUNDS:
-                # The model ignored the MAX ROUNDS warning and kept calling tools.
-                # Force abort to prevent infinite loop.
-                break
-
-            function_responses = []
-            sent_results_log = []
-
-            for i, fc in enumerate(tool_calls):
-                fc_name = fc.name
-                fc_args = dict(fc.args)
-
-                if fc_name in mcp_client.TOOL_NAMES:
-                    _append_comms("OUT", "tool_call", {"name": fc_name, "args": fc_args})
-                    output = mcp_client.dispatch(fc_name, fc_args)
-                    _append_comms("IN", "tool_result", {"name": fc_name, "output": output})
-                elif fc_name == TOOL_NAME:
-                    script = fc_args.get("script", "")
-                    _append_comms("OUT", "tool_call", {"name": TOOL_NAME, "script": script})
-                    output = _run_script(script, base_dir)
-                    _append_comms("IN", "tool_result", {"name": TOOL_NAME, "output": output})
-                else:
-                    output = f"ERROR: unknown tool '{fc_name}'"
-
-                # Inject dynamic updates directly into the LAST tool's output string.
-                # Gemini strictly expects function_responses only, so we piggyback on the string.
-                if i == len(tool_calls) - 1:
-                    if file_items:
-                        file_items = _reread_file_items(file_items)
-                        refreshed_ctx = _build_file_context_text(file_items)
-                        if refreshed_ctx:
-                            output += f"\n\n[SYSTEM: FILES UPDATED — current contents below. Do NOT re-read these files.]\n\n{refreshed_ctx}"
-
-                    if round_idx == MAX_TOOL_ROUNDS:
-                        output += "\n\n[SYSTEM WARNING: MAX TOOL ROUNDS REACHED. YOU MUST PROVIDE YOUR FINAL ANSWER NOW WITHOUT CALLING ANY MORE TOOLS.]"
-
-                function_responses.append(
-                    types.Part.from_function_response(name=fc_name, response={"output": output})
-                )
-                sent_results_log.append({"tool_use_id": fc_name, "content": output})
-
-            _append_comms("OUT", "tool_result_send", {"results": sent_results_log})
-            payload_to_send = function_responses
-
-        final_text = "\n\n".join(all_text_parts)
-        return final_text if final_text.strip() else "(No text returned by the model)"
-
-    except ProviderError:
-        raise
-    except Exception as exc:
-        raise _classify_gemini_error(exc) from exc
+        _ensure_gemini_client(); mcp_client.configure(file_items or [], [base_dir])
+        sys_instr = f"{_get_combined_system_prompt()}\n\n<context>\n{md_content}\n</context>"
+        if not _gemini_chat:
+            _gemini_chat = _gemini_client.chats.create(model=_model, config=types.GenerateContentConfig(system_instruction=sys_instr, tools=[_gemini_tool_declaration()]))
+
+        _append_comms("OUT", "request", {"message": f"[ctx {len(md_content)} + msg {len(user_message)}]"})
+        payload, all_text = user_message, []
+
+        for r_idx in range(MAX_TOOL_ROUNDS + 2):
+            resp = _gemini_chat.send_message(payload)
+            txt = "\n".join(p.text for c in resp.candidates for p in c.content.parts if hasattr(p, "text") and p.text)
+            if txt: all_text.append(txt)
+
+            calls = [p.function_call for c in resp.candidates for p in c.content.parts if hasattr(p, "function_call") and p.function_call]
+            usage = {"input_tokens": getattr(resp.usage_metadata, "prompt_token_count", 0), "output_tokens": getattr(resp.usage_metadata, "candidates_token_count", 0)}
+            reason = resp.candidates[0].finish_reason.name if resp.candidates and hasattr(resp.candidates[0], "finish_reason") else "STOP"
+
+            _append_comms("IN", "response", {"round": r_idx, "stop_reason": reason, "text": txt, "tool_calls": [{"name": c.name, "args": dict(c.args)} for c in calls], "usage": usage})
+            if not calls or r_idx > MAX_TOOL_ROUNDS: break
+
+            f_resps, log = [], []
+            for i, fc in enumerate(calls):
+                name, args = fc.name, dict(fc.args)
+                if name in mcp_client.TOOL_NAMES:
+                    _append_comms("OUT", "tool_call", {"name": name, "args": args})
+                    out = mcp_client.dispatch(name, args)
+                elif name == TOOL_NAME:
+                    scr = args.get("script", "")
+                    _append_comms("OUT", "tool_call", {"name": TOOL_NAME, "script": scr})
+                    out = _run_script(scr, base_dir)
+                else: out = f"ERROR: unknown tool '{name}'"
+
+                if i == len(calls) - 1:
+                    ctx = _build_file_context_text(_reread_file_items(file_items))
+                    if ctx: out += f"\n\n[SYSTEM: FILES UPDATED]\n\n{ctx}"
+                    if r_idx == MAX_TOOL_ROUNDS: out += "\n\n[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]"
+
+                f_resps.append(types.Part.from_function_response(name=name, response={"output": out}))
+                log.append({"tool_use_id": name, "content": out})
+
+            _append_comms("OUT", "tool_result_send", {"results": log})
+            payload = f_resps
+
+        return "\n\n".join(all_text) if all_text else "(No text returned)"
+    except Exception as e: raise _classify_gemini_error(e) from e
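
For reference, below is a minimal, self-contained sketch of the google-genai tool-calling round trip that _send_gemini implements: declare a tool, let the model request it via function_call parts, run it locally, and send the result back with Part.from_function_response until no more calls arrive. The tool name get_time, the model id, and the fixed round limit are illustrative assumptions, not values from this repository.

# Sketch only: google-genai function-calling loop (tool, model id, and round limit are illustrative).
import datetime
from google import genai
from google.genai import types

client = genai.Client()  # reads GOOGLE_API_KEY / GEMINI_API_KEY from the environment

tool = types.Tool(function_declarations=[
    types.FunctionDeclaration(name="get_time", description="Return the current UTC time as ISO-8601."),
])

chat = client.chats.create(
    model="gemini-2.0-flash",
    config=types.GenerateContentConfig(system_instruction="Use tools when helpful.", tools=[tool]),
)

payload = "What time is it right now?"
for _ in range(3):  # bounded rounds, like MAX_TOOL_ROUNDS in the diff above
    resp = chat.send_message(payload)
    calls = [p.function_call for c in resp.candidates for p in c.content.parts
             if getattr(p, "function_call", None)]
    if not calls:
        break
    # Run each requested tool locally and return the results as function responses.
    payload = [
        types.Part.from_function_response(
            name=fc.name,
            response={"output": datetime.datetime.now(datetime.timezone.utc).isoformat()},
        )
        for fc in calls
    ]

print(resp.text)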
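The removed else-branch re-created the chat with history=_gemini_chat.get_history() so a new system instruction could be applied without losing the transcript; the new code drops that path. A standalone sketch of that pattern, with an illustrative model id and prompts:

# Sketch only: keep an existing transcript when re-creating a chat, as the removed branch did.
from google import genai
from google.genai import types

client = genai.Client()
chat = client.chats.create(model="gemini-2.0-flash")  # illustrative model id
chat.send_message("Please remember the number 42.")

# Build a fresh chat with a new system instruction, but seed it with the old history.
chat = client.chats.create(
    model="gemini-2.0-flash",
    config=types.GenerateContentConfig(system_instruction="Answer in one short sentence."),
    history=chat.get_history(),
)
print(chat.send_message("Which number did I ask you to remember?").text)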