fix(ai_client): Add missing response_received events for gemini streaming and non-streaming paths

This commit is contained in:
2026-03-05 19:21:57 -05:00
parent 937759a7a3
commit 03b68c9cea
3 changed files with 4 additions and 2 deletions

View File

@@ -8,5 +8,5 @@ active = "main"
 [discussions.main]
 git_commit = ""
-last_updated = "2026-03-05T19:00:38"
+last_updated = "2026-03-05T19:13:27"
 history = []

View File

@@ -712,6 +712,7 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str,
 txt = "".join(txt_chunks)
 # Final validation of response object for subsequent code
 resp = final_resp
+events.emit("response_received", payload={"provider": "gemini", "model": _model, "usage": usage, "round": r_idx})
 else:
 config = types.GenerateContentConfig(
 tools=[types.Tool(function_declarations=[types.FunctionDeclaration(**s) for s in mcp_client.get_tool_schemas()])] if enable_tools else [],
@@ -728,6 +729,7 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str,
 "cache_read_input_tokens": getattr(resp.usage_metadata, "cached_content_token_count", 0)
 }
 reason = resp.candidates[0].finish_reason.name if (resp.candidates and hasattr(resp.candidates[0], "finish_reason")) else "STOP"
+events.emit("response_received", payload={"provider": "gemini", "model": _model, "usage": usage, "round": r_idx})
 _append_comms("IN", "response", {"round": r_idx, "stop_reason": reason, "text": txt, "tool_calls": [{"name": c.name, "args": dict(c.args)} for c in calls], "usage": usage})
 total_in = usage.get("input_tokens", 0)

View File

@@ -60,7 +60,7 @@ def test_send_emits_events_proper() -> None:
 ai_client.events.on("request_start", start_callback)
 ai_client.events.on("response_received", response_callback)
 ai_client.set_provider("gemini", "gemini-2.5-flash-lite")
-ai_client.send("context", "message", stream_callback=lambda x: None)
+ai_client.send("context", "message", )
 assert start_callback.called
 assert response_callback.called
 args, kwargs = start_callback.call_args