diff --git a/ai_client.py b/ai_client.py
index 8422700..8bb9c67 100644
--- a/ai_client.py
+++ b/ai_client.py
@@ -420,21 +420,26 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
             )
         )

-        full_message = f"\n{md_content}\n\n\n{user_message}"
+        payload_to_send = f"\n{md_content}\n\n\n{user_message}"

         _append_comms("OUT", "request", {
             "message": f"[context {len(md_content)} chars + user message {len(user_message)} chars]",
         })

-        response = _gemini_chat.send_message(full_message)
+        all_text_parts = []
+
+        for round_idx in range(MAX_TOOL_ROUNDS + 1):
+            response = _gemini_chat.send_message(payload_to_send)

-        for round_idx in range(MAX_TOOL_ROUNDS):
             text_parts_raw = [
                 part.text
                 for candidate in response.candidates
                 for part in candidate.content.parts
                 if hasattr(part, "text") and part.text
             ]
+            if text_parts_raw:
+                all_text_parts.append("\n".join(text_parts_raw))
+
             tool_calls = [
                 part.function_call
                 for candidate in response.candidates
@@ -468,6 +473,9 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
             if not tool_calls:
                 break

+            if round_idx >= MAX_TOOL_ROUNDS:
+                break
+
             function_responses = []
             sent_results_log = []
             for fc in tool_calls:
@@ -503,9 +511,6 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
                 )
                 sent_results_log.append({"tool_use_id": TOOL_NAME, "content": output})

-            if not function_responses:
-                break
-
             # Refresh file context after tool calls locally, but DO NOT inject as text part into Gemini.
             # Gemini strictly expects only function_responses in this array.
             if file_items:
@@ -515,15 +520,10 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
                 "results": sent_results_log
             })

-            response = _gemini_chat.send_message(function_responses)
+            payload_to_send = function_responses

-        text_parts = [
-            part.text
-            for candidate in response.candidates
-            for part in candidate.content.parts
-            if hasattr(part, "text") and part.text
-        ]
-        return "\n".join(text_parts)
+        final_text = "\n\n".join(all_text_parts)
+        return final_text if final_text.strip() else "(No text returned by the model)"

     except ProviderError:
         raise
@@ -630,7 +630,9 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
             ),
         })

-        for round_idx in range(MAX_TOOL_ROUNDS):
+        all_text_parts = []
+
+        for round_idx in range(MAX_TOOL_ROUNDS + 1):
             response = _anthropic_client.messages.create(
                 model=_model,
                 max_tokens=8096,
@@ -654,6 +656,9 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
             })

             text_blocks = [b.text for b in response.content if hasattr(b, "text") and b.text]
+            if text_blocks:
+                all_text_parts.append("\n".join(text_blocks))
+
             tool_use_blocks = [
                 {"id": b.id, "name": b.name, "input": b.input}
                 for b in response.content
@@ -682,6 +687,9 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
             if response.stop_reason != "tool_use":
                 break

+            if round_idx >= MAX_TOOL_ROUNDS:
+                break
+
             tool_results = []
             for block in response.content:
                 if getattr(block, "type", None) != "tool_use":
@@ -746,12 +754,8 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
                 ],
             })

-        text_parts = [
-            block.text
-            for block in response.content
-            if hasattr(block, "text") and block.text
-        ]
-        return "\n".join(text_parts)
+        final_text = "\n\n".join(all_text_parts)
+        return final_text if final_text.strip() else "(No text returned by the model)"

     except ProviderError:
         raise
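Both `_send_gemini` and `_send_anthropic` now follow the same control flow: run up to `MAX_TOOL_ROUNDS + 1` model calls, accumulate the text parts of every round rather than only the final one, and feed tool results back as the next payload. The extra iteration, together with the `round_idx >= MAX_TOOL_ROUNDS` guard placed after text collection, lets the round that hits the cap still contribute its text while being barred from triggering further tool execution. Below is a minimal, provider-agnostic sketch of that loop; the four callables and the `max_tool_rounds=8` default are hypothetical stand-ins, not names from ai_client.py.

# Sketch only: the loop shape shared by both provider functions above.
# send, extract_text, extract_tool_calls and run_tools are hypothetical
# stand-ins for the provider-specific calls; max_tool_rounds mirrors the
# MAX_TOOL_ROUNDS constant in ai_client.py (actual value assumed here).
def tool_loop(initial_payload, send, extract_text, extract_tool_calls,
              run_tools, max_tool_rounds=8):
    all_text_parts = []
    payload = initial_payload
    for round_idx in range(max_tool_rounds + 1):  # +1 so the capped round can still answer
        response = send(payload)
        text = extract_text(response)
        if text:
            all_text_parts.append(text)  # keep every round's text, not just the last
        tool_calls = extract_tool_calls(response)
        if not tool_calls:
            break  # model finished without requesting tools
        if round_idx >= max_tool_rounds:
            break  # cap reached: keep collected text, run no more tools
        payload = run_tools(tool_calls)  # tool results become the next request payload
    final_text = "\n\n".join(all_text_parts)
    return final_text if final_text.strip() else "(No text returned by the model)"

This also closes the gap where a round producing only tool calls could end the old loop with an empty string, which appears to be what the "No response from ai..." entries in the discussion history below were recording.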
diff --git a/manual_slop.toml b/manual_slop.toml
index 4a8dc56..923dd92 100644
--- a/manual_slop.toml
+++ b/manual_slop.toml
@@ -29,9 +29,7 @@ paths = [

 [screenshots]
 base_dir = "C:/Users/Ed/scoop/apps/sharex/current/ShareX/Screenshots/2026-02"
-paths = [
-    "C:/Users/Ed/scoop/apps/sharex/current/ShareX/Screenshots/2026-02/python_2026-02-21_22-11-51.png",
-]
+paths = []

 [discussion]
 roles = [
@@ -40,11 +38,11 @@ roles = [
     "Vendor API",
     "System",
 ]
-active = "Support for system prompt"
+active = "Make sure it works well with gemini"

 [discussion.discussions.main]
 git_commit = "3b2da7873f839bf062c06c7539fadd9df7a3e2b9"
-last_updated = "2026-02-21T21:15:21"
+last_updated = "2026-02-21T22:23:30"
 history = [
     "User:\nLets add multiple project support. currently this project would be the \"manual_slop\" project. \nI need to make a separate \"manual_slop.toml\" for this and offload things that would be within a per project to it.\nI also want to have several discussion histories that I can label. \nThis would allow me to stash and continue separate effort's histories..\nIt would be important if these discussions saved the last git commit they were on. (I'll need a buton to manually update it).\nAnd timestamps on each discussion entry to give an idea of the duration involved and \nhow the project has evolved since the discussion stream's effort was last done.\nBasically preserve context so ai can use that as valuable information in its working memory.\n\nTo load a project I'll need to hoist it into the config by providing the path.\nMaybe allow multiple project tomls to be specified path wise in the array.",
     "User:\nOk your started to work on multiple-project support, you added project_manager.py and manaual_slop.toml, its not being used yet. the rest of the codebase hasn't intergrated with the changes.\nThere might be something wrong with powershell tool use. If you suspect so try to fix that but otherwise lets finish the support for multiple projects.",
@@ -65,7 +63,7 @@ history = [

 [discussion.discussions."Support files or mcp, or proper dynamic context uptake."]
 git_commit = "59208b27ec31949cb45cae382dddaee645b53b4f"
-last_updated = "2026-02-21T21:42:55"
+last_updated = "2026-02-21T22:23:32"
 history = [
     "@2026-02-21T21:30:26\nUser:\nLets add support for mcp. Similar to what claude code has, but restricted only to the paths provided. We have a basic summerizer right now but it doesn't work for all code files (we'd need tree-sitter). We need to add support for getting heurstics on what nuances each file has that is valuable for a basic overview before retrieving their content worst case to get a job done.",
     "@2026-02-21T21:30:32\nAI:\nThe PowerShell `Set-Content` write corrupted the file — it re-encoded it as Windows-1252, turning the UTF-8 em-dash (0xE2 0x80 0x94) into 0x97. I need to read the file as bytes, find and fix the corruption, apply the three targeted changes, and write back as UTF-8.",
@@ -78,7 +76,7 @@ history = [

 [discussion.discussions."Word Wrap"]
 git_commit = ""
-last_updated = "2026-02-21T21:15:22"
+last_updated = "2026-02-21T22:23:31"
 history = [
     "@2026-02-21T21:13:42\nUser:\nLets add support for all heavy text panels.",
     "@2026-02-21T21:13:46\nAI:\nThe file is fixed and reading cleanly now. The `gui.py` file has the UTF-8 BOM restored, the em-dash is proper UTF-8 again, and `wrap=0` is in place.\n\n**Summary of what changed:**\n\n- `gui.py` line 80: `wrap=460` → `wrap=0` in `_add_text_field()` — this is the only `dpg.add_text()` call that had a hardcoded pixel wrap width. With `wrap=0`, DearPyGUI wraps the text to the available parent container width dynamically, so it adapts when panels are resized/docked differently.\n\nAll the other text panels (`ai_input`, `ai_response`, `disc_content_*`, tool log results, confirm dialog script) use `dpg.add_input_text(multiline=True, width=-1)` which already wraps to available width automatically.",
@@ -87,7 +85,7 @@ history = [

 [discussion.discussions."Support for system prompt"]
 git_commit = "f258fc5765d647eb821716bce3d2000fb2ba2857"
-last_updated = "2026-02-21T22:13:20"
+last_updated = "2026-02-21T22:23:31"
 history = [
     "@2026-02-21T21:52:16\nUser:\ngemini testing 123 you get this message, first time using you proper in this custom frontend.",
     "@2026-02-21T21:52:17\nAI:\nLoud and clear! I've received your message through your **Manual Slop** frontend. It looks like a very capable environment—I can see you've implemented:\n\n1. **Multi-provider support** (Anthropic and Gemini).\n2. **Tool use/MCP-like capabilities** (file reading, directory listing, searching, etc.).\n3. **Project & Discussion management** via TOML files.\n4. **Rich GUI** using `dearpygui`.\n\nSince this is our first \"proper\" session in this interface, I'm ready to help you develop, debug, or refine the project. I have access to your local files through the tools you've provided, so I can see the codebase and even run PowerShell scripts if needed.\n\nIs there a specific feature you'd like to work on next, or any bugs in the Gemini integration you've noticed during your \"123\" test?",
@@ -98,3 +96,11 @@ history = [
     "@2026-02-21T22:11:52\nUser:\nYep it looks like the system prompts are now available.\n\nQuestion: For how we're doing things right now, you have to re-read files cause its not saved in discussion history rigt per me sending a gen + send?\nIs there a way you can do a routine to save important files to the discussion or upload them temporarily to a gemini cache so that we don't have to worrya bout re-reading read-only things you already gathered info on for an active discussion?\nI'm not sure the depths of how this custom api works or custom tooling, etc gemini api from google provides...",
     "@2026-02-21T22:12:05\nSystem:\nAI got stuck and did nothing...",
 ]
+
+[discussion.discussions."Make sure it works well with gemini"]
+git_commit = "f126cdcb21f57e0fdead682ff6bba7bf6cc29356"
+last_updated = "2026-02-21T22:23:46"
+history = [
+    "@2026-02-21T22:21:06\nUser:\nQuestion: For how we're doing things right now, you have to re-read files cause its not saved in discussion history rigt per me sending a gen + send?\nIs there a way you can do a routine to save important files to the discussion or upload them temporarily to a gemini cache so that we don't have to worrya bout re-reading read-only things you already gathered info on for an active discussion?\nI'm not sure the depths of how this custom api works or custom tooling, etc gemini api from google provides...\n\n(I'm also reviewing using this with gemini to make sure there are no hiccups from how the api is utilized).",
+    "@2026-02-21T22:21:08\nSystem:\nNo response from ai... when done. Other gemini in the anything llm discussion help!!!",
+]