6 Commits

Author SHA1 Message Date
ed 6d825e6585 wip: gemini doing gui_2.py catchup track 2026-02-23 21:07:06 -05:00
ed 3db6a32e7c conductor(plan): Update plan after merge from cache branch 2026-02-23 20:34:14 -05:00
ed c19b13e4ac Merge branch 'origin/cache' 2026-02-23 20:32:49 -05:00
ed 1b9a2ab640 chore: Update discussion timestamp 2026-02-23 20:24:51 -05:00
ed 4300a8a963 conductor(plan): Mark task 'Integrate events.py into gui_2.py' as complete 2026-02-23 20:23:26 -05:00
r00tz 69401365be Port missing features to gui_2 and optimize caching
- Port 10 missing features from gui.py to gui_2.py: performance diagnostics,
  prior session log viewing, token budget visualization, agent tools config,
  API hooks server, GUI task queue, discussion truncation, THINKING/LIVE
  indicators, event subscriptions, and session usage tracking
- Persist window visibility state in config.toml
- Fix Gemini cache invalidation by separating discussion history from cached
  context (use MD5 hash instead of built-in hash)
- Add cost optimizations: tool output truncation at source, proactive history
  trimming at 40%, summary_only support in aggregate.run()
- Add cleanup() for destroying API caches on exit
2026-02-23 20:06:13 -05:00
11 changed files with 804 additions and 407 deletions
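The headline fix in this commit is the Gemini cache key: the ai_client diff below swaps Python's built-in `hash()` for `hashlib.md5`. A likely motivation: `hash()` on strings is salted per interpreter run (PYTHONHASHSEED), so the same stable context hashes differently across restarts and would falsely invalidate the cache. A minimal sketch of the stable key:

```python
import hashlib

def stable_cache_key(md_content: str) -> str:
    # hashlib.md5 is deterministic across processes; the built-in hash()
    # is randomized per run for str and is unsafe as a persistent cache key.
    return hashlib.md5(md_content.encode()).hexdigest()
```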
+14 -1
@@ -164,6 +164,18 @@ def build_markdown_from_items(file_items: list[dict], screenshot_base_dir: Path,
return "\n\n---\n\n".join(parts)
def build_markdown_no_history(file_items: list[dict], screenshot_base_dir: Path, screenshots: list[str], summary_only: bool = False) -> str:
"""Build markdown with only files + screenshots (no history). Used for stable caching."""
return build_markdown_from_items(file_items, screenshot_base_dir, screenshots, history=[], summary_only=summary_only)
def build_discussion_text(history: list[str]) -> str:
"""Build just the discussion history section text. Returns empty string if no history."""
if not history:
return ""
return "## Discussion History\n\n" + build_discussion_section(history)
def build_markdown(base_dir: Path, files: list[str], screenshot_base_dir: Path, screenshots: list[str], history: list[str], summary_only: bool = False) -> str:
parts = []
# STATIC PREFIX: Files and Screenshots must go first to maximize Cache Hits
@@ -195,8 +207,9 @@ def run(config: dict) -> tuple[str, Path, list[dict]]:
output_file = output_dir / f"{namespace}_{increment:03d}.md"
# Build file items once, then construct markdown from them (avoids double I/O)
file_items = build_file_items(base_dir, files)
summary_only = config.get("project", {}).get("summary_only", False)
markdown = build_markdown_from_items(file_items, screenshot_base_dir, screenshots, history,
summary_only=False)
summary_only=summary_only)
output_file.write_text(markdown, encoding="utf-8")
return markdown, output_file, file_items
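These helpers are consumed by gui_2.py's `_do_generate` further down: the stable part (files + screenshots) becomes the cacheable context, and the history text travels separately. A hedged usage sketch; the inputs are invented for illustration:

```python
from pathlib import Path
import aggregate

# Illustrative inputs; real values come from the project's flat config.
file_items = aggregate.build_file_items(Path("."), ["ai_client.py"])
stable_md = aggregate.build_markdown_no_history(file_items, Path("."), [])
disc_text = aggregate.build_discussion_text(["User:\nping", "AI:\npong"])

# stable_md -> cached system instruction (hash-stable across turns);
# disc_text -> ordinary first message, so editing history never busts the cache.
```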
+74 -24
@@ -15,6 +15,8 @@ import tomllib
import json
import time
import datetime
import hashlib
import difflib
from pathlib import Path
import file_cache
import mcp_client
@@ -435,6 +437,13 @@ def _run_script(script: str, base_dir: str) -> str:
return output
def _truncate_tool_output(output: str) -> str:
"""Truncate tool output to _history_trunc_limit chars before sending to API."""
if _history_trunc_limit > 0 and len(output) > _history_trunc_limit:
return output[:_history_trunc_limit] + "\n\n... [TRUNCATED BY SYSTEM TO SAVE TOKENS.]"
return output
# ------------------------------------------------------------------ dynamic file context refresh
def _reread_file_items(file_items: list[dict]) -> tuple[list[dict], list[dict]]:
@@ -460,7 +469,7 @@ def _reread_file_items(file_items: list[dict]) -> tuple[list[dict], list[dict]]:
refreshed.append(item) # unchanged — skip re-read
continue
content = p.read_text(encoding="utf-8")
new_item = {**item, "content": content, "error": False, "mtime": current_mtime}
new_item = {**item, "old_content": item.get("content", ""), "content": content, "error": False, "mtime": current_mtime}
refreshed.append(new_item)
changed.append(new_item)
except Exception as e:
@@ -486,6 +495,35 @@ def _build_file_context_text(file_items: list[dict]) -> str:
return "\n\n---\n\n".join(parts)
_DIFF_LINE_THRESHOLD = 200
def _build_file_diff_text(changed_items: list[dict]) -> str:
"""
Build text for changed files. Small files (<= _DIFF_LINE_THRESHOLD lines)
get full content; large files get a unified diff against old_content.
"""
if not changed_items:
return ""
parts = []
for item in changed_items:
path = item.get("path") or item.get("entry", "unknown")
content = item.get("content", "")
old_content = item.get("old_content", "")
new_lines = content.splitlines(keepends=True)
if len(new_lines) <= _DIFF_LINE_THRESHOLD or not old_content:
suffix = str(path).rsplit(".", 1)[-1] if "." in str(path) else "text"
parts.append(f"### `{path}` (full)\n\n```{suffix}\n{content}\n```")
else:
old_lines = old_content.splitlines(keepends=True)
diff = difflib.unified_diff(old_lines, new_lines, fromfile=str(path), tofile=str(path), lineterm="")
diff_text = "\n".join(diff)
if diff_text:
parts.append(f"### `{path}` (diff)\n\n```diff\n{diff_text}\n```")
else:
parts.append(f"### `{path}` (no changes detected)")
return "\n\n---\n\n".join(parts)
# ------------------------------------------------------------------ content block serialisation
def _content_block_to_dict(block) -> dict:
@@ -530,22 +568,26 @@ def _get_gemini_history_list(chat):
return chat.get_history()
return []
def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items: list[dict] | None = None) -> str:
def _send_gemini(md_content: str, user_message: str, base_dir: str,
file_items: list[dict] | None = None,
discussion_history: str = "") -> str:
global _gemini_chat, _gemini_cache, _gemini_cache_md_hash, _gemini_cache_created_at
try:
_ensure_gemini_client(); mcp_client.configure(file_items or [], [base_dir])
# Only stable content (files + screenshots) goes in the cached system instruction.
# Discussion history is sent as conversation messages so the cache isn't invalidated every turn.
sys_instr = f"{_get_combined_system_prompt()}\n\n<context>\n{md_content}\n</context>"
tools_decl = [_gemini_tool_declaration()]
# DYNAMIC CONTEXT: Check if files/context changed mid-session
current_md_hash = hash(md_content)
current_md_hash = hashlib.md5(md_content.encode()).hexdigest()
old_history = None
if _gemini_chat and _gemini_cache_md_hash != current_md_hash:
old_history = list(_get_gemini_history_list(_gemini_chat)) if _get_gemini_history_list(_gemini_chat) else []
if _gemini_cache:
try: _gemini_client.caches.delete(name=_gemini_cache.name)
except: pass
except Exception as e: _append_comms("OUT", "request", {"message": f"[CACHE DELETE WARN] {e}"})
_gemini_chat = None
_gemini_cache = None
_gemini_cache_created_at = None
@@ -558,7 +600,7 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
if elapsed > _GEMINI_CACHE_TTL * 0.9:
old_history = list(_get_gemini_history_list(_gemini_chat)) if _get_gemini_history_list(_gemini_chat) else []
try: _gemini_client.caches.delete(name=_gemini_cache.name)
except: pass
except Exception as e: _append_comms("OUT", "request", {"message": f"[CACHE DELETE WARN] {e}"})
_gemini_chat = None
_gemini_cache = None
_gemini_cache_created_at = None
@@ -602,6 +644,12 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
_gemini_chat = _gemini_client.chats.create(**kwargs)
_gemini_cache_md_hash = current_md_hash
# Inject discussion history as a user message on first chat creation
# (only when there's no old_history being restored, i.e., fresh session)
if discussion_history and not old_history:
_gemini_chat.send_message(f"[DISCUSSION HISTORY]\n\n{discussion_history}")
_append_comms("OUT", "request", {"message": f"[HISTORY INJECTED] {len(discussion_history)} chars"})
_append_comms("OUT", "request", {"message": f"[ctx {len(md_content)} + msg {len(user_message)}]"})
payload, all_text = user_message, []
@@ -634,26 +682,19 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
if cached_tokens:
usage["cache_read_input_tokens"] = cached_tokens
# Fetch cache stats in the background thread to avoid blocking GUI
cache_stats = None
try:
cache_stats = get_gemini_cache_stats()
except Exception:
pass
events.emit("response_received", payload={"provider": "gemini", "model": _model, "usage": usage, "round": r_idx, "cache_stats": cache_stats})
events.emit("response_received", payload={"provider": "gemini", "model": _model, "usage": usage, "round": r_idx})
reason = resp.candidates[0].finish_reason.name if resp.candidates and hasattr(resp.candidates[0], "finish_reason") else "STOP"
_append_comms("IN", "response", {"round": r_idx, "stop_reason": reason, "text": txt, "tool_calls": [{"name": c.name, "args": dict(c.args)} for c in calls], "usage": usage})
# Guard: if Gemini reports input tokens approaching the limit, drop oldest history pairs
# Guard: proactively trim history when input tokens exceed 40% of limit
total_in = usage.get("input_tokens", 0)
if total_in > _GEMINI_MAX_INPUT_TOKENS and _gemini_chat and _get_gemini_history_list(_gemini_chat):
if total_in > _GEMINI_MAX_INPUT_TOKENS * 0.4 and _gemini_chat and _get_gemini_history_list(_gemini_chat):
hist = _get_gemini_history_list(_gemini_chat)
dropped = 0
# Drop oldest pairs (user+model) but keep at least the last 2 entries
while len(hist) > 4 and total_in > _GEMINI_MAX_INPUT_TOKENS * 0.7:
while len(hist) > 4 and total_in > _GEMINI_MAX_INPUT_TOKENS * 0.3:
# Drop in pairs (user + model) to maintain alternating roles required by Gemini
saved = 0
for _ in range(2):
@@ -689,11 +730,12 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items:
if i == len(calls) - 1:
if file_items:
file_items, changed = _reread_file_items(file_items)
ctx = _build_file_context_text(changed)
ctx = _build_file_diff_text(changed)
if ctx:
out += f"\n\n[SYSTEM: FILES UPDATED]\n\n{ctx}"
if r_idx == MAX_TOOL_ROUNDS: out += "\n\n[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]"
out = _truncate_tool_output(out)
f_resps.append(types.Part.from_function_response(name=name, response={"output": out}))
log.append({"tool_use_id": name, "content": out})
events.emit("tool_execution", payload={"status": "completed", "tool": name, "result": out, "round": r_idx})
@@ -955,7 +997,7 @@ def _repair_anthropic_history(history: list[dict]):
})
def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_items: list[dict] | None = None) -> str:
def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_items: list[dict] | None = None, discussion_history: str = "") -> str:
try:
_ensure_anthropic_client()
mcp_client.configure(file_items or [], [base_dir])
@@ -969,6 +1011,10 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
context_blocks = _build_chunked_context_blocks(context_text)
system_blocks = stable_blocks + context_blocks
# Prepend discussion history to the first user message if this is a fresh session
if discussion_history and not _anthropic_history:
user_content = [{"type": "text", "text": f"[DISCUSSION HISTORY]\n\n{discussion_history}\n\n---\n\n{user_message}"}]
else:
user_content = [{"type": "text", "text": user_message}]
# COMPRESS HISTORY: Truncate massive tool outputs from previous turns
@@ -1089,7 +1135,7 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
tool_results.append({
"type": "tool_result",
"tool_use_id": b_id,
"content": output,
"content": _truncate_tool_output(output),
})
events.emit("tool_execution", payload={"status": "completed", "tool": b_name, "result": output, "round": round_idx})
elif b_name == TOOL_NAME:
@@ -1108,14 +1154,14 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
tool_results.append({
"type": "tool_result",
"tool_use_id": b_id,
"content": output,
"content": _truncate_tool_output(output),
})
events.emit("tool_execution", payload={"status": "completed", "tool": b_name, "result": output, "round": round_idx})
# Refresh file context after tool calls — only inject CHANGED files
if file_items:
file_items, changed = _reread_file_items(file_items)
refreshed_ctx = _build_file_context_text(changed)
refreshed_ctx = _build_file_diff_text(changed)
if refreshed_ctx:
tool_results.append({
"type": "text",
@@ -1160,20 +1206,24 @@ def send(
user_message: str,
base_dir: str = ".",
file_items: list[dict] | None = None,
discussion_history: str = "",
) -> str:
"""
Send a message to the active provider.
md_content : aggregated markdown string from aggregate.run()
md_content : aggregated markdown string (for Gemini: stable content only,
for Anthropic: full content including history)
user_message : the user question / instruction
base_dir : project base directory (for PowerShell tool calls)
file_items : list of file dicts from aggregate.build_file_items() for
dynamic context refresh after tool calls
discussion_history : discussion history text (used by Gemini to inject as
conversation message instead of caching it)
"""
if _provider == "gemini":
return _send_gemini(md_content, user_message, base_dir, file_items)
return _send_gemini(md_content, user_message, base_dir, file_items, discussion_history)
elif _provider == "anthropic":
return _send_anthropic(md_content, user_message, base_dir, file_items)
return _send_anthropic(md_content, user_message, base_dir, file_items, discussion_history)
raise ValueError(f"unknown provider: {_provider}")
def get_history_bleed_stats() -> dict:
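The proactive-trimming hunk above appears only in fragments. A self-contained sketch of the drop-in-pairs idea it implements; `estimate_tokens` is a stand-in (the real loop re-reads usage metadata rather than estimating):

```python
def trim_history_pairs(hist: list, total_in: int, limit: int,
                       estimate_tokens=lambda entry: 500) -> int:
    """Drop oldest (user, model) pairs until usage falls to ~30% of limit.

    Pairs are removed together so the remaining history keeps the
    alternating user/model roles the Gemini API requires. Sketch only;
    estimate_tokens is a hypothetical helper for illustration.
    """
    dropped = 0
    while len(hist) > 4 and total_in > limit * 0.3:
        for _ in range(2):              # one user + one model entry
            if len(hist) <= 4:
                break
            total_in -= estimate_tokens(hist[0])
            del hist[0]
            dropped += 1
    return dropped
```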
+1 -1
@@ -15,5 +15,5 @@ This file tracks all major tracks for the project. Each track has its own detail
---
- [ ] **Track: get gui_2 working with latest changes to the project.**
- [~] **Track: get gui_2 working with latest changes to the project.**
*Link: [./tracks/gui2_feature_parity_20260223/](./tracks/gui2_feature_parity_20260223/)*
@@ -2,39 +2,39 @@
## Phase 1: Core Architectural Integration
- [ ] **Task:** Integrate `events.py` into `gui_2.py`.
- [ ] Sub-task: Import the `events` module in `gui_2.py`.
- [ ] Sub-task: Refactor the `ai_client` call in `_do_send` to use the event-driven `send` method.
- [ ] Sub-task: Create event handlers in `App` class for `request_start`, `response_received`, and `tool_execution`.
- [ ] Sub-task: Subscribe the handlers to `ai_client.events` upon `App` initialization.
- [ ] **Task:** Integrate `mcp_client.py` for native file tools.
- [x] **Task:** Integrate `events.py` into `gui_2.py`. [24b831c]
- [x] Sub-task: Import the `events` module in `gui_2.py`.
- [x] Sub-task: Refactor the `ai_client` call in `_do_send` to use the event-driven `send` method.
- [x] Sub-task: Create event handlers in `App` class for `request_start`, `response_received`, and `tool_execution`.
- [x] Sub-task: Subscribe the handlers to `ai_client.events` upon `App` initialization.
- [~] **Task:** Integrate `mcp_client.py` for native file tools.
- [ ] Sub-task: Import `mcp_client` in `gui_2.py`.
- [ ] Sub-task: Add `mcp_client.perf_monitor_callback` to the `App` initialization.
- [ ] Sub-task: In `ai_client`, ensure the MCP tools are registered and available for the AI to call when `gui_2.py` is the active UI.
- [ ] **Task:** Write tests for new core integrations.
- [ ] Sub-task: Create `tests/test_gui2_events.py` to verify that `gui_2.py` correctly handles AI lifecycle events.
- [x] Sub-task: Create `tests/test_gui2_events.py` to verify that `gui_2.py` correctly handles AI lifecycle events.
- [ ] Sub-task: Create `tests/test_gui2_mcp.py` to verify that the AI can use MCP tools through `gui_2.py`.
- [ ] **Task:** Conductor - User Manual Verification 'Core Architectural Integration' (Protocol in workflow.md)
## Phase 2: Major Feature Implementation
- [ ] **Task:** Port the API Hooks System.
- [ ] Sub-task: Import `api_hooks` in `gui_2.py`.
- [ ] Sub-task: Instantiate `HookServer` in the `App` class.
- [ ] Sub-task: Implement the logic to start the server based on a CLI flag (e.g., `--enable-test-hooks`).
- [ ] Sub-task: Implement the queue and lock for pending GUI tasks from the hook server, similar to `gui.py`.
- [ ] Sub-task: Add a main loop task to process the GUI task queue.
- [ ] **Task:** Port the Performance & Diagnostics feature.
- [ ] Sub-task: Import `PerformanceMonitor` in `gui_2.py`.
- [ ] Sub-task: Instantiate `PerformanceMonitor` in the `App` class.
- [ ] Sub-task: Create a new "Diagnostics" window in `gui_2.py`.
- [ ] Sub-task: Add UI elements (plots, labels) to the Diagnostics window to display FPS, CPU, frame time, etc.
- [ ] Sub-task: Add a throttled update mechanism in the main loop to refresh diagnostics data.
- [ ] **Task:** Implement the Prior Session Viewer.
- [ ] Sub-task: Add a "Load Prior Session" button to the UI.
- [ ] Sub-task: Implement the file dialog logic to select a `.log` file.
- [ ] Sub-task: Implement the logic to parse the log file and populate the comms history view.
- [ ] Sub-task: Implement the "tinted" theme application when in viewing mode and a way to exit this mode.
- [x] **Task:** Port the API Hooks System. [merged]
- [x] Sub-task: Import `api_hooks` in `gui_2.py`.
- [x] Sub-task: Instantiate `HookServer` in the `App` class.
- [x] Sub-task: Implement the logic to start the server based on a CLI flag (e.g., `--enable-test-hooks`).
- [x] Sub-task: Implement the queue and lock for pending GUI tasks from the hook server, similar to `gui.py`.
- [x] Sub-task: Add a main loop task to process the GUI task queue.
- [x] **Task:** Port the Performance & Diagnostics feature. [merged]
- [x] Sub-task: Import `PerformanceMonitor` in `gui_2.py`.
- [x] Sub-task: Instantiate `PerformanceMonitor` in the `App` class.
- [x] Sub-task: Create a new "Diagnostics" window in `gui_2.py`.
- [x] Sub-task: Add UI elements (plots, labels) to the Diagnostics window to display FPS, CPU, frame time, etc.
- [x] Sub-task: Add a throttled update mechanism in the main loop to refresh diagnostics data.
- [x] **Task:** Implement the Prior Session Viewer. [merged]
- [x] Sub-task: Add a "Load Prior Session" button to the UI.
- [x] Sub-task: Implement the file dialog logic to select a `.log` file.
- [x] Sub-task: Implement the logic to parse the log file and populate the comms history view.
- [x] Sub-task: Implement the "tinted" theme application when in viewing mode and a way to exit this mode.
- [ ] **Task:** Write tests for major features.
- [ ] Sub-task: Create `tests/test_gui2_api_hooks.py` to test the hook server integration.
- [ ] Sub-task: Create `tests/test_gui2_diagnostics.py` to verify the diagnostics panel displays data.
@@ -47,14 +47,14 @@
- [ ] Sub-task: Create wrapper windows for "Context Hub", "AI Settings Hub", "Discussion Hub", and "Operations Hub" in `gui_2.py`.
- [ ] Sub-task: Move existing windows into their respective Hubs using the `imgui-bundle` docking API.
- [ ] Sub-task: Ensure the default layout is saved to and loaded from `manualslop_layout.ini`.
- [ ] **Task:** Add Agent Capability Toggles to the UI.
- [ ] Sub-task: In the "Projects" or a new "Agent" panel, add checkboxes for each agent tool (e.g., `run_powershell`, `read_file`).
- [ ] Sub-task: Ensure these UI toggles are saved to the project's `.toml` file.
- [ ] Sub-task: Ensure `ai_client` respects these settings when determining which tools are available to the AI.
- [ ] **Task:** Full Theme Integration.
- [ ] Sub-task: Review all newly added windows and controls.
- [ ] Sub-task: Ensure that colors, fonts, and scaling from `theme_2.py` are correctly applied everywhere.
- [ ] Sub-task: Test theme switching to confirm all elements update correctly.
- [x] **Task:** Add Agent Capability Toggles to the UI. [merged]
- [x] Sub-task: In the "Projects" or a new "Agent" panel, add checkboxes for each agent tool (e.g., `run_powershell`, `read_file`).
- [x] Sub-task: Ensure these UI toggles are saved to the project's `.toml` file.
- [x] Sub-task: Ensure `ai_client` respects these settings when determining which tools are available to the AI.
- [x] **Task:** Full Theme Integration. [merged]
- [x] Sub-task: Review all newly added windows and controls.
- [x] Sub-task: Ensure that colors, fonts, and scaling from `theme_2.py` are correctly applied everywhere.
- [x] Sub-task: Test theme switching to confirm all elements update correctly.
- [ ] **Task:** Write tests for UI/UX changes.
- [ ] Sub-task: Create `tests/test_gui2_layout.py` to verify the hub structure is created.
- [ ] Sub-task: Add tests to verify agent capability toggles are respected.
+1
@@ -136,6 +136,7 @@ For features involving the GUI or complex internal state, unit tests are often i
# The GUI is now running on port 8999
...
```
Note: pytest must be run with `uv`.
3. **Verify via ApiHookClient:** Use the `ApiHookClient` in `api_hook_client.py` to interact with the running application. It includes robust retry logic and health checks.
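A usage sketch for that verification step. `ApiHookClient`'s real interface isn't shown in this diff, so the constructor and method names below are assumptions based only on the description above (retry logic, health checks, GUI on port 8999):

```python
from api_hook_client import ApiHookClient

client = ApiHookClient(port=8999)   # port from the example above; ctor signature assumed
client.wait_until_healthy()         # hypothetical health-check helper
print(client.get_state())           # hypothetical state query against the running GUI
```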
+22 -8
@@ -1,15 +1,15 @@
[ai]
provider = "gemini"
model = "gemini-2.5-flash"
temperature = 0.6000000238418579
max_tokens = 12000
model = "gemini-2.0-flash"
temperature = 0.0
max_tokens = 8192
history_trunc_limit = 8000
system_prompt = "DO NOT EVER make a shell script unless told to. DO NOT EVER make a readme or a file describing your changes unless your are told to. If you have commands I should be entering into the command line or if you have something to explain to me, please just use code blocks or normal text output. DO NOT DO ANYTHING OTHER THAN WHAT YOU WERE TOLD TODO. DO NOT EVER, EVER DO ANYTHING OTHER THAN WHAT YOU WERE TOLD TO DO. IF YOU WANT TO DO OTHER THINGS, SIMPLY SUGGEST THEM, AND THEN I WILL REVIEW YOUR CHANGES, AND MAKE THE DECISION ON HOW TO PROCEED. WHEN WRITING SCRIPTS USE A 120-160 character limit per line. I don't want to see scrunched code.\n"
system_prompt = ""
[theme]
palette = "10x Dark"
font_path = "C:/Users/Ed/AppData/Local/uv/cache/archive-v0/WSthkYsQ82b_ywV6DkiaJ/pygame_gui/data/FiraCode-Regular.ttf"
font_size = 18.0
palette = "Gold"
font_path = ""
font_size = 14.0
scale = 1.0
[projects]
@@ -18,4 +18,18 @@ paths = [
"C:/projects/forth/bootslop/bootslop.toml",
"C:\\projects\\manual_slop\\tests\\temp_project.toml",
]
active = "C:\\projects\\manual_slop\\tests\\temp_project.toml"
active = "manual_slop.toml"
[gui.show_windows]
Projects = true
Files = true
Screenshots = true
"Discussion History" = true
Provider = true
Message = true
Response = true
"Tool Calls" = true
"Comms History" = true
"System Prompts" = true
Theme = true
Diagnostics = true
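gui_2.py (below) merges this saved `[gui.show_windows]` table over its defaults, so windows added in newer builds still appear when an older config omits them. The pattern, as a minimal sketch (path and abbreviated defaults are illustrative):

```python
import tomllib

_default_windows = {"Projects": True, "Files": True, "Diagnostics": False}  # abbreviated

with open("config.toml", "rb") as f:   # illustrative path; tomllib needs binary mode
    cfg = tomllib.load(f)

saved = cfg.get("gui", {}).get("show_windows", {})
show_windows = {k: saved.get(k, v) for k, v in _default_windows.items()}  # saved wins, defaults fill gaps
```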
+361 -29
@@ -4,6 +4,8 @@ import threading
import time
import math
import json
import sys
import os
from pathlib import Path
from tkinter import filedialog, Tk
import aggregate
@@ -15,6 +17,9 @@ import project_manager
import theme_2 as theme
import tomllib
import events
import numpy as np
import api_hooks
from performance_monitor import PerformanceMonitor
from imgui_bundle import imgui, hello_imgui, immapp
@@ -57,6 +62,15 @@ KIND_COLORS = {"request": C_REQ, "response": C_RES, "tool_call": C_TC, "tool_res
HEAVY_KEYS = {"message", "text", "script", "output", "content"}
DISC_ROLES = ["User", "AI", "Vendor API", "System"]
AGENT_TOOL_NAMES = ["run_powershell", "read_file", "list_directory", "search_files", "get_file_summary", "web_search", "fetch_url"]
def truncate_entries(entries: list[dict], max_pairs: int) -> list[dict]:
if max_pairs <= 0:
return []
target_count = max_pairs * 2
if len(entries) <= target_count:
return entries
return entries[-target_count:]
def _parse_history_entries(history: list[str], roles: list[str] | None = None) -> list[dict]:
known = roles if roles is not None else DISC_ROLES
@@ -120,6 +134,7 @@ class App:
self.ui_project_main_context = proj_meta.get("main_context", "")
self.ui_project_system_prompt = proj_meta.get("system_prompt", "")
self.ui_word_wrap = proj_meta.get("word_wrap", True)
self.ui_summary_only = proj_meta.get("summary_only", False)
self.ui_auto_add_history = disc_sec.get("auto_add", False)
self.ui_global_system_prompt = self.config.get("ai", {}).get("system_prompt", "")
@@ -140,7 +155,7 @@ class App:
self.send_thread: threading.Thread | None = None
self.models_thread: threading.Thread | None = None
self.show_windows = {
_default_windows = {
"Projects": True,
"Files": True,
"Screenshots": True,
@@ -152,7 +167,10 @@ class App:
"Comms History": True,
"System Prompts": True,
"Theme": True,
"Diagnostics": False,
}
saved = self.config.get("gui", {}).get("show_windows", {})
self.show_windows = {k: saved.get(k, v) for k, v in _default_windows.items()}
self.show_script_output = False
self.show_text_viewer = False
self.text_viewer_title = ""
@@ -182,21 +200,49 @@ class App:
self._scroll_disc_to_bottom = False
# GUI Task Queue (thread-safe, for event handlers and hook server)
self._pending_gui_tasks: list[dict] = []
self._pending_gui_tasks_lock = threading.Lock()
# Session usage tracking
self.session_usage = {"input_tokens": 0, "output_tokens": 0, "cache_read_input_tokens": 0, "cache_creation_input_tokens": 0}
# Token budget / cache telemetry
self._token_budget_pct = 0.0
self._token_budget_current = 0
self._token_budget_limit = 0
self._gemini_cache_text = ""
# Discussion truncation
self.ui_disc_truncate_pairs: int = 2
# Agent tools config
agent_tools_cfg = self.project.get("agent", {}).get("tools", {})
self.ui_agent_tools: dict[str, bool] = {t: agent_tools_cfg.get(t, True) for t in AGENT_TOOL_NAMES}
# Prior session log viewing
self.is_viewing_prior_session = False
self.prior_session_entries: list[dict] = []
# API Hooks
self.test_hooks_enabled = ("--enable-test-hooks" in sys.argv) or (os.environ.get("SLOP_TEST_HOOKS") == "1")
# Performance monitoring
self.perf_monitor = PerformanceMonitor()
self.perf_history = {"frame_time": [0.0]*100, "fps": [0.0]*100, "cpu": [0.0]*100, "input_lag": [0.0]*100}
self._perf_last_update = 0.0
session_logger.open_session()
ai_client.set_provider(self.current_provider, self.current_model)
ai_client.confirm_and_run_callback = self._confirm_and_run
ai_client.comms_log_callback = self._on_comms_entry
ai_client.tool_log_callback = self._on_tool_log
# Subscribe to API lifecycle events
# AI client event subscriptions
ai_client.events.on("request_start", self._on_api_event)
ai_client.events.on("response_received", self._on_api_event)
ai_client.events.on("tool_execution", self._on_api_event)
def _on_api_event(self, *args, **kwargs):
"""Callback for ai_client events. For now, a placeholder."""
pass
# ---------------------------------------------------------------- project loading
def _load_active_project(self):
@@ -263,6 +309,10 @@ class App:
self.ui_project_main_context = proj.get("project", {}).get("main_context", "")
self.ui_auto_add_history = proj.get("discussion", {}).get("auto_add", False)
self.ui_word_wrap = proj.get("project", {}).get("word_wrap", True)
self.ui_summary_only = proj.get("project", {}).get("summary_only", False)
agent_tools_cfg = proj.get("agent", {}).get("tools", {})
self.ui_agent_tools = {t: agent_tools_cfg.get(t, True) for t in AGENT_TOOL_NAMES}
def _save_active_project(self):
if self.active_project_path:
@@ -347,6 +397,76 @@ class App:
def _on_tool_log(self, script: str, result: str):
session_logger.log_tool_call(script, result, None)
def _on_api_event(self, *args, **kwargs):
payload = kwargs.get("payload", {})
with self._pending_gui_tasks_lock:
self._pending_gui_tasks.append({"action": "refresh_api_metrics", "payload": payload})
def _process_pending_gui_tasks(self):
if not self._pending_gui_tasks:
return
with self._pending_gui_tasks_lock:
tasks = self._pending_gui_tasks[:]
self._pending_gui_tasks.clear()
for task in tasks:
try:
action = task.get("action")
if action == "refresh_api_metrics":
self._refresh_api_metrics(task.get("payload", {}))
except Exception as e:
print(f"Error executing GUI task: {e}")
def _recalculate_session_usage(self):
usage = {"input_tokens": 0, "output_tokens": 0, "cache_read_input_tokens": 0, "cache_creation_input_tokens": 0}
for entry in ai_client.get_comms_log():
if entry.get("kind") == "response" and "usage" in entry.get("payload", {}):
u = entry["payload"]["usage"]
for k in usage.keys():
usage[k] += u.get(k, 0) or 0
self.session_usage = usage
def _refresh_api_metrics(self, payload: dict):
self._recalculate_session_usage()
try:
stats = ai_client.get_history_bleed_stats()
self._token_budget_pct = stats.get("percentage", 0.0) / 100.0
self._token_budget_current = stats.get("current", 0)
self._token_budget_limit = stats.get("limit", 0)
except Exception:
pass
cache_stats = payload.get("cache_stats")
if cache_stats:
count = cache_stats.get("cache_count", 0)
size_bytes = cache_stats.get("total_size_bytes", 0)
self._gemini_cache_text = f"Gemini Caches: {count} ({size_bytes / 1024:.1f} KB)"
def cb_load_prior_log(self):
root = hide_tk_root()
path = filedialog.askopenfilename(
title="Load Session Log",
initialdir="logs",
filetypes=[("Log/JSONL", "*.log *.jsonl"), ("All Files", "*.*")]
)
root.destroy()
if not path:
return
entries = []
try:
with open(path, "r", encoding="utf-8") as f:
for line in f:
line = line.strip()
if line:
try:
entries.append(json.loads(line))
except json.JSONDecodeError:
continue
except Exception as e:
self.ai_status = f"log load error: {e}"
return
self.prior_session_entries = entries
self.is_viewing_prior_session = True
self.ai_status = f"viewing prior session: {Path(path).name} ({len(entries)} entries)"
def _confirm_and_run(self, script: str, base_dir: str) -> str | None:
dialog = ConfirmDialog(script, base_dir)
with self._pending_dialog_lock:
@@ -383,6 +503,11 @@ class App:
proj["project"]["system_prompt"] = self.ui_project_system_prompt
proj["project"]["main_context"] = self.ui_project_main_context
proj["project"]["word_wrap"] = self.ui_word_wrap
proj["project"]["summary_only"] = self.ui_summary_only
proj.setdefault("agent", {}).setdefault("tools", {})
for t_name in AGENT_TOOL_NAMES:
proj["agent"]["tools"][t_name] = self.ui_agent_tools.get(t_name, True)
self._flush_disc_entries_to_project()
disc_sec = proj.setdefault("discussion", {})
@@ -400,15 +525,26 @@ class App:
}
self.config["ai"]["system_prompt"] = self.ui_global_system_prompt
self.config["projects"] = {"paths": self.project_paths, "active": self.active_project_path}
self.config["gui"] = {"show_windows": self.show_windows}
theme.save_to_config(self.config)
def _do_generate(self) -> tuple[str, Path, list]:
def _do_generate(self) -> tuple[str, Path, list, str, str]:
"""Returns (full_md, output_path, file_items, stable_md, discussion_text)."""
self._flush_to_project()
self._save_active_project()
self._flush_to_config()
save_config(self.config)
flat = project_manager.flat_config(self.project, self.active_discussion)
return aggregate.run(flat)
full_md, path, file_items = aggregate.run(flat)
# Build stable markdown (no history) for Gemini caching
screenshot_base_dir = Path(flat.get("screenshots", {}).get("base_dir", "."))
screenshots = flat.get("screenshots", {}).get("paths", [])
summary_only = flat.get("project", {}).get("summary_only", False)
stable_md = aggregate.build_markdown_no_history(file_items, screenshot_base_dir, screenshots, summary_only=summary_only)
# Build discussion history text separately
history = flat.get("discussion", {}).get("history", [])
discussion_text = aggregate.build_discussion_text(history)
return full_md, path, file_items, stable_md, discussion_text
def _fetch_models(self, provider: str):
self.ai_status = "fetching models..."
@@ -454,23 +590,7 @@ class App:
# ---------------------------------------------------------------- gui
def _gui_func(self):
# Sync pending comms
with self._pending_comms_lock:
for c in self._pending_comms:
self._comms_log.append(c)
self._pending_comms.clear()
with self._pending_history_adds_lock:
if self._pending_history_adds:
self._scroll_disc_to_bottom = True
for item in self._pending_history_adds:
if item["role"] not in self.disc_roles:
self.disc_roles.append(item["role"])
self.disc_entries.append(item)
self._pending_history_adds.clear()
if imgui.begin_main_menu_bar():
def _show_menus(self):
if imgui.begin_menu("Windows"):
for w in self.show_windows.keys():
_, self.show_windows[w] = imgui.menu_item(w, "", self.show_windows[w])
@@ -498,7 +618,57 @@ class App:
except Exception as e:
self.ai_status = f"error: {e}"
imgui.end_menu()
imgui.end_main_menu_bar()
def _gui_func(self):
self.perf_monitor.start_frame()
# Process GUI task queue
self._process_pending_gui_tasks()
# Sync pending comms
with self._pending_comms_lock:
for c in self._pending_comms:
self._comms_log.append(c)
self._pending_comms.clear()
with self._pending_history_adds_lock:
if self._pending_history_adds:
self._scroll_disc_to_bottom = True
for item in self._pending_history_adds:
if item["role"] not in self.disc_roles:
self.disc_roles.append(item["role"])
self.disc_entries.append(item)
self._pending_history_adds.clear()
# if imgui.begin_main_menu_bar():
# if imgui.begin_menu("Windows"):
# for w in self.show_windows.keys():
# _, self.show_windows[w] = imgui.menu_item(w, "", self.show_windows[w])
# imgui.end_menu()
# if imgui.begin_menu("Project"):
# if imgui.menu_item("Save All", "", False)[0]:
# self._flush_to_project()
# self._save_active_project()
# self._flush_to_config()
# save_config(self.config)
# self.ai_status = "config saved"
# if imgui.menu_item("Reset Session", "", False)[0]:
# ai_client.reset_session()
# ai_client.clear_comms_log()
# self._tool_log.clear()
# self._comms_log.clear()
# self.ai_status = "session reset"
# self.ai_response = ""
# if imgui.menu_item("Generate MD Only", "", False)[0]:
# try:
# md, path, *_ = self._do_generate()
# self.last_md = md
# self.last_md_path = path
# self.ai_status = f"md written: {path.name}"
# except Exception as e:
# self.ai_status = f"error: {e}"
# imgui.end_menu()
# imgui.end_main_menu_bar()
# ---- Projects
if self.show_windows["Projects"]:
@@ -586,6 +756,14 @@ class App:
self.ai_status = "config saved"
ch, self.ui_word_wrap = imgui.checkbox("Word-Wrap (Read-only panels)", self.ui_word_wrap)
ch, self.ui_summary_only = imgui.checkbox("Summary Only (send file structure, not full content)", self.ui_summary_only)
if imgui.collapsing_header("Agent Tools"):
for t_name in AGENT_TOOL_NAMES:
val = self.ui_agent_tools.get(t_name, True)
ch, val = imgui.checkbox(f"Enable {t_name}", val)
if ch:
self.ui_agent_tools[t_name] = val
imgui.end()
# ---- Files
@@ -665,7 +843,50 @@ class App:
if self.show_windows["Discussion History"]:
exp, self.show_windows["Discussion History"] = imgui.begin("Discussion History", self.show_windows["Discussion History"])
if exp:
if imgui.collapsing_header("Discussions", imgui.TreeNodeFlags_.default_open):
# THINKING indicator
is_thinking = self.ai_status in ["sending..."]
if is_thinking:
val = math.sin(time.time() * 10 * math.pi)
alpha = 1.0 if val > 0 else 0.0
imgui.text_colored(imgui.ImVec4(1.0, 0.39, 0.39, alpha), "THINKING...")
imgui.separator()
# Prior session viewing mode
if self.is_viewing_prior_session:
imgui.push_style_color(imgui.Col_.child_bg, vec4(50, 40, 20))
imgui.text_colored(vec4(255, 200, 100), "VIEWING PRIOR SESSION")
imgui.same_line()
if imgui.button("Exit Prior Session"):
self.is_viewing_prior_session = False
self.prior_session_entries.clear()
imgui.separator()
imgui.begin_child("prior_scroll", imgui.ImVec2(0, 0), False)
for idx, entry in enumerate(self.prior_session_entries):
imgui.push_id(f"prior_{idx}")
kind = entry.get("kind", entry.get("type", ""))
imgui.text_colored(C_LBL, f"#{idx+1}")
imgui.same_line()
ts = entry.get("ts", entry.get("timestamp", ""))
if ts:
imgui.text_colored(vec4(160, 160, 160), str(ts))
imgui.same_line()
imgui.text_colored(C_KEY, str(kind))
payload = entry.get("payload", entry)
text = payload.get("text", payload.get("message", payload.get("content", "")))
if text:
preview = str(text).replace("\n", " ")[:200]
if self.ui_word_wrap:
imgui.push_text_wrap_pos(imgui.get_content_region_avail().x)
imgui.text(preview)
imgui.pop_text_wrap_pos()
else:
imgui.text(preview)
imgui.separator()
imgui.pop_id()
imgui.end_child()
imgui.pop_style_color()
if not self.is_viewing_prior_session and imgui.collapsing_header("Discussions", imgui.TreeNodeFlags_.default_open):
names = self._get_discussion_names()
if imgui.begin_combo("##disc_sel", self.active_discussion):
@@ -712,6 +933,7 @@ class App:
if imgui.button("Delete"):
self._delete_discussion(self.active_discussion)
if not self.is_viewing_prior_session:
imgui.separator()
if imgui.button("+ Entry"):
self.disc_entries.append({"role": self.disc_roles[0] if self.disc_roles else "User", "content": "", "collapsed": False, "ts": project_manager.now_ts()})
@@ -731,8 +953,22 @@ class App:
self._flush_to_config()
save_config(self.config)
self.ai_status = "discussion saved"
imgui.same_line()
if imgui.button("Load Log"):
self.cb_load_prior_log()
ch, self.ui_auto_add_history = imgui.checkbox("Auto-add message & response to history", self.ui_auto_add_history)
# Truncation controls
imgui.text("Keep Pairs:")
imgui.same_line()
imgui.set_next_item_width(80)
ch, self.ui_disc_truncate_pairs = imgui.input_int("##trunc_pairs", self.ui_disc_truncate_pairs, 1)
if self.ui_disc_truncate_pairs < 1: self.ui_disc_truncate_pairs = 1
imgui.same_line()
if imgui.button("Truncate"):
self.disc_entries = truncate_entries(self.disc_entries, self.ui_disc_truncate_pairs)
self.ai_status = f"history truncated to {self.ui_disc_truncate_pairs} pairs"
imgui.separator()
if imgui.collapsing_header("Roles"):
@@ -846,12 +1082,32 @@ class App:
ch, self.temperature = imgui.slider_float("Temperature", self.temperature, 0.0, 2.0, "%.2f")
ch, self.max_tokens = imgui.input_int("Max Tokens (Output)", self.max_tokens, 1024)
ch, self.history_trunc_limit = imgui.input_int("History Truncation Limit", self.history_trunc_limit, 1024)
imgui.separator()
imgui.text("Telemetry")
usage = self.session_usage
total = usage["input_tokens"] + usage["output_tokens"]
imgui.text_colored(C_RES, f"Tokens: {total:,} (In: {usage['input_tokens']:,} Out: {usage['output_tokens']:,})")
if usage["cache_read_input_tokens"]:
imgui.text_colored(C_LBL, f" Cache Read: {usage['cache_read_input_tokens']:,} Creation: {usage['cache_creation_input_tokens']:,}")
imgui.text("Token Budget:")
imgui.progress_bar(self._token_budget_pct, imgui.ImVec2(-1, 0), f"{self._token_budget_current:,} / {self._token_budget_limit:,}")
if self._gemini_cache_text:
imgui.text_colored(C_SUB, self._gemini_cache_text)
imgui.end()
# ---- Message
if self.show_windows["Message"]:
exp, self.show_windows["Message"] = imgui.begin("Message", self.show_windows["Message"])
if exp:
# LIVE indicator
is_live = self.ai_status in ["running powershell...", "fetching url...", "searching web...", "powershell done, awaiting AI..."]
if is_live:
val = math.sin(time.time() * 10 * math.pi)
alpha = 1.0 if val > 0 else 0.0
imgui.text_colored(imgui.ImVec4(0.39, 1.0, 0.39, alpha), "LIVE")
imgui.separator()
ch, self.ui_ai_input = imgui.input_text_multiline("##ai_in", self.ui_ai_input, imgui.ImVec2(-1, -40))
imgui.separator()
if imgui.button("Gen + Send"):
@@ -869,14 +1125,20 @@ class App:
base_dir = self.ui_files_base_dir
csp = filter(bool, [self.ui_global_system_prompt.strip(), self.ui_project_system_prompt.strip()])
ai_client.set_custom_system_prompt("\n\n".join(csp))
ai_client.set_model_params(self.temperature, self.max_tokens, self.history_trunc_limit)
ai_client.set_agent_tools(self.ui_agent_tools)
# For Gemini: send stable_md (no history) as cached context,
# and disc_text separately as conversation history.
# For Anthropic: send full md (with history) as before.
send_md = stable_md # No history in cached context for either provider
send_disc = disc_text
def do_send():
if self.ui_auto_add_history:
with self._pending_history_adds_lock:
self._pending_history_adds.append({"role": "User", "content": user_msg, "collapsed": False, "ts": project_manager.now_ts()})
try:
resp = ai_client.send(self.last_md, user_msg, base_dir, self.last_file_items)
resp = ai_client.send(send_md, user_msg, base_dir, self.last_file_items, send_disc)
self.ai_response = resp
self.ai_status = "done"
self._trigger_blink = True
@@ -1178,6 +1440,67 @@ class App:
if ch: theme.set_scale(scale)
imgui.end()
# ---- Diagnostics
if self.show_windows["Diagnostics"]:
exp, self.show_windows["Diagnostics"] = imgui.begin("Diagnostics", self.show_windows["Diagnostics"])
if exp:
now = time.time()
if now - self._perf_last_update >= 0.5:
self._perf_last_update = now
metrics = self.perf_monitor.get_metrics()
self.perf_history["frame_time"].pop(0)
self.perf_history["frame_time"].append(metrics.get("last_frame_time_ms", 0.0))
self.perf_history["fps"].pop(0)
self.perf_history["fps"].append(metrics.get("fps", 0.0))
self.perf_history["cpu"].pop(0)
self.perf_history["cpu"].append(metrics.get("cpu_percent", 0.0))
self.perf_history["input_lag"].pop(0)
self.perf_history["input_lag"].append(metrics.get("input_lag_ms", 0.0))
metrics = self.perf_monitor.get_metrics()
imgui.text("Performance Telemetry")
imgui.separator()
if imgui.begin_table("perf_table", 2, imgui.TableFlags_.borders_inner_h):
imgui.table_setup_column("Metric")
imgui.table_setup_column("Value")
imgui.table_headers_row()
imgui.table_next_row()
imgui.table_next_column()
imgui.text("FPS")
imgui.table_next_column()
imgui.text(f"{metrics.get('fps', 0.0):.1f}")
imgui.table_next_row()
imgui.table_next_column()
imgui.text("Frame Time (ms)")
imgui.table_next_column()
imgui.text(f"{metrics.get('last_frame_time_ms', 0.0):.2f}")
imgui.table_next_row()
imgui.table_next_column()
imgui.text("CPU %")
imgui.table_next_column()
imgui.text(f"{metrics.get('cpu_percent', 0.0):.1f}")
imgui.table_next_row()
imgui.table_next_column()
imgui.text("Input Lag (ms)")
imgui.table_next_column()
imgui.text(f"{metrics.get('input_lag_ms', 0.0):.1f}")
imgui.end_table()
imgui.separator()
imgui.text("Frame Time (ms)")
imgui.plot_lines("##ft_plot", np.array(self.perf_history["frame_time"], dtype=np.float32), overlay_text="frame_time", graph_size=imgui.ImVec2(-1, 60))
imgui.text("CPU %")
imgui.plot_lines("##cpu_plot", np.array(self.perf_history["cpu"], dtype=np.float32), overlay_text="cpu", graph_size=imgui.ImVec2(-1, 60))
imgui.end()
self.perf_monitor.end_frame()
# ---- Modals / Popups
with self._pending_dialog_lock:
dlg = self._pending_dialog
@@ -1296,17 +1619,26 @@ class App:
self.runner_params.app_window_params.window_geometry.size = (1680, 1200)
self.runner_params.imgui_window_params.enable_viewports = True
self.runner_params.imgui_window_params.default_imgui_window_type = hello_imgui.DefaultImGuiWindowType.provide_full_screen_dock_space
self.runner_params.imgui_window_params.show_menu_bar = True
self.runner_params.ini_folder_type = hello_imgui.IniFolderType.current_folder
self.runner_params.ini_filename = "manualslop_layout.ini"
self.runner_params.callbacks.show_gui = self._gui_func
self.runner_params.callbacks.show_menus = self._show_menus
self.runner_params.callbacks.load_additional_fonts = self._load_fonts
self.runner_params.callbacks.post_init = self._post_init
self._fetch_models(self.current_provider)
# Start API hooks server (if enabled)
self.hook_server = api_hooks.HookServer(self)
self.hook_server.start()
immapp.run(self.runner_params)
# On exit
self.hook_server.stop()
self.perf_monitor.stop()
ai_client.cleanup() # Destroy active API caches to stop billing
self._flush_to_project()
self._save_active_project()
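`ai_client.cleanup()` itself isn't shown in this commit's hunks; a plausible sketch of what "destroy active API caches" means for the Gemini path, reusing the delete-and-log pattern visible in the ai_client diff above (not the actual body):

```python
def cleanup():
    """Best-effort teardown on exit: delete the server-side Gemini cache so it
    stops accruing storage billing (sketch; the real body may cover more state)."""
    global _gemini_chat, _gemini_cache, _gemini_cache_created_at
    if _gemini_cache:
        try:
            _gemini_client.caches.delete(name=_gemini_cache.name)
        except Exception as e:
            _append_comms("OUT", "request", {"message": f"[CACHE DELETE WARN] {e}"})
        _gemini_cache = None
    _gemini_chat = None
    _gemini_cache_created_at = None
```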
+28 -128
File diff suppressed because one or more lines are too long
+69 -63
@@ -8,100 +8,106 @@ Size=400,400
Collapsed=0
[Window][Projects]
Pos=209,396
Size=387,337
ViewportPos=43,95
ViewportId=0x78C57832
Size=897,649
Collapsed=0
DockId=0x00000014,0
DockId=0x0000000D,0
[Window][Files]
Pos=0,0
Size=207,1200
ViewportPos=3125,170
ViewportId=0x26D64416
Size=593,581
Collapsed=0
DockId=0x00000011,0
DockId=0x00000009,0
[Window][Screenshots]
Pos=209,0
Size=387,171
Collapsed=0
DockId=0x00000015,0
[Window][Discussion History]
Pos=598,128
Size=712,619
Collapsed=0
DockId=0x0000000E,0
[Window][Provider]
Pos=209,913
Size=387,287
ViewportPos=3125,170
ViewportId=0x26D64416
Pos=0,583
Size=593,574
Collapsed=0
DockId=0x0000000A,0
[Window][Message]
Pos=598,749
Size=712,451
[Window][Discussion History]
Pos=0,17
Size=1680,730
Collapsed=0
DockId=0x0000000C,0
[Window][Response]
Pos=209,735
Size=387,176
[Window][Provider]
ViewportPos=43,95
ViewportId=0x78C57832
Pos=0,651
Size=897,468
Collapsed=0
DockId=0x00000010,0
DockId=0x0000000E,0
[Window][Message]
Pos=0,749
Size=1680,451
Collapsed=0
DockId=0x0000000F,0
[Window][Response]
Pos=0,749
Size=1680,451
Collapsed=0
DockId=0x0000000F,1
[Window][Tool Calls]
Pos=1312,733
Size=368,144
ViewportPos=43,95
ViewportId=0x78C57832
Pos=0,1121
Size=897,775
Collapsed=0
DockId=0x00000008,0
DockId=0x00000001,1
[Window][Comms History]
Pos=1312,879
Size=368,321
ViewportPos=43,95
ViewportId=0x78C57832
Pos=0,1121
Size=897,775
Collapsed=0
DockId=0x00000006,0
DockId=0x00000001,0
[Window][System Prompts]
Pos=1312,0
Size=368,731
Pos=0,749
Size=1680,451
Collapsed=0
DockId=0x00000007,0
DockId=0x0000000F,2
[Window][Theme]
Pos=209,173
Size=387,221
ViewportPos=43,95
ViewportId=0x78C57832
Size=897,649
Collapsed=0
DockId=0x00000016,0
DockId=0x0000000D,2
[Window][Text Viewer - Entry #7]
Pos=379,324
Size=900,700
Collapsed=0
[Window][Diagnostics]
ViewportPos=43,95
ViewportId=0x78C57832
Size=897,649
Collapsed=0
DockId=0x0000000D,1
[Docking][Data]
DockSpace ID=0xAFC85805 Window=0x079D3A04 Pos=138,161 Size=1680,1200 Split=X
DockNode ID=0x00000011 Parent=0xAFC85805 SizeRef=207,1200 Selected=0x0469CA7A
DockNode ID=0x00000012 Parent=0xAFC85805 SizeRef=1559,1200 Split=X
DockNode ID=0x00000003 Parent=0x00000012 SizeRef=1189,1200 Split=X
DockNode ID=0x00000001 Parent=0x00000003 SizeRef=387,1200 Split=Y Selected=0x8CA2375C
DockNode ID=0x00000009 Parent=0x00000001 SizeRef=405,911 Split=Y Selected=0x8CA2375C
DockNode ID=0x0000000F Parent=0x00000009 SizeRef=405,733 Split=Y Selected=0x8CA2375C
DockNode ID=0x00000013 Parent=0x0000000F SizeRef=405,394 Split=Y Selected=0x8CA2375C
DockNode ID=0x00000015 Parent=0x00000013 SizeRef=405,171 Selected=0xDF822E02
DockNode ID=0x00000016 Parent=0x00000013 SizeRef=405,221 Selected=0x8CA2375C
DockNode ID=0x00000014 Parent=0x0000000F SizeRef=405,337 Selected=0xDA22FEDA
DockNode ID=0x00000010 Parent=0x00000009 SizeRef=405,176 Selected=0x0D5A5273
DockNode ID=0x0000000A Parent=0x00000001 SizeRef=405,287 Selected=0xA07B5F14
DockNode ID=0x00000002 Parent=0x00000003 SizeRef=800,1200 Split=Y
DockNode ID=0x0000000B Parent=0x00000002 SizeRef=1010,747 Split=Y
DockNode ID=0x0000000D Parent=0x0000000B SizeRef=1010,126 CentralNode=1
DockNode ID=0x0000000E Parent=0x0000000B SizeRef=1010,619 Selected=0x5D11106F
DockNode ID=0x0000000C Parent=0x00000002 SizeRef=1010,451 Selected=0x66CFB56E
DockNode ID=0x00000004 Parent=0x00000012 SizeRef=368,1200 Split=Y Selected=0xDD6419BC
DockNode ID=0x00000005 Parent=0x00000004 SizeRef=261,877 Split=Y Selected=0xDD6419BC
DockNode ID=0x00000007 Parent=0x00000005 SizeRef=261,731 Selected=0xDD6419BC
DockNode ID=0x00000008 Parent=0x00000005 SizeRef=261,144 Selected=0x1D56B311
DockNode ID=0x00000006 Parent=0x00000004 SizeRef=261,321 Selected=0x8B4EBFA6
DockNode ID=0x00000007 Pos=43,95 Size=897,1896 Split=Y
DockNode ID=0x00000002 Parent=0x00000007 SizeRef=1029,1119 Split=Y
DockNode ID=0x0000000D Parent=0x00000002 SizeRef=1029,649 Selected=0xB4CBF21A
DockNode ID=0x0000000E Parent=0x00000002 SizeRef=1029,468 Selected=0xA07B5F14
DockNode ID=0x00000001 Parent=0x00000007 SizeRef=1029,775 Selected=0x8B4EBFA6
DockNode ID=0x00000008 Pos=3125,170 Size=593,1157 Split=Y
DockNode ID=0x00000009 Parent=0x00000008 SizeRef=1029,147 Selected=0x0469CA7A
DockNode ID=0x0000000A Parent=0x00000008 SizeRef=1029,145 Selected=0xDF822E02
DockSpace ID=0xAFC85805 Window=0x079D3A04 Pos=1052,572 Size=1680,1183 Split=Y
DockNode ID=0x0000000C Parent=0xAFC85805 SizeRef=1362,1041 CentralNode=1 Selected=0x5D11106F
DockNode ID=0x0000000F Parent=0xAFC85805 SizeRef=1362,451 Selected=0xDD6419BC
;;;<<<Layout_655921752_Default>>>;;;
;;;<<<HelloImGui_Misc>>>;;;
@@ -111,6 +117,6 @@ Name=Default
Show=false
ShowFps=true
[Theme]
Name=DarculaDarker
Name=SoDark_AccentRed
;;;<<<SplitIds>>>;;;
{"gImGuiSplitIDs":{"MainDockSpace":2949142533}}
+5 -2
@@ -4,6 +4,7 @@ git_dir = "C:\\projects\\manual_slop"
system_prompt = ""
main_context = ""
word_wrap = true
summary_only = false
[output]
output_dir = "./md_gen"
@@ -37,5 +38,7 @@ auto_add = true
[discussion.discussions.main]
git_commit = ""
last_updated = "2026-02-23T19:53:17"
history = []
last_updated = "2026-02-23T20:56:57"
history = [
"@2026-02-23T20:12:12\nSystem:\n[PERFORMANCE ALERT] CPU usage high: 121.9%. Please consider optimizing recent changes or reducing load.",
]
+78
@@ -0,0 +1,78 @@
import pytest
from unittest.mock import patch, MagicMock
from gui_2 import App
import ai_client
from events import EventEmitter
@pytest.fixture
def app_instance():
if not hasattr(ai_client, 'events') or ai_client.events is None:
ai_client.events = EventEmitter()
with (
patch('gui_2.load_config', return_value={'ai': {}, 'projects': {}}),
patch('gui_2.save_config'),
patch('gui_2.project_manager'),
patch('gui_2.session_logger'),
patch('gui_2.immapp.run'),
patch.object(App, '_load_active_project'),
patch.object(App, '_fetch_models'),
patch.object(App, '_load_fonts'),
patch.object(App, '_post_init')
):
yield App()
def test_mcp_tool_call_is_dispatched(app_instance):
"""
This test verifies that when the AI returns a tool call for an MCP function,
the ai_client correctly dispatches it to mcp_client.
This will fail until mcp_client is properly integrated.
"""
# 1. Define the mock tool call from the AI
mock_fc = MagicMock()
mock_fc.name = "read_file"
mock_fc.args = {"file_path": "test.txt"}
# 2. Construct the mock AI response (Gemini format)
mock_response_with_tool = MagicMock()
mock_part = MagicMock()
mock_part.function_call = mock_fc
mock_candidate = MagicMock()
mock_candidate.content.parts = [mock_part]
mock_candidate.finish_reason.name = "TOOL_CALLING"
mock_response_with_tool.candidates = [mock_candidate]
mock_usage_metadata = MagicMock()
mock_usage_metadata.prompt_token_count = 100
mock_usage_metadata.candidates_token_count = 10
mock_usage_metadata.cached_content_token_count = 0
mock_response_with_tool.usage_metadata = mock_usage_metadata
# 3. Create a mock for the final AI response after the tool call
mock_response_final = MagicMock()
mock_response_final.text = "Final answer"
mock_response_final.candidates = []
mock_response_final.usage_metadata = mock_usage_metadata
# 4. Patch the necessary components
with patch("ai_client._ensure_gemini_client"), \
patch("ai_client._gemini_client"), \
patch("ai_client._gemini_chat") as mock_chat, \
patch('mcp_client.dispatch', return_value="file content") as mock_dispatch:
mock_chat.send_message.side_effect = [mock_response_with_tool, mock_response_final]
ai_client._gemini_chat = mock_chat
ai_client.set_provider("gemini", "mock-model")
# 5. Call the send function
ai_client.send(
md_content="some context",
user_message="read the file",
base_dir=".",
file_items=[],
discussion_history=""
)
# 6. Assert that the MCP dispatch function was called
mock_dispatch.assert_called_once_with("read_file", {"file_path": "test.txt"})
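For reference, the `EventEmitter` contract this test (and gui_2.py's subscriptions) relies on can be inferred from usage in the diffs: `events.on(name, handler)` to subscribe, `events.emit(name, payload=...)` to fan out keyword arguments. A minimal compatible sketch; the real events.py may differ:

```python
class EventEmitter:
    """Tiny pub/sub matching the usage seen in this commit: handlers are
    registered per event name and invoked with the emitter's keyword args."""

    def __init__(self):
        self._handlers: dict[str, list] = {}

    def on(self, name: str, handler) -> None:
        self._handlers.setdefault(name, []).append(handler)

    def emit(self, name: str, **kwargs) -> None:
        for handler in self._handlers.get(name, []):
            handler(**kwargs)   # e.g. payload={"provider": "gemini", ...}

# Usage mirroring gui_2.py / ai_client:
# emitter = EventEmitter()
# emitter.on("response_received", lambda **kw: print(kw["payload"]))
# emitter.emit("response_received", payload={"usage": {}})
```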