diff --git a/ai_client.py b/ai_client.py
index 4ca350d..89bb000 100644
--- a/ai_client.py
+++ b/ai_client.py
@@ -405,137 +405,55 @@ def _ensure_gemini_client():
def _send_gemini(md_content: str, user_message: str, base_dir: str, file_items: list[dict] | None = None) -> str:
global _gemini_chat
- from google import genai
from google.genai import types
-
try:
- _ensure_gemini_client()
- mcp_client.configure(file_items or [], [base_dir])
+ _ensure_gemini_client(); mcp_client.configure(file_items or [], [base_dir])
+ sys_instr = f"{_get_combined_system_prompt()}\n\n\n{md_content}\n"
+ if not _gemini_chat:
+ _gemini_chat = _gemini_client.chats.create(model=_model, config=types.GenerateContentConfig(system_instruction=sys_instr, tools=[_gemini_tool_declaration()]))
+
+ _append_comms("OUT", "request", {"message": f"[ctx {len(md_content)} + msg {len(user_message)}]"})
+ payload, all_text = user_message, []
+
+ for r_idx in range(MAX_TOOL_ROUNDS + 2):
+ resp = _gemini_chat.send_message(payload)
+ txt = "\n".join(p.text for c in resp.candidates for p in c.content.parts if hasattr(p, "text") and p.text)
+ if txt: all_text.append(txt)
+
+ calls = [p.function_call for c in resp.candidates for p in c.content.parts if hasattr(p, "function_call") and p.function_call]
+ usage = {"input_tokens": getattr(resp.usage_metadata, "prompt_token_count", 0), "output_tokens": getattr(resp.usage_metadata, "candidates_token_count", 0)}
+ reason = resp.candidates[0].finish_reason.name if resp.candidates and hasattr(resp.candidates[0], "finish_reason") else "STOP"
+
+ _append_comms("IN", "response", {"round": r_idx, "stop_reason": reason, "text": txt, "tool_calls": [{"name": c.name, "args": dict(c.args)} for c in calls], "usage": usage})
+ if not calls or r_idx > MAX_TOOL_ROUNDS: break
- system_text = _get_combined_system_prompt() + f"\n\n\n{md_content}\n"
- if _gemini_chat is None:
- _gemini_chat = _gemini_client.chats.create(
- model=_model,
- config=types.GenerateContentConfig(
- system_instruction=system_text,
- tools=[_gemini_tool_declaration()]
- )
- )
- else:
- _gemini_chat = _gemini_client.chats.create(
- model=_model,
- config=types.GenerateContentConfig(
- system_instruction=system_text,
- tools=[_gemini_tool_declaration()]
- ),
- history=_gemini_chat.get_history()
- )
+ f_resps, log = [], []
+ for i, fc in enumerate(calls):
+ name, args = fc.name, dict(fc.args)
+ if name in mcp_client.TOOL_NAMES:
+ _append_comms("OUT", "tool_call", {"name": name, "args": args})
+ out = mcp_client.dispatch(name, args)
+ elif name == TOOL_NAME:
+ scr = args.get("script", "")
+ _append_comms("OUT", "tool_call", {"name": TOOL_NAME, "script": scr})
+ out = _run_script(scr, base_dir)
+ else: out = f"ERROR: unknown tool '{name}'"
- payload_to_send = user_message
-
- _append_comms("OUT", "request", {
- "message": f"[context {len(md_content)} chars + user message {len(user_message)} chars]",
- })
-
- all_text_parts = []
-
- # We allow MAX_TOOL_ROUNDS, plus 1 final loop to get the text synthesis
- for round_idx in range(MAX_TOOL_ROUNDS + 2):
- response = _gemini_chat.send_message(payload_to_send)
-
- text_parts_raw = [
- part.text
- for candidate in response.candidates
- for part in candidate.content.parts
- if hasattr(part, "text") and part.text
- ]
- if text_parts_raw:
- all_text_parts.append("\n".join(text_parts_raw))
-
- tool_calls = [
- part.function_call
- for candidate in response.candidates
- for part in candidate.content.parts
- if hasattr(part, "function_call") and part.function_call is not None
- ]
-
- usage_dict = {}
- if hasattr(response, "usage_metadata") and response.usage_metadata:
- meta = response.usage_metadata
- if hasattr(meta, "prompt_token_count") and meta.prompt_token_count is not None:
- usage_dict["input_tokens"] = meta.prompt_token_count
- if hasattr(meta, "candidates_token_count") and meta.candidates_token_count is not None:
- usage_dict["output_tokens"] = meta.candidates_token_count
- if hasattr(meta, "cached_content_token_count") and meta.cached_content_token_count:
- usage_dict["cache_read_input_tokens"] = meta.cached_content_token_count
-
- stop_reason = ""
- if response.candidates and hasattr(response.candidates[0], "finish_reason"):
- fr = response.candidates[0].finish_reason
- stop_reason = str(fr.name) if hasattr(fr, "name") else str(fr)
-
- _append_comms("IN", "response", {
- "round": round_idx,
- "stop_reason": stop_reason,
- "text": "\n".join(text_parts_raw),
- "tool_calls": [{"name": fc.name, "args": dict(fc.args)} for fc in tool_calls],
- "usage": usage_dict,
- })
-
- if not tool_calls:
- break
-
- if round_idx > MAX_TOOL_ROUNDS:
- # The model ignored the MAX ROUNDS warning and kept calling tools.
- # Force abort to prevent infinite loop.
- break
-
- function_responses = []
- sent_results_log = []
-
- for i, fc in enumerate(tool_calls):
- fc_name = fc.name
- fc_args = dict(fc.args)
-
- if fc_name in mcp_client.TOOL_NAMES:
- _append_comms("OUT", "tool_call", {"name": fc_name, "args": fc_args})
- output = mcp_client.dispatch(fc_name, fc_args)
- _append_comms("IN", "tool_result", {"name": fc_name, "output": output})
- elif fc_name == TOOL_NAME:
- script = fc_args.get("script", "")
- _append_comms("OUT", "tool_call", {"name": TOOL_NAME, "script": script})
- output = _run_script(script, base_dir)
- _append_comms("IN", "tool_result", {"name": TOOL_NAME, "output": output})
- else:
- output = f"ERROR: unknown tool '{fc_name}'"
-
- # Inject dynamic updates directly into the LAST tool's output string.
- # Gemini strictly expects function_responses only, so we piggyback on the string.
- if i == len(tool_calls) - 1:
+ if i == len(calls) - 1:
if file_items:
- file_items = _reread_file_items(file_items)
- refreshed_ctx = _build_file_context_text(file_items)
- if refreshed_ctx:
- output += f"\n\n[SYSTEM: FILES UPDATED — current contents below. Do NOT re-read these files.]\n\n{refreshed_ctx}"
-
- if round_idx == MAX_TOOL_ROUNDS:
- output += "\n\n[SYSTEM WARNING: MAX TOOL ROUNDS REACHED. YOU MUST PROVIDE YOUR FINAL ANSWER NOW WITHOUT CALLING ANY MORE TOOLS.]"
+ ctx = _build_file_context_text(_reread_file_items(file_items))
+ if ctx: out += f"\n\n[SYSTEM: FILES UPDATED]\n\n{ctx}"
+ if r_idx == MAX_TOOL_ROUNDS: out += "\n\n[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]"
+
+ f_resps.append(types.Part.from_function_response(name=name, response={"output": out}))
+ log.append({"tool_use_id": name, "content": out})
+
+ _append_comms("OUT", "tool_result_send", {"results": log})
+ payload = f_resps
+
+ return "\n\n".join(all_text) if all_text else "(No text returned)"
+ except Exception as e: raise _classify_gemini_error(e) from e
- function_responses.append(
- types.Part.from_function_response(name=fc_name, response={"output": output})
- )
- sent_results_log.append({"tool_use_id": fc_name, "content": output})
-
- _append_comms("OUT", "tool_result_send", {"results": sent_results_log})
- payload_to_send = function_responses
-
- final_text = "\n\n".join(all_text_parts)
- return final_text if final_text.strip() else "(No text returned by the model)"
-
- except ProviderError:
- raise
- except Exception as exc:
- raise _classify_gemini_error(exc) from exc
diff --git a/config.toml b/config.toml
index 2971b8f..7b3b0d1 100644
--- a/config.toml
+++ b/config.toml
@@ -1,6 +1,6 @@
[ai]
provider = "gemini"
-model = "gemini-3-flash-preview"
+model = "gemini-3.1-pro-preview"
system_prompt = "DO NOT EVER make a shell script unless told to. DO NOT EVER make a readme or a file describing your changes unless your are told to. If you have commands I should be entering into the command line or if you have something to explain to me, please just use code blocks or normal text output. DO NOT DO ANYTHING OTHER THAN WHAT YOU WERE TOLD TODO. DO NOT EVER, EVER DO ANYTHING OTHER THAN WHAT YOU WERE TOLD TO DO. IF YOU WANT TO DO OTHER THINGS, SIMPLY SUGGEST THEM, AND THEN I WILL REVIEW YOUR CHANGES, AND MAKE THE DECISION ON HOW TO PROCEED. WHEN WRITING SCRIPTS USE A 120-160 character limit per line. I don't want to see scrunched code.\n"
[theme]
diff --git a/gui_2.py b/gui_2.py
new file mode 100644
index 0000000..a09cf80
--- /dev/null
+++ b/gui_2.py
@@ -0,0 +1,1277 @@
+# gui_2.py
+import tomli_w
+import threading
+import time
+import math
+import json
+from pathlib import Path
+from tkinter import filedialog, Tk
+import aggregate
+import ai_client
+from ai_client import ProviderError
+import shell_runner
+import session_logger
+import project_manager
+import theme_2 as theme
+import tomllib
+
+from imgui_bundle import imgui, hello_imgui, immapp
+
+CONFIG_PATH = Path("config.toml")
+PROVIDERS = ["gemini", "anthropic"]
+COMMS_CLAMP_CHARS = 300
+
+def load_config() -> dict:
+ with open(CONFIG_PATH, "rb") as f:
+ return tomllib.load(f)
+
+def save_config(config: dict):
+ with open(CONFIG_PATH, "wb") as f:
+ tomli_w.dump(config, f)
+
+def hide_tk_root() -> Tk:
+ root = Tk()
+ root.withdraw()
+ root.wm_attributes("-topmost", True)
+ return root
+
+# Color Helpers
+def vec4(r, g, b, a=1.0): return imgui.ImVec4(r/255, g/255, b/255, a)
+
+C_OUT = vec4(100, 200, 255)
+C_IN = vec4(140, 255, 160)
+C_REQ = vec4(255, 220, 100)
+C_RES = vec4(180, 255, 180)
+C_TC = vec4(255, 180, 80)
+C_TR = vec4(180, 220, 255)
+C_TRS = vec4(200, 180, 255)
+C_LBL = vec4(180, 180, 180)
+C_VAL = vec4(220, 220, 220)
+C_KEY = vec4(140, 200, 255)
+C_NUM = vec4(180, 255, 180)
+C_SUB = vec4(220, 200, 120)
+
+DIR_COLORS = {"OUT": C_OUT, "IN": C_IN}
+KIND_COLORS = {"request": C_REQ, "response": C_RES, "tool_call": C_TC, "tool_result": C_TR, "tool_result_send": C_TRS}
+HEAVY_KEYS = {"message", "text", "script", "output", "content"}
+
+DISC_ROLES = ["User", "AI", "Vendor API", "System"]
+
+def _parse_history_entries(history: list[str], roles: list[str] | None = None) -> list[dict]:
+ known = roles if roles is not None else DISC_ROLES
+ entries = []
+ for raw in history:
+ entries.append(project_manager.str_to_entry(raw, known))
+ return entries
+
+class ConfirmDialog:
+ _next_id = 0
+ def __init__(self, script: str, base_dir: str):
+ ConfirmDialog._next_id += 1
+ self._uid = ConfirmDialog._next_id
+ self._script = str(script) if script is not None else ""
+ self._base_dir = str(base_dir) if base_dir is not None else ""
+ self._event = threading.Event()
+ self._approved = False
+
+ def wait(self) -> tuple[bool, str]:
+ self._event.wait()
+ return self._approved, self._script
+
+class App:
+ def __init__(self):
+ self.config = load_config()
+
+ ai_cfg = self.config.get("ai", {})
+ self.current_provider: str = ai_cfg.get("provider", "gemini")
+ self.current_model: str = ai_cfg.get("model", "gemini-2.0-flash")
+ self.available_models: list[str] = []
+
+ projects_cfg = self.config.get("projects", {})
+ self.project_paths: list[str] = list(projects_cfg.get("paths", []))
+ self.active_project_path: str = projects_cfg.get("active", "")
+
+ self.project: dict = {}
+ self.active_discussion: str = "main"
+
+ self._load_active_project()
+
+ # Project-derived state
+ self.files: list[str] = list(self.project.get("files", {}).get("paths", []))
+ self.screenshots: list[str] = list(self.project.get("screenshots", {}).get("paths", []))
+
+ disc_sec = self.project.get("discussion", {})
+ self.disc_roles: list[str] = list(disc_sec.get("roles", list(DISC_ROLES)))
+ self.active_discussion = disc_sec.get("active", "main")
+
+ disc_data = disc_sec.get("discussions", {}).get(self.active_discussion, {})
+ self.disc_entries: list[dict] = _parse_history_entries(disc_data.get("history", []), self.disc_roles)
+
+ # UI State Variables
+ self.ui_output_dir = self.project.get("output", {}).get("output_dir", "./md_gen")
+ self.ui_files_base_dir = self.project.get("files", {}).get("base_dir", ".")
+ self.ui_shots_base_dir = self.project.get("screenshots", {}).get("base_dir", ".")
+ proj_meta = self.project.get("project", {})
+ self.ui_project_git_dir = proj_meta.get("git_dir", "")
+ self.ui_project_main_context = proj_meta.get("main_context", "")
+ self.ui_project_system_prompt = proj_meta.get("system_prompt", "")
+ self.ui_word_wrap = proj_meta.get("word_wrap", True)
+ self.ui_auto_add_history = disc_sec.get("auto_add", False)
+
+ self.ui_global_system_prompt = self.config.get("ai", {}).get("system_prompt", "")
+ self.ui_ai_input = ""
+ self.ui_disc_new_name_input = ""
+ self.ui_disc_new_role_input = ""
+
+ # Last Script popup variables
+ self.ui_last_script_text = ""
+ self.ui_last_script_output = ""
+
+ self.ai_status = "idle"
+ self.ai_response = ""
+ self.last_md = ""
+ self.last_md_path: Path | None = None
+ self.last_file_items: list = []
+
+ self.send_thread: threading.Thread | None = None
+ self.models_thread: threading.Thread | None = None
+
+ self.show_windows = {
+ "Projects": True,
+ "Files": True,
+ "Screenshots": True,
+ "Discussion History": True,
+ "Provider": True,
+ "Message": True,
+ "Response": True,
+ "Tool Calls": True,
+ "Comms History": True,
+ "System Prompts": True,
+ "Theme": True,
+ }
+ self.show_script_output = False
+ self.show_text_viewer = False
+ self.text_viewer_title = ""
+ self.text_viewer_content = ""
+
+ self._pending_dialog: ConfirmDialog | None = None
+ self._pending_dialog_open = False
+ self._pending_dialog_lock = threading.Lock()
+
+ self._tool_log: list[tuple[str, str]] = []
+ self._comms_log: list[dict] = []
+
+ self._pending_comms: list[dict] = []
+ self._pending_comms_lock = threading.Lock()
+
+ self._pending_history_adds: list[dict] = []
+ self._pending_history_adds_lock = threading.Lock()
+
+ # Blinking
+ self._trigger_blink = False
+ self._is_blinking = False
+ self._blink_start_time = 0.0
+
+ self._trigger_script_blink = False
+ self._is_script_blinking = False
+ self._script_blink_start_time = 0.0
+
+ session_logger.open_session()
+ ai_client.set_provider(self.current_provider, self.current_model)
+ ai_client.confirm_and_run_callback = self._confirm_and_run
+ ai_client.comms_log_callback = self._on_comms_entry
+ ai_client.tool_log_callback = self._on_tool_log
+
+ # ---------------------------------------------------------------- project loading
+
+ def _load_active_project(self):
+ if self.active_project_path and Path(self.active_project_path).exists():
+ try:
+ self.project = project_manager.load_project(self.active_project_path)
+ return
+ except Exception as e:
+ print(f"Failed to load project {self.active_project_path}: {e}")
+
+ for pp in self.project_paths:
+ if Path(pp).exists():
+ try:
+ self.project = project_manager.load_project(pp)
+ self.active_project_path = pp
+ return
+ except Exception:
+ continue
+
+ self.project = project_manager.migrate_from_legacy_config(self.config)
+ name = self.project.get("project", {}).get("name", "project")
+ fallback_path = f"{name}.toml"
+ project_manager.save_project(self.project, fallback_path)
+ self.active_project_path = fallback_path
+ if fallback_path not in self.project_paths:
+ self.project_paths.append(fallback_path)
+
+ def _switch_project(self, path: str):
+ if not Path(path).exists():
+ self.ai_status = f"project file not found: {path}"
+ return
+
+ self._flush_to_project()
+ self._save_active_project()
+
+ try:
+ self.project = project_manager.load_project(path)
+ self.active_project_path = path
+ except Exception as e:
+ self.ai_status = f"failed to load project: {e}"
+ return
+
+ self._refresh_from_project()
+ ai_client.reset_session()
+ self.ai_status = f"switched to: {Path(path).stem}"
+
+ def _refresh_from_project(self):
+ self.files = list(self.project.get("files", {}).get("paths", []))
+ self.screenshots = list(self.project.get("screenshots", {}).get("paths", []))
+
+ disc_sec = self.project.get("discussion", {})
+ self.disc_roles = list(disc_sec.get("roles", list(DISC_ROLES)))
+ self.active_discussion = disc_sec.get("active", "main")
+
+ disc_data = disc_sec.get("discussions", {}).get(self.active_discussion, {})
+ self.disc_entries = _parse_history_entries(disc_data.get("history", []), self.disc_roles)
+
+ proj = self.project
+ self.ui_output_dir = proj.get("output", {}).get("output_dir", "./md_gen")
+ self.ui_files_base_dir = proj.get("files", {}).get("base_dir", ".")
+ self.ui_shots_base_dir = proj.get("screenshots", {}).get("base_dir", ".")
+ self.ui_project_git_dir = proj.get("project", {}).get("git_dir", "")
+ self.ui_project_system_prompt = proj.get("project", {}).get("system_prompt", "")
+ self.ui_project_main_context = proj.get("project", {}).get("main_context", "")
+ self.ui_auto_add_history = proj.get("discussion", {}).get("auto_add", False)
+ self.ui_word_wrap = proj.get("project", {}).get("word_wrap", True)
+
+ def _save_active_project(self):
+ if self.active_project_path:
+ try:
+ project_manager.save_project(self.project, self.active_project_path)
+ except Exception as e:
+ self.ai_status = f"save error: {e}"
+
+ # ---------------------------------------------------------------- discussion management
+
+ def _get_discussion_names(self) -> list[str]:
+ disc_sec = self.project.get("discussion", {})
+ discussions = disc_sec.get("discussions", {})
+ return sorted(discussions.keys())
+
+ def _switch_discussion(self, name: str):
+ self._flush_disc_entries_to_project()
+
+ disc_sec = self.project.get("discussion", {})
+ discussions = disc_sec.get("discussions", {})
+ if name not in discussions:
+ self.ai_status = f"discussion not found: {name}"
+ return
+
+ self.active_discussion = name
+ disc_sec["active"] = name
+
+ disc_data = discussions[name]
+ self.disc_entries = _parse_history_entries(disc_data.get("history", []), self.disc_roles)
+ self.ai_status = f"discussion: {name}"
+
+ def _flush_disc_entries_to_project(self):
+ history_strings = [project_manager.entry_to_str(e) for e in self.disc_entries]
+ disc_sec = self.project.setdefault("discussion", {})
+ discussions = disc_sec.setdefault("discussions", {})
+ disc_data = discussions.setdefault(self.active_discussion, project_manager.default_discussion())
+ disc_data["history"] = history_strings
+ disc_data["last_updated"] = project_manager.now_ts()
+
+ def _create_discussion(self, name: str):
+ disc_sec = self.project.setdefault("discussion", {})
+ discussions = disc_sec.setdefault("discussions", {})
+ if name in discussions:
+ self.ai_status = f"discussion '{name}' already exists"
+ return
+ discussions[name] = project_manager.default_discussion()
+ self._switch_discussion(name)
+
+ def _rename_discussion(self, old_name: str, new_name: str):
+ disc_sec = self.project.get("discussion", {})
+ discussions = disc_sec.get("discussions", {})
+ if old_name not in discussions:
+ return
+ if new_name in discussions:
+ self.ai_status = f"discussion '{new_name}' already exists"
+ return
+ discussions[new_name] = discussions.pop(old_name)
+ if self.active_discussion == old_name:
+ self.active_discussion = new_name
+ disc_sec["active"] = new_name
+
+ def _delete_discussion(self, name: str):
+ disc_sec = self.project.get("discussion", {})
+ discussions = disc_sec.get("discussions", {})
+ if len(discussions) <= 1:
+ self.ai_status = "cannot delete the last discussion"
+ return
+ if name not in discussions:
+ return
+ del discussions[name]
+ if self.active_discussion == name:
+ remaining = sorted(discussions.keys())
+ self._switch_discussion(remaining[0])
+
+ # ---------------------------------------------------------------- logic
+
+ def _on_comms_entry(self, entry: dict):
+ session_logger.log_comms(entry)
+ with self._pending_comms_lock:
+ self._pending_comms.append(entry)
+
+ def _on_tool_log(self, script: str, result: str):
+ session_logger.log_tool_call(script, result, None)
+
+ def _confirm_and_run(self, script: str, base_dir: str) -> str | None:
+ dialog = ConfirmDialog(script, base_dir)
+ with self._pending_dialog_lock:
+ self._pending_dialog = dialog
+
+ approved, final_script = dialog.wait()
+ if not approved:
+ self._append_tool_log(final_script, "REJECTED by user")
+ return None
+
+ self.ai_status = "running powershell..."
+ output = shell_runner.run_powershell(final_script, base_dir)
+ self._append_tool_log(final_script, output)
+ self.ai_status = "powershell done, awaiting AI..."
+ return output
+
+ def _append_tool_log(self, script: str, result: str):
+ self._tool_log.append((script, result))
+ self.ui_last_script_text = script
+ self.ui_last_script_output = result
+ self._trigger_script_blink = True
+ self.show_script_output = True
+
+ def _flush_to_project(self):
+ proj = self.project
+ proj.setdefault("output", {})["output_dir"] = self.ui_output_dir
+ proj.setdefault("files", {})["base_dir"] = self.ui_files_base_dir
+ proj["files"]["paths"] = self.files
+ proj.setdefault("screenshots", {})["base_dir"] = self.ui_shots_base_dir
+ proj["screenshots"]["paths"] = self.screenshots
+
+ proj.setdefault("project", {})
+ proj["project"]["git_dir"] = self.ui_project_git_dir
+ proj["project"]["system_prompt"] = self.ui_project_system_prompt
+ proj["project"]["main_context"] = self.ui_project_main_context
+ proj["project"]["word_wrap"] = self.ui_word_wrap
+
+ self._flush_disc_entries_to_project()
+ disc_sec = proj.setdefault("discussion", {})
+ disc_sec["roles"] = self.disc_roles
+ disc_sec["active"] = self.active_discussion
+ disc_sec["auto_add"] = self.ui_auto_add_history
+
+ def _flush_to_config(self):
+ self.config["ai"] = {"provider": self.current_provider, "model": self.current_model}
+ self.config["ai"]["system_prompt"] = self.ui_global_system_prompt
+ self.config["projects"] = {"paths": self.project_paths, "active": self.active_project_path}
+ theme.save_to_config(self.config)
+
+ def _do_generate(self) -> tuple[str, Path, list]:
+ self._flush_to_project()
+ self._save_active_project()
+ self._flush_to_config()
+ save_config(self.config)
+ flat = project_manager.flat_config(self.project, self.active_discussion)
+ return aggregate.run(flat)
+
+ def _fetch_models(self, provider: str):
+ self.ai_status = "fetching models..."
+ def do_fetch():
+ try:
+ models = ai_client.list_models(provider)
+ self.available_models = models
+ if self.current_model not in models and models:
+ self.current_model = models[0]
+ ai_client.set_provider(self.current_provider, self.current_model)
+ self.ai_status = f"models loaded: {len(models)}"
+ except Exception as e:
+ self.ai_status = f"model fetch error: {e}"
+ self.models_thread = threading.Thread(target=do_fetch, daemon=True)
+ self.models_thread.start()
+
+ # ---------------------------------------------------------------- helpers
+
+ def _render_text_viewer(self, label: str, content: str):
+ if imgui.button("[+]##" + str(id(content))):
+ self.show_text_viewer = True
+ self.text_viewer_title = label
+ self.text_viewer_content = content
+
+ def _render_heavy_text(self, label: str, content: str):
+ imgui.text_colored(C_LBL, f"{label}:")
+ imgui.same_line()
+ self._render_text_viewer(label, content)
+ if len(content) > COMMS_CLAMP_CHARS:
+ if self.ui_word_wrap:
+ imgui.push_text_wrap_pos(imgui.get_content_region_avail().x)
+ imgui.text(content)
+ imgui.pop_text_wrap_pos()
+ else:
+ imgui.input_text_multiline(f"##{id(content)}", content, imgui.ImVec2(-1, 80), imgui.InputTextFlags_.read_only)
+ else:
+ if self.ui_word_wrap:
+ imgui.push_text_wrap_pos(imgui.get_content_region_avail().x)
+ imgui.text(content if content else "(empty)")
+ imgui.pop_text_wrap_pos()
+ else:
+ imgui.text(content if content else "(empty)")
+
+ # ---------------------------------------------------------------- gui
+
+ def _gui_func(self):
+ # Sync pending comms
+ with self._pending_comms_lock:
+ for c in self._pending_comms:
+ self._comms_log.append(c)
+ self._pending_comms.clear()
+
+ with self._pending_history_adds_lock:
+ for item in self._pending_history_adds:
+ if item["role"] not in self.disc_roles:
+ self.disc_roles.append(item["role"])
+ self.disc_entries.append(item)
+ self._pending_history_adds.clear()
+
+ if imgui.begin_main_menu_bar():
+ if imgui.begin_menu("Windows"):
+ for w in self.show_windows.keys():
+ _, self.show_windows[w] = imgui.menu_item(w, "", self.show_windows[w])
+ imgui.end_menu()
+ if imgui.begin_menu("Project"):
+ if imgui.menu_item("Save All")[0]:
+ self._flush_to_project()
+ self._save_active_project()
+ self._flush_to_config()
+ save_config(self.config)
+ self.ai_status = "config saved"
+ if imgui.menu_item("Reset Session")[0]:
+ ai_client.reset_session()
+ ai_client.clear_comms_log()
+ self._tool_log.clear()
+ self._comms_log.clear()
+ self.ai_status = "session reset"
+ self.ai_response = ""
+ if imgui.menu_item("Generate MD Only")[0]:
+ try:
+ md, path, _ = self._do_generate()
+ self.last_md = md
+ self.last_md_path = path
+ self.ai_status = f"md written: {path.name}"
+ except Exception as e:
+ self.ai_status = f"error: {e}"
+ imgui.end_menu()
+ imgui.end_main_menu_bar()
+
+ # ---- Projects
+ if self.show_windows["Projects"]:
+ exp, self.show_windows["Projects"] = imgui.begin("Projects", self.show_windows["Projects"])
+ if exp:
+ proj_name = self.project.get("project", {}).get("name", Path(self.active_project_path).stem)
+ imgui.text_colored(C_IN, f"Active: {proj_name}")
+ imgui.separator()
+ imgui.text("Git Directory")
+ ch, self.ui_project_git_dir = imgui.input_text("##git_dir", self.ui_project_git_dir)
+ imgui.same_line()
+ if imgui.button("Browse##git"):
+ r = hide_tk_root()
+ d = filedialog.askdirectory(title="Select Git Directory")
+ r.destroy()
+ if d: self.ui_project_git_dir = d
+
+ imgui.separator()
+ imgui.text("Main Context File")
+ ch, self.ui_project_main_context = imgui.input_text("##main_ctx", self.ui_project_main_context)
+ imgui.same_line()
+ if imgui.button("Browse##ctx"):
+ r = hide_tk_root()
+ p = filedialog.askopenfilename(title="Select Main Context File")
+ r.destroy()
+ if p: self.ui_project_main_context = p
+
+ imgui.separator()
+ imgui.text("Output Dir")
+ ch, self.ui_output_dir = imgui.input_text("##out_dir", self.ui_output_dir)
+ imgui.same_line()
+ if imgui.button("Browse##out"):
+ r = hide_tk_root()
+ d = filedialog.askdirectory(title="Select Output Dir")
+ r.destroy()
+ if d: self.ui_output_dir = d
+
+ imgui.separator()
+ imgui.text("Project Files")
+ imgui.begin_child("proj_files", imgui.ImVec2(0, 150), True)
+ for i, pp in enumerate(self.project_paths):
+ is_active = (pp == self.active_project_path)
+ if imgui.button(f"x##p{i}"):
+ removed = self.project_paths.pop(i)
+ if removed == self.active_project_path and self.project_paths:
+ self._switch_project(self.project_paths[0])
+ break
+ imgui.same_line()
+ marker = " *" if is_active else ""
+ if is_active: imgui.push_style_color(imgui.Col_.text, C_IN)
+ if imgui.button(f"{Path(pp).stem}{marker}##ps{i}"):
+ self._switch_project(pp)
+ if is_active: imgui.pop_style_color()
+ imgui.same_line()
+ imgui.text_colored(C_LBL, pp)
+ imgui.end_child()
+
+ if imgui.button("Add Project"):
+ r = hide_tk_root()
+ p = filedialog.askopenfilename(title="Select Project .toml", filetypes=[("TOML", "*.toml"), ("All", "*.*")])
+ r.destroy()
+ if p and p not in self.project_paths:
+ self.project_paths.append(p)
+ imgui.same_line()
+ if imgui.button("New Project"):
+ r = hide_tk_root()
+ p = filedialog.asksaveasfilename(title="Create New Project .toml", defaultextension=".toml", filetypes=[("TOML", "*.toml"), ("All", "*.*")])
+ r.destroy()
+ if p:
+ name = Path(p).stem
+ proj = project_manager.default_project(name)
+ project_manager.save_project(proj, p)
+ if p not in self.project_paths:
+ self.project_paths.append(p)
+ self._switch_project(p)
+ imgui.same_line()
+ if imgui.button("Save All"):
+ self._flush_to_project()
+ self._save_active_project()
+ self._flush_to_config()
+ save_config(self.config)
+ self.ai_status = "config saved"
+
+ ch, self.ui_word_wrap = imgui.checkbox("Word-Wrap (Read-only panels)", self.ui_word_wrap)
+ imgui.end()
+
+ # ---- Files
+ if self.show_windows["Files"]:
+ exp, self.show_windows["Files"] = imgui.begin("Files", self.show_windows["Files"])
+ if exp:
+ imgui.text("Base Dir")
+ ch, self.ui_files_base_dir = imgui.input_text("##f_base", self.ui_files_base_dir)
+ imgui.same_line()
+ if imgui.button("Browse##fb"):
+ r = hide_tk_root()
+ d = filedialog.askdirectory()
+ r.destroy()
+ if d: self.ui_files_base_dir = d
+
+ imgui.separator()
+ imgui.text("Paths")
+ imgui.begin_child("f_paths", imgui.ImVec2(0, -40), True)
+ for i, f in enumerate(self.files):
+ if imgui.button(f"x##f{i}"):
+ self.files.pop(i)
+ break
+ imgui.same_line()
+ imgui.text(f)
+ imgui.end_child()
+
+ if imgui.button("Add File(s)"):
+ r = hide_tk_root()
+ paths = filedialog.askopenfilenames()
+ r.destroy()
+ for p in paths:
+ if p not in self.files: self.files.append(p)
+ imgui.same_line()
+ if imgui.button("Add Wildcard"):
+ r = hide_tk_root()
+ d = filedialog.askdirectory()
+ r.destroy()
+ if d: self.files.append(str(Path(d) / "**" / "*"))
+ imgui.end()
+
+ # ---- Screenshots
+ if self.show_windows["Screenshots"]:
+ exp, self.show_windows["Screenshots"] = imgui.begin("Screenshots", self.show_windows["Screenshots"])
+ if exp:
+ imgui.text("Base Dir")
+ ch, self.ui_shots_base_dir = imgui.input_text("##s_base", self.ui_shots_base_dir)
+ imgui.same_line()
+ if imgui.button("Browse##sb"):
+ r = hide_tk_root()
+ d = filedialog.askdirectory()
+ r.destroy()
+ if d: self.ui_shots_base_dir = d
+
+ imgui.separator()
+ imgui.text("Paths")
+ imgui.begin_child("s_paths", imgui.ImVec2(0, -40), True)
+ for i, s in enumerate(self.screenshots):
+ if imgui.button(f"x##s{i}"):
+ self.screenshots.pop(i)
+ break
+ imgui.same_line()
+ imgui.text(s)
+ imgui.end_child()
+
+ if imgui.button("Add Screenshot(s)"):
+ r = hide_tk_root()
+ paths = filedialog.askopenfilenames()
+ r.destroy()
+ for p in paths:
+ if p not in self.screenshots: self.screenshots.append(p)
+ imgui.end()
+
+ # ---- Discussion History
+ if self.show_windows["Discussion History"]:
+ exp, self.show_windows["Discussion History"] = imgui.begin("Discussion History", self.show_windows["Discussion History"])
+ if exp:
+ if imgui.collapsing_header("Discussions", imgui.TreeNodeFlags_.default_open):
+ names = self._get_discussion_names()
+
+ if imgui.begin_combo("##disc_sel", self.active_discussion):
+ for name in names:
+ is_selected = (name == self.active_discussion)
+ if imgui.selectable(name, is_selected)[0]:
+ self._switch_discussion(name)
+ if is_selected:
+ imgui.set_item_default_focus()
+ imgui.end_combo()
+
+ disc_sec = self.project.get("discussion", {})
+ disc_data = disc_sec.get("discussions", {}).get(self.active_discussion, {})
+ git_commit = disc_data.get("git_commit", "")
+ last_updated = disc_data.get("last_updated", "")
+
+ imgui.text_colored(C_LBL, "commit:")
+ imgui.same_line()
+ imgui.text_colored(C_IN if git_commit else C_LBL, git_commit[:12] if git_commit else "(none)")
+ imgui.same_line()
+ if imgui.button("Update Commit"):
+ git_dir = self.ui_project_git_dir
+ if git_dir:
+ cmt = project_manager.get_git_commit(git_dir)
+ if cmt:
+ disc_data["git_commit"] = cmt
+ disc_data["last_updated"] = project_manager.now_ts()
+ self.ai_status = f"commit: {cmt[:12]}"
+
+ imgui.text_colored(C_LBL, "updated:")
+ imgui.same_line()
+ imgui.text_colored(C_SUB, last_updated if last_updated else "(never)")
+
+ ch, self.ui_disc_new_name_input = imgui.input_text("##new_disc", self.ui_disc_new_name_input)
+ imgui.same_line()
+ if imgui.button("Create"):
+ nm = self.ui_disc_new_name_input.strip()
+ if nm: self._create_discussion(nm); self.ui_disc_new_name_input = ""
+ imgui.same_line()
+ if imgui.button("Rename"):
+ nm = self.ui_disc_new_name_input.strip()
+ if nm: self._rename_discussion(self.active_discussion, nm); self.ui_disc_new_name_input = ""
+ imgui.same_line()
+ if imgui.button("Delete"):
+ self._delete_discussion(self.active_discussion)
+
+ imgui.separator()
+ if imgui.button("+ Entry"):
+ self.disc_entries.append({"role": self.disc_roles[0] if self.disc_roles else "User", "content": "", "collapsed": False, "ts": project_manager.now_ts()})
+ imgui.same_line()
+ if imgui.button("-All"):
+ for e in self.disc_entries: e["collapsed"] = True
+ imgui.same_line()
+ if imgui.button("+All"):
+ for e in self.disc_entries: e["collapsed"] = False
+ imgui.same_line()
+ if imgui.button("Clear All"):
+ self.disc_entries.clear()
+ imgui.same_line()
+ if imgui.button("Save"):
+ self._flush_to_project()
+ self._save_active_project()
+ self._flush_to_config()
+ save_config(self.config)
+ self.ai_status = "discussion saved"
+
+ ch, self.ui_auto_add_history = imgui.checkbox("Auto-add message & response to history", self.ui_auto_add_history)
+ imgui.separator()
+
+ if imgui.collapsing_header("Roles"):
+ imgui.begin_child("roles_scroll", imgui.ImVec2(0, 100), True)
+ for i, r in enumerate(self.disc_roles):
+ if imgui.button(f"x##r{i}"):
+ self.disc_roles.pop(i)
+ break
+ imgui.same_line()
+ imgui.text(r)
+ imgui.end_child()
+ ch, self.ui_disc_new_role_input = imgui.input_text("##new_role", self.ui_disc_new_role_input)
+ imgui.same_line()
+ if imgui.button("Add"):
+ r = self.ui_disc_new_role_input.strip()
+ if r and r not in self.disc_roles:
+ self.disc_roles.append(r)
+ self.ui_disc_new_role_input = ""
+
+ imgui.separator()
+ imgui.begin_child("disc_scroll", imgui.ImVec2(0, 0), False)
+ for i, entry in enumerate(self.disc_entries):
+ imgui.push_id(str(i))
+ collapsed = entry.get("collapsed", False)
+ read_mode = entry.get("read_mode", False)
+
+ if imgui.button("+" if collapsed else "-"):
+ entry["collapsed"] = not collapsed
+ imgui.same_line()
+
+ imgui.set_next_item_width(120)
+ if imgui.begin_combo("##role", entry["role"]):
+ for r in self.disc_roles:
+ if imgui.selectable(r, r == entry["role"])[0]:
+ entry["role"] = r
+ imgui.end_combo()
+
+ if not collapsed:
+ imgui.same_line()
+ if imgui.button("[Edit]" if read_mode else "[Read]"):
+ entry["read_mode"] = not read_mode
+
+ ts_str = entry.get("ts", "")
+ if ts_str:
+ imgui.same_line()
+ imgui.text_colored(vec4(120, 120, 100), ts_str)
+
+ if collapsed:
+ imgui.same_line()
+ if imgui.button("Ins"):
+ self.disc_entries.insert(i, {"role": "User", "content": "", "collapsed": False, "ts": project_manager.now_ts()})
+ imgui.same_line()
+ self._render_text_viewer(f"Entry #{i+1}", entry["content"])
+ imgui.same_line()
+ if imgui.button("Del"):
+ self.disc_entries.pop(i)
+ imgui.pop_id()
+ break
+ imgui.same_line()
+ preview = entry["content"].replace("\n", " ")[:60]
+ if len(entry["content"]) > 60: preview += "..."
+ imgui.text_colored(vec4(160, 160, 150), preview)
+
+ if not collapsed:
+ if read_mode:
+ imgui.begin_child("read_content", imgui.ImVec2(0, 150), True)
+ if self.ui_word_wrap: imgui.push_text_wrap_pos(imgui.get_content_region_avail().x)
+ imgui.text(entry["content"])
+ if self.ui_word_wrap: imgui.pop_text_wrap_pos()
+ imgui.end_child()
+ else:
+ ch, entry["content"] = imgui.input_text_multiline("##content", entry["content"], imgui.ImVec2(-1, 150))
+
+ imgui.separator()
+ imgui.pop_id()
+ imgui.end_child()
+ imgui.end()
+
+ # ---- Provider
+ if self.show_windows["Provider"]:
+ exp, self.show_windows["Provider"] = imgui.begin("Provider", self.show_windows["Provider"])
+ if exp:
+ imgui.text("Provider")
+ if imgui.begin_combo("##prov", self.current_provider):
+ for p in PROVIDERS:
+ if imgui.selectable(p, p == self.current_provider)[0]:
+ self.current_provider = p
+ ai_client.reset_session()
+ ai_client.set_provider(p, self.current_model)
+ self.available_models = []
+ self._fetch_models(p)
+ imgui.end_combo()
+ imgui.separator()
+ imgui.text("Model")
+ imgui.same_line()
+ if imgui.button("Fetch Models"):
+ self._fetch_models(self.current_provider)
+
+ if imgui.begin_list_box("##models", imgui.ImVec2(-1, 120)):
+ for m in self.available_models:
+ if imgui.selectable(m, m == self.current_model)[0]:
+ self.current_model = m
+ ai_client.reset_session()
+ ai_client.set_provider(self.current_provider, m)
+ imgui.end_list_box()
+ imgui.end()
+
+ # ---- Message
+ if self.show_windows["Message"]:
+ exp, self.show_windows["Message"] = imgui.begin("Message", self.show_windows["Message"])
+ if exp:
+ ch, self.ui_ai_input = imgui.input_text_multiline("##ai_in", self.ui_ai_input, imgui.ImVec2(-1, -40))
+ imgui.separator()
+ if imgui.button("Gen + Send"):
+ if not (self.send_thread and self.send_thread.is_alive()):
+ try:
+ md, path, file_items = self._do_generate()
+ self.last_md = md
+ self.last_md_path = path
+ self.last_file_items = file_items
+ except Exception as e:
+ self.ai_status = f"generate error: {e}"
+ else:
+ self.ai_status = "sending..."
+ user_msg = self.ui_ai_input
+ base_dir = self.ui_files_base_dir
+ csp = filter(bool, [self.ui_global_system_prompt.strip(), self.ui_project_system_prompt.strip()])
+ ai_client.set_custom_system_prompt("\n\n".join(csp))
+
+ def do_send():
+ if self.ui_auto_add_history:
+ with self._pending_history_adds_lock:
+ self._pending_history_adds.append({"role": "User", "content": user_msg, "collapsed": False, "ts": project_manager.now_ts()})
+ try:
+ resp = ai_client.send(self.last_md, user_msg, base_dir, self.last_file_items)
+ self.ai_response = resp
+ self.ai_status = "done"
+ self._trigger_blink = True
+ if self.ui_auto_add_history:
+ with self._pending_history_adds_lock:
+ self._pending_history_adds.append({"role": "AI", "content": resp, "collapsed": False, "ts": project_manager.now_ts()})
+ except ProviderError as e:
+ self.ai_response = e.ui_message()
+ self.ai_status = "error"
+ self._trigger_blink = True
+ if self.ui_auto_add_history:
+ with self._pending_history_adds_lock:
+ self._pending_history_adds.append({"role": "Vendor API", "content": self.ai_response, "collapsed": False, "ts": project_manager.now_ts()})
+ except Exception as e:
+ self.ai_response = f"ERROR: {e}"
+ self.ai_status = "error"
+ self._trigger_blink = True
+ if self.ui_auto_add_history:
+ with self._pending_history_adds_lock:
+ self._pending_history_adds.append({"role": "System", "content": self.ai_response, "collapsed": False, "ts": project_manager.now_ts()})
+
+ self.send_thread = threading.Thread(target=do_send, daemon=True)
+ self.send_thread.start()
+ imgui.same_line()
+ if imgui.button("MD Only"):
+ try:
+ md, path, _ = self._do_generate()
+ self.last_md = md
+ self.last_md_path = path
+ self.ai_status = f"md written: {path.name}"
+ except Exception as e:
+ self.ai_status = f"error: {e}"
+ imgui.same_line()
+ if imgui.button("Reset"):
+ ai_client.reset_session()
+ ai_client.clear_comms_log()
+ self._tool_log.clear()
+ self._comms_log.clear()
+ self.ai_status = "session reset"
+ self.ai_response = ""
+ imgui.same_line()
+ if imgui.button("-> History"):
+ if self.ui_ai_input:
+ self.disc_entries.append({"role": "User", "content": self.ui_ai_input, "collapsed": False, "ts": project_manager.now_ts()})
+ imgui.end()
+
+ # ---- Response
+ if self.show_windows["Response"]:
+
+ if self._trigger_blink:
+ self._trigger_blink = False
+ self._is_blinking = True
+ self._blink_start_time = time.time()
+ imgui.set_window_focus_str("Response")
+
+ if self._is_blinking:
+ elapsed = time.time() - self._blink_start_time
+ if elapsed > 1.5:
+ self._is_blinking = False
+ else:
+ val = math.sin(elapsed * 8 * math.pi)
+ alpha = 50/255 if val > 0 else 0
+ imgui.push_style_color(imgui.Col_.frame_bg, vec4(0, 255, 0, alpha))
+ imgui.push_style_color(imgui.Col_.child_bg, vec4(0, 255, 0, alpha))
+
+ exp, self.show_windows["Response"] = imgui.begin("Response", self.show_windows["Response"])
+ if exp:
+ if self.ui_word_wrap:
+ imgui.begin_child("resp_wrap", imgui.ImVec2(-1, -40), True)
+ imgui.push_text_wrap_pos(imgui.get_content_region_avail().x)
+ imgui.text(self.ai_response)
+ imgui.pop_text_wrap_pos()
+ imgui.end_child()
+ else:
+ imgui.input_text_multiline("##ai_out", self.ai_response, imgui.ImVec2(-1, -40), imgui.InputTextFlags_.read_only)
+ imgui.separator()
+ if imgui.button("-> History"):
+ if self.ai_response:
+ self.disc_entries.append({"role": "AI", "content": self.ai_response, "collapsed": False, "ts": project_manager.now_ts()})
+
+ if self._is_blinking:
+ imgui.pop_style_color(2)
+ imgui.end()
+
+ # ---- Tool Calls
+ if self.show_windows["Tool Calls"]:
+ exp, self.show_windows["Tool Calls"] = imgui.begin("Tool Calls", self.show_windows["Tool Calls"])
+ if exp:
+ imgui.text("Tool call history")
+ imgui.same_line()
+ if imgui.button("Clear##tc"):
+ self._tool_log.clear()
+ imgui.separator()
+ imgui.begin_child("tc_scroll")
+ for i, (script, result) in enumerate(self._tool_log, 1):
+ first_line = script.strip().splitlines()[0][:80] if script.strip() else "(empty)"
+ imgui.text_colored(C_KEY, f"Call #{i}: {first_line}")
+ imgui.same_line()
+ self._render_text_viewer(f"Call Script #{i}", script)
+ imgui.same_line()
+ self._render_text_viewer(f"Call Output #{i}", result)
+
+ if self.ui_word_wrap:
+ imgui.begin_child(f"tc_wrap_{i}", imgui.ImVec2(-1, 72), True)
+ imgui.push_text_wrap_pos(imgui.get_content_region_avail().x)
+ imgui.text(result)
+ imgui.pop_text_wrap_pos()
+ imgui.end_child()
+ else:
+ imgui.input_text_multiline(f"##tc_res_{i}", result, imgui.ImVec2(-1, 72), imgui.InputTextFlags_.read_only)
+ imgui.separator()
+ imgui.end_child()
+ imgui.end()
+
+ # ---- Comms History
+ if self.show_windows["Comms History"]:
+ exp, self.show_windows["Comms History"] = imgui.begin("Comms History", self.show_windows["Comms History"])
+ if exp:
+ imgui.text_colored(vec4(200, 220, 160), f"Status: {self.ai_status}")
+ imgui.same_line()
+ if imgui.button("Clear##comms"):
+ ai_client.clear_comms_log()
+ self._comms_log.clear()
+ imgui.separator()
+
+ imgui.text_colored(C_OUT, "OUT")
+ imgui.same_line()
+ imgui.text_colored(C_REQ, "request")
+ imgui.same_line()
+ imgui.text_colored(C_TC, "tool_call")
+ imgui.same_line()
+ imgui.text(" ")
+ imgui.same_line()
+ imgui.text_colored(C_IN, "IN")
+ imgui.same_line()
+ imgui.text_colored(C_RES, "response")
+ imgui.same_line()
+ imgui.text_colored(C_TR, "tool_result")
+ imgui.separator()
+
+ imgui.begin_child("comms_scroll", imgui.ImVec2(0, 0), False, imgui.WindowFlags_.horizontal_scrollbar)
+ for idx, entry in enumerate(self._comms_log, 1):
+ imgui.push_id(f"comms_{idx}")
+ d = entry["direction"]
+ k = entry["kind"]
+
+ imgui.text_colored(vec4(160, 160, 160), f"#{idx}")
+ imgui.same_line()
+ imgui.text_colored(vec4(160, 160, 160), entry["ts"])
+ imgui.same_line()
+ imgui.text_colored(DIR_COLORS.get(d, C_VAL), d)
+ imgui.same_line()
+ imgui.text_colored(KIND_COLORS.get(k, C_VAL), k)
+ imgui.same_line()
+ imgui.text_colored(C_LBL, f"{entry['provider']}/{entry['model']}")
+
+ payload = entry["payload"]
+
+ if k == "request":
+ self._render_heavy_text("message", payload.get("message", ""))
+ elif k == "response":
+ imgui.text_colored(C_LBL, "round:")
+ imgui.same_line()
+ imgui.text_colored(C_VAL, str(payload.get("round", "")))
+
+ imgui.text_colored(C_LBL, "stop_reason:")
+ imgui.same_line()
+ imgui.text_colored(vec4(255, 200, 120), str(payload.get("stop_reason", "")))
+
+ text = payload.get("text", "")
+ if text:
+ self._render_heavy_text("text", text)
+
+ imgui.text_colored(C_LBL, "tool_calls:")
+ tcs = payload.get("tool_calls", [])
+ if not tcs:
+ imgui.text_colored(C_VAL, " (none)")
+ for i, tc in enumerate(tcs):
+ imgui.text_colored(C_KEY, f" call[{i}] {tc.get('name', '?')}")
+ if "id" in tc:
+ imgui.text_colored(C_LBL, " id:")
+ imgui.same_line()
+ imgui.text_colored(C_VAL, str(tc["id"]))
+ args = tc.get("args") or tc.get("input") or {}
+ if isinstance(args, dict):
+ for ak, av in args.items():
+ self._render_heavy_text(f" {ak}", str(av))
+ elif args:
+ self._render_heavy_text(" args", str(args))
+
+ usage = payload.get("usage")
+ if usage:
+ imgui.text_colored(C_SUB, "usage:")
+ for uk, uv in usage.items():
+ imgui.text_colored(C_LBL, f" {uk.replace('_', ' ')}:")
+ imgui.same_line()
+ imgui.text_colored(C_NUM, str(uv))
+
+ elif k == "tool_call":
+ imgui.text_colored(C_LBL, "name:")
+ imgui.same_line()
+ imgui.text_colored(C_VAL, str(payload.get("name", "")))
+ if "id" in payload:
+ imgui.text_colored(C_LBL, "id:")
+ imgui.same_line()
+ imgui.text_colored(C_VAL, str(payload["id"]))
+ if "script" in payload:
+ self._render_heavy_text("script", payload.get("script", ""))
+ elif "args" in payload:
+ args = payload["args"]
+ if isinstance(args, dict):
+ for ak, av in args.items():
+ self._render_heavy_text(ak, str(av))
+ else:
+ self._render_heavy_text("args", str(args))
+
+ elif k == "tool_result":
+ imgui.text_colored(C_LBL, "name:")
+ imgui.same_line()
+ imgui.text_colored(C_VAL, str(payload.get("name", "")))
+ if "id" in payload:
+ imgui.text_colored(C_LBL, "id:")
+ imgui.same_line()
+ imgui.text_colored(C_VAL, str(payload["id"]))
+ self._render_heavy_text("output", payload.get("output", ""))
+
+ elif k == "tool_result_send":
+ for i, r in enumerate(payload.get("results", [])):
+ imgui.text_colored(C_KEY, f"result[{i}]")
+ imgui.text_colored(C_LBL, " tool_use_id:")
+ imgui.same_line()
+ imgui.text_colored(C_VAL, str(r.get("tool_use_id", "")))
+ self._render_heavy_text(" content", str(r.get("content", "")))
+ else:
+ for key, val in payload.items():
+ vstr = json.dumps(val, ensure_ascii=False, indent=2) if isinstance(val, (dict, list)) else str(val)
+ if key in HEAVY_KEYS:
+ self._render_heavy_text(key, vstr)
+ else:
+ imgui.text_colored(C_LBL, f"{key}:")
+ imgui.same_line()
+ imgui.text_colored(C_VAL, vstr)
+
+ imgui.separator()
+ imgui.pop_id()
+ imgui.end_child()
+ imgui.end()
+
+ # ---- System Prompts
+ if self.show_windows["System Prompts"]:
+ exp, self.show_windows["System Prompts"] = imgui.begin("System Prompts", self.show_windows["System Prompts"])
+ if exp:
+ imgui.text("Global System Prompt (all projects)")
+ ch, self.ui_global_system_prompt = imgui.input_text_multiline("##gsp", self.ui_global_system_prompt, imgui.ImVec2(-1, 100))
+ imgui.separator()
+ imgui.text("Project System Prompt")
+ ch, self.ui_project_system_prompt = imgui.input_text_multiline("##psp", self.ui_project_system_prompt, imgui.ImVec2(-1, 100))
+ imgui.end()
+
+ # ---- Theme
+ if self.show_windows["Theme"]:
+ exp, self.show_windows["Theme"] = imgui.begin("Theme", self.show_windows["Theme"])
+ if exp:
+ imgui.text("Palette")
+ cp = theme.get_current_palette()
+ if imgui.begin_combo("##pal", cp):
+ for p in theme.get_palette_names():
+ if imgui.selectable(p, p == cp)[0]:
+ theme.apply(p)
+ imgui.end_combo()
+ imgui.separator()
+ imgui.text("Font")
+ imgui.push_item_width(-150)
+ ch, path = imgui.input_text("##fontp", theme.get_current_font_path())
+ imgui.pop_item_width()
+ if ch: theme._current_font_path = path
+ imgui.same_line()
+ if imgui.button("Browse##font"):
+ r = hide_tk_root()
+ p = filedialog.askopenfilename(filetypes=[("Fonts", "*.ttf *.otf"), ("All", "*.*")])
+ r.destroy()
+ if p: theme._current_font_path = p
+
+ imgui.text("Size (px)")
+ imgui.same_line()
+ imgui.push_item_width(100)
+ ch, size = imgui.input_float("##fonts", theme.get_current_font_size(), 1.0, 1.0, "%.0f")
+ if ch: theme._current_font_size = size
+ imgui.pop_item_width()
+ imgui.same_line()
+ if imgui.button("Apply Font (Requires Restart)"):
+ self._flush_to_config()
+ save_config(self.config)
+ self.ai_status = "Font settings saved. Restart required."
+
+ imgui.separator()
+ imgui.text("UI Scale (DPI)")
+ ch, scale = imgui.slider_float("##scale", theme.get_current_scale(), 0.5, 3.0, "%.2f")
+ if ch: theme.set_scale(scale)
+ imgui.end()
+
+ # ---- Modals / Popups
+ with self._pending_dialog_lock:
+ dlg = self._pending_dialog
+
+ if dlg:
+ if not self._pending_dialog_open:
+ imgui.open_popup("Approve PowerShell Command")
+ self._pending_dialog_open = True
+ else:
+ self._pending_dialog_open = False
+
+ if imgui.begin_popup_modal("Approve PowerShell Command", None, imgui.WindowFlags_.always_auto_resize)[0]:
+ if dlg:
+ imgui.text("The AI wants to run the following PowerShell script:")
+ imgui.text_colored(vec4(200, 200, 100), f"base_dir: {dlg._base_dir}")
+ imgui.separator()
+ if imgui.button("[+ Maximize]##confirm"):
+ self.show_text_viewer = True
+ self.text_viewer_title = "Confirm Script"
+ self.text_viewer_content = dlg._script
+ ch, dlg._script = imgui.input_text_multiline("##confirm_script", dlg._script, imgui.ImVec2(-1, 300))
+ imgui.separator()
+ if imgui.button("Approve & Run", imgui.ImVec2(120, 0)):
+ dlg._approved = True
+ dlg._event.set()
+ with self._pending_dialog_lock:
+ self._pending_dialog = None
+ imgui.close_current_popup()
+ imgui.same_line()
+ if imgui.button("Reject", imgui.ImVec2(120, 0)):
+ dlg._approved = False
+ dlg._event.set()
+ with self._pending_dialog_lock:
+ self._pending_dialog = None
+ imgui.close_current_popup()
+ imgui.end_popup()
+
+ if self.show_script_output:
+ if self._trigger_script_blink:
+ self._trigger_script_blink = False
+ self._is_script_blinking = True
+ self._script_blink_start_time = time.time()
+ imgui.set_window_focus_str("Last Script Output")
+
+ if self._is_script_blinking:
+ elapsed = time.time() - self._script_blink_start_time
+ if elapsed > 1.5:
+ self._is_script_blinking = False
+ else:
+ val = math.sin(elapsed * 8 * math.pi)
+ alpha = 60/255 if val > 0 else 0
+ imgui.push_style_color(imgui.Col_.frame_bg, vec4(0, 100, 255, alpha))
+ imgui.push_style_color(imgui.Col_.child_bg, vec4(0, 100, 255, alpha))
+
+ imgui.set_next_window_size(imgui.ImVec2(800, 600), imgui.Cond_.first_use_ever)
+ expanded, self.show_script_output = imgui.begin("Last Script Output", self.show_script_output)
+ if expanded:
+ imgui.text("Script:")
+ imgui.same_line()
+ self._render_text_viewer("Last Script", self.ui_last_script_text)
+
+ if self.ui_word_wrap:
+ imgui.begin_child("lso_s_wrap", imgui.ImVec2(-1, 200), True)
+ imgui.push_text_wrap_pos(imgui.get_content_region_avail().x)
+ imgui.text(self.ui_last_script_text)
+ imgui.pop_text_wrap_pos()
+ imgui.end_child()
+ else:
+ imgui.input_text_multiline("##lso_s", self.ui_last_script_text, imgui.ImVec2(-1, 200), imgui.InputTextFlags_.read_only)
+
+ imgui.separator()
+ imgui.text("Output:")
+ imgui.same_line()
+ self._render_text_viewer("Last Output", self.ui_last_script_output)
+
+ if self.ui_word_wrap:
+ imgui.begin_child("lso_o_wrap", imgui.ImVec2(-1, -1), True)
+ imgui.push_text_wrap_pos(imgui.get_content_region_avail().x)
+ imgui.text(self.ui_last_script_output)
+ imgui.pop_text_wrap_pos()
+ imgui.end_child()
+ else:
+ imgui.input_text_multiline("##lso_o", self.ui_last_script_output, imgui.ImVec2(-1, -1), imgui.InputTextFlags_.read_only)
+
+ if self._is_script_blinking:
+ imgui.pop_style_color(2)
+ imgui.end()
+
+ if self.show_text_viewer:
+ imgui.set_next_window_size(imgui.ImVec2(900, 700), imgui.Cond_.first_use_ever)
+ expanded, self.show_text_viewer = imgui.begin(f"Text Viewer - {self.text_viewer_title}", self.show_text_viewer)
+ if expanded:
+ if self.ui_word_wrap:
+ imgui.begin_child("tv_wrap", imgui.ImVec2(-1, -1), False)
+ imgui.push_text_wrap_pos(imgui.get_content_region_avail().x)
+ imgui.text(self.text_viewer_content)
+ imgui.pop_text_wrap_pos()
+ imgui.end_child()
+ else:
+ imgui.input_text_multiline("##tv_c", self.text_viewer_content, imgui.ImVec2(-1, -1), imgui.InputTextFlags_.read_only)
+ imgui.end()
+
+ def _load_fonts(self):
+ font_path, font_size = theme.get_font_loading_params()
+ if font_path and Path(font_path).exists():
+ hello_imgui.load_font(font_path, font_size)
+
+ def run(self):
+ theme.load_from_config(self.config)
+
+ self.runner_params = hello_imgui.RunnerParams()
+ self.runner_params.app_window_params.window_title = "manual slop"
+ self.runner_params.app_window_params.window_geometry.size = (1680, 1200)
+ self.runner_params.imgui_window_params.enable_viewports = True
+ self.runner_params.imgui_window_params.default_imgui_window_type = hello_imgui.DefaultImGuiWindowType.provide_full_screen_dock_space
+ self.runner_params.callbacks.show_gui = self._gui_func
+ self.runner_params.callbacks.load_additional_fonts = self._load_fonts
+
+ self._fetch_models(self.current_provider)
+
+ immapp.run(self.runner_params)
+
+ # On exit
+ self._flush_to_project()
+ self._save_active_project()
+ self._flush_to_config()
+ save_config(self.config)
+ session_logger.close_session()
+
+def main():
+ app = App()
+ app.run()
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/manual_slop.toml b/manual_slop.toml
index 1a696de..f07767a 100644
--- a/manual_slop.toml
+++ b/manual_slop.toml
@@ -39,7 +39,7 @@ roles = [
"Vendor API",
"System",
]
-active = "Misc UX improvements"
+active = "Support multi-viewporting"
auto_add = true
[discussion.discussions.main]
@@ -113,7 +113,7 @@ history = [
[discussion.discussions."Misc UX improvements"]
git_commit = "0cccaf05a36269a23224d731bf9a57a209a23f14"
-last_updated = "2026-02-22T00:13:17"
+last_updated = "2026-02-22T01:08:35"
history = [
"@2026-02-21T23:03:51\nUser:\nMake the namespace for the generated markdown file the projet name instead of that specific namespace in the config panel.\nThe ouptut dir can be in the project panel spec (that way we can just remove config panel entirely).",
"@2026-02-21T23:04:55\nAI:\nI have applied the changes to streamline the project settings and remove the old Config panel completely:\n\n1. **GUI Layout Changes (`gui.py`)**:\n - The `Config` panel has been completely removed.\n - The **Output Dir** field has been moved into the **Projects** panel (right above the Project Files list).\n - The **Auto-add history** checkbox has been moved to the top of the **Discussion History** panel (directly under the entry toolbar buttons).\n - The **Files** panel was shifted upward to reclaim the space left behind by the old Config panel.\n\n2. **Namespace Management (`aggregate.py` & `project_manager.py`)**:\n - The explicitly defined `namespace` field is gone.\n - Instead, `aggregate.py` now directly infers the output markdown file namespace from the active project's `name` property. (e.g., saving under the `manual_slop` project will just prefix the markdown with `manual_slop_001.md`).\n - `project_manager.py` was updated to no longer scaffold out `namespace` strings into new `.toml` files, and `flat_config` now injects the core `project` dictionary segment so the generators can access the internal name.\n\n3. **`MainContext.md` Updated**:\n - I have updated the markdown file with the latest layout structures and explained that `namespace` is no longer a configurable setting but dynamically determined by the project profile.\n\nYou'll need to restart the application for the GUI layout and DearPyGUI state changes to take effect! Let me know how it looks.",
@@ -131,3 +131,15 @@ history = [
"@2026-02-22T00:04:00\nUser:\nCan we add support for proper viewport based dear imgui? \nWhere we don't have a main window enforcing the rendering? \nInstead we can have each of these dockable panels outside of the main window and even disolve it so that this program beahves as an axuillary tool we utilize as we use vscode and other tools?\nWe can have a taskbar icon or something to open the windows back fter closing them, etc.",
"@2026-02-22T00:04:45\nAI:\nI will start by examining the current UI structure in `gui.py` to identify how the main window and its panels are organized, so I can plan the transition to a dockable, multi-window layout.\n\nTo transition the application to a multi-viewport, dockable layout, I will modify `gui.py` to enable Dear PyGui's viewport docking features. This will allow panels to be moved independently and even outside the main window boundaries. I will also add a global viewport menu bar so you can easily reopen any panels you close, and I'll remove the fixed position constraints that currently lock the layout in place.\n\nI will perform the following changes:\n1. **Enable Viewport Docking**: Update `App.run` to call `dpg.enable_docking(docking_viewport=True)`.\n2. **Global Menu Bar**: Add a `dpg.viewport_menu_bar` containing a \"Windows\" menu to toggle the visibility of all panels.\n3. **Unlock Windows**: Remove `no_close=True` from all windows and ensure they can be freely moved and resized.\n4. **Persistent Layout**: Ensure `dpg_layout.ini` is used to save your preferred arrangement across sessions.\n\nI will now execute a script to apply these modifications to `gui.py`.\n\nI have successfully implemented the multi-viewport and dockable interface for the application.\n\n### Key Changes:\n\n1. **Viewport Docking Enabled**:\n * The application now uses `dpg.enable_docking(docking_viewport=True)`, allowing windows to be moved outside the main application viewport on your desktop.\n * All panels are now dockable, meaning you can snap them together, stack them as tabs, or leave them floating.\n\n2. **Global Viewport Menu Bar**:\n * A main menu bar has been added at the top of the viewport.\n * **Windows Menu**: You can now toggle the visibility of every panel. If you close a window, just click its name in the \"Windows\" menu to bring it back.\n * **Project Menu**: Added quick access to \"Save All\", \"Reset Session\", and \"Generate MD Only\".\n\n3. **Layout Persistence**:\n * Layouts (window positions, sizes, and docking states) are saved to `dpg_layout.ini` and will be restored when you restart the application.\n\n4. **Unlocked Panels**:\n * Removed `no_close=True` and `no_move=True` constraints from all panels, allowing for a fully customized workspace that you can arrange alongside VS Code or other tools.\n\n5. **Documentation Updated**:\n * `MainContext.md` has been updated with a new section describing these multi-viewport and docking features.\n\nYou will need to restart the application to see the new menu bar and experience the dockable windows. Let me know if the layout feels right!",
]
+
+[discussion.discussions."Support multi-viewporting"]
+git_commit = ""
+last_updated = "2026-02-22T01:40:27"
+history = [
+ "@2026-02-22T01:13:53\nUser:\nLets port this program to use https://github.com/pthom/hello_imgui or another imgui package (WEb search if you have to) that will allow us to do Multi-Viewporting (independent OS windows).\nSo far all my attempts to get dearpygui working with native os window multi-viewporting have failed. Which c++ imgui supports. I think there might be a python package that supports this.\n\nWorst case I might have to use a different language that has a dear imgui package with multi-viewporting support that may make ai codegen performance far worse.",
+ "@2026-02-22T01:15:32\nAI:\nAlright, I've done extensive research on this. Here's my assessment:\n\n## The Multi-Viewport Situation in Python\n\n### The Problem with DearPyGui\nDearPyGui wraps Dear ImGui but **does not expose the multi-viewport feature** (dragging imgui windows out as independent OS windows). It has its own docking implementation but the C++ multi-viewport backend plumbing was never wired through to Python. You've already discovered this the hard way.\n\n### The Best Option: `imgui-bundle` (pip: `imgui-bundle`)\n\n**This is the package you want.** It's by Pascal Thomet (pthom) and wraps:\n- **Dear ImGui** (docking branch — which includes multi-viewport support)\n- **Hello ImGui** (application framework on top of Dear ImGui)\n- 20+ additional libraries (plots, markdown, node editors, etc.)\n\nKey facts:\n- `pip install imgui-bundle` — binary wheels for Windows/macOS/Linux\n- It tracks Dear ImGui's **docking branch** closely, which is the branch that contains `ImGuiConfigFlags_ViewportsEnable`\n- Full Python bindings with type hints\n- The API uses `imgui.config_flags` to set `ImGuiConfigFlags_ViewportsEnable`, then Dear ImGui handles creating/managing OS windows automatically when you drag an imgui window outside the main viewport\n- Has its own layout/docking system via Hello ImGui's dock spaces\n\n### What the Port Involves\n\nThis is a **massive** rewrite. The entire `gui.py` (2094 lines) and `theme.py` (415 lines) are completely DearPyGui-specific. Every single widget call, every tag system, every callback pattern is different. Here's the mapping:\n\n| DearPyGui | imgui-bundle |\n|---|---|\n| `dpg.add_text(...)` | `imgui.text(...)` |\n| `dpg.add_input_text(tag=..., ...)` | State variable + `imgui.input_text(...)` |\n| `dpg.add_button(label=..., callback=...)` | `if imgui.button(\"label\"): do_thing()` |\n| Tag-based widget identity | Immediate mode — no tags, state lives in your code |\n| `dpg.does_item_exist(tag)` | Not needed — immediate mode |\n| `dpg.configure_item(tag, ...)` | Just change the variable and it renders next frame |\n| `dpg.create_viewport()` + render loop | `hello_imgui.run(gui_func, params)` |\n| `dpg.enable_docking()` | `runner_params.imgui_window_params.enable_viewports = True` |\n| Theme via `dpg.add_theme_color()` | `imgui.push_style_color()` / `imgui.get_style()` |\n\nThe immediate-mode paradigm is actually **simpler** conceptually — no tags, no \"does item exist\" checks, no callback registration. But it means a full rewrite of all rendering code.\n\n### My Recommendation\n\n**Do the port to `imgui-bundle`.** Here's why:\n1. It definitively supports multi-viewporting (the whole point)\n2. The immediate-mode API is actually cleaner and more maintainable\n3. Better AI codegen compatibility (pthom specifically calls this out — readable code)\n4. You stay in Python — no language switch needed\n5. Active maintenance tracking upstream Dear ImGui\n\n### How I'd Structure the Port\n\n1. **Phase 1**: Install `imgui-bundle`, create a minimal proof-of-concept with multi-viewporting enabled (verify it works on your machine)\n2. **Phase 2**: Port `theme.py` → new imgui-bundle theming\n3. **Phase 3**: Port `gui.py` — the big one. Rewrite all panels as immediate-mode rendering functions\n4. **Phase 4**: Wire up the threading/callbacks (confirmation dialogs, background sends, etc.)\n\n**Want me to start with Phase 1 — a minimal proof-of-concept** that creates a window with viewports enabled, a couple of dockable panels, and verifies you can drag a panel out as an independent OS window? That way we confirm the foundation works before committing to the full rewrite.\n\nOr if you want me to just go all-in and start the full port, I can do that too. Your call.",
+ "@2026-02-22T01:18:08\nUser:\nYeah lets do the port, lets make a gui_2.py for this port so you don't have to worry about gui.py modifications directly. You can be incremental about it as well and have me test between steps. and I can keep the old ui as we progress.\nIf you can somedhow do the entire thing in one go congrats. \nMake a theme_2.py as well for gui_2.py if it needs a port as well.\nWhen we're done we'll make these the main scripts (rename to gui.py and theme.py, delete old ones, etc).",
+ "@2026-02-22T01:22:47\nAI:\nOK, this is a massive port. Let me first read the existing `theme.py` and `MainContext.md`, then I'll build both `theme_2.py` and `gui_2.py` as complete files.\nThis is a massive port. Let me start by understanding the full picture, then I'll write both `theme_2.py` and `gui_2.py` as complete files. First let me install `imgui-bundle` and verify it works, then write the files.\nLet me first check if imgui-bundle is installed:\nGood, `imgui-bundle` is installed and working with ImGui 1.92.5 (docking branch). Now let me build the two files. This is a massive port so I'll write them as Python helper scripts to disk.\nLet me start with `theme_2.py` since it's simpler and `gui_2.py` depends on it:\nGood, `theme_2.py` is written. Now I need to write the massive `gui_2.py`. This is the big one — a complete port from DearPyGui's retained-mode tag-based system to imgui-bundle's immediate-mode paradigm. I'll write it as a Python helper script since it's too large and complex for a PowerShell here-string.\nLet me write the helper script that creates `gui_2.py`:",
+ "@2026-02-22T01:24:13\nUser:\nI had to swtich providers midway, were not on gemini 3.1 pro preview. Hi gemini!\n\nContinue where claude left off. we still need to do gui_2.py. It attempted theme_2.py.\nSee if you can finish this port!",
+ "@2026-02-22T01:31:13\nAI:\nI encountered an error when attempting to write the file automatically: your system returned `ERROR: powershell executable not found`. It seems `powershell.exe` is missing from your PATH or you are using `pwsh`.\n\nBecause my only execution tool relies on PowerShell, I cannot write the file to your disk directly. Please save the following code manually as `gui_2.py` in your project folder. \n\nThis is the massive port completing the transition to `imgui-bundle`. It maps all of the DearPyGui logic (windows, callbacks, lists, dialogs, theming) into an immediate-mode ImGui rendering loop using `hello_imgui` and `immapp`.\n\n```python\n# gui_2.py\n[Omitted you can read it if you want yourself]\n```\n\nIf you rename `gui.py` and `theme.py`, you can test it by running `python gui_2.py`. Then you should be fully multi-viewport capable, allowing panels to dissolve into separate OS windows perfectly!",
+]
diff --git a/patch_test.py b/patch_test.py
deleted file mode 100644
index d70228f..0000000
--- a/patch_test.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# patch_gui.py — apply multi-project support to gui.py
-import re
-from pathlib import Path
-
-src = Path("C:/projects/manual_slop/gui.py").read_text(encoding="utf-8")
-
-# ── 1. Add project_manager import after "import theme" ──────────────────────
-src = src.replace(
- "import theme\n",
- "import theme\nimport project_manager\n",
- 1
-)
-
-# ── 2. Add PROJECT_TOML_PATH constant after CONFIG_PATH line ─────────────────
-src = src.replace(
- 'CONFIG_PATH = Path("config.toml")\n',
- 'CONFIG_PATH = Path("config.toml")\nPROVIDERS = ["gemini", "anthropic"]\n',
- 1
-)
-# Remove the duplicate PROVIDERS = [...] that already exists two lines down
-src = src.replace(
- 'PROVIDERS = ["gemini", "anthropic"]\nPROVIDERS = ["gemini", "anthropic"]\n',
- 'PROVIDERS = ["gemini", "anthropic"]\n',
- 1
-)
-
-print("Pass 1 done - imports & constants")
-print(repr(src[:300]))
diff --git a/pyproject.toml b/pyproject.toml
index 1b14241..b3b599d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,6 +5,7 @@ version = "0.1.0"
requires-python = ">=3.11"
dependencies = [
"dearpygui",
+ "imgui-bundle",
"google-genai",
"anthropic",
"tomli-w"
diff --git a/shell_runner.py b/shell_runner.py
index 89dd5d3..0615875 100644
--- a/shell_runner.py
+++ b/shell_runner.py
@@ -1,5 +1,4 @@
-import subprocess
-import shlex
+import subprocess, shutil
from pathlib import Path
TIMEOUT_SECONDS = 60
@@ -10,27 +9,20 @@ def run_powershell(script: str, base_dir: str) -> str:
Returns a string combining stdout, stderr, and exit code.
Raises nothing - all errors are captured into the return string.
"""
- # Prepend Set-Location so the AI doesn't need to worry about cwd
full_script = f"Set-Location -LiteralPath '{base_dir}'\n{script}"
-
+ # Try common executable names
+ exe = next((x for x in ["powershell.exe", "pwsh.exe", "powershell", "pwsh"] if shutil.which(x)), None)
+ if not exe: return "ERROR: Neither powershell nor pwsh found in PATH"
+
try:
- result = subprocess.run(
- ["powershell", "-NoProfile", "-NonInteractive", "-Command", full_script],
- capture_output=True,
- text=True,
- timeout=TIMEOUT_SECONDS,
- cwd=base_dir
+ r = subprocess.run(
+ [exe, "-NoProfile", "-NonInteractive", "-Command", full_script],
+ capture_output=True, text=True, timeout=TIMEOUT_SECONDS, cwd=base_dir
)
parts = []
- if result.stdout.strip():
- parts.append(f"STDOUT:\n{result.stdout.strip()}")
- if result.stderr.strip():
- parts.append(f"STDERR:\n{result.stderr.strip()}")
- parts.append(f"EXIT CODE: {result.returncode}")
- return "\n".join(parts) if parts else f"EXIT CODE: {result.returncode}"
- except subprocess.TimeoutExpired:
- return f"ERROR: command timed out after {TIMEOUT_SECONDS}s"
- except FileNotFoundError:
- return "ERROR: powershell executable not found"
- except Exception as e:
- return f"ERROR: {e}"
+ if r.stdout.strip(): parts.append(f"STDOUT:\n{r.stdout.strip()}")
+ if r.stderr.strip(): parts.append(f"STDERR:\n{r.stderr.strip()}")
+ parts.append(f"EXIT CODE: {r.returncode}")
+ return "\n".join(parts)
+ except subprocess.TimeoutExpired: return f"ERROR: timed out after {TIMEOUT_SECONDS}s"
+ except Exception as e: return f"ERROR: {e}"
diff --git a/theme_2.py b/theme_2.py
new file mode 100644
index 0000000..fedffe1
--- /dev/null
+++ b/theme_2.py
@@ -0,0 +1,272 @@
+# theme_2.py
+"""
+Theming support for manual_slop GUI — imgui-bundle port.
+
+Replaces theme.py (DearPyGui-specific) with imgui-bundle equivalents.
+Palettes are applied via imgui.get_style().set_color_() calls.
+Font loading uses hello_imgui.load_font().
+Scale uses imgui.get_io().font_global_scale.
+"""
+
+from imgui_bundle import imgui, hello_imgui
+from pathlib import Path
+
+# ------------------------------------------------------------------ palettes
+
+# Each palette maps imgui color enum values to (R, G, B, A) floats [0..1].
+# Only keys that differ from the ImGui dark defaults need to be listed.
+
+def _c(r, g, b, a=255):
+ """Convert 0-255 RGBA to 0.0-1.0 floats."""
+ return (r / 255.0, g / 255.0, b / 255.0, a / 255.0)
+
+
+_PALETTES: dict[str, dict[int, tuple]] = {
+
+ "ImGui Dark": {}, # empty = use imgui dark defaults
+
+ "10x Dark": {
+ imgui.Col_.window_bg: _c( 34, 32, 28),
+ imgui.Col_.child_bg: _c( 30, 28, 24),
+ imgui.Col_.popup_bg: _c( 35, 30, 20),
+ imgui.Col_.border: _c( 60, 55, 50),
+ imgui.Col_.border_shadow: _c( 0, 0, 0, 0),
+ imgui.Col_.frame_bg: _c( 45, 42, 38),
+ imgui.Col_.frame_bg_hovered: _c( 60, 56, 50),
+ imgui.Col_.frame_bg_active: _c( 75, 70, 62),
+ imgui.Col_.title_bg: _c( 40, 35, 25),
+ imgui.Col_.title_bg_active: _c( 60, 45, 15),
+ imgui.Col_.title_bg_collapsed: _c( 30, 27, 20),
+ imgui.Col_.menu_bar_bg: _c( 35, 30, 20),
+ imgui.Col_.scrollbar_bg: _c( 30, 28, 24),
+ imgui.Col_.scrollbar_grab: _c( 80, 78, 72),
+ imgui.Col_.scrollbar_grab_hovered: _c(100, 100, 92),
+ imgui.Col_.scrollbar_grab_active: _c(120, 118, 110),
+ imgui.Col_.check_mark: _c(194, 164, 74),
+ imgui.Col_.slider_grab: _c(126, 78, 14),
+ imgui.Col_.slider_grab_active: _c(194, 140, 30),
+ imgui.Col_.button: _c( 83, 76, 60),
+ imgui.Col_.button_hovered: _c(126, 78, 14),
+ imgui.Col_.button_active: _c(115, 90, 70),
+ imgui.Col_.header: _c( 83, 76, 60),
+ imgui.Col_.header_hovered: _c(126, 78, 14),
+ imgui.Col_.header_active: _c(115, 90, 70),
+ imgui.Col_.separator: _c( 70, 65, 55),
+ imgui.Col_.separator_hovered: _c(126, 78, 14),
+ imgui.Col_.separator_active: _c(194, 164, 74),
+ imgui.Col_.resize_grip: _c( 60, 55, 44),
+ imgui.Col_.resize_grip_hovered: _c(126, 78, 14),
+ imgui.Col_.resize_grip_active: _c(194, 164, 74),
+ imgui.Col_.tab: _c( 83, 83, 70),
+ imgui.Col_.tab_hovered: _c(126, 77, 25),
+ imgui.Col_.tab_selected: _c(126, 77, 25),
+ imgui.Col_.tab_dimmed: _c( 60, 58, 50),
+ imgui.Col_.tab_dimmed_selected: _c( 90, 80, 55),
+ imgui.Col_.docking_preview: _c(126, 78, 14, 180),
+ imgui.Col_.docking_empty_bg: _c( 20, 20, 20),
+ imgui.Col_.text: _c(200, 200, 200),
+ imgui.Col_.text_disabled: _c(130, 130, 120),
+ imgui.Col_.text_selected_bg: _c( 59, 86, 142, 180),
+ imgui.Col_.table_header_bg: _c( 55, 50, 38),
+ imgui.Col_.table_border_strong: _c( 70, 65, 55),
+ imgui.Col_.table_border_light: _c( 50, 47, 42),
+ imgui.Col_.table_row_bg: _c( 0, 0, 0, 0),
+ imgui.Col_.table_row_bg_alt: _c( 40, 38, 34, 40),
+ imgui.Col_.nav_cursor: _c(126, 78, 14),
+ imgui.Col_.nav_windowing_highlight: _c(194, 164, 74, 180),
+ imgui.Col_.nav_windowing_dim_bg: _c( 20, 20, 20, 80),
+ imgui.Col_.modal_window_dim_bg: _c( 10, 10, 10, 100),
+ },
+
+ "Nord Dark": {
+ imgui.Col_.window_bg: _c( 36, 41, 49),
+ imgui.Col_.child_bg: _c( 30, 34, 42),
+ imgui.Col_.popup_bg: _c( 36, 41, 49),
+ imgui.Col_.border: _c( 59, 66, 82),
+ imgui.Col_.border_shadow: _c( 0, 0, 0, 0),
+ imgui.Col_.frame_bg: _c( 46, 52, 64),
+ imgui.Col_.frame_bg_hovered: _c( 59, 66, 82),
+ imgui.Col_.frame_bg_active: _c( 67, 76, 94),
+ imgui.Col_.title_bg: _c( 36, 41, 49),
+ imgui.Col_.title_bg_active: _c( 59, 66, 82),
+ imgui.Col_.title_bg_collapsed: _c( 30, 34, 42),
+ imgui.Col_.menu_bar_bg: _c( 46, 52, 64),
+ imgui.Col_.scrollbar_bg: _c( 30, 34, 42),
+ imgui.Col_.scrollbar_grab: _c( 76, 86, 106),
+ imgui.Col_.scrollbar_grab_hovered: _c( 94, 129, 172),
+ imgui.Col_.scrollbar_grab_active: _c(129, 161, 193),
+ imgui.Col_.check_mark: _c(136, 192, 208),
+ imgui.Col_.slider_grab: _c( 94, 129, 172),
+ imgui.Col_.slider_grab_active: _c(129, 161, 193),
+ imgui.Col_.button: _c( 59, 66, 82),
+ imgui.Col_.button_hovered: _c( 94, 129, 172),
+ imgui.Col_.button_active: _c(129, 161, 193),
+ imgui.Col_.header: _c( 59, 66, 82),
+ imgui.Col_.header_hovered: _c( 94, 129, 172),
+ imgui.Col_.header_active: _c(129, 161, 193),
+ imgui.Col_.separator: _c( 59, 66, 82),
+ imgui.Col_.separator_hovered: _c( 94, 129, 172),
+ imgui.Col_.separator_active: _c(136, 192, 208),
+ imgui.Col_.resize_grip: _c( 59, 66, 82),
+ imgui.Col_.resize_grip_hovered: _c( 94, 129, 172),
+ imgui.Col_.resize_grip_active: _c(136, 192, 208),
+ imgui.Col_.tab: _c( 46, 52, 64),
+ imgui.Col_.tab_hovered: _c( 94, 129, 172),
+ imgui.Col_.tab_selected: _c( 76, 86, 106),
+ imgui.Col_.tab_dimmed: _c( 36, 41, 49),
+ imgui.Col_.tab_dimmed_selected: _c( 59, 66, 82),
+ imgui.Col_.docking_preview: _c( 94, 129, 172, 180),
+ imgui.Col_.docking_empty_bg: _c( 20, 22, 28),
+ imgui.Col_.text: _c(216, 222, 233),
+ imgui.Col_.text_disabled: _c(116, 128, 150),
+ imgui.Col_.text_selected_bg: _c( 94, 129, 172, 180),
+ imgui.Col_.table_header_bg: _c( 59, 66, 82),
+ imgui.Col_.table_border_strong: _c( 76, 86, 106),
+ imgui.Col_.table_border_light: _c( 59, 66, 82),
+ imgui.Col_.table_row_bg: _c( 0, 0, 0, 0),
+ imgui.Col_.table_row_bg_alt: _c( 46, 52, 64, 40),
+ imgui.Col_.nav_cursor: _c(136, 192, 208),
+ imgui.Col_.modal_window_dim_bg: _c( 10, 12, 16, 100),
+ },
+
+ "Monokai": {
+ imgui.Col_.window_bg: _c( 39, 40, 34),
+ imgui.Col_.child_bg: _c( 34, 35, 29),
+ imgui.Col_.popup_bg: _c( 39, 40, 34),
+ imgui.Col_.border: _c( 60, 61, 52),
+ imgui.Col_.border_shadow: _c( 0, 0, 0, 0),
+ imgui.Col_.frame_bg: _c( 50, 51, 44),
+ imgui.Col_.frame_bg_hovered: _c( 65, 67, 56),
+ imgui.Col_.frame_bg_active: _c( 80, 82, 68),
+ imgui.Col_.title_bg: _c( 39, 40, 34),
+ imgui.Col_.title_bg_active: _c( 73, 72, 62),
+ imgui.Col_.title_bg_collapsed: _c( 30, 31, 26),
+ imgui.Col_.menu_bar_bg: _c( 50, 51, 44),
+ imgui.Col_.scrollbar_bg: _c( 34, 35, 29),
+ imgui.Col_.scrollbar_grab: _c( 80, 80, 72),
+ imgui.Col_.scrollbar_grab_hovered: _c(102, 217, 39),
+ imgui.Col_.scrollbar_grab_active: _c(166, 226, 46),
+ imgui.Col_.check_mark: _c(166, 226, 46),
+ imgui.Col_.slider_grab: _c(102, 217, 39),
+ imgui.Col_.slider_grab_active: _c(166, 226, 46),
+ imgui.Col_.button: _c( 73, 72, 62),
+ imgui.Col_.button_hovered: _c(249, 38, 114),
+ imgui.Col_.button_active: _c(198, 30, 92),
+ imgui.Col_.header: _c( 73, 72, 62),
+ imgui.Col_.header_hovered: _c(249, 38, 114),
+ imgui.Col_.header_active: _c(198, 30, 92),
+ imgui.Col_.separator: _c( 60, 61, 52),
+ imgui.Col_.separator_hovered: _c(249, 38, 114),
+ imgui.Col_.separator_active: _c(166, 226, 46),
+ imgui.Col_.resize_grip: _c( 73, 72, 62),
+ imgui.Col_.resize_grip_hovered: _c(249, 38, 114),
+ imgui.Col_.resize_grip_active: _c(166, 226, 46),
+ imgui.Col_.tab: _c( 73, 72, 62),
+ imgui.Col_.tab_hovered: _c(249, 38, 114),
+ imgui.Col_.tab_selected: _c(249, 38, 114),
+ imgui.Col_.tab_dimmed: _c( 50, 51, 44),
+ imgui.Col_.tab_dimmed_selected: _c( 90, 88, 76),
+ imgui.Col_.docking_preview: _c(249, 38, 114, 180),
+ imgui.Col_.docking_empty_bg: _c( 20, 20, 18),
+ imgui.Col_.text: _c(248, 248, 242),
+ imgui.Col_.text_disabled: _c(117, 113, 94),
+ imgui.Col_.text_selected_bg: _c(249, 38, 114, 150),
+ imgui.Col_.table_header_bg: _c( 60, 61, 52),
+ imgui.Col_.table_border_strong: _c( 73, 72, 62),
+ imgui.Col_.table_border_light: _c( 55, 56, 48),
+ imgui.Col_.table_row_bg: _c( 0, 0, 0, 0),
+ imgui.Col_.table_row_bg_alt: _c( 50, 51, 44, 40),
+ imgui.Col_.nav_cursor: _c(166, 226, 46),
+ imgui.Col_.modal_window_dim_bg: _c( 10, 10, 8, 100),
+ },
+}
+
+PALETTE_NAMES: list[str] = list(_PALETTES.keys())
+
+# ------------------------------------------------------------------ state
+
+_current_palette: str = "ImGui Dark"
+_current_font_path: str = ""
+_current_font_size: float = 16.0
+_current_scale: float = 1.0
+_custom_font: imgui.ImFont = None # type: ignore
+
+
+# ------------------------------------------------------------------ public API
+
+def get_palette_names() -> list[str]:
+ return list(_PALETTES.keys())
+
+
+def get_current_palette() -> str:
+ return _current_palette
+
+
+def get_current_font_path() -> str:
+ return _current_font_path
+
+
+def get_current_font_size() -> float:
+ return _current_font_size
+
+
+def get_current_scale() -> float:
+ return _current_scale
+
+
+def apply(palette_name: str):
+ """
+ Apply a named palette by setting all ImGui style colors.
+ Call this once per frame if you want dynamic switching, or once at startup.
+ In practice we call it once when the user picks a palette, and imgui retains the style.
+ """
+ global _current_palette
+ _current_palette = palette_name
+
+ colours = _PALETTES.get(palette_name, {})
+
+ if not colours:
+ # Reset to imgui dark defaults
+ imgui.style_colors_dark()
+ return
+
+ style = imgui.get_style()
+ # Start from dark defaults so unlisted keys have sensible values
+ imgui.style_colors_dark()
+ for col_enum, rgba in colours.items():
+ style.set_color_(col_enum, imgui.ImVec4(*rgba))
+
+
+def set_scale(factor: float):
+ """Set the global font scale factor."""
+ global _current_scale
+ _current_scale = factor
+ io = imgui.get_io()
+ io.font_global_scale = factor
+
+
+def save_to_config(config: dict):
+ """Persist theme settings into the config dict under [theme]."""
+ config.setdefault("theme", {})
+ config["theme"]["palette"] = _current_palette
+ config["theme"]["font_path"] = _current_font_path
+ config["theme"]["font_size"] = _current_font_size
+ config["theme"]["scale"] = _current_scale
+
+
+def load_from_config(config: dict):
+ """Read [theme] from config and apply palette + scale. Font is handled separately at startup."""
+ global _current_font_path, _current_font_size, _current_scale, _current_palette
+ t = config.get("theme", {})
+ _current_palette = t.get("palette", "ImGui Dark")
+ _current_font_path = t.get("font_path", "")
+ _current_font_size = float(t.get("font_size", 16.0))
+ _current_scale = float(t.get("scale", 1.0))
+
+ apply(_current_palette)
+ set_scale(_current_scale)
+
+
+def get_font_loading_params() -> tuple[str, float]:
+ """Return (font_path, font_size) for use during hello_imgui font loading callback."""
+ return _current_font_path, _current_font_size