2026-02-21 21:29:45 -05:00
parent 0a591d58eb
commit 59208b27ec
4 changed files with 221 additions and 17 deletions

View File

@@ -3,6 +3,7 @@ import tomllib
 import re
 import glob
 from pathlib import Path, PureWindowsPath
+import summarize

 def find_next_increment(output_dir: Path, namespace: str) -> int:
     pattern = re.compile(rf"^{re.escape(namespace)}_(\d+)\.md$")
@@ -104,11 +105,23 @@ def build_file_items(base_dir: Path, files: list[str]) -> list[dict]:
             error = True
         items.append({"path": path, "entry": entry, "content": content, "error": error})
     return items

-def build_markdown(base_dir: Path, files: list[str], screenshot_base_dir: Path, screenshots: list[str], history: list[str]) -> str:
+def build_summary_section(base_dir: Path, files: list[str]) -> str:
+    """
+    Build a compact summary section using summarize.py — one short block per file.
+    Used as the initial <context> block instead of full file contents.
+    """
+    items = build_file_items(base_dir, files)
+    return summarize.build_summary_markdown(items)
+
+def build_markdown(base_dir: Path, files: list[str], screenshot_base_dir: Path, screenshots: list[str], history: list[str], summary_only: bool = False) -> str:
     parts = []
     if history:
         parts.append("## Discussion History\n\n" + build_discussion_section(history))
     if files:
-        parts.append("## Files\n\n" + build_files_section(base_dir, files))
+        if summary_only:
+            parts.append("## Files (Summary)\n\n" + build_summary_section(base_dir, files))
+        else:
+            parts.append("## Files\n\n" + build_files_section(base_dir, files))
     if screenshots:
         parts.append("## Screenshots\n\n" + build_screenshots_section(screenshot_base_dir, screenshots))
@@ -126,7 +139,8 @@ def run(config: dict) -> tuple[str, Path]:
     output_dir.mkdir(parents=True, exist_ok=True)
     increment = find_next_increment(output_dir, namespace)
     output_file = output_dir / f"{namespace}_{increment:03d}.md"
-    markdown = build_markdown(base_dir, files, screenshot_base_dir, screenshots, history)
+    markdown = build_markdown(base_dir, files, screenshot_base_dir, screenshots, history,
+                              summary_only=True)
     output_file.write_text(markdown, encoding="utf-8")
     file_items = build_file_items(base_dir, files)
     return markdown, output_file, file_items
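In effect, run() now always emits a summary-only context. A minimal sketch of the caller-facing difference, assuming the patched module is imported as aggregate (the name used in the summarize.py docstring below) and using illustrative paths and file names:

from pathlib import Path
import aggregate

base_dir = Path("C:/Projects/demo")   # illustrative project root
files = ["gui.py", "ai_client.py"]    # illustrative file list

# Previous behaviour: "## Files" section containing full file contents.
full_md = aggregate.build_markdown(base_dir, files, base_dir, [], [])

# New default in run(): "## Files (Summary)" section built by summarize.py.
slim_md = aggregate.build_markdown(base_dir, files, base_dir, [], [], summary_only=True)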

View File

@@ -1,6 +1,6 @@
 [ai]
 provider = "anthropic"
-model = "claude-opus-4-6"
+model = "claude-sonnet-4-6"

 [theme]
 palette = "10x Dark"

View File

@@ -26,9 +26,7 @@ paths = [
 [screenshots]
 base_dir = "C:/Users/Ed/scoop/apps/sharex/current/ShareX/Screenshots/2026-02"
-paths = [
-    "C:/Users/Ed/scoop/apps/sharex/current/ShareX/Screenshots/2026-02/Fork_2026-02-21_21-10-30.png",
-]
+paths = []

 [discussion]
 roles = [
@@ -62,16 +60,8 @@ history = [
 [discussion.discussions."Support files or mcp, or proper dynamic context uptake."]
 git_commit = "d9a51332efb3354c81876f9a230bcedd632e7fd2"
-last_updated = "2026-02-21T21:16:37"
-history = [
-    "@2026-02-21T21:04:10\nUser:\nLets add support for mcp, files, or proper dynamic context update. \n\nRight now I'm feeding you files in a giant md text message but you seem to still have the habit of just grabbing the file's latest \ncontents again with powershell... Problably because we are not actually using proper file upload or gathering that \nyour used to with anthropic api or an mcp server.",
-    "@2026-02-21T21:04:48\nAI:\n(Annotation from user, no reponse from ai.)",
-    "@2026-02-21T21:06:15\nUser:\nSo you were doing script calls and didn't compelete what you were doing, continue.",
-    "@2026-02-21T21:07:15\nAI:\nThe system prompt didn't get updated — the old and new strings didn't match exactly (likely a quoting issue in the Python script). Let me check and fix that, and also verify the anthropic tool result injection:",
-    "@2026-02-21T21:09:49\nUser:\nThis time it seems to have actually patched stuff, let me know if your done and I should restart the gui, otherwise keep going its fine.",
-    "@2026-02-21T21:09:55\nAI:\n(Annotation from user, no reponse from ai. Seems like it did some work I'll take a screencap of latest and send over)",
-    "@2026-02-21T21:11:31\nAI:\nEverything is done. You can restart the GUI now. Here's a summary of all changes:\n\n**What changed:**\n\n1. **`ai_client.py`** — 3 new functions + modifications to 4 existing functions:\n - `_reread_file_items(file_items)` — re-reads every file from disk, returns fresh list\n - `_build_file_context_text(file_items)` — formats files as markdown code blocks for injection\n - `_ANTHROPIC_SYSTEM` — added instruction telling the AI not to re-read files already in context\n - `send()` — new `file_items` parameter, passed through to provider functions\n - `_send_anthropic()` — after each tool call round, re-reads files and appends a `[FILES UPDATED]` text block to the tool results message\n - `_send_gemini()` — after each tool call round, re-reads files (updates list, though can't inject into Gemini's structured responses)\n - `tool_result_send` comms logging — filters with `r.get(\"type\") == \"tool_result\"` so the injected text block doesn't crash the list comprehension\n\n2. **`gui.py`** — one-line change:\n - `ai_client.send(self.last_md, user_msg, base_dir, self.last_file_items)` — passes file items through\n\n3. **`MainContext.md`** — new \"Dynamic file context refresh\" documentation section",
-]
+last_updated = "2026-02-21T21:29:16"
+history = []

 [discussion.discussions."Word Wrap"]
 git_commit = ""

summarize.py Normal file
View File

@@ -0,0 +1,200 @@
# summarize.py
"""
Local symbolic summariser — no AI calls, no network.

For each file, extracts structural information:
    .py   : imports, classes (with methods), top-level functions, global constants
    .toml : top-level table keys + array lengths
    .md   : headings (h1-h3)
    other : line count + first 8 lines as preview

Returns a compact markdown string per file, suitable for use as a low-token
context block that replaces full file contents in the initial <context> send.
"""
import ast
import re
from pathlib import Path


# ------------------------------------------------------------------ per-type extractors

def _summarise_python(path: Path, content: str) -> str:
    lines = content.splitlines()
    line_count = len(lines)
    parts = [f"**Python** — {line_count} lines"]

    try:
        tree = ast.parse(content.lstrip(chr(0xFEFF)), filename=str(path))
    except SyntaxError as e:
        parts.append(f"_Parse error: {e}_")
        return "\n".join(parts)

    # Imports
    imports = []
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                imports.append(alias.name.split(".")[0])
        elif isinstance(node, ast.ImportFrom):
            if node.module:
                imports.append(node.module.split(".")[0])
    if imports:
        unique_imports = sorted(set(imports))
        parts.append(f"imports: {', '.join(unique_imports)}")

    # Top-level constants (ALL_CAPS assignments)
    constants = []
    for node in ast.iter_child_nodes(tree):
        if isinstance(node, ast.Assign):
            for t in node.targets:
                if isinstance(t, ast.Name) and t.id.isupper():
                    constants.append(t.id)
        elif isinstance(node, (ast.AnnAssign,)):
            if isinstance(node.target, ast.Name) and node.target.id.isupper():
                constants.append(node.target.id)
    if constants:
        parts.append(f"constants: {', '.join(constants)}")

    # Classes + their methods
    for node in ast.iter_child_nodes(tree):
        if isinstance(node, ast.ClassDef):
            methods = [
                n.name for n in ast.iter_child_nodes(node)
                if isinstance(n, (ast.FunctionDef, ast.AsyncFunctionDef))
            ]
            if methods:
                parts.append(f"class {node.name}: {', '.join(methods)}")
            else:
                parts.append(f"class {node.name}")

    # Top-level functions
    top_fns = [
        node.name for node in ast.iter_child_nodes(tree)
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef))
    ]
    if top_fns:
        parts.append(f"functions: {', '.join(top_fns)}")

    return "\n".join(parts)
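
# Example (illustrative, not produced by this module): for a 5-line source such as
#     import os
#     MAX_RETRIES = 3
#     class Client:
#         def get(self): ...
#     def main(): ...
# the summary built above is roughly:
#     **Python** — 5 lines
#     imports: os
#     constants: MAX_RETRIES
#     class Client: get
#     functions: main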

def _summarise_toml(path: Path, content: str) -> str:
    lines = content.splitlines()
    line_count = len(lines)
    parts = [f"**TOML** — {line_count} lines"]

    # Extract top-level table headers [key] and [[key]]
    table_pat = re.compile(r"^\s*\[{1,2}([^\[\]]+)\]{1,2}")
    tables = []
    for line in lines:
        m = table_pat.match(line)
        if m:
            tables.append(m.group(1).strip())
    if tables:
        parts.append(f"tables: {', '.join(tables)}")

    # Top-level key = value (not inside a [table])
    kv_pat = re.compile(r"^([a-zA-Z_][a-zA-Z0-9_]*)\s*=")
    in_table = False
    top_keys = []
    for line in lines:
        if table_pat.match(line):
            in_table = True
            continue
        if not in_table:
            m = kv_pat.match(line)
            if m:
                top_keys.append(m.group(1))
    if top_keys:
        parts.append(f"top-level keys: {', '.join(top_keys)}")

    return "\n".join(parts)
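
# Example (illustrative): summarising the six-line config changed earlier in this commit
# ([ai] with provider/model, then [theme] with palette) would yield roughly:
#     **TOML** — 6 lines
#     tables: ai, theme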

def _summarise_markdown(path: Path, content: str) -> str:
    lines = content.splitlines()
    line_count = len(lines)
    parts = [f"**Markdown** — {line_count} lines"]

    headings = []
    for line in lines:
        m = re.match(r"^(#{1,3})\s+(.+)", line)
        if m:
            level = len(m.group(1))
            text = m.group(2).strip()
            indent = " " * (level - 1)
            headings.append(f"{indent}{text}")
    if headings:
        parts.append("headings:\n" + "\n".join(f" {h}" for h in headings))

    return "\n".join(parts)

def _summarise_generic(path: Path, content: str) -> str:
    lines = content.splitlines()
    line_count = len(lines)
    suffix = path.suffix.lstrip(".").upper() or "TEXT"
    parts = [f"**{suffix}** — {line_count} lines"]

    preview = lines[:8]
    if preview:
        parts.append("preview:\n```\n" + "\n".join(preview) + "\n```")

    return "\n".join(parts)

# ------------------------------------------------------------------ dispatch

_SUMMARISERS = {
    ".py": _summarise_python,
    ".toml": _summarise_toml,
    ".md": _summarise_markdown,
    ".ini": _summarise_generic,
    ".txt": _summarise_generic,
    ".ps1": _summarise_generic,
}

def summarise_file(path: Path, content: str) -> str:
    """
    Return a compact markdown summary string for a single file.
    `content` is the already-read file text (or an error string).
    """
    suffix = path.suffix.lower() if hasattr(path, "suffix") else ""
    fn = _SUMMARISERS.get(suffix, _summarise_generic)
    try:
        return fn(path, content)
    except Exception as e:
        return f"_Summariser error: {e}_"

def summarise_items(file_items: list[dict]) -> list[dict]:
    """
    Given a list of file_item dicts (as returned by aggregate.build_file_items),
    return a parallel list of dicts with an added `summary` key.
    """
    result = []
    for item in file_items:
        path = item.get("path")
        content = item.get("content", "")
        error = item.get("error", False)
        if error or path is None:
            summary = "_Error reading file_"
        else:
            p = Path(path) if not isinstance(path, Path) else path
            summary = summarise_file(p, content)
        result.append({**item, "summary": summary})
    return result

def build_summary_markdown(file_items: list[dict]) -> str:
    """
    Build a compact markdown string of file summaries, suitable for the
    initial <context> block instead of full file contents.
    """
    summarised = summarise_items(file_items)
    parts = []
    for item in summarised:
        path = item.get("path") or item.get("entry", "unknown")
        summary = item.get("summary", "")
        parts.append(f"### `{path}`\n\n{summary}")
    return "\n\n---\n\n".join(parts)
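
A minimal usage sketch for the new module (not part of the commit; the file names are illustrative, and the dict shape mirrors what aggregate.build_file_items produces):

from pathlib import Path
import summarize

# One entry per file, shaped like aggregate.build_file_items output.
items = [
    {"path": Path("summarize.py"), "entry": "summarize.py",
     "content": Path("summarize.py").read_text(encoding="utf-8"), "error": False},
    {"path": Path("config.toml"), "entry": "config.toml",
     "content": Path("config.toml").read_text(encoding="utf-8"), "error": False},
]
print(summarize.build_summary_markdown(items))
# Prints one "### `<path>`" block per file, joined by "---" separators:
# imports/constants/classes/functions for the .py entry, "tables: ..." for the .toml.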