adjustments

This commit is contained in:
2026-02-21 14:40:14 -05:00
parent 672b184c86
commit fa78c1d08a
4 changed files with 152 additions and 83 deletions

54
.editorconfig Normal file
View File

@@ -0,0 +1,54 @@
root = true
[*.py]
indent_style = space
indent_size = 2
[*.s]
indent_style = tab
indent_size = 2
[*.asm]
indent_style = tab
indent_size = 2
[*.refactor]
indent_style = space
indent_size = 4
[*.md]
indent_style = space
indent_size = 4
[*.c]
indent_style = tab
indent_size = 2
charset = utf-8
[*.cpp]
indent_style = tab
indent_size = 2
charset = utf-8
[*.h]
indent_style = tab
indent_size = 2
charset = utf-8
[*.hpp]
indent_style = tab
indent_size = 2
charset = utf-8
[*.{ps1,psm1}]
indent_style = tab
indent_size = 4
[*.odin]
indent_style = tab
indent_size = 2
charset = utf-8
[*.{natvis,natstepfilter}]
indent_style = tab
indent_size = 4

View File

@@ -3,17 +3,8 @@ namespace = "colorforth_bootslop"
output_dir = "."
[files]
base_dir = "C:/projects/forth/bootslop"
base_dir = "C:/projects/manual_slop"
paths = [
"./attempt_1/*",
"./scripts/*",
"./references/Architectural_Consolidation.md",
"./references/neokineogfx_in-depth.md",
"./references/blog_in-depth.md",
"./references/kyra_in-depth.md",
".editorconfig",
"GEMINI.md",
"CONVENTIONS.md",
]
[screenshots]
@@ -24,6 +15,5 @@ paths = []
history = []
[ai]
provider = "gemini"
model = "gemini-2.0-flash"
provider = "anthropic"
model = "claude-sonnet-4-6"

28
config_bootslop.toml Normal file
View File

@@ -0,0 +1,28 @@
[output]
namespace = "colorforth_bootslop"
output_dir = "."
[files]
base_dir = "C:/projects/forth/bootslop"
paths = [
"./attempt_1/*",
"./scripts/*",
"./references/Architectural_Consolidation.md",
"./references/neokineogfx_in-depth.md",
"./references/blog_in-depth.md",
"./references/kyra_in-depth.md",
".editorconfig",
"GEMINI.md",
"CONVENTIONS.md",
]
[screenshots]
base_dir = "C:/Users/Ed/scoop/apps/sharex/current/ShareX/Screenshots/2026-02"
paths = []
[discussion]
history = []
[ai]
provider = "anthropic"
model = "claude-sonnet-4-6"

137
gui.py
View File

@@ -37,8 +37,8 @@ class App:
self.current_model: str = ai_cfg.get("model", "gemini-2.0-flash")
self.available_models: list[str] = []
self.gemini_status = "idle"
self.gemini_response = ""
self.ai_status = "idle"
self.ai_response = ""
self.last_md = ""
self.last_md_path: Path | None = None
self.send_thread: threading.Thread | None = None
@@ -71,38 +71,40 @@ class App:
return aggregate.run(self.config)
def _update_status(self, status: str):
self.gemini_status = status
self.ai_status = status
if dpg.does_item_exist("ai_status"):
dpg.set_value("ai_status", f"Status: {status}")
def _update_response(self, text: str):
self.gemini_response = text
if dpg.does_item_exist("gemini_response"):
dpg.set_value("gemini_response", text)
self.ai_response = text
if dpg.does_item_exist("ai_response"):
dpg.set_value("ai_response", text)
def _rebuild_files_list(self):
if dpg.does_item_exist("files_scroll"):
dpg.delete_item("files_scroll", children_only=True)
for i, f in enumerate(self.files):
with dpg.group(horizontal=True, parent="files_scroll"):
dpg.add_button(
label="x",
width=24,
callback=self._make_remove_file_cb(i)
)
dpg.add_text(f)
if not dpg.does_item_exist("files_scroll"):
return
dpg.delete_item("files_scroll", children_only=True)
for i, f in enumerate(self.files):
with dpg.group(horizontal=True, parent="files_scroll"):
dpg.add_button(
label="x",
width=24,
callback=self._make_remove_file_cb(i)
)
dpg.add_text(f)
def _rebuild_shots_list(self):
if dpg.does_item_exist("shots_scroll"):
dpg.delete_item("shots_scroll", children_only=True)
for i, s in enumerate(self.screenshots):
with dpg.group(horizontal=True, parent="shots_scroll"):
dpg.add_button(
label="x",
width=24,
callback=self._make_remove_shot_cb(i)
)
dpg.add_text(s)
if not dpg.does_item_exist("shots_scroll"):
return
dpg.delete_item("shots_scroll", children_only=True)
for i, s in enumerate(self.screenshots):
with dpg.group(horizontal=True, parent="shots_scroll"):
dpg.add_button(
label="x",
width=24,
callback=self._make_remove_shot_cb(i)
)
dpg.add_text(s)
def _rebuild_models_list(self):
if not dpg.does_item_exist("model_listbox"):
@@ -111,8 +113,8 @@ class App:
if self.current_model in self.available_models:
dpg.set_value("model_listbox", self.current_model)
elif self.available_models:
dpg.set_value("model_listbox", self.available_models[0])
self.current_model = self.available_models[0]
dpg.set_value("model_listbox", self.current_model)
ai_client.set_provider(self.current_provider, self.current_model)
def _make_remove_file_cb(self, idx: int):
@@ -235,7 +237,6 @@ class App:
except Exception as e:
self._update_status(f"generate error: {e}")
return
self._update_status("sending...")
user_msg = dpg.get_value("ai_input")
@@ -369,14 +370,13 @@ class App:
dpg.add_button(label="Save", callback=self.cb_save_discussion)
with dpg.window(
label="AI Client",
tag="win_ai",
label="Provider",
tag="win_provider",
pos=(1232, 8),
width=420,
height=900,
height=280,
no_close=True
):
# provider
dpg.add_text("Provider")
dpg.add_combo(
tag="provider_combo",
@@ -385,16 +385,10 @@ class App:
width=-1,
callback=self.cb_provider_changed
)
dpg.add_separator()
# model
with dpg.group(horizontal=True):
dpg.add_text("Model")
dpg.add_button(
label="Fetch Models",
callback=self.cb_fetch_models
)
dpg.add_button(label="Fetch Models", callback=self.cb_fetch_models)
dpg.add_listbox(
tag="model_listbox",
items=self.available_models,
@@ -403,61 +397,64 @@ class App:
num_items=6,
callback=self.cb_model_changed
)
dpg.add_separator()
dpg.add_text("Status: idle", tag="ai_status")
dpg.add_separator()
dpg.add_text("Message")
with dpg.window(
label="Message",
tag="win_message",
pos=(1232, 296),
width=420,
height=280,
no_close=True
):
dpg.add_input_text(
tag="ai_input",
multiline=True,
width=-1,
height=140
height=-64
)
dpg.add_separator()
with dpg.group(horizontal=True):
dpg.add_button(label="Gen + Send", callback=self.cb_generate_send)
dpg.add_button(label="MD Only", callback=self.cb_md_only)
dpg.add_button(label="Reset", callback=self.cb_reset_session)
dpg.add_separator()
dpg.add_text("Response")
with dpg.window(
label="Response",
tag="win_response",
pos=(1232, 584),
width=420,
height=400,
no_close=True
):
dpg.add_input_text(
tag="gemini_response",
tag="ai_response",
multiline=True,
readonly=True,
width=-1,
height=-1
)
def run(config: dict) -> tuple[str, Path]:
namespace = config["output"]["namespace"]
output_dir = Path(config["output"]["output_dir"]) / "md_gen"
base_dir = Path(config["files"]["base_dir"])
files = config["files"].get("paths", [])
screenshot_base_dir = Path(config.get("screenshots", {}).get("base_dir", "."))
screenshots = config.get("screenshots", {}).get("paths", [])
history = config.get("discussion", {}).get("history", [])
def run(self):
dpg.create_context()
dpg.configure_app(docking=True, docking_space=True)
dpg.create_viewport(title="manual slop", width=1600, height=900)
dpg.setup_dearpygui()
dpg.show_viewport()
dpg.maximize_viewport()
self._build_ui()
self._fetch_models(self.current_provider)
while dpg.is_dearpygui_running():
dpg.render_dearpygui_frame()
dpg.destroy_context()
output_dir.mkdir(parents=True, exist_ok=True)
increment = find_next_increment(output_dir, namespace)
output_file = output_dir / f"{namespace}_{increment:03d}.md"
markdown = build_markdown(base_dir, files, screenshot_base_dir, screenshots, history)
output_file.write_text(markdown, encoding="utf-8")
return markdown, output_file
def main():
with open("config.toml", "rb") as f:
import tomllib
config = tomllib.load(f)
markdown, output_file = run(config)
print(f"Written: {output_file}")
app = App()
app.run()
if __name__ == "__main__":