2026-02-21 21:50:06 -05:00
parent f258fc5765
commit 2182bfbd4a
2 changed files with 10 additions and 4 deletions


@@ -1,6 +1,6 @@
 [ai]
-provider = "anthropic"
-model = "claude-sonnet-4-6"
+provider = "gemini"
+model = "gemini-3.1-pro-preview"
 [theme]
 palette = "10x Dark"


@@ -40,7 +40,7 @@ roles = [
"Vendor API", "Vendor API",
"System", "System",
] ]
active = "Support files or mcp, or proper dynamic context uptake." active = "Support for system prompt"
[discussion.discussions.main] [discussion.discussions.main]
git_commit = "3b2da7873f839bf062c06c7539fadd9df7a3e2b9" git_commit = "3b2da7873f839bf062c06c7539fadd9df7a3e2b9"
@@ -65,7 +65,7 @@ history = [
[discussion.discussions."Support files or mcp, or proper dynamic context uptake."] [discussion.discussions."Support files or mcp, or proper dynamic context uptake."]
git_commit = "59208b27ec31949cb45cae382dddaee645b53b4f" git_commit = "59208b27ec31949cb45cae382dddaee645b53b4f"
last_updated = "2026-02-21T21:42:01" last_updated = "2026-02-21T21:42:55"
history = [ history = [
"@2026-02-21T21:30:26\nUser:\nLets add support for mcp. Similar to what claude code has, but restricted only to the paths provided. We have a basic summerizer right now but it doesn't work for all code files (we'd need tree-sitter). We need to add support for getting heurstics on what nuances each file has that is valuable for a basic overview before retrieving their content worst case to get a job done.", "@2026-02-21T21:30:26\nUser:\nLets add support for mcp. Similar to what claude code has, but restricted only to the paths provided. We have a basic summerizer right now but it doesn't work for all code files (we'd need tree-sitter). We need to add support for getting heurstics on what nuances each file has that is valuable for a basic overview before retrieving their content worst case to get a job done.",
"@2026-02-21T21:30:32\nAI:\nThe PowerShell `Set-Content` write corrupted the file — it re-encoded it as Windows-1252, turning the UTF-8 em-dash (0xE2 0x80 0x94) into 0x97. I need to read the file as bytes, find and fix the corruption, apply the three targeted changes, and write back as UTF-8.", "@2026-02-21T21:30:32\nAI:\nThe PowerShell `Set-Content` write corrupted the file — it re-encoded it as Windows-1252, turning the UTF-8 em-dash (0xE2 0x80 0x94) into 0x97. I need to read the file as bytes, find and fix the corruption, apply the three targeted changes, and write back as UTF-8.",
@@ -73,6 +73,7 @@ history = [
"@2026-02-21T21:34:03\nAI:\nERROR: Error code: 400 - {'type': 'error', 'error': {'type': 'invalid_request_error', 'message': 'prompt is too long: 204369 tokens > 200000 maximum'}, 'request_id': 'req_011CYNKqxGAjRaD8cXEbuGTt'}", "@2026-02-21T21:34:03\nAI:\nERROR: Error code: 400 - {'type': 'error', 'error': {'type': 'invalid_request_error', 'message': 'prompt is too long: 204369 tokens > 200000 maximum'}, 'request_id': 'req_011CYNKqxGAjRaD8cXEbuGTt'}",
"@2026-02-21T21:39:13\nUser:\nYou got an error for a request (message too long). Continue where you left off I provided screenshots to hopefully help. I restarted the editor it has the last mcp_client.py and I added it to your context.", "@2026-02-21T21:39:13\nUser:\nYou got an error for a request (message too long). Continue where you left off I provided screenshots to hopefully help. I restarted the editor it has the last mcp_client.py and I added it to your context.",
"@2026-02-21T21:39:13\nAI:\nERROR: Error code: 400 - {'type': 'error', 'error': {'type': 'invalid_request_error', 'message': 'prompt is too long: 211450 tokens > 200000 maximum'}, 'request_id': 'req_011CYNLEVvnYZCCBugQ1sfQX'}", "@2026-02-21T21:39:13\nAI:\nERROR: Error code: 400 - {'type': 'error', 'error': {'type': 'invalid_request_error', 'message': 'prompt is too long: 211450 tokens > 200000 maximum'}, 'request_id': 'req_011CYNLEVvnYZCCBugQ1sfQX'}",
"@2026-02-21T21:42:34\nUser:\nGot the error again but I'll restart withthe mcp_client fixes you made..\nrestarted, continue.",
] ]
[discussion.discussions."Word Wrap"] [discussion.discussions."Word Wrap"]
@@ -83,3 +84,8 @@ history = [
"@2026-02-21T21:13:46\nAI:\nThe file is fixed and reading cleanly now. The `gui.py` file has the UTF-8 BOM restored, the em-dash is proper UTF-8 again, and `wrap=0` is in place.\n\n**Summary of what changed:**\n\n- `gui.py` line 80: `wrap=460` → `wrap=0` in `_add_text_field()` — this is the only `dpg.add_text()` call that had a hardcoded pixel wrap width. With `wrap=0`, DearPyGUI wraps the text to the available parent container width dynamically, so it adapts when panels are resized/docked differently.\n\nAll the other text panels (`ai_input`, `ai_response`, `disc_content_*`, tool log results, confirm dialog script) use `dpg.add_input_text(multiline=True, width=-1)` which already wraps to available width automatically.", "@2026-02-21T21:13:46\nAI:\nThe file is fixed and reading cleanly now. The `gui.py` file has the UTF-8 BOM restored, the em-dash is proper UTF-8 again, and `wrap=0` is in place.\n\n**Summary of what changed:**\n\n- `gui.py` line 80: `wrap=460` → `wrap=0` in `_add_text_field()` — this is the only `dpg.add_text()` call that had a hardcoded pixel wrap width. With `wrap=0`, DearPyGUI wraps the text to the available parent container width dynamically, so it adapts when panels are resized/docked differently.\n\nAll the other text panels (`ai_input`, `ai_response`, `disc_content_*`, tool log results, confirm dialog script) use `dpg.add_input_text(multiline=True, width=-1)` which already wraps to available width automatically.",
"@2026-02-21T21:15:14\nUser:\nI don't see discussion entries wrapping, nor the message box...", "@2026-02-21T21:15:14\nUser:\nI don't see discussion entries wrapping, nor the message box...",
] ]
[discussion.discussions."Support for system prompt"]
git_commit = "f258fc5765d647eb821716bce3d2000fb2ba2857"
last_updated = "2026-02-21T21:49:59"
history = []
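
The "Word Wrap" entry above describes the fix: `wrap=460` became `wrap=0` in the one `dpg.add_text()` call, while the multiline inputs already reflow via `width=-1`. A self-contained DearPyGUI sketch of those two patterns follows; the window layout and strings are invented for illustration, and only the `wrap`/`width` arguments come from the history.

```python
import dearpygui.dearpygui as dpg

dpg.create_context()
dpg.create_viewport(title="wrap demo", width=600, height=300)

with dpg.window(label="demo"):
    # wrap=0 lets DearPyGUI re-wrap the text to the current container width,
    # unlike a hardcoded pixel width such as wrap=460.
    dpg.add_text("A long discussion entry that should reflow when the panel is resized.",
                 wrap=0)
    # Multiline inputs sized with width=-1 already wrap to the available
    # width, which is why only the add_text call needed changing.
    dpg.add_input_text(multiline=True, width=-1, height=120,
                       default_value="message box placeholder")

dpg.setup_dearpygui()
dpg.show_viewport()
dpg.start_dearpygui()
dpg.destroy_context()
```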