15 Commits

165 changed files with 1676 additions and 18819 deletions
-1
@@ -14,4 +14,3 @@ dpg_layout.ini
.coverage
tests/temp_workspace
.mypy_cache
.slop_cache
+3 -3
@@ -1,7 +1,7 @@
---
---
description: Fast, read-only agent for exploring the codebase structure
mode: subagent
model: minimax-coding-plan/MiniMax-M2.7
model: MiniMax-M2.5
temperature: 0.2
permission:
edit: deny
@@ -78,4 +78,4 @@ Return concise findings with file:line references:
### Summary
[One-paragraph summary of findings]
```
```
+3 -3
@@ -1,7 +1,7 @@
---
---
description: General-purpose agent for researching complex questions and executing multi-step tasks
mode: subagent
model: minimax-coding-plan/MiniMax-M2.7
model: MiniMax-M2.5
temperature: 0.3
---
@@ -81,4 +81,4 @@ Return detailed findings with evidence:
### Recommendations
- [Suggested next steps if applicable]
```
```
+5 -5
@@ -1,7 +1,7 @@
---
---
description: Tier 1 Orchestrator for product alignment, high-level planning, and track initialization
mode: primary
model: minimax-coding-plan/MiniMax-M2.7
model: MiniMax-M2.5
temperature: 0.5
permission:
edit: ask
@@ -18,7 +18,7 @@ ONLY output the requested text. No pleasantries.
## Context Management
**MANUAL COMPACTION ONLY** Never rely on automatic context summarization.
**MANUAL COMPACTION ONLY** Never rely on automatic context summarization.
Use `/compact` command explicitly when context needs reduction.
Preserve full context during track planning and spec creation.
@@ -105,7 +105,7 @@ Use `manual-slop_py_get_code_outline`, `manual-slop_py_get_definition`,
Document existing implementations with file:line references in a
"Current State Audit" section in the spec.
**FAILURE TO AUDIT = TRACK FAILURE** Previous tracks failed because specs
**FAILURE TO AUDIT = TRACK FAILURE** Previous tracks failed because specs
asked to implement features that already existed.
### 2. Identify Gaps, Not Features
@@ -175,4 +175,4 @@ Focus: {One-sentence scope}
- Do NOT use native `edit` tool - use MCP tools
- DO NOT SKIP A TEST IN PYTEST JUST BECAUSE IT'S BROKEN AND HAS NO TRIVIAL SOLUTION OR FIX.
- DO NOT SIMPLIFY A TEST JUST BECAUSE IT HAS NO TRIVIAL SOLUTION TO FIX.
- DO NOT CREATE MOCK PATCHES TO PSEUDO API CALLS OR HOOKS BECAUSE THE APP SOURCE WAS CHANGED. ADAPT TESTS PROPERLY.
- DO NOT CREATE MOCK PATCHES TO PSEUDO API CALLS OR HOOKS BECAUSE THE APP SOURCE WAS CHANGED. ADAPT TESTS PROPERLY.
+7 -7
@@ -1,7 +1,7 @@
---
---
description: Tier 2 Tech Lead for architectural design and track execution with persistent memory
mode: primary
model: minimax-coding-plan/MiniMax-M2.7
model: MiniMax-M2.5
temperature: 0.4
permission:
edit: ask
@@ -14,9 +14,9 @@ ONLY output the requested text. No pleasantries.
## Context Management
**MANUAL COMPACTION ONLY** Never rely on automatic context summarization.
**MANUAL COMPACTION ONLY** Never rely on automatic context summarization.
Use `/compact` command explicitly when context needs reduction.
You maintain PERSISTENT MEMORY throughout track execution do NOT apply Context Amnesia to your own session.
You maintain PERSISTENT MEMORY throughout track execution do NOT apply Context Amnesia to your own session.
## CRITICAL: MCP Tools Only (Native Tools Banned)
@@ -134,14 +134,14 @@ Before implementing:
- Zero-assertion ban: Tests MUST have meaningful assertions
- Delegate test creation to Tier 3 Worker via Task tool
- Run tests and confirm they FAIL as expected
- **CONFIRM FAILURE** this is the Red phase
- **CONFIRM FAILURE** this is the Red phase
### 3. Green Phase: Implement to Pass
- **Pre-delegation checkpoint**: Stage current progress (`git add .`)
- Delegate implementation to Tier 3 Worker via Task tool
- Run tests and confirm they PASS
- **CONFIRM PASS** this is the Green phase
- **CONFIRM PASS** this is the Green phase
### 4. Refactor Phase (Optional)
@@ -213,4 +213,4 @@ When all tasks in a phase are complete:
- Do NOT use native `edit` tool - use MCP tools
- DO NOT SKIP A TEST IN PYTEST JUST BECAUSE IT'S BROKEN AND HAS NO TRIVIAL SOLUTION OR FIX.
- DO NOT SIMPLIFY A TEST JUST BECAUSE IT HAS NO TRIVIAL SOLUTION TO FIX.
- DO NOT CREATE MOCK PATCHES TO PSEUDO API CALLS OR HOOKS BECAUSE THE APP SOURCE WAS CHANGED. ADAPT TESTS PROPERLY.
- DO NOT CREATE MOCK PATCHES TO PSEUDO API CALLS OR HOOKS BECAUSE THE APP SOURCE WAS CHANGED. ADAPT TESTS PROPERLY.
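The Red/Green checkpoints in the Tech Lead workflow above lend themselves to a small scripted gate. A minimal sketch, assuming `pytest` is on PATH and using its exit code (0 means all tests passed, non-zero means failures); the helper names are illustrative and not part of the agent's actual toolset:

```python
# Hypothetical Red/Green confirmation helpers; exit-code semantics follow pytest
# (0 = pass, non-zero = failures, errors, or nothing collected).
import subprocess

def pytest_passes(target: str) -> bool:
    return subprocess.run(["pytest", target, "-q"]).returncode == 0

def confirm_red(target: str) -> None:
    # Red phase: the newly written tests MUST fail before implementation starts.
    assert not pytest_passes(target), "Red phase violated: tests already pass"

def confirm_green(target: str) -> None:
    # Green phase: after delegating implementation to the Tier 3 Worker, the same tests MUST pass.
    assert pytest_passes(target), "Green phase violated: tests still fail"
```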
+3 -3
@@ -1,7 +1,7 @@
---
---
description: Stateless Tier 3 Worker for surgical code implementation and TDD
mode: subagent
model: minimax-coding-plan/minimax-m2.7
model: MiniMax-M2.5
temperature: 0.3
permission:
edit: allow
@@ -133,4 +133,4 @@ If you cannot complete the task:
- Do NOT modify files outside the specified scope
- DO NOT SKIP A TEST IN PYTEST JUST BECAUSE IT'S BROKEN AND HAS NO TRIVIAL SOLUTION OR FIX.
- DO NOT SIMPLIFY A TEST JUST BECAUSE IT HAS NO TRIVIAL SOLUTION TO FIX.
- DO NOT CREATE MOCK PATCHES TO PSEUDO API CALLS OR HOOKS BECAUSE THE APP SOURCE WAS CHANGED. ADAPT TESTS PROPERLY.
- DO NOT CREATE MOCK PATCHES TO PSEUDO API CALLS OR HOOKS BECAUSE THE APP SOURCE WAS CHANGED. ADAPT TESTS PROPERLY.
+3 -3
@@ -1,7 +1,7 @@
---
---
description: Stateless Tier 4 QA Agent for error analysis and diagnostics
mode: subagent
model: minimax-coding-plan/MiniMax-M2.7
model: MiniMax-M2.5
temperature: 0.2
permission:
edit: deny
@@ -119,4 +119,4 @@ If you cannot analyze the error:
- Do NOT read full large files - use skeleton tools first
- DO NOT SKIP A TEST IN PYTEST JUST BECAUSE IT'S BROKEN AND HAS NO TRIVIAL SOLUTION OR FIX.
- DO NOT SIMPLIFY A TEST JUST BECAUSE IT HAS NO TRIVIAL SOLUTION TO FIX.
- DO NOT CREATE MOCK PATCHES TO PSEUDO API CALLS OR HOOKS BECAUSE THE APP SOURCE WAS CHANGED. ADAPT TESTS PROPERLY.
- DO NOT CREATE MOCK PATCHES TO PSEUDO API CALLS OR HOOKS BECAUSE THE APP SOURCE WAS CHANGED. ADAPT TESTS PROPERLY.
-376
@@ -1,376 +0,0 @@
{
"name": ".opencode",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"dependencies": {
"@opencode-ai/plugin": "1.14.18"
}
},
"node_modules/@msgpackr-extract/msgpackr-extract-darwin-arm64": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-darwin-arm64/-/msgpackr-extract-darwin-arm64-3.0.3.tgz",
"integrity": "sha512-QZHtlVgbAdy2zAqNA9Gu1UpIuI8Xvsd1v8ic6B2pZmeFnFcMWiPLfWXh7TVw4eGEZ/C9TH281KwhVoeQUKbyjw==",
"cpu": [
"arm64"
],
"license": "MIT",
"optional": true,
"os": [
"darwin"
]
},
"node_modules/@msgpackr-extract/msgpackr-extract-darwin-x64": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-darwin-x64/-/msgpackr-extract-darwin-x64-3.0.3.tgz",
"integrity": "sha512-mdzd3AVzYKuUmiWOQ8GNhl64/IoFGol569zNRdkLReh6LRLHOXxU4U8eq0JwaD8iFHdVGqSy4IjFL4reoWCDFw==",
"cpu": [
"x64"
],
"license": "MIT",
"optional": true,
"os": [
"darwin"
]
},
"node_modules/@msgpackr-extract/msgpackr-extract-linux-arm": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-linux-arm/-/msgpackr-extract-linux-arm-3.0.3.tgz",
"integrity": "sha512-fg0uy/dG/nZEXfYilKoRe7yALaNmHoYeIoJuJ7KJ+YyU2bvY8vPv27f7UKhGRpY6euFYqEVhxCFZgAUNQBM3nw==",
"cpu": [
"arm"
],
"license": "MIT",
"optional": true,
"os": [
"linux"
]
},
"node_modules/@msgpackr-extract/msgpackr-extract-linux-arm64": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-linux-arm64/-/msgpackr-extract-linux-arm64-3.0.3.tgz",
"integrity": "sha512-YxQL+ax0XqBJDZiKimS2XQaf+2wDGVa1enVRGzEvLLVFeqa5kx2bWbtcSXgsxjQB7nRqqIGFIcLteF/sHeVtQg==",
"cpu": [
"arm64"
],
"license": "MIT",
"optional": true,
"os": [
"linux"
]
},
"node_modules/@msgpackr-extract/msgpackr-extract-linux-x64": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-linux-x64/-/msgpackr-extract-linux-x64-3.0.3.tgz",
"integrity": "sha512-cvwNfbP07pKUfq1uH+S6KJ7dT9K8WOE4ZiAcsrSes+UY55E/0jLYc+vq+DO7jlmqRb5zAggExKm0H7O/CBaesg==",
"cpu": [
"x64"
],
"license": "MIT",
"optional": true,
"os": [
"linux"
]
},
"node_modules/@msgpackr-extract/msgpackr-extract-win32-x64": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-win32-x64/-/msgpackr-extract-win32-x64-3.0.3.tgz",
"integrity": "sha512-x0fWaQtYp4E6sktbsdAqnehxDgEc/VwM7uLsRCYWaiGu0ykYdZPiS8zCWdnjHwyiumousxfBm4SO31eXqwEZhQ==",
"cpu": [
"x64"
],
"license": "MIT",
"optional": true,
"os": [
"win32"
]
},
"node_modules/@opencode-ai/plugin": {
"version": "1.14.18",
"resolved": "https://registry.npmjs.org/@opencode-ai/plugin/-/plugin-1.14.18.tgz",
"integrity": "sha512-oF1U7Aipz8A93WGllrwxYugopeL4ml/zd6ywoFIyuF2gbvEhOGFomAvqt1E5YjLN0wEL8nCPwFine3l7pqgNUA==",
"license": "MIT",
"dependencies": {
"@opencode-ai/sdk": "1.14.18",
"effect": "4.0.0-beta.48",
"zod": "4.1.8"
},
"peerDependencies": {
"@opentui/core": ">=0.1.100",
"@opentui/solid": ">=0.1.100"
},
"peerDependenciesMeta": {
"@opentui/core": {
"optional": true
},
"@opentui/solid": {
"optional": true
}
}
},
"node_modules/@opencode-ai/sdk": {
"version": "1.14.18",
"resolved": "https://registry.npmjs.org/@opencode-ai/sdk/-/sdk-1.14.18.tgz",
"integrity": "sha512-E0QiiB+9rv/TPH0a1GunKl6LnuXDRHDiJaIFHOPaBL364rQx+3ClHwHkz78/KBsjhjeLrC2CaLgK+CoxV/XUIQ==",
"license": "MIT",
"dependencies": {
"cross-spawn": "7.0.6"
}
},
"node_modules/@standard-schema/spec": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz",
"integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==",
"license": "MIT"
},
"node_modules/cross-spawn": {
"version": "7.0.6",
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
"integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
"license": "MIT",
"dependencies": {
"path-key": "^3.1.0",
"shebang-command": "^2.0.0",
"which": "^2.0.1"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/detect-libc": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz",
"integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==",
"license": "Apache-2.0",
"optional": true,
"engines": {
"node": ">=8"
}
},
"node_modules/effect": {
"version": "4.0.0-beta.48",
"resolved": "https://registry.npmjs.org/effect/-/effect-4.0.0-beta.48.tgz",
"integrity": "sha512-MMAM/ZabuNdNmgXiin+BAanQXK7qM8mlt7nfXDoJ/Gn9V8i89JlCq+2N0AiWmqFLXjGLA0u3FjiOjSOYQk5uMw==",
"license": "MIT",
"dependencies": {
"@standard-schema/spec": "^1.1.0",
"fast-check": "^4.6.0",
"find-my-way-ts": "^0.1.6",
"ini": "^6.0.0",
"kubernetes-types": "^1.30.0",
"msgpackr": "^1.11.9",
"multipasta": "^0.2.7",
"toml": "^4.1.1",
"uuid": "^13.0.0",
"yaml": "^2.8.3"
}
},
"node_modules/fast-check": {
"version": "4.7.0",
"resolved": "https://registry.npmjs.org/fast-check/-/fast-check-4.7.0.tgz",
"integrity": "sha512-NsZRtqvSSoCP0HbNjUD+r1JH8zqZalyp6gLY9e7OYs7NK9b6AHOs2baBFeBG7bVNsuoukh89x2Yg3rPsul8ziQ==",
"funding": [
{
"type": "individual",
"url": "https://github.com/sponsors/dubzzz"
},
{
"type": "opencollective",
"url": "https://opencollective.com/fast-check"
}
],
"license": "MIT",
"dependencies": {
"pure-rand": "^8.0.0"
},
"engines": {
"node": ">=12.17.0"
}
},
"node_modules/find-my-way-ts": {
"version": "0.1.6",
"resolved": "https://registry.npmjs.org/find-my-way-ts/-/find-my-way-ts-0.1.6.tgz",
"integrity": "sha512-a85L9ZoXtNAey3Y6Z+eBWW658kO/MwR7zIafkIUPUMf3isZG0NCs2pjW2wtjxAKuJPxMAsHUIP4ZPGv0o5gyTA==",
"license": "MIT"
},
"node_modules/ini": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/ini/-/ini-6.0.0.tgz",
"integrity": "sha512-IBTdIkzZNOpqm7q3dRqJvMaldXjDHWkEDfrwGEQTs5eaQMWV+djAhR+wahyNNMAa+qpbDUhBMVt4ZKNwpPm7xQ==",
"license": "ISC",
"engines": {
"node": "^20.17.0 || >=22.9.0"
}
},
"node_modules/isexe": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
"integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
"license": "ISC"
},
"node_modules/kubernetes-types": {
"version": "1.30.0",
"resolved": "https://registry.npmjs.org/kubernetes-types/-/kubernetes-types-1.30.0.tgz",
"integrity": "sha512-Dew1okvhM/SQcIa2rcgujNndZwU8VnSapDgdxlYoB84ZlpAD43U6KLAFqYo17ykSFGHNPrg0qry0bP+GJd9v7Q==",
"license": "Apache-2.0"
},
"node_modules/msgpackr": {
"version": "1.11.12",
"resolved": "https://registry.npmjs.org/msgpackr/-/msgpackr-1.11.12.tgz",
"integrity": "sha512-RBdJ1Un7yGlXWajrkxcSa93nvQ0w4zBf60c0yYv7YtBelP8H2FA7XsfBbMHtXKXUMUxH7zV3Zuozh+kUQWhHvg==",
"license": "MIT",
"optionalDependencies": {
"msgpackr-extract": "^3.0.2"
}
},
"node_modules/msgpackr-extract": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/msgpackr-extract/-/msgpackr-extract-3.0.3.tgz",
"integrity": "sha512-P0efT1C9jIdVRefqjzOQ9Xml57zpOXnIuS+csaB4MdZbTdmGDLo8XhzBG1N7aO11gKDDkJvBLULeFTo46wwreA==",
"hasInstallScript": true,
"license": "MIT",
"optional": true,
"dependencies": {
"node-gyp-build-optional-packages": "5.2.2"
},
"bin": {
"download-msgpackr-prebuilds": "bin/download-prebuilds.js"
},
"optionalDependencies": {
"@msgpackr-extract/msgpackr-extract-darwin-arm64": "3.0.3",
"@msgpackr-extract/msgpackr-extract-darwin-x64": "3.0.3",
"@msgpackr-extract/msgpackr-extract-linux-arm": "3.0.3",
"@msgpackr-extract/msgpackr-extract-linux-arm64": "3.0.3",
"@msgpackr-extract/msgpackr-extract-linux-x64": "3.0.3",
"@msgpackr-extract/msgpackr-extract-win32-x64": "3.0.3"
}
},
"node_modules/multipasta": {
"version": "0.2.7",
"resolved": "https://registry.npmjs.org/multipasta/-/multipasta-0.2.7.tgz",
"integrity": "sha512-KPA58d68KgGil15oDqXjkUBEBYc00XvbPj5/X+dyzeo/lWm9Nc25pQRlf1D+gv4OpK7NM0J1odrbu9JNNGvynA==",
"license": "MIT"
},
"node_modules/node-gyp-build-optional-packages": {
"version": "5.2.2",
"resolved": "https://registry.npmjs.org/node-gyp-build-optional-packages/-/node-gyp-build-optional-packages-5.2.2.tgz",
"integrity": "sha512-s+w+rBWnpTMwSFbaE0UXsRlg7hU4FjekKU4eyAih5T8nJuNZT1nNsskXpxmeqSK9UzkBl6UgRlnKc8hz8IEqOw==",
"license": "MIT",
"optional": true,
"dependencies": {
"detect-libc": "^2.0.1"
},
"bin": {
"node-gyp-build-optional-packages": "bin.js",
"node-gyp-build-optional-packages-optional": "optional.js",
"node-gyp-build-optional-packages-test": "build-test.js"
}
},
"node_modules/path-key": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/pure-rand": {
"version": "8.4.0",
"resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-8.4.0.tgz",
"integrity": "sha512-IoM8YF/jY0hiugFo/wOWqfmarlE6J0wc6fDK1PhftMk7MGhVZl88sZimmqBBFomLOCSmcCCpsfj7wXASCpvK9A==",
"funding": [
{
"type": "individual",
"url": "https://github.com/sponsors/dubzzz"
},
{
"type": "opencollective",
"url": "https://opencollective.com/fast-check"
}
],
"license": "MIT"
},
"node_modules/shebang-command": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
"integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
"license": "MIT",
"dependencies": {
"shebang-regex": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/shebang-regex": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
"integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/toml": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/toml/-/toml-4.1.1.tgz",
"integrity": "sha512-EBJnVBr3dTXdA89WVFoAIPUqkBjxPMwRqsfuo1r240tKFHXv3zgca4+NJib/h6TyvGF7vOawz0jGuryJCdNHrw==",
"license": "MIT",
"engines": {
"node": ">=20"
}
},
"node_modules/uuid": {
"version": "13.0.1",
"resolved": "https://registry.npmjs.org/uuid/-/uuid-13.0.1.tgz",
"integrity": "sha512-9ezox2roIft6ExBVTVqibSd5dc5/47Sw/uY6b4SjQUT2TzQ0tltNquWA46y4xPQmdZYqvnio22SgWd41M86+jw==",
"funding": [
"https://github.com/sponsors/broofa",
"https://github.com/sponsors/ctavan"
],
"license": "MIT",
"bin": {
"uuid": "dist-node/bin/uuid"
}
},
"node_modules/which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
"license": "ISC",
"dependencies": {
"isexe": "^2.0.0"
},
"bin": {
"node-which": "bin/node-which"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/yaml": {
"version": "2.8.4",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.4.tgz",
"integrity": "sha512-ml/JPOj9fOQK8RNnWojA67GbZ0ApXAUlN2UQclwv2eVgTgn7O9gg9o7paZWKMp4g0H3nTLtS9LVzhkpOFIKzog==",
"license": "ISC",
"bin": {
"yaml": "bin.mjs"
},
"engines": {
"node": ">= 14.6"
},
"funding": {
"url": "https://github.com/sponsors/eemeli"
}
},
"node_modules/zod": {
"version": "4.1.8",
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
}
}
}
-74
@@ -1,74 +0,0 @@
{
"C:\\projects\\manual_slop\\src\\ai_client.py": {
"hash": "db4b3aad82599499d7796860757e229d2d412c5ccb3e821ddaee68ca0d3ad5d3",
"summary": "This Python module serves as a unified client interface for multiple Large Language Model (LLM) providers, abstracting away provider-specific differences in tool handling, history management, and caching. It includes specialized logic for Anthropic to manage token limits and for Gemini to inject initial context efficiently.\n\n* **Multi-Provider Abstraction:** Provides a single interface for interacting with LLMs from Anthropic, Gemini, DeepSeek, and Minimax.\n* **Provider-Specific Optimization:** Implements tailored strategies for managing token limits (Anthropic) and context injection (Gemini).\n* **Tooling and Bias Management:** Supports setting agent tools, tool presets, and bias profiles to influence LLM behavior.\n* **Communication Logging:** Tracks and logs communication events with LLM providers.\n* **Configuration and State Management:** Manages global generation parameters, credentials, and session state.\n\n**Outline:**\n**Python** \u2014 2501 lines\nimports: __future__, anthropic, asyncio, collections, datetime, difflib, google, hashlib, json, openai, os, pathlib, requests, src, sys, threading, time, tomllib, typing\nconstants: _GEMINI_CACHE_TTL, _BIAS_ENGINE, MAX_TOOL_ROUNDS, _MAX_TOOL_OUTPUT_BYTES, _ANTHROPIC_CHUNK_SIZE, _SYSTEM_PROMPT, COMMS_CLAMP_CHARS, TOOL_NAME, _CACHED_ANTHROPIC_TOOLS, _DIFF_LINE_THRESHOLD, _CACHED_DEEPSEEK_TOOLS, _CHARS_PER_TOKEN, _ANTHROPIC_MAX_PROMPT_TOKENS, _GEMINI_MAX_INPUT_TOKENS, _FILE_REFRESH_MARKER\nclass ProviderError: __init__, ui_message\nfunctions: set_model_params, get_history_trunc_limit, set_history_trunc_limit, get_current_tier, set_current_tier, set_custom_system_prompt, set_base_system_prompt, set_use_default_base_prompt, set_project_context_marker, _get_context_marker, _get_combined_system_prompt, get_combined_system_prompt, _append_comms, get_comms_log, clear_comms_log, get_credentials_path, _load_credentials, _classify_anthropic_error, _classify_gemini_error, _classify_deepseek_error, _classify_minimax_error, set_provider, get_provider, cleanup, reset_session, get_gemini_cache_stats, list_models, _list_gemini_cli_models, _list_gemini_models, _list_anthropic_models, _list_deepseek_models, _list_minimax_models, set_agent_tools, set_tool_preset, set_bias_profile, get_bias_profile, _build_anthropic_tools, _get_anthropic_tools, _gemini_tool_declaration, _execute_tool_calls_concurrently, _execute_single_tool_call_async, _run_script, _truncate_tool_output, _reread_file_items, _build_file_context_text, _build_file_diff_text, _build_deepseek_tools, _get_deepseek_tools, _content_block_to_dict, _ensure_gemini_client, _get_gemini_history_list, _send_gemini, _send_gemini_cli, _estimate_message_tokens, _invalidate_token_estimate, _estimate_prompt_tokens, _strip_stale_file_refreshes, _trim_anthropic_history, _ensure_anthropic_client, _chunk_text, _build_chunked_context_blocks, _strip_cache_controls, _add_history_cache_breakpoint, _repair_anthropic_history, _send_anthropic, _ensure_deepseek_client, _ensure_minimax_client, _repair_deepseek_history, _send_deepseek, _send_minimax, run_tier4_analysis, run_tier4_patch_callback, run_tier4_patch_generation, get_token_stats, send, _add_bleed_derived, get_history_bleed_stats, run_subagent_summarization"
},
"C:\\projects\\manual_slop\\conductor\\workflow.md": {
"hash": "ac3f4c0b807ce88bbbfdbd33b4d0888d4d5f97abca5642c2d5a3d9f2c1bc9fa5",
"summary": "This document outlines the mandatory workflow for the Conductor project, emphasizing strict adherence to code style, a test-driven development process with delegated implementation, and atomic, well-documented commits. Key takeaways include the critical importance of 1-space indentation for Python, the use of specific MCP tools to avoid indentation destruction, and a multi-phase task execution involving research, failing tests, implementation, refactoring, and thorough documentation via Git notes.\n\n**Outline:**\n**Markdown** \u2014 389 lines\nheadings:\n Project Workflow\n Session Start Checklist (MANDATORY)\n Code Style (MANDATORY - Python)\n CRITICAL: Native Edit Tool Destroys Indentation\n Guiding Principles\n Task Workflow\n Standard Task Workflow\n Phase Completion Verification and Checkpointing Protocol\n Verification via API Hooks\n Quality Gates\n Development Commands\n Setup\n Example: Commands to set up the development environment (e.g., install dependencies, configure database)\n e.g., for a Node.js project: npm install\n e.g., for a Go project: go mod tidy\n Daily Development\n Example: Commands for common daily tasks (e.g., start dev server, run tests, lint, format)\n e.g., for a Node.js project: npm run dev, npm test, npm run lint\n e.g., for a Go project: go run main.go, go test ./..., go fmt ./...\n Before Committing\n Example: Commands to run all pre-commit checks (e.g., format, lint, type check, run tests)\n e.g., for a Node.js project: npm run check\n e.g., for a Go project: make check (if a Makefile exists)\n Testing Requirements\n Structural Testing Contract\n Unit Testing\n Integration Testing\n Mobile Testing\n Code Review Process\n Self-Review Checklist\n Commit Guidelines\n Message Format\n Types\n Examples\n Definition of Done\n Conductor Token Firewalling & Model Switching Strategy\n 1. Active Model Switching (Simulating the 4 Tiers)\n 2. Context Management and Token Firewalling\n 3. Phase Checkpoints (The Final Defense)"
},
"C:\\projects\\manual_slop\\src\\models.py": {
"hash": "6e097e6a78ff02e3050212f3021761ebfe2aa9ce82b7074656842b394453ec90",
"summary": "This module defines the core data structures for the Manual Slop application, including tasks, tracks, and configuration, enabling project orchestration and persistence.\n\n* **Data Models:** Defines `Ticket`, `Track`, `WorkerContext`, `Metadata`, `TrackState`, `FileItem`, `Preset`, `Tool`, `ToolPreset`, `BiasProfile`, `Persona`, `MCPServerConfig`, `MCPConfiguration`, `VectorStoreConfig`, `RAGConfig`, and `WorkspaceProfile` as dataclasses.\n* **Serialization:** Implements `to_dict` and `from_dict` methods for all dataclasses to support TOML/JSON persistence.\n* **Configuration Management:** Provides functions `load_config`, `save_config`, and `parse_history_entries` for managing application settings and historical data.\n* **Tool Definitions:** Lists available `AGENT_TOOL_NAMES` and categorizes them in `DEFAULT_TOOL_CATEGORIES`.\n\n**Outline:**\n**Python** \u2014 704 lines\nimports: __future__, dataclasses, datetime, json, os, pathlib, re, src, sys, tomli_w, tomllib, typing\nconstants: CONFIG_PATH, AGENT_TOOL_NAMES, DEFAULT_TOOL_CATEGORIES\nclass ThinkingSegment: to_dict, from_dict\nclass Ticket: mark_blocked, mark_manual_block, clear_manual_block, mark_complete, get, to_dict, from_dict\nclass Track: get_executable_tickets, to_dict, from_dict\nclass WorkerContext\nclass Metadata: to_dict, from_dict\nclass TrackState: to_dict, from_dict\nclass FileItem: to_dict, from_dict\nclass Preset: to_dict, from_dict\nclass Tool: to_dict, from_dict\nclass ToolPreset: to_dict, from_dict\nclass BiasProfile: to_dict, from_dict\nclass Persona: provider, model, temperature, top_p, max_output_tokens, to_dict, from_dict\nclass MCPServerConfig: to_dict, from_dict\nclass MCPConfiguration: to_dict, from_dict\nclass VectorStoreConfig: to_dict, from_dict\nclass RAGConfig: to_dict, from_dict\nclass WorkspaceProfile: to_dict, from_dict\nfunctions: _clean_nones, load_config, save_config, parse_history_entries, load_mcp_config"
},
"C:\\projects\\manual_slop\\tests\\test_saved_presets_sim.py": {
"hash": "4b059b49282ecaede5171f4e0ad0ca789d00f9794b4c8e7bea1b95b7cd66c3b4",
"summary": "This Python file contains tests for the preset management functionality of the `manual_slop` application, specifically focusing on how global and project-specific presets are loaded, applied, and managed through a GUI interface.\n\n* **Environment Setup:** Initializes a temporary workspace with necessary configuration files for testing.\n* **Preset Switching:** Tests the ability to apply global and project presets, verifying that project-specific presets can override global ones and that selecting \"None\" correctly clears the active preset.\n* **Preset Manager Modal:** Simulates interactions with a modal to create and delete presets, verifying that changes are correctly persisted to the respective TOML files.\n\n**Outline:**\n**Python** \u2014 167 lines\nimports: json, os, pathlib, pytest, shutil, src, time, tomli_w, tomllib\nfunctions: test_env_setup, test_preset_switching, test_preset_manager_modal"
},
"C:\\projects\\manual_slop\\conductor\\product.md": {
"hash": "7f8036b634e92710f64641bd82f8d4804fd630af0fc0e02eafd42ca0b57bc0c4",
"summary": "\"Manual Slop\" is an expert-level developer utility designed for small projects, offering granular manual control over vendor API metrics, agent capabilities, and context memory. Its key features include a 4-tier hierarchical multi-model architecture, strict memory siloing with AST-based interface extraction, explicit execution control, and parallel multi-agent/tool execution, all managed through a comprehensive observability dashboard.\n\n**Outline:**\n**Markdown** \u2014 113 lines\nheadings:\n Product Guide: Manual Slop\n Vision\n Architecture Reference\n Primary Use Cases\n Key Features"
},
"C:\\projects\\manual_slop\\conductor\\tracks\\data_oriented_optimization_20260312\\plan.md": {
"hash": "5fa509d92864e7c084b1709ab11f0be93ec761a38d3dd58ab0727fdeb5d64f86",
"summary": "This document outlines a four-phase plan to optimize Python code for data-oriented performance. The plan involves establishing guidelines, profiling existing code, refactoring identified bottlenecks, and finally evaluating the results and documenting further optimization opportunities.\n\n**Outline:**\n**Markdown** \u2014 27 lines\nheadings:\n Implementation Plan: Data-Oriented Python Optimization Pass\n Phase 1: Guidelines and Instrumentation\n Phase 2: Audit and Profiling (`src/` and `simulation/`)\n Phase 3: Targeted Optimization and Refactoring\n Phase 4: Final Evaluation and Documentation"
},
"C:\\projects\\manual_slop\\src\\performance_monitor.py": {
"hash": "831716b8010ca0ce571d52db1fe1030a57e6027c276cca819d41d571ed634299",
"summary": "This module implements a thread-safe `PerformanceMonitor` singleton for real-time tracking of application performance metrics, including FPS, frame times, and CPU usage, with efficient O(1) moving averages.\n\n* **Core Functionality:** Tracks FPS, frame duration, and CPU utilization.\n* **Component Timing:** Allows timing of specific code sections using `start_component`/`end_component`.\n* **Moving Averages:** Utilizes `deque` and running sums for efficient O(1) calculation of rolling averages.\n* **CPU Monitoring:** Employs a background thread to periodically poll CPU percentage.\n* **Singleton Pattern:** Accessible globally via the `get_monitor()` function.\n\n**Outline:**\n**Python** \u2014 235 lines\nimports: __future__, collections, psutil, threading, time, typing\nclass PerformanceMonitor: __init__, _monitor_cpu, _add_to_history, _get_avg, start_frame, end_frame, start_component, end_component, get_metrics, get_history, stop\nfunctions: get_monitor"
},
"C:\\projects\\manual_slop\\tests\\test_performance_monitor.py": {
"hash": "19d4b8a0e105e6549da69400b23fb31cc0dd11a3694fa273c78863034e58e8d3",
"summary": "This file contains unit tests for the `PerformanceMonitor` class, verifying its ability to track frame and component-specific execution times.\n\n* Tests basic frame timing.\n* Tests timing of individual components within a frame.\n\n**Outline:**\n**Python** \u2014 28 lines\nimports: os, performance_monitor, sys, time\nfunctions: test_perf_monitor_basic_timing, test_perf_monitor_component_timing"
},
"C:\\projects\\manual_slop\\src\\events.py": {
"hash": "ae440a6eb72b5cd76253c70a012aace900890b9dafe5027e341c69a5423fdefa",
"summary": "**Python** \u2014 161 lines\nimports: queue, typing\nclass EventEmitter: __init__, on, emit, clear\nclass AsyncEventQueue: __init__, put, get, empty, task_done, join\nclass UserRequestEvent: __init__, to_dict"
},
"C:\\Users\\Ed\\AppData\\Local\\Temp\\pytest-of-Ed\\pytest-843\\test_auto_aggregate_skip0\\file1.txt": {
"hash": "d0b425e00e15a0d36b9b361f02bab63563aed6cb4665083905386c55d5b679fa",
"summary": "This document, `file1.txt`, contains a single line of text: \"content1\". Its purpose appears to be to hold this specific piece of content.\n\n**Outline:**\n**TXT** \u2014 1 lines\npreview:\n```\ncontent1\n```"
},
"C:\\Users\\Ed\\AppData\\Local\\Temp\\pytest-of-Ed\\pytest-843\\test_force_full0\\other.txt": {
"hash": "04d61c0832f9cbc2a210334352425d2519890a0a5945da96ccc5bd9ff101c4d3",
"summary": "This document is a simple text file containing ten lines of content, with the first eight lines previewed. Its purpose appears to be for basic data storage or as a placeholder.\n\n**Outline:**\n**TXT** \u2014 10 lines\npreview:\n```\nline1\nline2\nline3\nline4\nline5\nline6\nline7\nline8\n```"
},
"C:\\Users\\Ed\\AppData\\Local\\Temp\\pytest-of-Ed\\pytest-844\\test_auto_aggregate_skip0\\file1.txt": {
"hash": "d0b425e00e15a0d36b9b361f02bab63563aed6cb4665083905386c55d5b679fa",
"summary": "This document contains a single line of text, \"content1\". Its purpose is to present this specific content.\n\n**Outline:**\n**TXT** \u2014 1 lines\npreview:\n```\ncontent1\n```"
},
"C:\\Users\\Ed\\AppData\\Local\\Temp\\pytest-of-Ed\\pytest-844\\test_force_full0\\other.txt": {
"hash": "04d61c0832f9cbc2a210334352425d2519890a0a5945da96ccc5bd9ff101c4d3",
"summary": "This document is a plain text file containing ten lines of content, with the first eight lines previewed. Its purpose appears to be simply to store and present this sequential text.\n\n**Outline:**\n**TXT** \u2014 10 lines\npreview:\n```\nline1\nline2\nline3\nline4\nline5\nline6\nline7\nline8\n```"
},
"other.py": {
"hash": "0f8d9a2c864001b5c6492122310ba2a1346db67609fe815c070cab8a548ce27f",
"summary": "This file appears to contain invalid Python syntax, preventing its analysis.\n\n* **Primary Responsibility:** Undetermined due to syntax errors.\n* **Key Components:** None identifiable.\n\n**Outline:**\n**Python** \u2014 1 lines\n_Parse error: invalid syntax (other.py, line 1)_"
},
"tier3_file.txt": {
"hash": "1e55d54976d9d709d6f0a029a6761c4c9bee2250c9ce68c4804349f27ab210cb",
"summary": "This document, titled \"Full Tier 3 Content,\" appears to be a placeholder or a basic structure containing ten lines of text. Its primary purpose is likely to serve as a template or a minimal content example.\n\n**Outline:**\n**TXT** \u2014 10 lines\npreview:\n```\nFull Tier 3 Content\nLine 2\nLine 3\nLine 4\nLine 5\nLine 6\nLine 7\nLine 8\n```"
},
"other.txt": {
"hash": "cb3b5cb91da29dfdd685b44a33c6629e66505a691b2459e20dd556a705ec7cb8",
"summary": "This document, titled \"Other Content,\" contains ten lines of text. It appears to be a simple placeholder or a list of generic content lines.\n\n**Outline:**\n**TXT** \u2014 10 lines\npreview:\n```\nOther Content\nLine 2\nLine 3\nLine 4\nLine 5\nLine 6\nLine 7\nLine 8\n```"
},
"tier1_file.txt": {
"hash": "3ce2c835f9705e6eb4ccf87d7a4ed9d9f2b91659712beb0618d7009215a84792",
"summary": "This document, titled \"Full Tier 1 Content,\" appears to be a placeholder or introductory text, with its primary content being \"Line 2.\"\n\n**Outline:**\n**TXT** \u2014 2 lines\npreview:\n```\nFull Tier 1 Content\nLine 2\n```"
},
"C:\\projects\\manual_slop\\src\\multi_agent_conductor.py": {
"hash": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"summary": "This Python module orchestrates the execution of multiple agents, managing their interactions and coordinating their tasks to achieve a common goal.\n\n* Manages agent lifecycle and communication.\n* Facilitates task delegation and result aggregation.\n* Implements a central control loop for agent coordination.\n\n**Outline:**\n**Python** \u2014 0 lines"
}
}
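The deleted JSON above (apparently the `.slop_cache` listed in `.gitignore` earlier in this diff) maps absolute file paths to a content hash plus an AI-generated summary. The `multi_agent_conductor.py` entry's hash equals the SHA-256 of an empty byte string, matching its "0 lines" outline, which suggests the `hash` field is a SHA-256 digest of the file's raw bytes. A minimal staleness check under that assumption (field names taken from the JSON above, everything else illustrative):

```python
# Sketch: a cached summary is stale when the file's current digest no longer
# matches the stored "hash" (assumed to be SHA-256 of the file's bytes).
import hashlib
import json
from pathlib import Path

def file_digest(path: Path) -> str:
    return hashlib.sha256(path.read_bytes()).hexdigest()

def is_stale(cache: dict, key: str) -> bool:
    entry = cache.get(key)
    return entry is None or file_digest(Path(key)) != entry["hash"]

cache = json.loads(Path(".slop_cache").read_text(encoding="utf-8"))
print(is_stale(cache, r"C:\projects\manual_slop\src\models.py"))
```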
@@ -1,26 +0,0 @@
# Implementation Plan: Frosted Glass Background Effect
## Phase 1: Shader Development & Integration
- [ ] Task: Audit `src/shader_manager.py` to identify existing background/post-process integration points.
- [ ] Task: Write Tests: Verify `ShaderManager` can compile and bind a multi-pass blur shader.
- [ ] Task: Implement: Add `FrostedGlassShader` (GLSL) to `src/shader_manager.py`.
- [ ] Task: Implement: Integrate the blur shader into the `ShaderManager` lifecycle.
- [ ] Task: Conductor - User Manual Verification 'Phase 1: Shader Development & Integration' (Protocol in workflow.md)
## Phase 2: Framebuffer Capture Pipeline
- [ ] Task: Write Tests: Verify the FBO capture mechanism correctly samples the back buffer and stores it in a texture.
- [ ] Task: Implement: Update `src/shader_manager.py` or `src/gui_2.py` to handle "pre-rendering" of the background into a texture for blurring.
- [ ] Task: Implement: Ensure the blurred texture is updated every frame or on window move events.
- [ ] Task: Conductor - User Manual Verification 'Phase 2: Framebuffer Capture Pipeline' (Protocol in workflow.md)
## Phase 3: GUI Integration & Rendering
- [ ] Task: Write Tests: Verify that a mocked ImGui window successfully calls the frosted glass rendering logic.
- [ ] Task: Implement: Create a `_render_frosted_background(self, pos, size)` helper in `src/gui_2.py`.
- [ ] Task: Implement: Update panel rendering loops (e.g. `_gui_func`) to inject the frosted background before calling `imgui.begin()` for major panels.
- [ ] Task: Conductor - User Manual Verification 'Phase 3: GUI Integration & Rendering' (Protocol in workflow.md)
## Phase 4: UI Controls & Configuration
- [ ] Task: Write Tests: Verify that modifying blur uniforms via the Live Editor updates the shader state.
- [ ] Task: Implement: Add "Frosted Glass" sliders (Blur, Tint, Opacity) to the **Shader Editor** in `src/gui_2.py`.
- [ ] Task: Implement: Update `src/theme.py` to parse and store frosted glass settings from `config.toml`.
- [ ] Task: Conductor - User Manual Verification 'Phase 4: UI Controls & Configuration' (Protocol in workflow.md)
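The multi-pass blur in Phase 1 of this deleted plan is typically implemented as a separable Gaussian: one horizontal and one vertical pass sharing the same 1-D weights. A minimal sketch of how those weights could be derived on the Python side before being uploaded as shader uniforms; the Gaussian choice, radius, and sigma are assumptions, not details from the plan:

```python
# Sketch: normalised half-kernel for a separable Gaussian blur; the same weights
# serve both the horizontal and vertical passes of a multi-pass blur shader.
import math

def gaussian_half_kernel(radius: int, sigma: float) -> list[float]:
    taps = [math.exp(-(i * i) / (2.0 * sigma * sigma)) for i in range(radius + 1)]
    total = taps[0] + 2.0 * sum(taps[1:])  # centre tap counted once, side taps mirrored
    return [t / total for t in taps]

print(gaussian_half_kernel(4, 2.0))  # e.g. 5 weights for a 9-tap blur
```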
-2
@@ -15,8 +15,6 @@
## Code Standards & Architecture
- **Data-Oriented & Immediate Mode Heuristics:** Align with the architectural values of engineers like Casey Muratori and Mike Acton.
- **The "Less Python Does, the Better" Rule:** Python should act primarily as a procedural semantic definer (similar to how ImGui defines a UI DAG), delegating heavy lifting to efficient data structures, vectorized operations, or lower-level primitives.
- Minimize Python interpreter overhead by favoring bulk data processing over fine-grained object-oriented manipulation.
- The GUI (`gui_2.py`) must remain a pure visualization of application state. It should not *own* complex business logic or orchestrator hooks (strive to decouple the 'Application' controller from the 'View').
- Treat the UI as an immediate mode frame-by-frame projection of underlying data structures.
- Optimize for zero lag and never block the main render loop with heavy Python-side work.
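A minimal illustration of the bulk-over-fine-grained rule from the guidelines above, contrasting per-object method dispatch with a flat array handed to a single built-in; the data and class names are made up for the example:

```python
# Fine-grained style: one Python object and one attribute lookup per ticket.
class Ticket:
    def __init__(self, duration_ms: float) -> None:
        self.duration_ms = duration_ms

tickets = [Ticket(d) for d in (12.5, 3.1, 48.0, 7.9)]
total_fine = sum(t.duration_ms for t in tickets)

# Bulk style: keep the hot data as one flat sequence and let one built-in,
# implemented in C, run the loop; Python only declares the operation.
durations_ms = [12.5, 3.1, 48.0, 7.9]
total_bulk = sum(durations_ms)

assert total_fine == total_bulk
```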
+11 -32
@@ -17,7 +17,7 @@ For deep implementation details when planning or implementing tracks, consult `d
## Primary Use Cases
- **Full Control over Vendor APIs:** Exposing detailed API metrics and configuring deep agent capabilities directly within the GUI.
- **Context & Memory Management:** Better visualization and management of token usage and context memory. Includes granular per-file flags (**Auto-Aggregate**, **Force Full**), a dedicated **'Context' role** for manual injections, and **Context Presets** for saving and loading named file/screenshot selections. Allows assigning specific context presets to MMA agent personas for granular cognitive load isolation.
- **Context & Memory Management:** Better visualization and management of token usage and context memory. Includes granular per-file flags (**Auto-Aggregate**, **Force Full**) and a dedicated **'Context' role** for manual injections, allowing developers to optimize prompt limits with expert precision.
- **Manual "Vibe Coding" Assistant:** Serving as an auxiliary, multi-provider assistant that natively interacts with the codebase via sandboxed PowerShell scripts and MCP-like file tools, emphasizing manual developer oversight and explicit confirmation.
## Key Features
@@ -25,15 +25,14 @@ For deep implementation details when planning or implementing tracks, consult `d
- **Multi-Provider Integration:** Supports Gemini, Anthropic, and DeepSeek with seamless switching.
- **4-Tier Hierarchical Multi-Model Architecture:** Orchestrates an intelligent cascade of specialized models to isolate cognitive loads and minimize token burn.
- **Tier 1 (Orchestrator):** Strategic product alignment, setup (`/conductor:setup`), and track initialization (`/conductor:newTrack`) using `gemini-3.1-pro-preview`.
- **Tier 2 (Tech Lead):** Technical oversight and track execution (`/conductor:implement`) using `gemini-3-flash-preview`. Maintains persistent context throughout implementation.
- **Tier 3 (Worker):** Surgical code implementation and TDD using `gemini-2.5-flash-lite`. Operates statelessly with tool access and dependency skeletons.
- **Tier 4 (QA):** Error analysis and diagnostics using `gemini-2.5-flash-lite`. Operates statelessly with tool access.
- **Tier 2 (Tech Lead):** Technical oversight and track execution (`/conductor:implement`) using `gemini-2.5-flash`. Maintains persistent context throughout implementation.
- **Tier 3 (Worker):** Surgical code implementation and TDD using `gemini-2.5-flash` or `deepseek-v3`. Operates statelessly with tool access and dependency skeletons.
- **Tier 4 (QA):** Error analysis and diagnostics using `gemini-2.5-flash` or `deepseek-v3`. Operates statelessly with tool access.
- **MMA Delegation Engine:** Routes tasks, ensuring role-scoped context and detailed observability via timestamped sub-agent logs. Supports dynamic ticket creation and dependency resolution via an automated Dispatcher Loop.
- **MMA Observability Dashboard:** A high-density control center within the GUI for monitoring and managing the 4-Tier architecture.
- **Track Browser:** Real-time visualization of all implementation tracks with status indicators and progress bars. Includes a dedicated **Active Track Summary** featuring a color-coded progress bar, precise ticket status breakdown (Completed, In Progress, Blocked, Todo), and dynamic **ETA estimation** based on historical completion times.
- **Visual Task DAG:** An interactive, node-based visualizer for the active track's task dependencies using `imgui-node-editor`. Features color-coded state tracking (Ready, Running, Blocked, Done), drag-and-drop dependency creation, and right-click deletion.
- **Strategy Visualization:** Dedicated real-time output streams for Tier 1 (Strategic Planning) and Tier 2/3 (Execution) agents, allowing the user to follow the agent's reasoning chains alongside the task DAG.
- **Agent-Focused Filtering:** Allows the user to focus the entire GUI (Session Hub, Discussion Hub, Comms) on a specific agent's activities and scoped context.
- **Track-Scoped State Management:** Segregates discussion history and task progress into per-track state files. Supports **Project-Specific Conductor Directories**, defaulting to `./conductor` relative to each project's TOML file. Projects can define their own conductor path override in `manual_slop.toml` (`[conductor].dir`) via the Projects tab for isolated track management. This prevents global context pollution and ensures the Tech Lead session is isolated to the specific track's objective.
- **Native DAG Execution Engine:** Employs a Python-based Directed Acyclic Graph (DAG) engine to manage complex task dependencies. Supports automated topological sorting, robust cycle detection, and **transitive blocking propagation** (cascading `blocked` status to downstream dependents to prevent execution stalls).
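A minimal sketch of the transitive blocking propagation described in the feature above, assuming the DAG is held as a map from ticket id to the ids it depends on; the names and data layout are illustrative, not the engine's actual API:

```python
# Sketch: cascade "blocked" status to every downstream dependent of a blocked ticket.
from collections import defaultdict

def propagate_blocked(deps: dict[str, set[str]], blocked: set[str]) -> set[str]:
    """Return every ticket blocked directly or through a blocked dependency."""
    dependents = defaultdict(set)          # reverse edges: dependency -> dependents
    for ticket, requires in deps.items():
        for dep in requires:
            dependents[dep].add(ticket)
    result, stack = set(blocked), list(blocked)
    while stack:                           # walk downstream from each blocked ticket
        for child in dependents[stack.pop()]:
            if child not in result:
                result.add(child)
                stack.append(child)
    return result

deps = {"b": {"a"}, "c": {"b"}, "d": set()}
assert propagate_blocked(deps, {"a"}) == {"a", "b", "c"}
```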
@@ -41,14 +40,9 @@ For deep implementation details when planning or implementing tracks, consult `d
- **Role-Scoped Documentation:** Automated mapping of foundational documents to specific tiers to prevent token bloat and maintain high-signal context.
- **Tiered Context Scoping:** Employs optimized context subsets for each tier. Tiers 1 & 2 receive strategic documents and full history, while Tier 3/4 workers receive task-specific "Focus Files" and automated AST dependency skeletons.
- **Worker Spawn Interceptor:** A mandatory security gate that intercepts every sub-agent launch. Provides a GUI modal allowing the user to review, modify, or reject the worker's prompt and file context before it is sent to the API.
- **Strict Memory Siloing:** Employs tree-sitter AST-based interface extraction (Skeleton View, Curated View, and Targeted View) and "Context Amnesia" to provide workers only with the absolute minimum context required. Supports **Python, C, and C++** languages for structural extraction. Features an intelligent context aggregation engine utilizing **Hash-Based Caching (SHA256)** and LRU eviction to eliminate redundant processing. Employs **Tier-Level Aggregation Strategies** (`full`, `summarize`, `skeleton`) configured directly via Agent Personas, integrating high-tier AI sub-agents during the aggregation pass to generate succinct, high-signal summaries for both code and text files. Includes **Manual Skeleton Context Injection**, allowing developers to preview and manually inject file skeletons or full content into discussions via a dedicated GUI modal. Features multi-level dependency traversal and AST caching to minimize re-parsing overhead and token burn.
- **Strict Memory Siloing:** Employs tree-sitter AST-based interface extraction (Skeleton View, Curated View, and Targeted View) and "Context Amnesia" to provide workers only with the absolute minimum context required. Includes **Manual Skeleton Context Injection**, allowing developers to preview and manually inject file skeletons or full content into discussions via a dedicated GUI modal. Features multi-level dependency traversal and AST caching to minimize re-parsing overhead and token burn.
- **Explicit Execution Control:** All AI-generated PowerShell scripts require explicit human confirmation via interactive UI dialogs before execution, supported by a global "Linear Execution Clutch" for deterministic debugging.
- **Parallel Multi-Agent Execution:** Executes multiple AI workers in parallel using a non-blocking execution engine and a dedicated `WorkerPool`. Features configurable concurrency limits (defaulting to 4) to optimize resource usage and prevent API rate limiting.
- **Beads Mode Integration:** Supports [Beads](https://github.com/steveyegge/beads) as a first-class, project-specific alternative to markdown-based tracking.
- **Git-Backed Issue Tracking:** Uses a local `.beads` repository (backed by Dolt) to store the task graph, allowing tracks and tickets to be versioned alongside the code.
- **Beads Toolset:** Provides a suite of MCP tools (`bd_create`, `bd_update`, `bd_ready`, `bd_list`) for agents to manage the issue graph autonomously.
- **Context Compaction:** Automatically summarizes completed beads to preserve context window space for the active task.
- **Augmented Visualizations:** Integrates with the Visual DAG and MMA Dashboard to provide real-time visibility into the Dolt-backed issue graph.
- **Parallel Tool Execution:** Executes independent tool calls (e.g., parallel file reads) concurrently within a single agent turn using an asynchronous execution engine, significantly reducing end-to-end latency.
- **Automated Tier 4 QA:** Integrates real-time error interception in the shell runner, automatically forwarding technical failures to cheap sub-agents for 20-word diagnostic summaries injected back into the worker history.
- **External MCP Server Support:** Adds support for integrating external Model Context Protocol (MCP) servers, expanding the agent's toolset with the broader MCP ecosystem.
@@ -57,26 +51,10 @@ For deep implementation details when planning or implementing tracks, consult `d
- **Auto-Start & Discovery:** Automatically initializes configured servers on project load and dynamically aggregates their tools into the agent's capability declarations.
- **Dedicated Operations UI:** Features a new **External Tools** section within the Operations Hub for monitoring server status (idle, starting, running, error) and browsing discovered tool schemas. Supports **Pop-Out Panel functionality**, allowing the External Tools interface to be detached into a standalone window for optimized multi-monitor workflows.
- **Strict HITL Safety:** All external tool calls are intercepted and require explicit human-in-the-loop approval via the standard confirmation dialog before execution.
- **Retrieval-Augmented Generation (RAG) Support:** Introduces advanced retrieval capabilities to overcome context window limitations and reduce hallucination.
- **Multi-Source Retrieval:** Supports local vector stores (ChromaDB) and an **External RAG Bridge** via the Model Context Protocol (MCP) for connecting to third-party retrieval services.
- **High-Performance Indexing:** Employs a parallelized indexing pipeline using `ThreadPoolExecutor` and incremental updates based on file `mtime` to handle large codebases efficiently.
- **Deep Discussion Integration:** Retrieved context fragments are automatically prepended to agent prompts and captured in the discussion history, featuring a dedicated visualization mode with source buttons for instant file navigation.
- **Configurable Strategy:** Users can toggle RAG globally and fine-tune retrieval parameters (source, embedding provider, chunk size/overlap) directly within the AI Settings.
- **Automated Synchronization:** Features background re-indexing of the project workspace, ensuring the vector store remains consistent with the current project state.
- **Undo/Redo History Support:** Implements a robust, non-provider based undo/redo system for managing UI state and discussion mutations.
- **Comprehensive State Snapshots:** Captures all critical UI state, including text inputs (system prompts, AI input), model parameters (Temperature, Top-P), and context management (files, screenshots).
- **Discussion Mutation Tracking:** Allows reverting and redoing additions, deletions, and structural changes to the discussion history.
- **History List View:** Features a dedicated, scrollable panel showing recent actions with timestamps, allowing users to jump directly to any historical state.
- **Tactile Hotkeys:** Supports industry-standard shortcuts (`Ctrl+Z`, `Ctrl+Y`, `Ctrl+Shift+Z`) for fast, intuitive state navigation.
- **High-Fidelity Selectable UI:** Most read-only labels and logs across the interface (including discussion history, comms payloads, tool outputs, and telemetry metrics) are now implemented as selectable text fields. This enables standard OS-level text selection and copying (Ctrl+C) while maintaining a high-density, non-editable aesthetic.
- **High-Fidelity UI Rendering:** Employs advanced 3x font oversampling and sub-pixel positioning to ensure crisp, high-clarity text rendering across all resolutions, enhancing readability for dense logs and complex code fragments.
- **Workspace Docking & Layout Profiles:** Expands layout management to support named workspace profiles, capturing multi-viewport docking arrangements, window visibility, and internal panel states.
- **Scope Inheritance:** Profiles follow a Global and Project inheritance model, allowing for both universal defaults and project-specific layouts.
- **Contextual Auto-Switch (Experimental):** An opt-in mechanism that automatically binds and loads specific workspace profiles based on the active MMA Tier or task context, dynamically reshaping the UI for the current cognitive load.
- **Enhanced MMA Observability:** Worker streams and ticket previews now support direct text selection, allowing for easy extraction of specific logs or reasoning fragments during parallel execution.
- **Transparent Context Visibility:** A dedicated **Session Hub** exposes the exact aggregated markdown and resolved system prompt sent to the AI.
- **Injection Timeline:** Discussion history visually indicates the precise moments when files or screenshots were injected into the session context.
- **Detailed History Management:** Rich discussion history with non-linear timeline branching ("takes"), tabbed interface navigation, specific git commit linkage per conversation, and automated multi-take synthesis.
- **Detailed History Management:** Rich discussion history with branching, timestamping, and specific git commit linkage per conversation.
- **Advanced Log Management:** Optimizes log storage by offloading large data (AI-generated scripts and tool outputs) to unique files within the session directory, using compact `[REF:filename]` pointers in JSON-L logs to minimize token overhead during analysis. Features a dedicated **Log Management panel** for monitoring, whitelisting, and pruning session logs.
- **Full Session Restoration:** Allows users to load and reconstruct entire historical sessions from their log directories. Includes a dedicated, tinted **'Historical Replay' mode** that populates discussion history and provides a read-only view of prior agent activities.
- **Dedicated Diagnostics Hub:** Consolidates real-time telemetry (FPS, CPU, Frame Time) and transient system warnings into a standalone **Diagnostics panel**, providing deep visibility into application health without polluting the discussion history.
@@ -86,18 +64,19 @@ For deep implementation details when planning or implementing tracks, consult `d
- **Session Analysis:** Ability to load and visualize historical session logs with a dedicated tinted "Prior Session" viewing mode.
- **Structured Log Taxonomy:** Automated session-based log organization into configurable directories (defaulting to `logs/sessions/`). Includes a dedicated GUI panel for monitoring and manual whitelisting. Features an intelligent heuristic-based pruner that automatically cleans up insignificant logs older than 24 hours while preserving valuable sessions.
- **Clean Project Root:** Enforces a "Cruft-Free Root" policy by organizing core implementation into a `src/` directory and redirecting all temporary test data, configurations, and AI-generated artifacts to `tests/artifacts/`.
- **Performance Diagnostics:** High-precision, microsecond-accurate per-component profiling. Features a dedicated **Diagnostics Panel** providing real-time telemetry for FPS, Frame Time, CPU usage, and **Extended Metrics** (Hit Counts, Peak Latency, Minimum Latency) for all GUI panels and background logic (DAG Engine, Aggregation), utilizing the **"Less Python Does, the Better"** heuristic to minimize monitoring overhead.
- **Performance Diagnostics:** Comprehensive, conditional per-component profiling across the entire application. Features a dedicated **Diagnostics Panel** providing real-time telemetry for FPS, Frame Time, CPU usage, and **Detailed Component Timings** for all GUI panels and background threads, including automated threshold-based latency alerts.
- **Automated UX Verification:** A robust IPC mechanism via API hooks and a modular simulation suite allows for human-like simulation walkthroughs and automated regression testing of the full GUI lifecycle across multiple specialized scenarios.
- **Professional UI Theme & Typography:** Implements a high-fidelity visual system featuring **Inter** and **Maple Mono** fonts for optimal readability. Employs a cohesive "Subtle Rounding" aesthetic across all standard widgets, supported by custom **soft shadow shaders** for modals and popups to provide depth and professional polish. Includes a selectable **NERV UI theme** featuring a "Black Void" palette, zero-rounding geometry, and CRT-style visual effects (scanlines, status flickering).
- **Professional UI Theme & Typography:** Implements a high-fidelity visual system featuring **Inter** and **Maple Mono** fonts for optimal readability. Employs a cohesive "Subtle Rounding" aesthetic across all standard widgets, supported by custom **soft shadow shaders** for modals and popups, and a high-fidelity **frosted glass (acrylic) background effect** for panels to provide depth and professional polish. Includes a selectable **NERV UI theme** featuring a "Black Void" palette, zero-rounding geometry, and CRT-style visual effects (scanlines, status flickering).
- **Rich Text & Syntax Highlighting:** Provides advanced rendering for messages, logs, and tool outputs using a hybrid Markdown system. Supports GitHub-Flavored Markdown (GFM) via `imgui_markdown` and integrates `ImGuiColorTextEdit` for high-performance syntax highlighting of code blocks (Python, JSON, C++, etc.). Includes automated language detection and clickable URL support.
- **Multi-Viewport & Layout Management:** Full support for ImGui Multi-Viewport, allowing users to detach panels into standalone OS windows for complex multi-monitor workflows. Includes a comprehensive **Layout Presets system**, enabling developers to save, name, and instantly restore custom window arrangements, including their Multi-Viewport state.
- **Headless Backend Service & Hook API:** Optional headless mode allowing the core AI and tool execution logic to run as a decoupled service. Features a comprehensive Hook API and WebSocket event streaming for remote orchestration, deep state inspection, and manual lifecycle management for both individual workers and the global task queue (e.g., ticket approval, DAG mutation).
- **Headless Backend Service & Hook API:** Optional headless mode allowing the core AI and tool execution logic to run as a decoupled service. Features a comprehensive Hook API and WebSocket event streaming for remote orchestration, deep state inspection, and manual worker lifecycle management.
- **Remote Confirmation Protocol:** A non-blocking, ID-based challenge/response mechanism for approving AI actions via the REST API, enabling remote "Human-in-the-Loop" safety.
- **Gemini CLI Integration:** Allows using the `gemini` CLI as a headless backend provider. This enables leveraging Gemini subscriptions with advanced features like persistent sessions, while maintaining full "Human-in-the-Loop" safety through a dedicated bridge for synchronous tool call approvals within the Manual Slop GUI. Now features full functional parity with the direct API, including accurate token estimation, safety settings, and robust system instruction handling.
- **Context & Token Visualization:** Detailed UI panels for monitoring real-time token usage, history depth, and **visual cache awareness** (tracking specific files currently live in the provider's context cache).
- **On-Demand Definition Lookup:** Allows developers to request specific class or function definitions during discussions using `@SymbolName` syntax. Injected definitions feature syntax highlighting, intelligent collapsing for long blocks, and a **[Source]** button for instant navigation to the full file.
- **Manual Ticket Queue Management:** Provides a dedicated GUI panel for granular control over the implementation queue. Features include color-coded priority assignment (High, Medium, Low), multi-select bulk operations (Execute, Skip, Block), and interactive drag-and-drop reordering with real-time Directed Acyclic Graph (DAG) validation.
- **System Prompt Presets:** Comprehensive management system for saving and switching between complex system prompt configurations. Features full visibility and customization of the **Foundational Base System Prompt**, allowing users to modify the core instructions that define agent capabilities and tool usage heuristics. - **Scoped Inheritance:** Supports **Global** (application-wide) and **Project-Specific** presets. Project presets with the same name automatically override global counterparts, allowing for fine-tuned context tailoring.
- **System Prompt Presets:** Comprehensive management system for saving and switching between complex system prompt configurations.
- **Scoped Inheritance:** Supports **Global** (application-wide) and **Project-Specific** presets. Project presets with the same name automatically override global counterparts, allowing for fine-tuned context tailoring.
- **Full AI Profiles:** Presets capture not only the system prompt text but also critical model parameters like **Temperature**, **Top-P**, and **Max Output Tokens**.
- **Preset Manager Modal:** A dedicated high-density GUI for creating, editing, and deleting presets with real-time validation and instant application to the active session.
- **Agent Personas & Unified Profiles:** Consolidates model settings, provider routing, system prompts, tool presets, and bias profiles into named "Persona" entities.
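A minimal sketch of what a remote approver for the confirmation protocol above could look like. The endpoint paths, field names, and port are illustrative assumptions, not the actual Hook API surface:

```python
# Sketch of a remote "Human-in-the-Loop" approver for the confirmation protocol.
# Endpoint paths and payload fields are hypothetical placeholders.
import time
import requests

BASE_URL = "http://127.0.0.1:8765"  # assumed Hook API address


def approve_pending_actions(poll_seconds: float = 1.0) -> None:
    while True:
        # Fetch outstanding confirmation challenges (non-blocking on the GUI side).
        pending = requests.get(f"{BASE_URL}/hooks/confirmations", timeout=5).json()
        for challenge in pending:
            print(f"Pending action {challenge['id']}: {challenge['description']}")
            # Echo the challenge ID back with a verdict to resolve it.
            requests.post(
                f"{BASE_URL}/hooks/confirmations/{challenge['id']}",
                json={"approved": True},
                timeout=5,
            )
        time.sleep(poll_seconds)
```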
+8 -24
View File
@@ -22,8 +22,7 @@
- **DeepSeek (Dedicated SDK):** Integrated for high-performance codegen and reasoning (Phase 2).
- **Gemini CLI:** Integrated as a headless backend provider, utilizing a custom subprocess adapter and bridge script for tool execution control. Achieves full functional parity with direct SDK usage, including real-time token counting and detailed subprocess observability.
- **Gemini 3.1 Pro Preview:** Tier 1 Orchestrator model for complex reasoning.
- **Gemini 3-Flash Preview:** High-reasoning low-latency model for Tier 2 Tech Lead.
- **Gemini 2.5 Flash Lite:** Ultra-low-cost, high-speed model for Tier 3 Workers and Tier 4 QA.
- **Gemini 2.5 Flash:** High-performance, low-latency model for Tier 2 Tech Lead, Tier 3 Workers, and Tier 4 QA.
- **DeepSeek-V3:** Tier 3 Worker model optimized for code implementation.
- **DeepSeek-R1:** Specialized reasoning model for complex logical chains and "thinking" traces.
@@ -33,31 +32,20 @@
- **src/paths.py:** Centralized module for path resolution. Supports project-specific conductor directory overrides via project TOML (`[conductor].dir`), enabling isolated track management per project. If not specified, conductor paths default to `./conductor` relative to each project's TOML file. All paths are resolved to absolute objects. Provides **Path Resolution Metadata**, exposing the source of each resolved path (default, environment variable, or configuration file) for high-fidelity GUI display. Supports **Runtime Re-Resolution** via `reset_resolved()`, allowing path changes to be applied immediately without an application restart. Path configuration (logs, scripts) can also be configured via `config.toml` or environment variables, eliminating hardcoded filesystem dependencies.
- **src/presets.py:** Implements `PresetManager` for high-performance CRUD operations on system prompt presets stored in TOML format (`presets.toml`, `project_presets.toml`). Supports dynamic path resolution, scope-based inheritance, and foundational base prompt customization.
- **src/presets.py:** Implements `PresetManager` for high-performance CRUD operations on system prompt presets stored in TOML format (`presets.toml`, `project_presets.toml`). Supports dynamic path resolution and scope-based inheritance.
- **src/personas.py:** Implements `PersonaManager` for high-performance CRUD operations on unified agent personas stored in TOML format (`personas.toml`, `project_personas.toml`). Handles consolidation of model settings, prompts, and tool biases.
- **src/tool_bias.py:** Implements the `ToolBiasEngine` for semantic tool description nudging and dynamic tooling strategy generation.
- **src/tool_presets.py:** Extends `ToolPresetManager` to handle nested `Tool` models, weights, and global `BiasProfile` persistence within `tool_presets.toml`.
- **src/mcp_client.py:** Implements the native tool registry and the `ExternalMCPManager` for orchestrating third-party Model Context Protocol servers. Provides dynamic tool discovery and validation.
- **src/mcp_client.py (External Extension):** Implements the `ExternalMCPManager` for orchestrating third-party Model Context Protocol servers.
- **StdioMCPServer:** Manages local MCP servers via asynchronous subprocess pipes (stdin/stdout/stderr).
- **RemoteMCPServer (SSE):** Provides a foundation for remote MCP integration via Server-Sent Events.
- **JSON-RPC 2.0 Engine:** Handles asynchronous message routing, request/response matching, and error handling for all external MCP communication.
- **AST-Based C/C++ Tools:** Provides `ts_c_get_skeleton`, `ts_cpp_get_skeleton`, `ts_c_get_code_outline`, and `ts_cpp_get_code_outline` for structural analysis of C/C++ codebases using tree-sitter.
- **src/rag_engine.py:** Core RAG implementation managing the vector store lifecycle, chunking strategies (character-based and AST-aware), and multi-provider search. Integrates with **ChromaDB** for local persistence and provides a bridge for external MCP retrieval tools.
- **src/beads_client.py:** Python client for interacting with the [Beads](https://github.com/steveyegge/beads) / Dolt backend. Handles repository initialization, bead creation, status updates, and graph queries.
- **bd / dolt:** External CLI dependencies for versioned, graph-based issue tracking.
- **src/history.py:** Implements the core `HistoryManager` and `UISnapshot` logic for the non-provider undo/redo system. Manages state stacks with a fixed capacity and provides jumping capabilities.
- **src/workspace_manager.py:** Implements the `WorkspaceManager` and `WorkspaceProfile` data models for saving, loading, and merging ImGui docking layouts and window states across global and project-specific configurations.
- **src/paths.py:** Centralized module for path resolution.
- **tree-sitter / AST Parsing:** For deterministic AST parsing and automated generation of curated "Skeleton Views" and "Targeted Views" (extracting specific functions and their dependencies). Supports Python, C, and C++. Features an integrated AST cache with mtime-based invalidation to minimize re-parsing overhead. Supplemented by `SummaryCache` which provides persistent, hash-based (SHA256) caching with LRU eviction for AI-generated file summaries.
- **tree-sitter / AST Parsing:** For deterministic AST parsing and automated generation of curated "Skeleton Views" and "Targeted Views" (extracting specific functions and their dependencies). Features an integrated AST cache with mtime-based invalidation to minimize re-parsing overhead.
- **pydantic / dataclasses:** For defining strict state schemas (Tracks, Tickets) used in linear orchestration.
- **tomli-w:** For writing TOML configuration files.
- **tomllib:** For native TOML parsing (Python 3.11+).
@@ -70,7 +58,7 @@
- **Taxonomy & Artifacts:** Enforces a clean root by organizing core implementation into a `src/` directory, and redirecting session logs and artifacts to configurable directories (defaulting to `logs/sessions/` and `scripts/generated/`). Temporary test data and test logs are siloed in `tests/artifacts/` and `tests/logs/`.
- **ApiHookClient:** A dedicated IPC client for automated GUI interaction and state inspection.
- **mma-exec / mma.ps1:** Python-based execution engine and PowerShell wrapper for managing the 4-Tier MMA hierarchy and automated documentation mapping.
- **dag_engine.py:** A native Python utility implementing `TrackDAG` and `ExecutionEngine` for dependency resolution, cycle detection, transitive blocking propagation, and programmable task execution loops. Optimized using **Kahn's Algorithm** and **iterative DFS** to eliminate recursion overhead and provide $O(V+E)$ performance.
- **dag_engine.py:** A native Python utility implementing `TrackDAG` and `ExecutionEngine` for dependency resolution, cycle detection, transitive blocking propagation, and programmable task execution loops. Refined to decouple status management from dependency resolution to support external pool control.
- **multi_agent_conductor.py:** Orchestrates the concurrent execution of implementation tracks using a non-blocking `ConductorEngine` and a thread-safe `WorkerPool`. Employs configurable concurrency limits and thread-local context isolation to manage multi-agent state.
- **Thread-Local Context Isolation:** Utilizes `threading.local()` for managing per-thread AI client context (e.g., source tier tagging), ensuring thread safety during concurrent multi-agent execution.
- **Asynchronous Tool Execution Engine:** Refactored MCP tool dispatch and AI client loops to use `asyncio.gather` and `asyncio.to_thread`, enabling parallel execution of independent tool calls within a single AI turn to reduce latency.
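As a rough illustration of the asynchronous dispatch pattern described in the bullet above — a minimal sketch only, where `execute_tool` and `ToolCall` stand in for whatever the real MCP dispatch types are called:

```python
# Sketch: parallel execution of independent tool calls within a single AI turn.
# `execute_tool` and `ToolCall` are illustrative placeholders, not the project's real names.
import asyncio
from dataclasses import dataclass


@dataclass
class ToolCall:
    name: str
    args: dict


def execute_tool(call: ToolCall) -> str:
    # Blocking tool implementation (file I/O, subprocess, etc.) goes here.
    return f"{call.name} done"


async def run_tool_calls(calls: list[ToolCall]) -> list[str]:
    # Each blocking tool runs in a worker thread; independent calls overlap,
    # so one slow tool no longer serializes the whole turn.
    return await asyncio.gather(
        *(asyncio.to_thread(execute_tool, call) for call in calls)
    )

# results = asyncio.run(run_tool_calls([ToolCall("read_file", {"path": "README.md"})]))
```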
@@ -79,13 +67,9 @@
- **Event-Driven Metrics:** Uses a custom `EventEmitter` to decouple API lifecycle events from UI rendering, improving performance and responsiveness.
- **Synchronous Event Queue:** Employs a `SyncEventQueue` based on `queue.Queue` to manage communication between the UI and backend agents, maintaining responsiveness through a threaded execution model.
- **Synchronous IPC Approval Flow:** A specialized bridge mechanism that allows headless AI providers (like Gemini CLI) to synchronously request and receive human approval for tool calls and manual ticket transitions (Step Mode) via the GUI's REST API hooks.
- **Synchronous IPC Approval Flow:** A specialized bridge mechanism that allows headless AI providers (like Gemini CLI) to synchronously request and receive human approval for tool calls via the GUI's REST API hooks.
- **High-Fidelity Selectable Labels:** Implements a pattern for making read-only UI text selectable by wrapping `imgui.input_text` with `imgui.InputTextFlags_.read_only`. Includes a specialized `_render_selectable_label` helper that resets frame backgrounds, borders, and padding to mimic standard labels while enabling OS-level clipboard support (Ctrl+C).
- **Hybrid Markdown Rendering:** Employs a custom `MarkdownRenderer` that orchestrates `imgui_markdown` for standard text and headers while intercepting code blocks to render them via cached `ImGuiColorTextEdit` instances. This ensures high-performance rich text rendering with robust syntax highlighting and stateful text selection.
- **Hybrid Shader Pipeline:** Utilizes an optimized `ImDrawList`-based batching technique to simulate UI effects such as soft shadows and acrylic glass overlays without the overhead of heavy GPU-resident shaders. Supplemented by a true GPU shader pipeline using `PyOpenGL` and Framebuffer Objects (FBOs) for complex post-processing (CRT scanlines, bloom) and dynamic backgrounds.
- **Hybrid Shader Pipeline:** Utilizes an optimized `ImDrawList`-based batching technique to simulate UI effects such as soft shadows without the overhead of heavy GPU-resident shaders. Supplemented by a true GPU shader pipeline using `PyOpenGL` and Framebuffer Objects (FBOs) for complex post-processing (CRT scanlines, bloom), dynamic backgrounds, and high-fidelity **frosted glass (acrylic) blurring** of the GUI panels via multi-pass Gaussian/Kawase filtering.
- **Interface-Driven Development (IDD):** Enforces a "Stub-and-Resolve" pattern where cross-module dependencies are resolved by generating signatures/contracts before implementation.
" pattern where cross-module dependencies are resolved by generating signatures/contracts before implementation.
ncies are resolved by generating signatures/contracts before implementation.
-40
View File
@@ -1,40 +0,0 @@
import sys
import os

# Add src to path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))

from src.history import HistoryManager


def verify_phase_1():
    print("Verifying Phase 1: History Core Logic...")
    hm = HistoryManager(max_capacity=10)

    # Test push
    hm.push({"test": 1}, "initial")
    if not hm.can_undo:
        print("Error: can_undo should be true after push")
        sys.exit(1)

    # Test undo
    entry = hm.undo({"test": 2}, "current")
    if entry.state != {"test": 1}:
        print(f"Error: expected state {{'test': 1}}, got {entry.state}")
        sys.exit(1)
    if entry.description != "initial":
        print(f"Error: expected description 'initial', got {entry.description}")
        sys.exit(1)

    # Test redo
    entry = hm.redo({"test": 1}, "back")
    if entry.state != {"test": 2}:
        print(f"Error: expected state {{'test': 2}}, got {entry.state}")
        sys.exit(1)
    if entry.description != "current":
        print(f"Error: expected description 'current', got {entry.description}")
        sys.exit(1)

    print("Phase 1 verification PASSED.")


if __name__ == "__main__":
    verify_phase_1()
-24
View File
@@ -1,24 +0,0 @@
import subprocess
import sys
import os


def verify_phase_2():
    print("Verifying Phase 2: Text Input & Control Undo/Redo...")
    # Run the simulation test
    result = subprocess.run(
        ["uv", "run", "pytest", "tests/test_undo_redo_sim.py"],
        capture_output=True,
        text=True
    )
    if result.returncode == 0:
        print("Phase 2 verification PASSED.")
    else:
        print("Phase 2 verification FAILED.")
        print(result.stdout)
        print(result.stderr)
        sys.exit(1)


if __name__ == "__main__":
    verify_phase_2()
-24
View File
@@ -1,24 +0,0 @@
import subprocess
import sys


def verify_phase_3():
    print("Verifying Phase 3: GUI Menu Integration...")
    # We rely on the existing simulation test to verify the callback logic,
    # which underpins the GUI menu integration.
    result = subprocess.run(
        ["uv", "run", "pytest", "tests/test_workspace_profiles_sim.py"],
        capture_output=True,
        text=True
    )
    if result.returncode == 0:
        print("Phase 3 verification PASSED.")
    else:
        print("Phase 3 verification FAILED.")
        print(result.stdout)
        print(result.stderr)
        sys.exit(1)


if __name__ == "__main__":
    verify_phase_3()
-54
View File
@@ -1,54 +0,0 @@
import sys
import os
import time

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src")))

from src import api_hook_client


def verify_phase_3():
    print("[VERIFY] Starting Phase 3 Automated Verification...")
    client = api_hook_client.ApiHookClient()
    if not client.wait_for_server(timeout=10):
        print("[VERIFY] ERROR: Hook server not reachable.")
        sys.exit(1)
    try:
        # Check RAG status
        status = client.get_value("rag_status")
        print(f"[VERIFY] Current RAG status: {status}")

        # Check if RAG settings are accessible
        enabled = client.get_value("rag_enabled")
        source = client.get_value("rag_source")
        print(f"[VERIFY] RAG Enabled: {enabled}, Source: {source}")

        # Verify status transitions (indexing)
        print("[VERIFY] Triggering index rebuild...")
        client.click("btn_rebuild_rag_index")
        time.sleep(0.5)
        status = client.get_value("rag_status")
        print(f"[VERIFY] Status during indexing: {status}")

        # Wait for completion
        max_wait = 10
        start = time.time()
        while time.time() - start < max_wait:
            status = client.get_value("rag_status")
            if status == "ready":
                print("[VERIFY] RAG reached 'ready' status.")
                break
            time.sleep(1)
        else:
            print(f"[VERIFY] WARNING: RAG status timeout. Final: {status}")

        print("[VERIFY] Phase 3 verification COMPLETED successfully.")
    except Exception as e:
        print(f"[VERIFY] ERROR during verification: {e}")
        sys.exit(1)


if __name__ == "__main__":
    verify_phase_3()
-23
View File
@@ -1,23 +0,0 @@
import subprocess
import sys
import os


def verify_phase_4():
    print("Verifying Phase 4: Contextual Auto-Switch...")
    result = subprocess.run(
        ["uv", "run", "pytest", "tests/test_auto_switch_sim.py"],
        capture_output=True,
        text=True
    )
    if result.returncode == 0:
        print("Phase 4 verification PASSED.")
    else:
        print("Phase 4 verification FAILED.")
        print(result.stdout)
        print(result.stderr)
        sys.exit(1)


if __name__ == "__main__":
    verify_phase_4()
+20 -36
View File
@@ -1,4 +1,4 @@
# Project Tracks
# Project Tracks
This file tracks all major tracks for the project. Each track has its own detailed plan in its respective folder.
@@ -10,7 +10,7 @@ This file tracks all major tracks for the project. Each track has its own detail
### Architecture & Backend
1. [x] **Track: RAG Support**
1. [ ] **Track: RAG Support**
*Link: [./tracks/rag_support_20260308/](./tracks/rag_support_20260308/)*
*Goal: Add support for RAG (Retrieval-Augmented Generation) using local vector stores (Chroma/Qdrant), native vendor retrieval, and external RAG APIs. Implement indexing pipeline and retrieval UI.*
@@ -22,29 +22,21 @@ This file tracks all major tracks for the project. Each track has its own detail
*Link: [./tracks/hook_api_expansion_20260308/](./tracks/hook_api_expansion_20260308/)*
*Goal: Maximize internal state exposure and provide comprehensive control endpoints (worker spawn/kill, pipeline pause/resume, DAG mutation) via the Hook API. Implement WebSocket-based real-time event streaming.*
4. [x] **Track: Codebase Audit and Cleanup**
4. [ ] **Track: Codebase Audit and Cleanup**
*Link: [./tracks/codebase_audit_20260308/](./tracks/codebase_audit_20260308/)*
5. [x] **Track: Expanded Test Coverage and Stress Testing**
5. [ ] **Track: Expanded Test Coverage and Stress Testing**
*Link: [./tracks/test_coverage_expansion_20260309/](./tracks/test_coverage_expansion_20260309/)*
6. [x] **Track: Beads Mode Integration**
6. [ ] **Track: Beads Mode Integration**
*Link: [./tracks/beads_mode_20260309/](./tracks/beads_mode_20260309/)*
*Goal: Integrate Beads (git-backed graph issue tracker) as an alternative backend for MMA implementation tracks and tickets.*
7. [x] **Track: Optimization pass for Data-Oriented Python heuristics**
7. [ ] **Track: Optimization pass for Data-Oriented Python heuristics**
*Link: [./tracks/data_oriented_optimization_20260312/](./tracks/data_oriented_optimization_20260312/)*
8. [x] **Track: Rich Thinking Trace Handling** - *Parse and display AI thinking/reasoning traces*
*Link: [./tracks/thinking_trace_handling_20260313/](./tracks/thinking_trace_handling_20260313/)*
9. [x] **Track: Smarter Aggregation with Sub-Agent Summarization**
*Link: [./tracks/aggregation_smarter_summaries_20260322/](./tracks/aggregation_smarter_summaries_20260322/)*
*Goal: Sub-agent summarization during aggregation pass, hash-based caching for file summaries, smart outline generation for code vs text files.*
10. [x] **Track: System Context Exposure**
*Link: [./tracks/system_context_exposure_20260322/](./tracks/system_context_exposure_20260322/)*
*Goal: Expose hidden _SYSTEM_PROMPT from ai_client.py to users for customization via AI Settings.*
8. [ ] **Track: Rich Thinking Trace Handling**
*Link: [./tracks/thinking_trace_handling_20260313/](./tracks/thinking_trace_handling_20260313/)*
---
@@ -56,7 +48,7 @@ This file tracks all major tracks for the project. Each track has its own detail
2. [x] **Track: UI Theme Overhaul & Style System**
*Link: [./tracks/ui_theme_overhaul_20260308/](./tracks/ui_theme_overhaul_20260308/)*
*Goal: Modernize UI with Inter/Maple Mono fonts, a professional subtle rounded theme, custom shadesrs (corners, blur, AA), multi-viewport support, and layout presets.*
*Goal: Modernize UI with Inter/Maple Mono fonts, a professional subtle rounded theme, custom shaders (corners, blur, AA), multi-viewport support, and layout presets.*
3. [x] **Track: Selectable GUI Text & UX Improvements**
*Link: [./tracks/selectable_ui_text_20260308/](./tracks/selectable_ui_text_20260308/)*
@@ -68,39 +60,38 @@ This file tracks all major tracks for the project. Each track has its own detail
5. [x] **Track: NERV UI Theme Integration** (Archived 2026-03-09)
6. [X] **Track: Custom Shader and Window Frame Support**
6. [x] **Track: Custom Shader and Window Frame Support**
*Link: [./tracks/custom_shaders_20260309/](./tracks/custom_shaders_20260309/)*
7. [x] **Track: UI/UX Improvements - Presets and AI Settings**
*Link: [./tracks/presets_ai_settings_ux_20260311/](./tracks/presets_ai_settings_ux_20260311/)*
*Goal: Improve the layout, scaling, and control ergonomics of the Preset windows (Personas, Prompts, Tools) and AI Settings panel. Includes dual-control sliders and categorized tool management.*
8. [x] ~~**Track: Session Context Snapshots & Visibility**~~ (Archived 2026-03-22 - Replaced by discussion_hub_panel_reorganization)
8. [ ] **Track: Session Context Snapshots & Visibility**
*Link: [./tracks/session_context_snapshots_20260311/](./tracks/session_context_snapshots_20260311/)*
*Goal: Session-scoped context management, saving Context Presets, MMA assignment, and agent-focused session filtering in the UI.*
9. [x] ~~**Track: Discussion Takes & Timeline Branching**~~ (Archived 2026-03-22 - Replaced by discussion_hub_panel_reorganization)
9. [ ] **Track: Discussion Takes & Timeline Branching**
*Link: [./tracks/discussion_takes_branching_20260311/](./tracks/discussion_takes_branching_20260311/)*
*Goal: Non-linear discussion timelines via tabbed "takes", message branching, and synthesis generation workflows.*
12. [x] **Track: Discussion Hub Panel Reorganization**
*Link: [./tracks/discussion_hub_panel_reorganization_20260322/](./tracks/discussion_hub_panel_reorganization_20260322/)*
*Goal: Properly merge Session Hub into Discussion Hub (4 tabs: Discussion | Context Composition | Snapshot | Takes), establish Files & Media as project-level inventory, deprecate ui_summary_only, implement Context Composition and DAW-style Takes.*
10. [x] **Track: Undo/Redo History Support**
10. [ ] **Track: Undo/Redo History Support**
*Link: [./tracks/undo_redo_history_20260311/](./tracks/undo_redo_history_20260311/)*
*Goal: Robust, non-provider based undo/redo for text inputs, UI controls, discussion mutations, and context management. Includes hotkey support and a history list view.*
11. [x] **Track: Advanced Text Viewer with Syntax Highlighting**
11. [ ] **Track: Advanced Text Viewer with Syntax Highlighting**
*Link: [./tracks/text_viewer_rich_rendering_20260313/](./tracks/text_viewer_rich_rendering_20260313/)*
12. [x] **Track: Frosted Glass Background Effect**
*Link: [./tracks/frosted_glass_20260313/](./tracks/frosted_glass_20260313/)*
---
### Additional Language Support
1. [x] **Track: Tree-Sitter C/C++ MCP Tools**
1. [ ] **Track: Tree-Sitter C/C++ MCP Tools**
*Link: [./tracks/ts_cpp_tree_sitter_20260308/](./tracks/ts_cpp_tree_sitter_20260308/)*
*Goal: Add tree-sitter C and C++ grammars. Extend ASTParser to support C/C++ skeleton and outline extraction. Add MCP tools ts_c_get_skeleton, ts_cpp_get_skeleton, ts_c_get_code_outline, ts_cpp_get_code_outline. (Extended: definitions, signatures, and surgical updates).*
*Goal: Add tree-sitter C and C++ grammars. Extend ASTParser to support C/C++ skeleton and outline extraction. Add MCP tools ts_c_get_skeleton, ts_cpp_get_skeleton, ts_c_get_code_outline, ts_cpp_get_code_outline.*
2. [ ] **Track: Bootstrap gencpp Python Bindings**
*Link: [./tracks/gencpp_python_bindings_20260308/](./tracks/gencpp_python_bindings_20260308/)*
@@ -140,13 +131,10 @@ This file tracks all major tracks for the project. Each track has its own detail
5. [x] **Track: OpenCode Configuration Overhaul** (Archived 2026-03-10)
6. [x] **Track: Advanced Workspace Docking & Layout Profiles**
6. [ ] **Track: Advanced Workspace Docking & Layout Profiles**
*Link: [./tracks/workspace_profiles_20260310/](./tracks/workspace_profiles_20260310/)*
*Goal: Expand layout preset logic to allow users to save and switch between named workspace configurations.*
- [x] **Track: Review investigation of codebase and expose/cull any hidden invisible prompting either from the system or directly that the user cannot handle for any discussion/session.**
*Link: [./tracks/cull_hidden_prompts_20260502/](./tracks/cull_hidden_prompts_20260502/)*
---
### Model Providers
@@ -176,10 +164,6 @@ This file tracks all major tracks for the project. Each track has its own detail
### Completed / Archived
- [ ] ~~**Track: Frosted Glass Background Effect**~~ ***NOT WORTH THE PAIN***
*Link: [./tracks/frosted_glass_20260313/](./tracks/frosted_glass_20260313/)*
- [x] **Track: External MCP Server Support** (Archived 2026-03-12)
- [x] **Track: Project-Specific Conductor Directory** (Archived 2026-03-12)
- [x] **Track: GUI Path Configuration in Context Hub** (Archived 2026-03-12)
@@ -1,17 +0,0 @@
{
  "name": "aggregation_smarter_summaries",
  "created": "2026-03-22",
  "status": "future",
  "priority": "medium",
  "affected_files": [
    "src/aggregate.py",
    "src/file_cache.py",
    "src/ai_client.py",
    "src/models.py"
  ],
  "related_tracks": [
    "discussion_hub_panel_reorganization (in_progress)",
    "system_context_exposure (future)"
  ],
  "notes": "Deferred from discussion_hub_panel_reorganization planning. Improves aggregation with sub-agent summarization and hash-based caching."
}
@@ -1,49 +0,0 @@
# Implementation Plan: Smarter Aggregation with Sub-Agent Summarization
## Phase 1: Hash-Based Summary Cache [checkpoint: e972cf4]
Focus: Implement file hashing and cache storage
- [x] Task: Research existing file hash implementations in codebase 3218104
- [x] Task: Design cache storage format (file-based vs project state) 3218104
- [x] Task: Implement hash computation for aggregation files 3218104
- [x] Task: Implement summary cache storage and retrieval 3218104
- [x] Task: Add cache invalidation when file content changes 3218104
- [x] Task: Write tests for hash computation and cache 3218104
- [x] Task: Conductor - User Manual Verification 'Phase 1: Hash-Based Summary Cache' e972cf4
## Phase 2: Sub-Agent Summarization [checkpoint: 7efcc7c]
Focus: Implement sub-agent summarization during aggregation
- [x] Task: Audit current aggregate.py flow 3218104
- [x] Task: Define summarization prompt strategy for code vs text files 3218104
- [x] Task: Implement sub-agent invocation during aggregation 3218104
- [x] Task: Handle provider-specific differences in sub-agent calls 3218104
- [x] Task: Write tests for sub-agent summarization 3218104
- [x] Task: Conductor - User Manual Verification 'Phase 2: Sub-Agent Summarization' 7efcc7c
## Phase 3: Tiered Aggregation Strategy [checkpoint: fa00a84]
Focus: Respect tier-level aggregation configuration
- [x] Task: Audit how tiers receive context currently 628b580
- [x] Task: Implement tier-level aggregation strategy selection 628b580
- [x] Task: Connect tier strategy to Persona configuration 628b580
- [x] Task: Write tests for tiered aggregation 628b580
- [x] Task: Conductor - User Manual Verification 'Phase 3: Tiered Aggregation Strategy' fa00a84
## Phase 4: UI Integration [checkpoint: a1c204f]
Focus: Expose cache status and controls in UI
- [x] Task: Add cache status indicator to Files & Media panel 6bf6c79
- [x] Task: Add "Clear Summary Cache" button 6bf6c79
- [x] Task: Add aggregation configuration to Project Settings or AI Settings 6bf6c79
- [x] Task: Write tests for UI integration 6bf6c79
- [x] Task: Conductor - User Manual Verification 'Phase 4: UI Integration' a1c204f
## Phase 5: Cache Persistence & Optimization [checkpoint: e0737dc]
Focus: Ensure cache persists and is performant
- [x] Task: Implement persistent cache storage to disk fb2df2a
- [x] Task: Add cache size management (max entries, LRU) fb2df2a
- [x] Task: Performance testing with large codebases fb2df2a
- [x] Task: Write tests for persistence fb2df2a
- [x] Task: Conductor - User Manual Verification 'Phase 5: Cache Persistence & Optimization' e0737dc
@@ -1,103 +0,0 @@
# Specification: Smarter Aggregation with Sub-Agent Summarization
## 1. Overview
This track improves the context aggregation system to use sub-agent passes for intelligent summarization and hash-based caching to avoid redundant work.
**Current Problem:**
- Aggregation is a simple pass that either injects full file content or a basic skeleton
- No intelligence applied to determine what level of detail is needed
- Same files get re-summarized on every discussion start even if unchanged
**Goal:**
- Use a sub-agent during aggregation pass for high-tier agents to generate succinct summaries
- Cache summaries based on file hash - only re-summarize if file changed
- Smart outline generation for code files, summary for text files
## 2. Current State Audit
### Existing Aggregation Behavior
- `aggregate.py` handles context aggregation
- `file_cache.py` provides AST parsing and skeleton generation
- Per-file flags: `Auto-Aggregate` (summarize), `Force Full` (inject raw)
- No caching of summarization results
### Provider API Considerations
- Different providers have different prompt/caching mechanisms
- Need to verify how each provider handles system context and caching
- May need provider-specific aggregation strategies
## 3. Functional Requirements
### 3.1 Hash-Based Summary Cache
- Generate SHA256 hash of file content
- Store summaries in a cache (file-based or in project state)
- Before summarizing, check if file hash matches cached summary
- Cache invalidation when file content changes
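A minimal sketch of the lookup described above, assuming an in-memory dict as the store; the real cache persistence format and field names may differ:

```python
# Sketch: hash-gated summary cache. The cache layout is an illustrative assumption.
import hashlib
from pathlib import Path

_cache: dict[str, dict] = {}  # file_path -> {"file_hash": ..., "summary": ...}


def file_hash(path: Path) -> str:
    # SHA256 of the raw file content, per requirement 3.1.
    return hashlib.sha256(path.read_bytes()).hexdigest()


def get_summary(path: Path, summarize) -> str:
    digest = file_hash(path)
    entry = _cache.get(str(path))
    if entry and entry["file_hash"] == digest:
        return entry["summary"]            # unchanged file: reuse cached summary
    summary = summarize(path)              # changed or new file: re-summarize
    _cache[str(path)] = {"file_hash": digest, "summary": summary}
    return summary
```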
### 3.2 Sub-Agent Summarization Pass
- During aggregation, optionally invoke sub-agent for summarization
- Sub-agent generates concise summary of file purpose and key points
- Different strategies for:
- Code files: AST-based outline + key function signatures
- Text files: Paragraph-level summary
- Config files: Key-value extraction
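The strategy split above can start as a simple dispatch on file type; a sketch with illustrative suffix sets:

```python
# Sketch: pick a summarization strategy per file type (suffix mapping is illustrative).
from pathlib import Path

CODE_SUFFIXES = {".py", ".c", ".cpp", ".h", ".hpp"}
CONFIG_SUFFIXES = {".toml", ".json", ".ini", ".yaml", ".yml"}


def summarization_strategy(path: Path) -> str:
    if path.suffix in CODE_SUFFIXES:
        return "outline"      # AST-based outline + key function signatures
    if path.suffix in CONFIG_SUFFIXES:
        return "key_values"   # key-value extraction
    return "summary"          # paragraph-level summary for text files
```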
### 3.3 Tiered Aggregation Strategy
- Tier 3/4 workers: Get skeleton outlines (fast, cheap)
- Tier 2 (Tech Lead): Get summaries with key details
- Tier 1 (Orchestrator): May get full content or enhanced summaries
- Configurable per-agent via Persona
### 3.4 Cache Persistence
- Summaries persist across sessions
- Stored in project directory or centralized cache location
- Manual cache clear option in UI
## 4. Data Model
### 4.1 Summary Cache Entry
```python
{
    "file_path": str,
    "file_hash": str,       # SHA256 of content
    "summary": str,
    "outline": str,         # For code files
    "generated_at": str,    # ISO timestamp
    "generator_tier": str,  # Which tier generated it
}
```
### 4.2 Aggregation Config
```toml
[aggregation]
default_mode = "summarize" # "full", "summarize", "outline"
cache_enabled = true
cache_dir = ".slop_cache"
```
## 5. UI Changes
- Add "Clear Summary Cache" button in Files & Media or Context Composition
- Show cached status indicator on files (similar to AST cache indicator)
- Configuration in AI Settings or Project Settings
## 6. Acceptance Criteria
- [ ] File hash computed before summarization
- [ ] Summary cache persists across app restarts
- [ ] Sub-agent generates better summaries than basic skeleton
- [ ] Aggregation respects tier-level configuration
- [ ] Cache can be manually cleared
- [ ] Provider APIs handle aggregated context correctly
## 7. Out of Scope
- Changes to provider API internals
- Vector store / embeddings for RAG (separate track)
- Changes to Session Hub / Discussion Hub layout
## 8. Dependencies
- `aggregate.py` - main aggregation logic
- `file_cache.py` - AST parsing and caching
- `ai_client.py` - sub-agent invocation
- `models.py` - may need new config structures
+18 -18
View File
@@ -1,27 +1,27 @@
# Implementation Plan: Beads Mode Integration
## Phase 1: Environment & Core Configuration
- [x] Task: Audit existing `AppController` and `project_manager.py` for project mode handling.
- [x] Task: Write Tests: Verify `manual_slop.toml` can parse and store the `execution_mode` (native/beads).
- [x] Task: Implement: Add `execution_mode` toggle to `AppController` state and persistence logic.
- [x] Task: Conductor - User Manual Verification 'Phase 1: Environment & Core Configuration' (Protocol in workflow.md)
- [ ] Task: Audit existing `AppController` and `project_manager.py` for project mode handling.
- [ ] Task: Write Tests: Verify `manual_slop.toml` can parse and store the `execution_mode` (native/beads).
- [ ] Task: Implement: Add `execution_mode` toggle to `AppController` state and persistence logic.
- [ ] Task: Conductor - User Manual Verification 'Phase 1: Environment & Core Configuration' (Protocol in workflow.md)
## Phase 2: Beads Backend & Tooling
- [x] Task: Write Tests: Verify a basic Beads/Dolt repository can be initialized and queried via a Python wrapper.
- [x] Task: Implement: Create `src/beads_client.py` to interface with the `bd` CLI or direct Dolt SQL backend.
- [x] Task: Write Tests: Verify agents can create and update Beads using a mock Beads environment.
- [x] Task: Implement: Add a suite of MCP tools (`bd_create`, `bd_update`, `bd_ready`, `bd_list`) to `src/mcp_client.py`.
- [x] Task: Conductor - User Manual Verification 'Phase 2: Beads Backend & Tooling' (Protocol in workflow.md)
- [ ] Task: Write Tests: Verify a basic Beads/Dolt repository can be initialized and queried via a Python wrapper.
- [ ] Task: Implement: Create `src/beads_client.py` to interface with the `bd` CLI or direct Dolt SQL backend.
- [ ] Task: Write Tests: Verify agents can create and update Beads using a mock Beads environment.
- [ ] Task: Implement: Add a suite of MCP tools (`bd_create`, `bd_update`, `bd_ready`, `bd_list`) to `src/mcp_client.py`.
- [ ] Task: Conductor - User Manual Verification 'Phase 2: Beads Backend & Tooling' (Protocol in workflow.md)
## Phase 3: GUI Integration & Visual DAG
- [x] Task: Write Tests: Verify the Visual DAG can load node data from a non-markdown source (Beads graph).
- [x] Task: Implement: Refactor `_render_mma_dashboard` and the DAG renderer to pull from the active mode's backend.
- [x] Task: Implement: Add a "Beads" tab to the MMA Dashboard for browsing the raw Dolt-backed issue graph.
- [x] Task: Implement: Update Tier Streams to include metadata for Beads-specific status changes.
- [x] Task: Conductor - User Manual Verification 'Phase 3: GUI Integration & Visual DAG' (Protocol in workflow.md)
- [ ] Task: Write Tests: Verify the Visual DAG can load node data from a non-markdown source (Beads graph).
- [ ] Task: Implement: Refactor `_render_mma_dashboard` and the DAG renderer to pull from the active mode's backend.
- [ ] Task: Implement: Add a "Beads" tab to the MMA Dashboard for browsing the raw Dolt-backed issue graph.
- [ ] Task: Implement: Update Tier Streams to include metadata for Beads-specific status changes.
- [ ] Task: Conductor - User Manual Verification 'Phase 3: GUI Integration & Visual DAG' (Protocol in workflow.md)
## Phase 4: Context Optimization & Polish
- [x] Task: Write Tests: Verify that "Compaction" correctly summarizes completed Beads into a concise text block.
- [x] Task: Implement: Add Compaction logic to the context aggregation pipeline for Beads Mode.
- [x] Task: Implement: Final UI polish, icons for Bead nodes, and robust error handling for missing `dolt`/`bd` binaries.
- [~] Task: Conductor - User Manual Verification 'Phase 4: Context Optimization & Polish' (Protocol in workflow.md)
- [ ] Task: Write Tests: Verify that "Compaction" correctly summarizes completed Beads into a concise text block.
- [ ] Task: Implement: Add Compaction logic to the context aggregation pipeline for Beads Mode.
- [ ] Task: Implement: Final UI polish, icons for Bead nodes, and robust error handling for missing `dolt`/`bd` binaries.
- [ ] Task: Conductor - User Manual Verification 'Phase 4: Context Optimization & Polish' (Protocol in workflow.md)
@@ -1,39 +0,0 @@
# Codebase Audit Report - 2026-05-02
## Overview
This report summarizes the findings of the codebase audit performed on the `./src` directory. The audit focused on human readability, maintainability, and identifying architectural redundancies.
## Key Findings: Architectural Redundancies
### 1. AI Client Provider Proliferation (`src/ai_client.py`)
**Observation:** The `ai_client.py` module contains significantly redundant code paths for each supported LLM provider (Gemini, Anthropic, DeepSeek, MiniMax). Specifically:
- **Send Methods:** Each provider has its own `_send_<provider>` method with nearly identical structure for tool handling and response parsing.
- **Error Classification:** Multiple `_classify_<provider>_error` functions perform similar mappings of vendor exceptions to internal `ProviderError`.
- **Model Listing:** Redundant `_list_<provider>_models` functions.
- **History Management:** Separate locks and list structures for each provider's history.
**Recommendation:** Abstract the provider logic into a base `AIProvider` class or interface. Each vendor (Gemini, Anthropic, etc.) should implement this interface, allowing `ai_client.py` to dispatch calls polymorphically.
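A minimal sketch of the suggested abstraction, with illustrative method names; a real refactor would also have to cover tool handling, history, and per-vendor error mapping:

```python
# Sketch of the recommended provider interface; names are illustrative, not existing code.
from abc import ABC, abstractmethod


class AIProvider(ABC):
    @abstractmethod
    def send(self, messages: list[dict], tools: list[dict] | None = None) -> dict:
        """Send a chat turn and return a normalized response."""

    @abstractmethod
    def classify_error(self, exc: Exception) -> str:
        """Map a vendor exception to an internal ProviderError category."""

    @abstractmethod
    def list_models(self) -> list[str]:
        """Return available model identifiers for this vendor."""

# ai_client.py could then hold a registry and dispatch polymorphically:
# PROVIDERS = {"gemini": GeminiProvider(), "anthropic": AnthropicProvider(), ...}
# response = PROVIDERS[active_provider].send(messages, tools)
```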
### 2. Tool Name Redundancy (`src/mcp_client.py` & `src/models.py`)
**Observation:** The list of available agent tools was defined in multiple places:
- `mcp_client.TOOL_NAMES` (Hardcoded set)
- `models.AGENT_TOOL_NAMES` (Hardcoded list)
- `mcp_client.MCP_TOOL_SPECS` (Canonical source for tool definitions)
**Action Taken:** `mcp_client.TOOL_NAMES` was refactored to be dynamically generated from `MCP_TOOL_SPECS`.
**Recommendation:** Consolidate `models.AGENT_TOOL_NAMES` to also derive from `mcp_client` or a shared tool registry to ensure synchronization when new tools are added.
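For the same reason, the agent tool list could derive from the canonical specs; a sketch, assuming each entry in `MCP_TOOL_SPECS` carries a `name` field:

```python
# Sketch: derive tool-name collections from the canonical spec list
# (assumes MCP_TOOL_SPECS is a list of dicts with a "name" key).
from src.mcp_client import MCP_TOOL_SPECS

TOOL_NAMES = {spec["name"] for spec in MCP_TOOL_SPECS}   # set for O(1) membership checks
AGENT_TOOL_NAMES = sorted(TOOL_NAMES)                     # stable list for display/config
```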
### 3. Orchestrator Wrapper Redundancy (`src/native_orchestrator.py`)
**Observation:** The `NativeOrchestrator` class methods (e.g., `load_plan`, `save_track`) were found to be thin wrappers around module-level helper functions.
**Action Taken:** Replaced hardcoded paths in these helpers with calls to the standardized `src.paths` module.
**Recommendation:** Evaluate if the `NativeOrchestrator` class is necessary if it remains state-free, or move the helper logic entirely into class methods.
## Documentation Improvements
- Added missing docstrings to critical public functions in `ai_client.py`, `mcp_client.py`, `native_orchestrator.py`, `api_hook_client.py`, and `api_hooks.py`.
- Consolidated module-level docstrings in `multi_agent_conductor.py`.
- Ensured consistent 1-space indentation and CRLF line endings across all modified files.
## Conclusion
The core orchestration and AI client layers are functionally robust but would benefit from an abstraction pass to reduce the maintenance burden of adding new providers or tools.
@@ -1,36 +1,36 @@
# Implementation Plan: Codebase Audit and Cleanup
## Phase 1: Audit and Refactor Orchestration & DAG Core [checkpoint: db03a78]
- [x] Task: Audit `src/multi_agent_conductor.py` for redundant logic, missing docstrings, and organization. 373f4ed
## Phase 1: Audit and Refactor Orchestration & DAG Core
- [ ] Task: Audit `src/multi_agent_conductor.py` for redundant logic, missing docstrings, and organization.
- [ ] Perform minor refactoring of small redundancies.
- [ ] Add minimal docstrings to critical paths.
- [ ] Document large architectural redundancies if found.
- [x] Task: Audit `src/dag_engine.py` for redundant logic, missing docstrings, and organization. f11a219
- [ ] Task: Audit `src/dag_engine.py` for redundant logic, missing docstrings, and organization.
- [ ] Perform minor refactoring of small redundancies.
- [ ] Add minimal docstrings to critical paths.
- [ ] Document large architectural redundancies if found.
- [x] Task: Audit `src/native_orchestrator.py` and `src/orchestrator_pm.py`. 48abdc9
- [ ] Task: Audit `src/native_orchestrator.py` and `src/orchestrator_pm.py`.
- [ ] Perform minor refactoring of small redundancies.
- [ ] Add minimal docstrings to critical paths.
- [ ] Document large architectural redundancies if found.
- [x] Task: Conductor - User Manual Verification 'Phase 1: Audit and Refactor Orchestration & DAG Core' (Protocol in workflow.md)
- [ ] Task: Conductor - User Manual Verification 'Phase 1: Audit and Refactor Orchestration & DAG Core' (Protocol in workflow.md)
## Phase 2: Audit and Refactor AI Clients & Tools [checkpoint: 27bcfb3]
- [x] Task: Audit `src/ai_client.py` and `src/gemini_cli_adapter.py`. 29dd6ec
## Phase 2: Audit and Refactor AI Clients & Tools
- [ ] Task: Audit `src/ai_client.py` and `src/gemini_cli_adapter.py`.
- [ ] Perform minor refactoring of small redundancies.
- [ ] Add minimal docstrings to critical paths.
- [ ] Document large architectural redundancies if found.
- [x] Task: Audit `src/mcp_client.py` and `src/shell_runner.py`. 6dd9b67
- [ ] Task: Audit `src/mcp_client.py` and `src/shell_runner.py`.
- [ ] Perform minor refactoring of small redundancies.
- [ ] Add minimal docstrings to critical paths.
- [ ] Document large architectural redundancies if found.
- [x] Task: Audit `src/api_hook_client.py` and `src/api_hooks.py`. f9b5acd
- [ ] Task: Audit `src/api_hook_client.py` and `src/api_hooks.py`.
- [ ] Perform minor refactoring of small redundancies.
- [ ] Add minimal docstrings to critical paths.
- [ ] Document large architectural redundancies if found.
- [x] Task: Conductor - User Manual Verification 'Phase 2: Audit and Refactor AI Clients & Tools' (Protocol in workflow.md)
- [ ] Task: Conductor - User Manual Verification 'Phase 2: Audit and Refactor AI Clients & Tools' (Protocol in workflow.md)
## Phase 3: Final Review and Reporting [checkpoint: 7e30a31]
- [x] Task: Compile findings of large architectural redundancies from Phase 1 and 2. 8364070
## Phase 3: Final Review and Reporting
- [ ] Task: Compile findings of large architectural redundancies from Phase 1 and 2.
- [ ] Generate a markdown report summarizing the findings.
- [x] Task: Conductor - User Manual Verification 'Phase 3: Final Review and Reporting' (Protocol in workflow.md)
- [ ] Task: Conductor - User Manual Verification 'Phase 3: Final Review and Reporting' (Protocol in workflow.md)
@@ -1,38 +0,0 @@
# Audit of Hidden Prompts
## 1. `_SYSTEM_PROMPT` (src/ai_client.py, L128)
```python
_SYSTEM_PROMPT: str = (
    "You are a helpful coding assistant with access to a PowerShell tool (run_powershell) and MCP tools (file access: read_file, list_directory, search_files, get_file_summary, web access: web_search, fetch_url). "
    "When calling file/directory tools, always use the 'path' parameter for the target path. "
    "When asked to create or edit files, prefer targeted edits over full rewrites. "
    "Always explain what you are doing before invoking the tool.\n\n"
    "When writing or rewriting large files (especially those containing quotes, backticks, or special characters), "
    "avoid python -c with inline strings. Instead: (1) write a .py helper script to disk using a PS here-string "
    "(@'...'@ for literal content), (2) run it with `python <script>`, (3) delete the helper. "
    "For small targeted edits, use PowerShell's (Get-Content) / .Replace() / Set-Content or Add-Content directly.\n\n"
    "When making function calls using tools that accept array or object parameters "
    "ensure those are structured using JSON. For example:\n"
    "When you need to verify a change, rely on the exit code and stdout/stderr from the tool — "
    "the user's context files are automatically refreshed after every tool call, so you do NOT "
    "need to re-read files that are already provided in the <context> block."
)
```
**Status:** Necessary for reliable agent functioning, especially the instructions about writing large files and avoiding re-reading automatically refreshed context. However, it should be exposed so advanced users can override or customize it.
## 2. File Refresh Markers (src/ai_client.py)
**Gemini:** `\n\n[SYSTEM: FILES UPDATED]\n\n{ctx}` (Lines 1111, 1222, 1845, 2066)
**Anthropic:** `[FILES UPDATED — current contents below. Do NOT re-read these files with PowerShell.]\n\n{ctx}` (Line 1557)
**Status:** Necessary for the agent to realize files have changed post-tool execution. Could be simplified or made configurable, but hardcoding them isn't the worst offense as they are functional markers. Exposing the text of these markers might just cause users to accidentally break the agent's context awareness. We should probably keep them as hardcoded constants but maybe unify them or expose a toggle in settings if someone wants to disable auto-refresh. The spec says to "expose them in the GUI... Create fields for project-specific context markers."
## 3. Max Rounds Warning (src/ai_client.py)
**Gemini:** `\n\n[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]`
**Anthropic:** `SYSTEM WARNING: MAX TOOL ROUNDS REACHED. YOU MUST PROVIDE YOUR FINAL ANSWER NOW WITHOUT CALLING ANY MORE TOOLS.`
**Status:** Necessary functional safety net.
## 4. `src/aggregate.py`
No hidden prompts or markers found here. The context aggregation simply structures the files into markdown `### <path>\n\n<content>`.
## Conclusion
The `_SYSTEM_PROMPT` is the primary target for exposure. It's a large block of text that heavily biases the agent's behavior. We should expose it as "Global Agent Instructions" in the AI Settings.
The context markers (`[FILES UPDATED]`) should also be exposed per the specification, perhaps as "Context Refresh Marker" and "Max Rounds Warning" fields.
@@ -1,5 +0,0 @@
# Track cull_hidden_prompts_20260502 Context
- [Specification](./spec.md)
- [Implementation Plan](./plan.md)
- [Metadata](./metadata.json)
@@ -1,8 +0,0 @@
{
  "track_id": "cull_hidden_prompts_20260502",
  "type": "chore",
  "status": "new",
  "created_at": "2026-05-02T12:00:00Z",
  "updated_at": "2026-05-02T12:00:00Z",
  "description": "Review investigation of codebase and expose/cull any hidden invisible prompting either from the system or directly that the user cannot handle for any discussion/session."
}
@@ -1,22 +0,0 @@
# Implementation Plan: Expose/Cull Hidden Invisible Prompting
## Phase 1: Audit and Identification [checkpoint: 30107fd]
- [x] Task: Audit `src/ai_client.py` to identify all hardcoded `_SYSTEM_PROMPT` strings and tool execution instructions.
- [x] Task: Audit `src/aggregate.py` to identify all injected context markers (e.g., `[SYSTEM: FILES UPDATED]`).
- [x] Task: Document identified hidden prompts and determine their necessity vs. redundancy.
- [x] Task: Conductor - User Manual Verification 'Phase 1: Audit and Identification' (Protocol in workflow.md)
## Phase 2: Expose Necessary Prompts in GUI [checkpoint: 3b59028]
- [x] Task: Modify `src/gui_2.py` to add new editable text areas in the "AI Settings" or "Project Settings" panel.
- [x] Create fields for global system tool instructions.
- [x] Create fields for project-specific context markers.
- [x] Task: Update `src/app_controller.py` state initialization to load these new fields from `config.toml` and `manual_slop.toml`.
- [x] Task: Ensure changes are correctly saved and flushed to the project files via `_flush_to_project()` and `_flush_to_config()`.
- [x] Task: Conductor - User Manual Verification 'Phase 2: Expose Necessary Prompts in GUI' (Protocol in workflow.md)
## Phase 3: Cull and Integrate Configured Prompts
- [x] Task: Update `src/ai_client.py`'s `_get_combined_system_prompt()` to utilize the user-configured tool instructions from the AppController state instead of hardcoded strings.
- [x] Task: Update `src/aggregate.py` or `src/ai_client.py` to use the user-configured context markers (like `[FILES UPDATED]`) instead of hardcoded ones.
- [x] Task: Remove the legacy hardcoded strings from the codebase.
- [x] Task: Run tests to ensure tool execution and context refresh still function correctly.
- [x] Task: Conductor - User Manual Verification 'Phase 3: Cull and Integrate Configured Prompts' (Protocol in workflow.md)
@@ -1,28 +0,0 @@
# Specification: Expose/Cull Hidden Invisible Prompting
## 1. Overview
The goal of this track is to review the codebase to identify, expose, or cull any hidden or invisible prompting injected by the system during discussion/sessions. This ensures the user has full control and visibility over the exact context sent to the AI API.
## 2. Functional Requirements
### 2.1 Identify Hardcoded Prompts
- Audit `src/ai_client.py` to identify the hardcoded `_SYSTEM_PROMPT` and any tool execution instructions appended to requests.
- Audit `src/aggregate.py` to identify headers and contextual markers injected during context aggregation (e.g., `[SYSTEM: FILES UPDATED]`).
### 2.2 Expose Prompts in GUI
- For prompts that are necessary for the system to function (e.g., tool usage instructions, `[FILES UPDATED]` logic), expose them in the GUI (e.g., in "AI Settings" or "Project Settings").
- Create editable text areas or configurable options so the user can modify or disable these prompts per-project or globally.
- Ensure the modified prompts are correctly persisted and loaded by the `AppController`.
### 2.3 Cull Redundant Prompts
- Remove any legacy or redundant prompting that no longer serves a purpose or duplicates user-defined system prompts.
## 3. Acceptance Criteria
- [ ] All hardcoded system prompts in `ai_client.py` and `aggregate.py` are identified.
- [ ] Necessary system prompts are exposed as editable fields within the GUI.
- [ ] Users can modify or disable the default tool instructions or aggregation markers.
- [ ] The `ai_client` utilizes the user-configured prompts instead of hardcoded strings.
- [ ] Unnecessary or redundant hidden prompts are removed from the codebase.
## 4. Out of Scope
- Modifying the Tiered MMA worker prompts in `mma_prompts.py` (this track focuses on the core discussion/session loop).
- Adding a "Raw Prompt Preview" modal (this was an alternative option not selected).
@@ -1,37 +0,0 @@
# Identified Bottleneck Targets: Data-Oriented Python Optimization Pass
## Target 1: Context Aggregation Logic (`src/aggregate.py`)
- **Bottleneck:** O(N*M) membership checks in `build_tier3_context` and `build_tier1_context`.
- **Symptom:** As the number of focus files and total project files increase, context building becomes slower.
- **Heuristic Violation:** "Less Python does, the better." Iterative string matching in a loop is expensive in Python.
- **Proposed Fix:** Pre-calculate a set of focus paths and use O(1) lookups.
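The proposed fix amounts to a one-time set build followed by constant-time membership tests, roughly (variable names are illustrative):

```python
# Sketch of the proposed fix: O(N) loop with O(1) lookups instead of O(N*M) list scans.
def build_context(project_files: list[str], focus_files: list[str]) -> list[str]:
    focus_set = set(focus_files)          # built once: O(M)
    return [path for path in project_files if path in focus_set]
```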
## Target 2: DAG Graph Operations (`src/dag_engine.py`)
- **Bottleneck:** Recursive DFS in `has_cycle` and `topological_sort`.
- **Symptom:** Risk of `RecursionError` on very deep graphs; function call overhead for every node visit.
- **Heuristic Violation:** Deep recursion is a "More Python" approach.
- **Proposed Fix:** Implement iterative versions of DFS using an explicit stack.
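An iterative replacement can use an explicit stack with a three-color marking scheme; a sketch, assuming the graph is a plain adjacency dict rather than the real `TrackDAG` structures:

```python
# Sketch: iterative cycle detection with an explicit stack (no recursion).
WHITE, GRAY, BLACK = 0, 1, 2


def has_cycle(graph: dict[str, list[str]]) -> bool:
    color = {node: WHITE for node in graph}
    for root in graph:
        if color[root] != WHITE:
            continue
        color[root] = GRAY
        stack = [(root, iter(graph[root]))]
        while stack:
            node, children = stack[-1]
            child = next(children, None)
            if child is None:
                color[node] = BLACK                   # all descendants explored
                stack.pop()
            elif color.get(child, WHITE) == GRAY:
                return True                            # back edge -> cycle
            elif color.get(child, WHITE) == WHITE:
                color[child] = GRAY
                stack.append((child, iter(graph.get(child, []))))
    return False
```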
## Target 3: Transitive Blocking Propagation (`src/dag_engine.py`)
- **Bottleneck:** O(N^2) or O(N*D) stable-loop in `cascade_blocks`.
- **Symptom:** Repeated iteration over the entire ticket list until no more changes occur.
- **Heuristic Violation:** Redundant iterations.
- **Proposed Fix:** Use a more efficient propagation algorithm (e.g., propagating only from modified nodes or using a topological traversal).
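One way to realize that fix is a breadth-first propagation seeded only with the tickets whose status actually changed; a sketch, assuming `dependents` maps a ticket ID to the IDs that depend on it:

```python
# Sketch: propagate "blocked" status only from newly blocked nodes (BFS),
# instead of re-scanning the whole ticket list until stable.
from collections import deque


def cascade_blocks(blocked_seeds: set[str], dependents: dict[str, list[str]]) -> set[str]:
    blocked = set(blocked_seeds)
    queue = deque(blocked_seeds)
    while queue:
        node = queue.popleft()
        for child in dependents.get(node, []):
            if child not in blocked:       # each ticket is visited at most once
                blocked.add(child)
                queue.append(child)
    return blocked
```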
## Target 4: Orchestrator Main Loop (`src/multi_agent_conductor.py`)
- **Bottleneck:** Nested imports inside `ConductorEngine.run` loop.
- **Symptom:** Repeatedly calling `import` and searching the module cache every second.
- **Heuristic Violation:** Unnecessary JIT/interpreter work.
- **Proposed Fix:** Move all imports to the top of the file.
## Target 5: Orchestrator Idle Overhead (`src/multi_agent_conductor.py`)
- **Bottleneck:** Unnecessary `tick()` and `cascade_blocks()` calls in the main loop when no tasks are running or finished.
- **Symptom:** CPU waste in the background thread.
- **Heuristic Violation:** "The less Python does, the better." Don't recalculate what hasn't changed.
- **Proposed Fix:** Only trigger a DAG tick when a significant state change occurs (e.g., a ticket is completed).
## Target 6: Simulation Typing Latency (`simulation/user_agent.py`)
- **Bottleneck:** Character-by-character `time.sleep` in `simulate_typing`.
- **Symptom:** Extremely slow simulations for large inputs.
- **Heuristic Violation:** Excessive blocking in a loop.
- **Proposed Fix:** Batch typing or provide a toggle to disable jitter for performance-oriented simulations.
@@ -1,23 +0,0 @@
# C Extension Evaluation: Data-Oriented Python Optimization Pass
## Candidates for Future C Extension Porting
While the current Python optimizations have significantly improved performance, the following components remain candidates for lower-level implementation if project scale increases by an order of magnitude.
### 1. AST Structural Pruning (`src/file_cache.py`)
- **Reason:** Current skeletonization and curated view generation rely on the Python `ast` module and iterative tree traversal.
- **Benefit:** A C-based AST visitor (or tree-sitter integration) would reduce context building time for large codebases.
- **Priority:** Medium
### 2. Large-Scale Graph Operations (`src/dag_engine.py`)
- **Reason:** Although Kahn's algorithm and queue-based propagation are efficient, Python's overhead for object management in graphs with >10,000 nodes could become visible.
- **Benefit:** C++ graph backend would ensure zero-latency orchestration even for massive tracks.
- **Priority:** Low (Current performance is sub-millisecond for hundreds of nodes).
### 3. High-Frequency GUI Data Marshalling (`src/gui_2.py`)
- **Reason:** Preparing complex data structures (e.g., token usage history, metric graphs) for ImGui in the main render loop consumes Python JIT time.
- **Benefit:** Moving data preparation to a background thread or a C buffer would further reduce input lag.
- **Priority:** Low
## Summary
The current optimizations have established a solid "Less Python" foundation. C extensions are not strictly necessary at the current project scale but should be considered if context aggregation or DAG orchestration exceeds 50ms in real-world scenarios.
@@ -1,27 +1,27 @@
# Implementation Plan: Data-Oriented Python Optimization Pass
## Phase 1: Guidelines and Instrumentation
- [x] Task: Update `conductor/product-guidelines.md` with Data-Oriented Python heuristics and the "less Python does the better" philosophy. (fbaef6c)
- [x] Task: Review existing profiling instrumentation in `src/performance_monitor.py` or diagnostic hooks. (ae2b79a)
- [x] Task: Expand profiling instrumentation to capture more detailed execution times for non-GUI data structures/processes if necessary. (23c1e21)
- [x] Task: Conductor - User Manual Verification 'Phase 1: Guidelines and Instrumentation' (Protocol in workflow.md) (56e9627)
- [ ] Task: Update `conductor/product-guidelines.md` with Data-Oriented Python heuristics and the "less Python does the better" philosophy.
- [ ] Task: Review existing profiling instrumentation in `src/performance_monitor.py` or diagnostic hooks.
- [ ] Task: Expand profiling instrumentation to capture more detailed execution times for non-GUI data structures/processes if necessary.
- [ ] Task: Conductor - User Manual Verification 'Phase 1: Guidelines and Instrumentation' (Protocol in workflow.md)
## Phase 2: Audit and Profiling (`src/` and `simulation/`)
- [x] Task: Run profiling scenarios (especially utilizing simulations) to generate baseline metrics. (83afc90)
- [x] Task: Audit `src/` (e.g., `dag_engine.py`, `multi_agent_conductor.py`, `aggregate.py`) against the new guidelines, cross-referencing with profiling data to identify bottlenecks. (7dc91dd)
- [x] Task: Audit `simulation/` files against the new guidelines to ensure the test harness is performant and non-blocking. (05db5bd)
- [x] Task: Compile a list of identified bottleneck targets to refactor. (1294619)
- [x] Task: Conductor - User Manual Verification 'Phase 2: Audit and Profiling (`src/` and `simulation/`)' (Protocol in workflow.md) (7a72987)
- [ ] Task: Run profiling scenarios (especially utilizing simulations) to generate baseline metrics.
- [ ] Task: Audit `src/` (e.g., `dag_engine.py`, `multi_agent_conductor.py`, `aggregate.py`) against the new guidelines, cross-referencing with profiling data to identify bottlenecks.
- [ ] Task: Audit `simulation/` files against the new guidelines to ensure the test harness is performant and non-blocking.
- [ ] Task: Compile a list of identified bottleneck targets to refactor.
- [ ] Task: Conductor - User Manual Verification 'Phase 2: Audit and Profiling (`src/` and `simulation/`)' (Protocol in workflow.md)
## Phase 3: Targeted Optimization and Refactoring
- [x] Task: Write/update tests for the first identified bottleneck to establish a performance or structural baseline (Red Phase). (2e68f1e)
- [x] Task: Refactor the first identified bottleneck to align with data-oriented guidelines (Green Phase). (2e68f1e)
- [x] Task: Write/update tests for remaining identified bottlenecks. (56e9627)
- [x] Task: Refactor remaining identified bottlenecks. (d0aff71)
- [x] Task: Conductor - User Manual Verification 'Phase 3: Targeted Optimization and Refactoring' (Protocol in workflow.md) (f628e0b)
- [ ] Task: Write/update tests for the first identified bottleneck to establish a performance or structural baseline (Red Phase).
- [ ] Task: Refactor the first identified bottleneck to align with data-oriented guidelines (Green Phase).
- [ ] Task: Write/update tests for remaining identified bottlenecks.
- [ ] Task: Refactor remaining identified bottlenecks.
- [ ] Task: Conductor - User Manual Verification 'Phase 3: Targeted Optimization and Refactoring' (Protocol in workflow.md)
## Phase 4: Final Evaluation and Documentation
- [x] Task: Re-run all profiling scenarios to compare against the baseline metrics. (90807d3)
- [x] Task: Analyze remaining bottlenecks that did not reach performance thresholds and document them as candidates for C/C++ bindings (Last Resort). (7a72987)
- [x] Task: Generate a final summary report of the optimizations applied and the C extension evaluation. (7a72987)
- [x] Task: Conductor - User Manual Verification 'Phase 4: Final Evaluation and Documentation' (Protocol in workflow.md) (299d9e5)
- [ ] Task: Re-run all profiling scenarios to compare against the baseline metrics.
- [ ] Task: Analyze remaining bottlenecks that did not reach performance thresholds and document them as candidates for C/C++ bindings (Last Resort).
- [ ] Task: Generate a final summary report of the optimizations applied and the C extension evaluation.
- [ ] Task: Conductor - User Manual Verification 'Phase 4: Final Evaluation and Documentation' (Protocol in workflow.md)
@@ -1,43 +0,0 @@
# Final Summary Report: Data-Oriented Python Optimization Pass
## Overview
Successfully executed a full optimization pass across the Manual Slop codebase, aligning with data-oriented heuristics and minimizing Python JIT/interpreter overhead. The track focused on context aggregation, DAG orchestration, and the main conductor loop.
## Key Performance Improvements (Stress Tests)
| Component | Baseline | Optimized | Improvement |
| :--- | :--- | :--- | :--- |
| Context Aggregation (500 files) | 13.11 ms | 7.43 ms | **43.3% Faster** |
| DAG Topological Sort (500 nodes) | 0.45 ms | 0.32 ms | **28.9% Faster** |
| DAG Cascade Blocking (500 nodes) | 1.49 ms | 0.20 ms | **86.6% Faster** |
## Technical Accomplishments
### 1. High-Precision Instrumentation
- Upgraded `PerformanceMonitor` to use `time.perf_counter()` for microsecond precision.
- Implemented a `PerformanceScope` context manager for robust and concise component timing (see the sketch below).
- Added tracking for hit counts, maximum, and minimum execution times.
- Expanded UI Diagnostics panel to display these extended metrics.
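As an illustration, a minimal sketch of the `PerformanceScope` pattern described above, assuming a `record()`-style monitor API (the actual `src/performance_monitor.py` interface may differ):
```python
import time


class PerformanceScope:
    """Context manager that times a named component with time.perf_counter()."""

    def __init__(self, monitor, name):
        self.monitor = monitor
        self.name = name

    def __enter__(self):
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc, tb):
        elapsed_ms = (time.perf_counter() - self._start) * 1000.0
        self.monitor.record(self.name, elapsed_ms)
        return False


class PerformanceMonitor:
    """Tracks hit count, min, max, and total execution time per component."""

    def __init__(self):
        self.stats = {}

    def record(self, name, elapsed_ms):
        entry = self.stats.setdefault(
            name, {"hits": 0, "min_ms": float("inf"), "max_ms": 0.0, "total_ms": 0.0}
        )
        entry["hits"] += 1
        entry["min_ms"] = min(entry["min_ms"], elapsed_ms)
        entry["max_ms"] = max(entry["max_ms"], elapsed_ms)
        entry["total_ms"] += elapsed_ms

    def scope(self, name):
        return PerformanceScope(self, name)
```
Call sites then wrap hot sections in `with monitor.scope("aggregate"): ...` instead of manual start/stop timing.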
### 2. Context Aggregation Optimization
- Eliminated O(N*M) membership checks in `src/aggregate.py` by implementing set-based lookups for focus files.
- Hoisted `ASTParser` instantiation out of high-frequency loops.
### 3. DAG Engine Refactoring
- Replaced recursive DFS in `has_cycle()` with an efficient iterative implementation.
- Implemented Kahn's algorithm for `topological_sort()`, providing O(V+E) performance and single-pass cycle detection (see the sketch below).
- Refactored `cascade_blocks()` to use queue-based BFS propagation, eliminating the O(N^2) stabilization loop.
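For reference, a generic sketch of Kahn's algorithm in adjacency-list form (not the actual `dag_engine.py` code):
```python
from collections import deque


def topological_sort(nodes, edges):
    """Kahn's algorithm: O(V+E) ordering with single-pass cycle detection.

    nodes: iterable of node ids; edges: dict mapping node -> list of downstream nodes.
    """
    indegree = {n: 0 for n in nodes}
    for src in edges:
        for dst in edges[src]:
            indegree[dst] += 1

    queue = deque(n for n, deg in indegree.items() if deg == 0)
    order = []
    while queue:
        node = queue.popleft()
        order.append(node)
        for dst in edges.get(node, ()):
            indegree[dst] -= 1
            if indegree[dst] == 0:
                queue.append(dst)

    if len(order) != len(indegree):
        # Any node that never reached indegree 0 sits on a cycle.
        raise ValueError("cycle detected")
    return order
```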
### 4. Orchestrator Loop Hardening
- Eliminated nested imports within the `ConductorEngine.run` loop to reduce per-second JIT overhead.
- Implemented a `_dirty` flag state machine to avoid redundant DAG evaluations when no state changes occur.
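The dirty-flag idea in its simplest form, as an illustrative sketch rather than the actual `ConductorEngine` state machine:
```python
class ConductorLoop:
    def __init__(self, dag):
        self.dag = dag
        self._dirty = True  # force one evaluation on startup

    def mark_dirty(self):
        self._dirty = True  # called whenever task or agent state changes

    def tick(self):
        if not self._dirty:
            return  # skip redundant DAG evaluation when nothing changed
        self._dirty = False
        self.dag.evaluate()
```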
### 5. High-Fidelity Simulation Optimization
- Added a `batch_typing` mode to `UserSimAgent` to accelerate performance-oriented simulation runs by bypassing character-by-character delays.
## Future Considerations
- **C Extensions:** Evaluation identifies AST pruning and massive graph operations as candidates if project scale increases significantly.
- **Background Data Preparation:** Consider moving metric history processing to a background thread to ensure consistent 60FPS UI performance.
## Conclusion
The Manual Slop engine is now significantly more efficient and adheres strictly to the "Less Python Does, the Better" philosophy. The architectural foundations are prepared for larger implementation tracks and more complex multi-agent orchestration.
@@ -1,22 +0,0 @@
{
"name": "discussion_hub_panel_reorganization",
"created": "2026-03-22",
"status": "in_progress",
"priority": "high",
"affected_files": [
"src/gui_2.py",
"src/models.py",
"src/project_manager.py",
"tests/test_gui_context_presets.py",
"tests/test_discussion_takes.py"
],
"replaces": [
"session_context_snapshots_20260311",
"discussion_takes_branching_20260311"
],
"related_tracks": [
"aggregation_smarter_summaries (future)",
"system_context_exposure (future)"
],
"notes": "These earlier tracks were marked complete but the UI panel reorganization was not properly implemented. This track consolidates and properly executes the intended UX."
}
@@ -1,55 +0,0 @@
# Implementation Plan: Discussion Hub Panel Reorganization
## Phase 1: Cleanup & Project Settings Rename
Focus: Remove redundant ui_summary_only, rename Context Hub, establish project-level vs discussion-level separation
- [x] Task: Audit current ui_summary_only usages and document behavior to deprecate [f6fe3ba] (embedded audit)
- [x] Task: Remove ui_summary_only checkbox from _render_projects_panel (gui_2.py) [f5d4913]
- [x] Task: Rename Context Hub to "Project Settings" in _gui_func tab bar [2ed9867]
- [x] Task: Remove Context Presets tab from Project Settings (Context Hub) [9ddbcd2]
- [x] Task: Update references in show_windows dict and any help text [2ed9867] (renamed Context Hub -> Project Settings)
- [x] Task: Write tests verifying ui_summary_only removal doesn't break existing functionality [f5d4913]
- [x] Task: Conductor - User Manual Verification 'Phase 1: Cleanup & Project Settings Rename'
## Phase 2: Merge Session Hub into Discussion Hub [checkpoint: 2b73745]
Focus: Move Session Hub tabs into Discussion Hub, eliminate separate Session Hub window
- [x] Task: Audit Session Hub (_render_session_hub) tab content [documented above]
- [x] Task: Add Snapshot tab to Discussion Hub containing Aggregate MD + System Prompt preview [2b73745]
- [x] Task: Remove Session Hub window from _gui_func [2b73745]
- [x] Task: Add Discussion Hub tab bar structure (Discussion | Context Composition | Snapshot | Takes) [2b73745]
- [x] Task: Write tests for new tab structure rendering [2b73745]
- [x] Task: Conductor - User Manual Verification 'Phase 2: Merge Session Hub into Discussion Hub'
## Phase 3: Context Composition Tab [checkpoint: a3c8d4b]
Focus: Per-discussion file filter with save/load preset functionality
- [x] Task: Write tests for Context Composition state management [a3c8d4b]
- [x] Task: Create _render_context_composition_panel method [a3c8d4b]
- [x] Task: Implement file/screenshot selection display (filtered from Files & Media) [a3c8d4b]
- [x] Task: Implement per-file flags display (Auto-Aggregate, Force Full) [a3c8d4b]
- [x] Task: Implement Save as Preset / Load Preset buttons [a3c8d4b]
- [x] Task: Connect Context Presets storage to this panel [a3c8d4b]
- [x] Task: Update Persona editor to reference Context Composition presets (NOTE: already done via existing context_preset field in Persona) [a3c8d4b]
- [x] Task: Write tests for Context Composition preset save/load [a3c8d4b]
- [x] Task: Conductor - User Manual Verification 'Phase 3: Context Composition Tab'
## Phase 4: Takes Timeline Integration [checkpoint: cc6a651]
Focus: DAW-style branching with proper visual timeline and synthesis
- [x] Task: Audit existing takes data structure and synthesis_formatter [documented above]
- [ ] Task: Enhance takes data model with parent_entry and parent_take tracking (deferred - existing model sufficient)
- [x] Task: Implement Branch from Entry action in discussion history [already existed]
- [x] Task: Implement visual timeline showing take divergence [_render_takes_panel with table view]
- [x] Task: Integrate synthesis panel into Takes tab [cc6a651]
- [x] Task: Implement take selection for synthesis [cc6a651]
- [x] Task: Write tests for take branching and synthesis [cc6a651]
- [x] Task: Conductor - User Manual Verification 'Phase 4: Takes Timeline Integration'
## Phase 5: Final Integration & Cleanup
Focus: Ensure all panels work together, remove dead code
- [ ] Task: Run full test suite to verify no regressions
- [x] Task: Remove dead code from ui_summary_only references [verified]
- [x] Task: Update conductor/tracks.md to mark old session_context_snapshots and discussion_takes_branching as archived/replaced [verified]
- [ ] Task: Conductor - User Manual Verification 'Phase 5: Final Integration & Cleanup'
@@ -1,137 +0,0 @@
# Specification: Discussion Hub Panel Reorganization
## 1. Overview
This track addresses the fragmented implementation of Session Context Snapshots and Discussion Takes & Timeline Branching tracks (2026-03-11). Those tracks were marked complete but the UI panel layout was not properly reorganized.
**Goal:** Create a coherent Discussion Hub that absorbs Session Hub functionality, establishes Files & Media as project-level file inventory, and properly implements Context Composition and DAW-style Takes branching.
## 2. Current State Audit (as of 2026-03-22)
### Already Implemented (DO NOT re-implement)
- `ui_summary_only` checkbox in Projects panel
- Session Hub as separate window with tabs: Aggregate MD | System Prompt
- Context Hub with tabs: Projects | Paths | Context Presets
- Context Presets save/load mechanism in project TOML
- `_render_synthesis_panel()` method (gui_2.py:2612-2643) - basic synthesis UI
- Takes data structure in `project['discussion']['discussions']`
- Per-file `Auto-Aggregate` and `Force Full` flags in Files & Media
### Gaps to Fill (This Track's Scope)
1. `ui_summary_only` is redundant with per-file flags - deprecate it
2. Context Hub renamed to "Project Settings" (remove Context Presets tab)
3. Session Hub merged into Discussion Hub as tabs
4. Files & Media stays separate as project-level inventory
5. Context Composition tab in Discussion Hub for per-discussion filter
6. Context Presets accessible via Context Composition (save/load filters)
7. DAW-style Takes timeline properly integrated into Discussion Hub
8. Synthesis properly integrated with Take selection
## 3. Panel Layout Target
| Panel | Location | Purpose |
|-------|----------|---------|
| **AI Settings** | Separate dockable | Provider, model, system prompts, tool presets, bias profiles |
| **Files & Media** | Separate dockable | Project-level file inventory (addressable files) |
| **Project Settings** | Context Hub → rename | Git dir, paths, project list (no context-related settings) |
| **Discussion Hub** | Main hub | All discussion-related UI (tabs below) |
| **MMA Dashboard** | Separate dockable | Multi-agent orchestration |
| **Operations Hub** | Separate dockable | Tool calls, comms history, external tools |
| **Diagnostics** | Separate dockable | Telemetry, logs |
**Discussion Hub Tabs:**
1. **Discussion** - Main conversation view (current implementation)
2. **Context Composition** - File/screenshot filter + presets (NEW)
3. **Snapshot** - Aggregate MD + System Prompt preview (moved from Session Hub)
4. **Takes** - DAW-style timeline branching + synthesis (integrated, not separate panel)
## 4. Functional Requirements
### 4.1 Deprecate ui_summary_only
- Remove `ui_summary_only` checkbox from Projects panel
- Per-file flags (`Auto-Aggregate`, `Force Full`) are the intended mechanism
- Document migration path for users
### 4.2 Rename Context Hub → Project Settings
- Context Hub tab bar: Projects | Paths
- Remove "Context Presets" tab
- All context-related functionality moves to Discussion Hub → Context Composition
### 4.3 Merge Session Hub into Discussion Hub
- Session Hub window eliminated
- Its content becomes tabs in Discussion Hub:
- **Snapshot tab**: Aggregate MD preview, System Prompt preview, "Copy" buttons
- These were previously in Session Hub
### 4.4 Context Composition Tab (NEW)
- Shows currently selected files/screenshots for THIS discussion
- Per-file flags: Auto-Aggregate, Force Full
- **"Save as Preset"** / **"Load Preset"** buttons
- Dropdown to select from saved presets
- Relationship to Files & Media:
- Files & Media = the inventory (project-level)
- Context Composition = selected filter for current discussion
### 4.5 Takes Timeline (DAW-Style)
- **New Take**: Start fresh discussion thread
- **Branch Take**: Fork from any discussion entry
- **Switch Take**: Make a take the active discussion
- **Rename/Delete Take**
- All takes share the same Files & Media (not duplicated)
- Non-destructive branching
- Visual timeline showing divergence points
### 4.6 Synthesis Integration
- User selects 2+ takes via checkboxes
- Click "Synthesize" button
- AI generates "resolved" response considering all selected approaches
- Result appears as new take
- Accessible from Discussion Hub → Takes tab
## 5. Data Model Changes
### 5.1 Discussion State Structure
```python
# Per discussion in project['discussion']['discussions']
{
"name": str,
"history": [
{"role": "user"|"assistant", "content": str, "ts": str, "files_injected": [...]}
],
"parent_entry": Optional[int], # index of parent message if branched
"parent_take": Optional[str], # name of parent take if branched
}
```
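To make the branching semantics concrete, a sketch of a fork at a given entry index under this data model (the function name and the assumption that takes live in a dict keyed by name are illustrative):
```python
import copy


def branch_take(discussions, source_name, entry_index, new_name):
    """Fork a take at entry_index; the new take copies history up to and including that entry."""
    source = discussions[source_name]
    discussions[new_name] = {
        "name": new_name,
        "history": copy.deepcopy(source["history"][: entry_index + 1]),
        "parent_entry": entry_index,
        "parent_take": source_name,
    }
    return discussions[new_name]
```
Branching is non-destructive (the source take is untouched), and Files & Media remain shared at the project level, as required in 4.5.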
### 5.2 Context Preset Format
```toml
[context_preset.my_filter]
files = ["path/to/file_a.py"]
auto_aggregate = true
force_full = false
screenshots = ["path/to/shot1.png"]
```
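And a hypothetical helper showing how such a preset could be resolved into a per-discussion filter (the real `project_manager.py` API and the `files_media` key are assumptions):
```python
def apply_context_preset(project, preset_name):
    """Return the file selection named by a preset, intersected with the project inventory."""
    preset = project["context_preset"][preset_name]
    inventory = {f["path"] for f in project["files_media"]}  # hypothetical project-level inventory key
    return {
        "files": [p for p in preset["files"] if p in inventory],
        "screenshots": preset.get("screenshots", []),
        "auto_aggregate": preset.get("auto_aggregate", True),
        "force_full": preset.get("force_full", False),
    }
```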
## 6. Non-Functional Requirements
- All changes must not break existing tests
- New tests required for new functionality
- Follow 1-space indentation Python code style
- No comments unless explicitly requested
## 7. Acceptance Criteria
- [ ] `ui_summary_only` removed from Projects panel
- [ ] Context Hub renamed to Project Settings
- [ ] Session Hub window eliminated
- [ ] Discussion Hub has 4 tabs: Discussion, Context Composition, Snapshot, Takes
- [ ] Context Composition allows save/load of filter presets
- [ ] Takes can be branched from any entry
- [ ] Takes timeline shows divergence visually
- [ ] Synthesis works with 2+ selected takes
- [ ] All existing tests still pass
- [ ] New tests cover new functionality
## 8. Out of Scope
- Aggregation improvements (sub-agent summarization, hash-based caching) - separate future track
- System prompt exposure (`_SYSTEM_PROMPT` in ai_client.py) - separate future track
- Session sophistication (Session as container for multiple discussions) - deferred
@@ -1,28 +1,25 @@
# Implementation Plan: Discussion Takes & Timeline Branching
## Phase 1: Backend Support for Timeline Branching [checkpoint: 4039589]
- [x] Task: Write failing tests for extending the session state model to support branching (tree-like history or parallel linear "takes" with a shared ancestor). [fefa06b]
- [x] Task: Implement backend logic to branch a session history at a specific message index into a new take ID. [fefa06b]
- [x] Task: Implement backend logic to promote a specific take ID into an independent, top-level session. [fefa06b]
- [x] Task: Conductor - User Manual Verification 'Phase 1: Backend Support for Timeline Branching' (Protocol in workflow.md)
## Phase 1: Backend Support for Timeline Branching
- [ ] Task: Write failing tests for extending the session state model to support branching (tree-like history or parallel linear "takes" with a shared ancestor).
- [ ] Task: Implement backend logic to branch a session history at a specific message index into a new take ID.
- [ ] Task: Implement backend logic to promote a specific take ID into an independent, top-level session.
- [ ] Task: Conductor - User Manual Verification 'Phase 1: Backend Support for Timeline Branching' (Protocol in workflow.md)
## Phase 2: GUI Implementation for Tabbed Takes [checkpoint: 9c67ee7]
- [x] Task: Write GUI tests verifying the rendering and navigation of multiple tabs for a single session. [3225125]
- [x] Task: Implement a tabbed interface within the Discussion window to switch between different takes of the active session. [3225125]
- [x] Task: Add a "Split/Branch from here" action to individual message entries in the discussion history. [e48835f]
- [x] Task: Add a UI button/action to promote the currently active take to a new separate session. [1f7880a]
- [x] Task: Conductor - User Manual Verification 'Phase 2: GUI Implementation for Tabbed Takes' (Protocol in workflow.md)
## Phase 2: GUI Implementation for Tabbed Takes
- [ ] Task: Write GUI tests verifying the rendering and navigation of multiple tabs for a single session.
- [ ] Task: Implement a tabbed interface within the Discussion window to switch between different takes of the active session.
- [ ] Task: Add a "Split/Branch from here" action to individual message entries in the discussion history.
- [ ] Task: Add a UI button/action to promote the currently active take to a new separate session.
- [ ] Task: Conductor - User Manual Verification 'Phase 2: GUI Implementation for Tabbed Takes' (Protocol in workflow.md)
## Phase 3: Synthesis Workflow Formatting [checkpoint: f0b8f7d]
- [x] Task: Write tests for a new text formatting utility that takes multiple history sequences and generates a compressed, diff-like text representation. [510527c]
- [x] Task: Implement the sequence differencing and compression logic to clearly highlight variances between takes. [510527c]
- [x] Task: Conductor - User Manual Verification 'Phase 3: Synthesis Workflow Formatting' (Protocol in workflow.md)
## Phase 3: Synthesis Workflow Formatting
- [ ] Task: Write tests for a new text formatting utility that takes multiple history sequences and generates a compressed, diff-like text representation.
- [ ] Task: Implement the sequence differencing and compression logic to clearly highlight variances between takes.
- [ ] Task: Conductor - User Manual Verification 'Phase 3: Synthesis Workflow Formatting' (Protocol in workflow.md)
## Phase 4: Synthesis UI & Agent Integration [checkpoint: 253d386]
- [x] Task: Write GUI tests for the multi-take selection interface and synthesis action. [a452c72]
- [x] Task: Implement a UI mechanism allowing users to select multiple takes and provide a synthesis prompt. [a452c72]
- [x] Task: Implement the execution pipeline to feed the compressed differences and user prompt to an AI agent, and route the generated synthesis to a new "take" tab. [a452c72]
- [x] Task: Conductor - User Manual Verification 'Phase 4: Synthesis UI & Agent Integration' (Protocol in workflow.md)
## Phase: Review Fixes
- [x] Task: Apply review suggestions [2a8af5f]
## Phase 4: Synthesis UI & Agent Integration
- [ ] Task: Write GUI tests for the multi-take selection interface and synthesis action.
- [ ] Task: Implement a UI mechanism allowing users to select multiple takes and provide a synthesis prompt.
- [ ] Task: Implement the execution pipeline to feed the compressed differences and user prompt to an AI agent, and route the generated synthesis to a new "take" tab.
- [ ] Task: Conductor - User Manual Verification 'Phase 4: Synthesis UI & Agent Integration' (Protocol in workflow.md)
@@ -0,0 +1,26 @@
# Implementation Plan: Frosted Glass Background Effect
## Phase 1: Shader Development & Integration [checkpoint: 55f3bd8]
- [x] Task: Audit `src/shader_manager.py` to identify existing background/post-process integration points. [1328bc1]
- [x] Task: Write Tests: Verify `ShaderManager` can compile and bind a multi-pass blur shader. [1328bc1]
- [x] Task: Implement: Add `FrostedGlassShader` (GLSL) to `src/shader_manager.py`. [1328bc1]
- [x] Task: Implement: Integrate the blur shader into the `ShaderManager` lifecycle. [1328bc1]
- [x] Task: Conductor - User Manual Verification 'Phase 1: Shader Development & Integration' (Protocol in workflow.md) [55f3bd8]
## Phase 2: Framebuffer Capture Pipeline [checkpoint: e9b7875]
- [x] Task: Write Tests: Verify the FBO capture mechanism correctly samples the back buffer and stores it in a texture. [f297e7a]
- [x] Task: Implement: Update `src/shader_manager.py` or `src/gui_2.py` to handle "pre-rendering" of the background into a texture for blurring. [f297e7a]
- [x] Task: Implement: Ensure the blurred texture is updated every frame or on window move events. [f297e7a]
- [x] Task: Conductor - User Manual Verification 'Phase 2: Framebuffer Capture Pipeline' (Protocol in workflow.md) [e9b7875]
## Phase 3: GUI Integration & Rendering [checkpoint: cecbe22]
- [x] Task: Write Tests: Verify that a mocked ImGui window successfully calls the frosted glass rendering logic. [cecbe22]
- [x] Task: Implement: Create a `_render_frosted_background(self, pos, size)` helper in `src/gui_2.py`. [cecbe22]
- [x] Task: Implement: Update panel rendering loops (e.g. `_gui_func`) to inject the frosted background before calling `imgui.begin()` for major panels. [cecbe22]
- [x] Task: Conductor - User Manual Verification 'Phase 3: GUI Integration & Rendering' (Protocol in workflow.md) [cecbe22]
## Phase 4: UI Controls & Configuration [checkpoint: cecbe22]
- [x] Task: Write Tests: Verify that modifying blur uniforms via the Live Editor updates the shader state. [cecbe22]
- [x] Task: Implement: Add "Frosted Glass" sliders (Blur, Tint, Opacity) to the **Shader Editor** in `src/gui_2.py`. [cecbe22]
- [x] Task: Implement: Update `src/theme.py` to parse and store frosted glass settings from `config.toml`. [cecbe22]
- [x] Task: Conductor - User Manual Verification 'Phase 4: UI Controls & Configuration' (Protocol in workflow.md) [cecbe22]
+39 -40
View File
@@ -1,47 +1,46 @@
# Implementation Plan: RAG Support
## Phase 1: Foundation & Vector Store Integration [checkpoint: dd042d9]
- [x] Task: Define the RAG architecture and configuration schema. e80cd6b
- [x] Update `src/models.py` to include `RAGConfig` and `VectorStoreConfig`. e80cd6b
- [x] Implement configuration loading/saving in `AppController`. e80cd6b
- [x] Task: Integrate a local vector store. e80cd6b
- [x] Add `chromadb` or `qdrant-client` to `requirements.txt`. e80cd6b
- [x] Create `src/rag_engine.py` to manage the vector database lifecycle (init, add, search, delete). e80cd6b
- [x] Task: Implement embedding providers. e80cd6b
- [x] Implement Gemini embedding wrapper in `src/rag_engine.py`. e80cd6b
- [x] Implement local embedding wrapper (e.g., using `sentence-transformers`) in `src/rag_engine.py`. e80cd6b
- [x] Task: Write unit tests for vector store operations and embedding generation. e80cd6b
- [x] Task: Conductor - User Manual Verification 'Phase 1: Foundation & Vector Store' (Protocol in workflow.md) dd042d9
## Phase 1: Foundation & Vector Store Integration
- [ ] Task: Define the RAG architecture and configuration schema.
- [ ] Update `src/models.py` to include `RAGConfig` and `VectorStoreConfig`.
- [ ] Implement configuration loading/saving in `AppController`.
- [ ] Task: Integrate a local vector store.
- [ ] Add `chromadb` or `qdrant-client` to `requirements.txt`.
- [ ] Create `src/rag_engine.py` to manage the vector database lifecycle (init, add, search, delete).
- [ ] Task: Implement embedding providers.
- [ ] Implement Gemini embedding wrapper in `src/rag_engine.py`.
- [ ] Implement local embedding wrapper (e.g., using `sentence-transformers`) in `src/rag_engine.py`.
- [ ] Task: Write unit tests for vector store operations and embedding generation.
- [ ] Task: Conductor - User Manual Verification 'Phase 1: Foundation & Vector Store' (Protocol in workflow.md)
## Phase 2: Indexing & Retrieval Logic [checkpoint: fe0069c]
- [x] Task: Implement the indexing pipeline. fe0069c
- [x] Implement file chunking strategies (e.g., character-based, AST-aware) in `src/rag_engine.py`. fe0069c
- [x] Create a background indexing task in `AppController`. fe0069c
- [x] Implement auto-indexing logic triggered by Context Hub changes. fe0069c
- [x] Task: Implement the retrieval pipeline. fe0069c
- [x] Implement similarity search with configurable top-k and threshold. fe0069c
- [x] Implement "Native Retrieval" logic for Gemini (leveraging `ai_client.py`). fe0069c
- [x] Task: Update `ai_client.py` to support RAG. fe0069c
- [x] Add a `retrieve_context()` step to the `send()` loop. fe0069c
- [x] Format and inject retrieved fragments into the model's system prompt or context block. fe0069c
- [x] Task: Write integration tests for the indexing and retrieval flow. fe0069c
- [x] Task: Conductor - User Manual Verification 'Phase 2: Indexing & Retrieval Logic' (Protocol in workflow.md) fe0069c
## Phase 2: Indexing & Retrieval Logic
- [ ] Task: Implement the indexing pipeline.
- [ ] Implement file chunking strategies (e.g., character-based, AST-aware) in `src/rag_engine.py`.
- [ ] Create a background indexing task in `AppController`.
- [ ] Implement auto-indexing logic triggered by Context Hub changes.
- [ ] Task: Implement the retrieval pipeline.
- [ ] Implement similarity search with configurable top-k and threshold.
- [ ] Implement "Native Retrieval" logic for Gemini (leveraging `ai_client.py`).
- [ ] Task: Update `ai_client.py` to support RAG.
- [ ] Add a `retrieve_context()` step to the `send()` loop.
- [ ] Format and inject retrieved fragments into the model's system prompt or context block.
- [ ] Task: Write integration tests for the indexing and retrieval flow.
- [ ] Task: Conductor - User Manual Verification 'Phase 2: Indexing & Retrieval Logic' (Protocol in workflow.md)
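A minimal sketch of the indexing/retrieval flow, assuming `chromadb` is the vector store chosen in Phase 1 (collection name, metadata keys, and the distance threshold are illustrative):
```python
import chromadb

client = chromadb.PersistentClient(path=".rag_index")
collection = client.get_or_create_collection("project_chunks")

# Indexing: one entry per chunk, with enough metadata to open the source location later.
collection.add(
    ids=["src/aggregate.py:0"],
    documents=["def aggregate_context(...): ..."],
    metadatas=[{"path": "src/aggregate.py", "start_line": 1, "end_line": 40}],
)

# Retrieval: configurable top-k, then a distance threshold before injection into the prompt.
results = collection.query(query_texts=["how are focus files aggregated?"], n_results=5)
fragments = [
    (doc, meta)
    for doc, meta, dist in zip(
        results["documents"][0], results["metadatas"][0], results["distances"][0]
    )
    if dist < 0.6  # illustrative threshold
]
```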
## Phase 3: GUI Integration & Visualization
- [x] Task: Implement the RAG Settings panel in `src/gui_2.py`. f57e2fe
- [x] Add UI controls for choosing the RAG source, embedding model, and retrieval parameters. f57e2fe
- [x] Add a "Rebuild Index" button and status progress bar. f57e2fe
- [x] Task: Implement retrieval visualization in the Discussion history. d4dc237
- [x] Display "Retrieved Context" blocks with expandable summaries. d4dc237
- [x] Add "Source" buttons to each block that open the file at the specific chunk's location. d4dc237
- [x] Task: Implement auto-start/indexing status indicators in the GUI. 8b48753
- [x] Task: Write visual regression tests or simulation scripts to verify the RAG UI components. f57e2fe
- [x] Task: Conductor - User Manual Verification 'Phase 3: GUI Integration & Visualization' (Protocol in workflow.md) [checkpoint: 213747a]
- [ ] Task: Implement the RAG Settings panel in `src/gui_2.py`.
- [ ] Add UI controls for choosing the RAG source, embedding model, and retrieval parameters.
- [ ] Add a "Rebuild Index" button and status progress bar.
- [ ] Task: Implement retrieval visualization in the Discussion history.
- [ ] Display "Retrieved Context" blocks with expandable summaries.
- [ ] Add "Source" buttons to each block that open the file at the specific chunk's location.
- [ ] Task: Implement auto-start/indexing status indicators in the GUI.
- [ ] Task: Write visual regression tests or simulation scripts to verify the RAG UI components.
- [ ] Task: Conductor - User Manual Verification 'Phase 3: GUI Integration & Visualization' (Protocol in workflow.md)
## Phase 4: Refinement & Advanced RAG
- [x] Task: Implement support for external RAG APIs/MCP servers. f57e2fe
- [x] Create a bridge in `src/rag_engine.py` to call external RAG tools via the MCP interface. f57e2fe
- [x] Task: Optimize indexing performance for large projects (e.g., incremental updates, parallel chunking). f57e2fe
- [x] Task: Perform a final end-to-end verification with a large codebase. f57e2fe
- [x] Task: Conductor - User Manual Verification 'Phase 4: Refinement & Advanced RAG' (Protocol in workflow.md) f57e2fe
- [ ] Task: Implement support for external RAG APIs/MCP servers.
- [ ] Create a bridge in `src/rag_engine.py` to call external RAG tools via the MCP interface.
- [ ] Task: Optimize indexing performance for large projects (e.g., incremental updates, parallel chunking).
- [ ] Task: Perform a final end-to-end verification with a large codebase.
- [ ] Task: Conductor - User Manual Verification 'Phase 4: Refinement & Advanced RAG' (Protocol in workflow.md)
@@ -1,24 +1,24 @@
# Implementation Plan: Session Context Snapshots & Visibility
## Phase 1: Backend Support for Context Presets
- [x] Task: Write failing tests for saving, loading, and listing Context Presets in the project configuration. 93a590c
- [x] Task: Implement Context Preset storage logic (e.g., updating TOML schemas in `project_manager.py`) to manage file/screenshot lists. 93a590c
- [x] Task: Conductor - User Manual Verification 'Phase 1: Backend Support for Context Presets' (Protocol in workflow.md) 93a590c
- [ ] Task: Write failing tests for saving, loading, and listing Context Presets in the project configuration.
- [ ] Task: Implement Context Preset storage logic (e.g., updating TOML schemas in `project_manager.py`) to manage file/screenshot lists.
- [ ] Task: Conductor - User Manual Verification 'Phase 1: Backend Support for Context Presets' (Protocol in workflow.md)
## Phase 2: GUI Integration & Persona Assignment
- [x] Task: Write tests for the Context Hub UI components handling preset saving and loading. 573f5ee
- [x] Task: Implement the UI controls in the Context Hub to save current selections as a preset and load existing presets. 573f5ee
- [x] Task: Update the Persona configuration UI (`personas.py` / `gui_2.py`) to allow assigning a named Context Preset to an agent persona. 791e1b7
- [x] Task: Conductor - User Manual Verification 'Phase 2: GUI Integration & Persona Assignment' (Protocol in workflow.md) 791e1b7
- [ ] Task: Write tests for the Context Hub UI components handling preset saving and loading.
- [ ] Task: Implement the UI controls in the Context Hub to save current selections as a preset and load existing presets.
- [ ] Task: Update the Persona configuration UI (`personas.py` / `gui_2.py`) to allow assigning a named Context Preset to an agent persona.
- [ ] Task: Conductor - User Manual Verification 'Phase 2: GUI Integration & Persona Assignment' (Protocol in workflow.md)
## Phase 3: Transparent Context Visibility
- [x] Task: Write tests to ensure the initial aggregate markdown, resolved system prompt, and file injection timestamps are accurately recorded in the session state. 84b6266
- [x] Task: Implement UI elements in the Session Hub to expose the aggregated markdown and the active system prompt. 84b6266
- [x] Task: Enhance the discussion timeline rendering in `gui_2.py` to visually indicate exactly when files and screenshots were injected into the context. 84b6266
- [x] Task: Conductor - User Manual Verification 'Phase 3: Transparent Context Visibility' (Protocol in workflow.md) 84b6266
- [ ] Task: Write tests to ensure the initial aggregate markdown, resolved system prompt, and file injection timestamps are accurately recorded in the session state.
- [ ] Task: Implement UI elements in the Session Hub to expose the aggregated markdown and the active system prompt.
- [ ] Task: Enhance the discussion timeline rendering in `gui_2.py` to visually indicate exactly when files and screenshots were injected into the context.
- [ ] Task: Conductor - User Manual Verification 'Phase 3: Transparent Context Visibility' (Protocol in workflow.md)
## Phase 4: Agent-Focused Session Filtering
- [x] Task: Write tests for the GUI state filtering logic when focusing on a specific agent's session. 038c909
- [x] Task: Relocate the 'Focus Agent' feature from the Operations Hub to the MMA Dashboard. 038c909
- [x] Task: Implement the action to filter the Session and Discussion hubs based on the selected agent's context. 038c909
- [x] Task: Conductor - User Manual Verification 'Phase 4: Agent-Focused Session Filtering' (Protocol in workflow.md) 038c909
- [ ] Task: Write tests for the GUI state filtering logic when focusing on a specific agent's session.
- [ ] Task: Relocate the 'Focus Agent' feature from the Operations Hub to the MMA Dashboard.
- [ ] Task: Implement the action to filter the Session and Discussion hubs based on the selected agent's context.
- [ ] Task: Conductor - User Manual Verification 'Phase 4: Agent-Focused Session Filtering' (Protocol in workflow.md)
@@ -1,16 +0,0 @@
{
"name": "system_context_exposure",
"created": "2026-03-22",
"status": "future",
"priority": "medium",
"affected_files": [
"src/ai_client.py",
"src/gui_2.py",
"src/models.py"
],
"related_tracks": [
"discussion_hub_panel_reorganization (in_progress)",
"aggregation_smarter_summaries (future)"
],
"notes": "Deferred from discussion_hub_panel_reorganization planning. The _SYSTEM_PROMPT in ai_client.py is hidden from users - this exposes it for customization."
}
@@ -1,41 +0,0 @@
# Implementation Plan: System Context Exposure
## Phase 1: Backend Changes [checkpoint: a0fb086]
Focus: Make _SYSTEM_PROMPT configurable
- [x] Task: Audit ai_client.py system prompt flow b654c7c
- [x] Task: Move _SYSTEM_PROMPT to configurable storage 4f1bcea
- [x] Task: Implement load/save of base system prompt 4f1bcea
- [x] Task: Modify _get_combined_system_prompt() to use config 4f1bcea
- [x] Task: Write tests for configurable system prompt 4f1bcea
- [x] Task: Conductor - User Manual Verification 'Phase 1: Backend Changes' a0fb086
## Phase 2: UI Implementation [checkpoint: c3a114d]
Focus: Add base prompt editor to AI Settings
- [x] Task: Add UI controls to _render_system_prompts_panel c74971b
- [x] Task: Implement checkbox for "Use Default Base" c74971b
- [x] Task: Implement collapsible base prompt editor c74971b
- [x] Task: Add "Reset to Default" button c74971b
- [x] Task: Write tests for UI controls c74971b
- [x] Task: Conductor - User Manual Verification 'Phase 2: UI Implementation' c3a114d
## Phase 3: Persistence & Provider Testing [checkpoint: 40db835]
Focus: Ensure persistence and cross-provider compatibility
- [x] Task: Verify base prompt persists across app restarts e24ea60
- [x] Task: Test with Gemini provider e24ea60
- [x] Task: Test with Anthropic provider e24ea60
- [x] Task: Test with DeepSeek provider e24ea60
- [x] Task: Test with Gemini CLI adapter e24ea60
- [x] Task: Conductor - User Manual Verification 'Phase 3: Persistence & Provider Testing' 40db835
## Phase 4: Safety & Defaults [checkpoint: 2441ea6]
Focus: Ensure users can recover from bad edits
- [x] Task: Implement confirmation dialog before saving custom base 68d18f4
- [x] Task: Add validation for empty/invalid prompts 68d18f4
- [x] Task: Document the base prompt purpose in UI 68d18f4
- [x] Task: Add "Show Diff" between default and custom 68d18f4
- [x] Task: Write tests for safety features 68d18f4
- [x] Task: Conductor - User Manual Verification 'Phase 4: Safety & Defaults' 2441ea6
@@ -1,120 +0,0 @@
# Specification: System Context Exposure
## 1. Overview
This track exposes the hidden system prompt from `ai_client.py` to users for customization.
**Current Problem:**
- `_SYSTEM_PROMPT` in `ai_client.py` (lines ~118-143) is hardcoded
- It contains foundational instructions: "You are a helpful coding assistant with access to a PowerShell tool..."
- Users can only see and append their custom portion via `_custom_system_prompt`
- The base prompt that defines core agent capabilities is invisible
**Goal:**
- Make `_SYSTEM_PROMPT` visible and editable in the UI
- Allow users to customize the foundational agent instructions
- Maintain sensible defaults while enabling expert customization
## 2. Current State Audit
### Hidden System Prompt Location
`src/ai_client.py`:
```python
_SYSTEM_PROMPT: str = (
"You are a helpful coding assistant with access to a PowerShell tool (run_powershell) and MCP tools (file access: read_file, list_directory, search_files, get_file_summary, web access: web_search, fetch_url). "
"When calling file/directory tools, always use the 'path' parameter for the target path. "
...
)
```
### Related State
- `_custom_system_prompt` - user-defined append/injection
- `_get_combined_system_prompt()` - merges both
- `set_custom_system_prompt()` - setter for user portion
### UI Current State
- AI Settings → System Prompts shows global and project prompts
- These are injected as `[USER SYSTEM PROMPT]` after `_SYSTEM_PROMPT`
- But `_SYSTEM_PROMPT` itself is never shown
## 3. Functional Requirements
### 3.1 Base System Prompt Visibility
- Add "Base System Prompt" section in AI Settings
- Display current `_SYSTEM_PROMPT` content
- Allow editing with syntax highlighting (it's markdown text)
### 3.2 Default vs Custom Base
- Maintain default base prompt as reference
- User can reset to default if they mess it up
- Show diff between default and custom
### 3.3 Persistence
- Custom base prompt stored in config or project TOML
- Loaded on app start
- Applied before `_custom_system_prompt` in `_get_combined_system_prompt()`
### 3.4 Provider Considerations
- Some providers handle system prompts differently
- Verify behavior across Gemini, Anthropic, DeepSeek
- May need provider-specific base prompts
## 4. Data Model
### 4.1 Config Storage
```toml
[ai_settings]
base_system_prompt = """..."""
use_default_base = true
```
### 4.2 Combined Prompt Order
1. `_SYSTEM_PROMPT` (or custom base if enabled)
2. `[USER SYSTEM PROMPT]` (from AI Settings global/project)
3. Tooling strategy (from bias engine)
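A sketch of the combined-prompt assembly in the order listed above (attribute names are assumptions; the real `_get_combined_system_prompt()` in `ai_client.py` differs in detail):
```python
def _get_combined_system_prompt(self):
    # Attribute names below are assumed for illustration only.
    base = _SYSTEM_PROMPT if self._use_default_base else self._custom_base_prompt
    parts = [base]
    if self._custom_system_prompt:
        parts.append("[USER SYSTEM PROMPT]\n" + self._custom_system_prompt)
    if self._tooling_strategy:
        parts.append(self._tooling_strategy)  # from the bias engine
    return "\n\n".join(parts)
```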
## 5. UI Design
**Location:** AI Settings panel → System Prompts section
```
┌─ System Prompts ──────────────────────────────┐
│ ☑ Use Default Base System Prompt │
│ │
│ Base System Prompt (collapsed by default): │
│ ┌──────────────────────────────────────────┐ │
│ │ You are a helpful coding assistant... │ │
│ └──────────────────────────────────────────┘ │
│ │
│ [Show Editor] [Reset to Default] │
│ │
│ Global System Prompt: │
│ ┌──────────────────────────────────────────┐ │
│ │ [current global prompt content] │ │
│ └──────────────────────────────────────────┘ │
└──────────────────────────────────────────────┘
```
When "Show Editor" clicked:
- Expand to full editor for base prompt
- Syntax highlighting for markdown
- Character count
## 6. Acceptance Criteria
- [ ] `_SYSTEM_PROMPT` visible in AI Settings
- [ ] User can edit base system prompt
- [ ] Changes persist across app restarts
- [ ] "Reset to Default" restores original
- [ ] Provider APIs receive modified prompt correctly
- [ ] No regression in agent behavior with defaults
## 7. Out of Scope
- Changes to actual agent behavior logic
- Changes to tool definitions or availability
- Changes to aggregation or context handling
## 8. Dependencies
- `ai_client.py` - `_SYSTEM_PROMPT` and `_get_combined_system_prompt()`
- `gui_2.py` - AI Settings panel rendering
- `models.py` - Config structures
@@ -1,19 +1,19 @@
# Implementation Plan: Expanded Test Coverage and Stress Testing
## Phase 1: Tool Accessibility and State Unit Tests [checkpoint: 6989b37]
- [x] Task: Review current tool registration and disabling logic in `src/mcp_client.py` and `src/api_hooks.py`.
- [x] Task: Write Tests: Create unit tests in `tests/test_agent_tools_wiring.py` (or similar) to verify turning a tool off removes it from the agent's available tool list. 2666a33
- [x] Task: Implement: If tests fail due to missing logic, update the tool filtering implementation to ensure disabled tools are strictly excluded from the context sent to the provider. 2666a33
- [x] Task: Conductor - User Manual Verification 'Phase 1: Tool Accessibility and State Unit Tests' (Protocol in workflow.md)
## Phase 1: Tool Accessibility and State Unit Tests
- [ ] Task: Review current tool registration and disabling logic in `src/mcp_client.py` and `src/api_hooks.py`.
- [ ] Task: Write Tests: Create unit tests in `tests/test_agent_tools_wiring.py` (or similar) to verify turning a tool off removes it from the agent's available tool list.
- [ ] Task: Implement: If tests fail due to missing logic, update the tool filtering implementation to ensure disabled tools are strictly excluded from the context sent to the provider.
- [ ] Task: Conductor - User Manual Verification 'Phase 1: Tool Accessibility and State Unit Tests' (Protocol in workflow.md)
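A minimal sketch of the strict exclusion the Phase 1 tasks call for (names assumed; not the actual `mcp_client.py`/`api_hooks.py` API):
```python
def get_enabled_tool_schemas(all_schemas, enabled_names):
    """Return only the schemas for enabled tools; disabled tools never reach the provider payload."""
    enabled = set(enabled_names)
    return [schema for schema in all_schemas if schema["name"] in enabled]


# A unit test in tests/test_agent_tools_wiring.py would then assert that toggling a tool
# off removes it, e.g. assert "run_powershell" not in {s["name"] for s in filtered}.
```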
## Phase 2: MMA Agent 'Step Mode' Simulation Tests [checkpoint: b88c796]
- [x] Task: Investigate existing simulation test patterns in `tests/simulation/` and the Hook API coverage for Step Mode.
- [x] Task: Write Tests: Create a new simulation test (`tests/test_mma_step_mode_sim.py`) that initializes an MMA track and specifically forces 'Step Mode' via API hooks. 9f67a31
- [x] Task: Implement/Refine: Ensure the simulation script correctly waits for and manually approves task transitions, validating that the execution engine pauses appropriately between steps. 7fdf6c9
- [x] Task: Conductor - User Manual Verification 'Phase 2: MMA Agent Step Mode Simulation Tests' (Protocol in workflow.md)
## Phase 2: MMA Agent 'Step Mode' Simulation Tests
- [ ] Task: Investigate existing simulation test patterns in `tests/simulation/` and the Hook API coverage for Step Mode.
- [ ] Task: Write Tests: Create a new simulation test (`tests/test_mma_step_mode_sim.py`) that initializes an MMA track and specifically forces 'Step Mode' via API hooks.
- [ ] Task: Implement/Refine: Ensure the simulation script correctly waits for and manually approves task transitions, validating that the execution engine pauses appropriately between steps.
- [ ] Task: Conductor - User Manual Verification 'Phase 2: MMA Agent Step Mode Simulation Tests' (Protocol in workflow.md)
## Phase 3: Multi-Epic and Advanced DAG Stress Tests [checkpoint: 9566012]
- [x] Task: Analyze the DAG execution engine (`src/dag_engine.py` and `src/multi_agent_conductor.py`) for handling multiple concurrent tracks/epics.
- [x] Task: Write Tests: Create an integration/simulation test that loads two or more complex tracks with interconnected dependencies simultaneously. 9f67a31
- [x] Task: Implement/Refine: Stress test the system by allowing the agent pool to execute these concurrent DAGs. Verify that blocked statuses propagate correctly and that the orchestrator does not deadlock or crash. 6b18474
- [x] Task: Conductor - User Manual Verification 'Phase 3: Multi-Epic and Advanced DAG Stress Tests' (Protocol in workflow.md)
## Phase 3: Multi-Epic and Advanced DAG Stress Tests
- [ ] Task: Analyze the DAG execution engine (`src/dag_engine.py` and `src/multi_agent_conductor.py`) for handling multiple concurrent tracks/epics.
- [ ] Task: Write Tests: Create an integration/simulation test that loads two or more complex tracks with interconnected dependencies simultaneously.
- [ ] Task: Implement/Refine: Stress test the system by allowing the agent pool to execute these concurrent DAGs. Verify that blocked statuses propagate correctly and that the orchestrator does not deadlock or crash.
- [ ] Task: Conductor - User Manual Verification 'Phase 3: Multi-Epic and Advanced DAG Stress Tests' (Protocol in workflow.md)
@@ -1,29 +1,29 @@
# Implementation Plan: Advanced Text Viewer with Syntax Highlighting
## Phase 1: State & Interface Update
- [x] Task: Audit `src/gui_2.py` to ensure all `text_viewer_*` state variables are explicitly initialized in `App.__init__`. e28af48
- [x] Task: Implement: Update `App.__init__` to initialize `self.show_text_viewer`, `self.text_viewer_title`, `self.text_viewer_content`, and new `self.text_viewer_type` (defaulting to "text"). e28af48
- [x] Task: Implement: Update `self.text_viewer_wrap` (defaulting to True) to allow independent word wrap. e28af48
- [x] Task: Implement: Update `_render_text_viewer(self, label: str, content: str, text_type: str = "text")` signature and caller usage. e28af48
- [x] Task: Conductor - User Manual Verification 'Phase 1: State & Interface Update' (Protocol in workflow.md) e28af48
- [ ] Task: Audit `src/gui_2.py` to ensure all `text_viewer_*` state variables are explicitly initialized in `App.__init__`.
- [ ] Task: Implement: Update `App.__init__` to initialize `self.show_text_viewer`, `self.text_viewer_title`, `self.text_viewer_content`, and new `self.text_viewer_type` (defaulting to "text").
- [ ] Task: Implement: Update `self.text_viewer_wrap` (defaulting to True) to allow independent word wrap.
- [ ] Task: Implement: Update `_render_text_viewer(self, label: str, content: str, text_type: str = "text")` signature and caller usage.
- [ ] Task: Conductor - User Manual Verification 'Phase 1: State & Interface Update' (Protocol in workflow.md)
## Phase 2: Core Rendering Logic (Code & MD)
- [x] Task: Write Tests: Create a simulation test in `tests/test_gui_text_viewer.py` to verify the viewer opens and switches rendering paths based on `text_type`. a91b8dc
- [x] Task: Implement: In `src/gui_2.py`, refactor the text viewer window loop to: a91b8dc
- Use `MarkdownRenderer.render` if `text_type == "markdown"`. a91b8dc
- Use a cached `ImGuiColorTextEdit.TextEditor` if `text_type` matches a code language. a91b8dc
- Fallback to `imgui.input_text_multiline` for plain text. a91b8dc
- [x] Task: Implement: Ensure the `TextEditor` instance is properly cached using a unique key for the text viewer to maintain state. a91b8dc
- [x] Task: Conductor - User Manual Verification 'Phase 2: Core Rendering Logic' (Protocol in workflow.md) a91b8dc
- [ ] Task: Write Tests: Create a simulation test in `tests/test_gui_text_viewer.py` to verify the viewer opens and switches rendering paths based on `text_type`.
- [ ] Task: Implement: In `src/gui_2.py`, refactor the text viewer window loop to:
- Use `MarkdownRenderer.render` if `text_type == "markdown"`.
- Use a cached `ImGuiColorTextEdit.TextEditor` if `text_type` matches a code language.
- Fallback to `imgui.input_text_multiline` for plain text.
- [ ] Task: Implement: Ensure the `TextEditor` instance is properly cached using a unique key for the text viewer to maintain state.
- [ ] Task: Conductor - User Manual Verification 'Phase 2: Core Rendering Logic' (Protocol in workflow.md)
## Phase 3: UI Features (Copy, Line Numbers, Wrap)
- [x] Task: Write Tests: Update `tests/test_gui_text_viewer.py` to verify the copy-to-clipboard functionality and word wrap toggle. a91b8dc
- [x] Task: Implement: Add a "Copy" button to the text viewer title bar or a small toolbar at the top of the window. a91b8dc
- [x] Task: Implement: Add a "Word Wrap" checkbox inside the text viewer window. a91b8dc
- [x] Task: Implement: Configure the `TextEditor` instance to show line numbers and be read-only. a91b8dc
- [x] Task: Conductor - User Manual Verification 'Phase 3: UI Features' (Protocol in workflow.md) a91b8dc
- [ ] Task: Write Tests: Update `tests/test_gui_text_viewer.py` to verify the copy-to-clipboard functionality and word wrap toggle.
- [ ] Task: Implement: Add a "Copy" button to the text viewer title bar or a small toolbar at the top of the window.
- [ ] Task: Implement: Add a "Word Wrap" checkbox inside the text viewer window.
- [ ] Task: Implement: Configure the `TextEditor` instance to show line numbers and be read-only.
- [ ] Task: Conductor - User Manual Verification 'Phase 3: UI Features' (Protocol in workflow.md)
## Phase 4: Integration & Rollout
- [x] Task: Implement: Update all existing calls to `_render_text_viewer` in `src/gui_2.py` (e.g., in `_render_files_panel`, `_render_tool_calls_panel`) to pass the correct `text_type` based on file extension or content. 2826ad5
- [x] Task: Implement: Add "Markdown Preview" support for system prompt presets using the new text viewer logic. 2826ad5
- [x] Task: Conductor - User Manual Verification 'Phase 4: Integration & Rollout' (Protocol in workflow.md) 2826ad5
- [ ] Task: Implement: Update all existing calls to `_render_text_viewer` in `src/gui_2.py` (e.g., in `_render_files_panel`, `_render_tool_calls_panel`) to pass the correct `text_type` based on file extension or content.
- [ ] Task: Implement: Add "Markdown Preview" support for system prompt presets using the new text viewer logic.
- [ ] Task: Conductor - User Manual Verification 'Phase 4: Integration & Rollout' (Protocol in workflow.md)
@@ -1,23 +1,26 @@
# Implementation Plan: Rich Thinking Trace Handling
## Status: COMPLETE (2026-03-14)
## Phase 1: Core Parsing & Model Update
- [ ] Task: Audit `src/models.py` and `src/project_manager.py` to identify current message serialization schemas.
- [ ] Task: Write Tests: Verify that raw AI responses with `<thinking>`, `<thought>`, and `Thinking:` markers are correctly parsed into segmented data structures (Thinking vs. Response).
- [ ] Task: Implement: Add `ThinkingSegment` model and update `ChatMessage` schema in `src/models.py` to support optional thinking traces.
- [ ] Task: Implement: Update parsing logic in `src/ai_client.py` or a dedicated utility to extract segments from raw provider responses.
- [ ] Task: Conductor - User Manual Verification 'Phase 1: Core Parsing & Model Update' (Protocol in workflow.md)
## Summary
Implemented thinking trace parsing, model, persistence, and GUI rendering for AI responses containing `<thinking>`, `<thought>`, and `Thinking:` markers.
## Phase 2: Persistence & History Integration
- [ ] Task: Write Tests: Verify that `ProjectManager` correctly serializes and deserializes messages with thinking segments to/from TOML history files.
- [ ] Task: Implement: Update `src/project_manager.py` to handle the new `ChatMessage` schema during session save/load.
- [ ] Task: Implement: Ensure `src/aggregate.py` or relevant context builders include thinking traces in the "Discussion History" sent back to the AI.
- [ ] Task: Conductor - User Manual Verification 'Phase 2: Persistence & History Integration' (Protocol in workflow.md)
## Files Created/Modified:
- `src/thinking_parser.py` - Parser for thinking traces
- `src/models.py` - ThinkingSegment model
- `src/gui_2.py` - _render_thinking_trace helper + integration
- `tests/test_thinking_trace.py` - 7 parsing tests
- `tests/test_thinking_persistence.py` - 4 persistence tests
- `tests/test_thinking_gui.py` - 4 GUI tests
## Phase 3: GUI Rendering - Comms & Discussion
- [ ] Task: Write Tests: Verify the GUI rendering logic correctly handles messages with and without thinking segments.
- [ ] Task: Implement: Create a reusable `_render_thinking_trace` helper in `src/gui_2.py` using a collapsible header (e.g., `imgui.collapsing_header`).
- [ ] Task: Implement: Integrate the thinking trace renderer into the **Comms History** panel in `src/gui_2.py`.
- [ ] Task: Implement: Integrate the thinking trace renderer into the **Discussion Hub** message loop in `src/gui_2.py`.
- [ ] Task: Conductor - User Manual Verification 'Phase 3: GUI Rendering - Comms & Discussion' (Protocol in workflow.md)
## Implementation Details:
- **Parser**: Extracts thinking segments from `<thinking>`, `<thought>`, `Thinking:` markers
- **Model**: `ThinkingSegment` dataclass with content and marker fields
- **GUI**: `_render_thinking_trace` with collapsible "Monologue" header
- **Styling**: Tinted background (dark brown), gold/amber text
- **Indicator**: Existing "THINKING..." in Discussion Hub
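A simplified, regex-based sketch of the marker extraction described above (the shipped `src/thinking_parser.py` may differ):
```python
import re
from dataclasses import dataclass


@dataclass
class ThinkingSegment:
    content: str
    marker: str


_TAG_RE = re.compile(r"<(thinking|thought)>(.*?)</\1>", re.DOTALL | re.IGNORECASE)


def parse_thinking(raw: str):
    """Split a raw response into (thinking segments, visible response)."""
    segments = [
        ThinkingSegment(m.group(2).strip(), m.group(1).lower()) for m in _TAG_RE.finditer(raw)
    ]
    visible = _TAG_RE.sub("", raw)
    if visible.lstrip().lower().startswith("thinking:"):
        head, _, rest = visible.partition("\n")
        segments.append(ThinkingSegment(head.split(":", 1)[1].strip(), "thinking:"))
        visible = rest
    return segments, visible.strip()
```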
## Total Tests: 15 passing
## Phase 4: Final Polish & Theming
- [ ] Task: Implement: Apply specialized styling (e.g., tinted background or italicized text) to expanded thinking traces to distinguish them from direct responses.
- [ ] Task: Implement: Ensure thinking trace headers show a "Calculating..." or "Monologue" indicator while an agent is active.
- [ ] Task: Conductor - User Manual Verification 'Phase 4: Final Polish & Theming' (Protocol in workflow.md)
@@ -3,66 +3,98 @@
## Overview
Add tree-sitter-based C and C++ parsing to mcp_client with skeleton and outline tools.
## Phase 1: Dependencies [checkpoint: 1f86c62]
## Phase 1: Dependencies
Focus: Add tree-sitter C/C++ grammars
- [x] Task 1.1: Add tree-sitter-c and tree-sitter-cpp to pyproject.toml 568c549
- [ ] Task 1.1: Add tree-sitter-c and tree-sitter-cpp to pyproject.toml
- WHERE: pyproject.toml:16-17
- WHAT: Add `"tree-sitter-c>=0.23.0", "tree-sitter-cpp>=0.3.0"` to dependencies
- HOW: Edit dependencies array
- SAFETY: No breaking changes
## Phase 2: ASTParser Extensions [checkpoint: 7bc4642]
## Phase 2: ASTParser Extensions
Focus: Extend ASTParser to support C/C++ languages
- [x] Task 2.1: Modify ASTParser.__init__ to accept "c" and "cpp" languages c025ebc
- [ ] Task 2.1: Modify ASTParser.__init__ to accept "c" and "cpp" languages
- WHERE: src/file_cache.py:22-28
- WHAT: Add language loading for tree-sitter-c and tree-sitter-cpp
 - HOW: Import tree_sitter_c, tree_sitter_cpp; load Language(tree_sitter_c.language()) etc. (see the sketch after this phase's tasks)
- SAFETY: Maintain existing Python support
- [x] Task 2.2: Implement C skeleton extraction d3cd7cf
- [x] Task 2.3: Implement C++ skeleton extraction d3cd7cf
- [x] Task 2.4: Implement code outline for C and C++ d3cd7cf
- [ ] Task 2.2: Implement C skeleton extraction
- WHERE: src/file_cache.py (new method or extend get_skeleton)
- WHAT: Extract function_definition, struct_specifier, enum_specifier, typedef, union_specifier
- HOW: Tree-sitter node traversal similar to Python pattern
- SAFETY: New method, no modifications to existing
- [ ] Task 2.3: Implement C++ skeleton extraction
- WHERE: src/file_cache.py
- WHAT: Add class_specifier, template_declaration, access_specifier, namespace_specifier
- HOW: Extend C skeleton logic with C++ specific nodes
- SAFETY: New method
- [ ] Task 2.4: Implement code outline for C and C++
- WHERE: src/file_cache.py
- WHAT: Return hierarchical structure with line ranges (matching py_get_code_outline format)
- HOW: Similar to Python get_code_outline pattern
- SAFETY: New method
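A minimal sketch of the language loading Task 2.1 describes, assuming recent tree-sitter Python bindings (exact `ASTParser` wiring is not shown here):
```python
import tree_sitter_c
import tree_sitter_cpp
from tree_sitter import Language, Parser

_LANGUAGES = {
    "c": Language(tree_sitter_c.language()),
    "cpp": Language(tree_sitter_cpp.language()),
}


def make_parser(lang: str) -> Parser:
    parser = Parser()
    parser.language = _LANGUAGES[lang]  # language property setter requires tree-sitter >= 0.22
    return parser


tree = make_parser("c").parse(b"int add(int a, int b) { return a + b; }")
root = tree.root_node  # traverse function_definition / struct_specifier nodes from here
```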
## Phase 3: MCP Tool Integration
Focus: Add tools to mcp_client dispatch
- [x] Task 3.1: Add ts_c_get_skeleton tool 0db41ef
- [x] Task 3.2: Add ts_cpp_get_skeleton tool 0db41ef
- [x] Task 3.3: Add ts_c_get_code_outline tool 0db41ef
- [x] Task 3.4: Add ts_cpp_get_code_outline tool 0db41ef
- [x] Task 3.5: Register tools in get_tool_schemas 0db41ef
- [ ] Task 3.1: Add ts_c_get_skeleton tool
- WHERE: src/mcp_client.py (add function and register)
- WHAT: Tool that calls file_cache ASTParser for C skeleton
- HOW: Follow py_get_skeleton pattern
- SAFETY: New tool, no modifications to existing
## Phase 4: Tests [checkpoint: 4f08677]
- [ ] Task 3.2: Add ts_cpp_get_skeleton tool
- WHERE: src/mcp_client.py
- WHAT: Tool that calls file_cache ASTParser for C++ skeleton
- HOW: Same as above with cpp language
- SAFETY: New tool
- [ ] Task 3.3: Add ts_c_get_code_outline tool
- WHERE: src/mcp_client.py
- WHAT: Tool that calls file_cache for C code outline
- HOW: Follow py_get_code_outline pattern
- SAFETY: New tool
- [ ] Task 3.4: Add ts_cpp_get_code_outline tool
- WHERE: src/mcp_client.py
- WHAT: Tool that calls file_cache for C++ code outline
- HOW: Same as above with cpp language
- SAFETY: New tool
- [ ] Task 3.5: Register tools in get_tool_schemas
- WHERE: src/mcp_client.py:998-1000
- WHAT: Add schemas for all 4 new tools
- HOW: Append to MCP_TOOL_SPECS list
- SAFETY: Append only
## Phase 4: Tests
Focus: Verify C/C++ tools work correctly
- [x] Task 4.1: Write tests for ts_c_get_skeleton 3bb850a
- [x] Task 4.2: Write tests for ts_cpp_get_skeleton 3bb850a
- [x] Task 4.3: Write tests for code outline tools 3bb850a
- [x] Task 4.4: Integration test - verify tools dispatch correctly 3bb850a
- [ ] Task 4.1: Write tests for ts_c_get_skeleton
- WHERE: tests/test_ts_c_tools.py (new file)
- WHAT: Test C skeleton extraction on sample C code
- HOW: Use pytest with sample C file content
- SAFETY: New test file
## Phase 5: Parity with Python Tools [checkpoint: 2e43b45]
Focus: Implement definitions, signatures, and update tools
- [ ] Task 4.2: Write tests for ts_cpp_get_skeleton
- WHERE: tests/test_ts_cpp_tools.py (new file)
- WHAT: Test C++ skeleton extraction on sample C++ code
- HOW: Use pytest with sample C++ code
- SAFETY: New test file
- [x] Task 5.1: Implement get_definition for C and C++ 799feb0
- [x] Task 5.2: Implement get_signature for C and C++ 799feb0
- [ ] Task 4.3: Write tests for code outline tools
- WHERE: tests/test_ts_c_tools.py / test_ts_cpp_tools.py
- WHAT: Test line range extraction
- HOW: Assert correct line numbers
- SAFETY: New tests
- [x] Task 5.3: Implement update_definition for C and C++ 8642d89
- WHERE: src/mcp_client.py
- WHAT: Implement `ts_c_update_definition` and `ts_cpp_update_definition`
- HOW: Use AST to find target range; surgically replace lines in file
- SAFETY: Verify AST still parses after edit (optional but recommended)
- [x] Task 5.4: Register Phase 5 tools in dispatch and schema 4e8b397
- WHERE: src/mcp_client.py
- WHAT: Add tools to `get_tool_schemas` and `dispatch`
- HOW: Standard registration pattern
## Phase 6: Robust Testing with gencpp [checkpoint: 992e206]
Focus: Verify against real-world C++ components
- [x] Task 6.1: Define test corpus from gencpp samples 992e206
- [x] Task 6.2: Run exhaustive skeleton/outline tests on gencpp corpus 992e206
- [x] Task 6.3: Verify surgical updates on gencpp components 992e206
- [x] Task 6.4: Final audit and track closure 992e206
- [ ] Task 4.4: Integration test - verify tools dispatch correctly
- WHERE: tests/test_mcp_client.py
- WHAT: Test dispatch of ts_c_* and ts_cpp_* tools
- HOW: Mock file_cache, verify correct function called
- SAFETY: Additive test
@@ -16,7 +16,6 @@ Add tree-sitter-based C and C++ parsing support to the MCP client, providing ske
- No C/C++ tree-sitter grammars installed
- No C/C++ parsing logic in ASTParser
- No MCP tools for C/C++ code extraction
- No MCP tools for C/C++ code modification (Parity with Python `py_update_definition`)
## Goals
@@ -24,9 +23,7 @@ Add tree-sitter-based C and C++ parsing support to the MCP client, providing ske
2. Extend ASTParser to support C and C++ languages
3. Implement skeleton and outline generation for C/C++ (functions, structs, enums, classes)
4. Add MCP tools: `ts_c_get_skeleton`, `ts_cpp_get_skeleton`, `ts_c_get_code_outline`, `ts_cpp_get_code_outline`
5. Implement definition, signature, and update tools for C/C++ parity
6. Register tools in mcp_client dispatch
7. Validate tools against real-world C++ components from `gencpp` repository
5. Register tools in mcp_client dispatch
## Functional Requirements
@@ -48,19 +45,11 @@ Add tree-sitter-based C and C++ parsing support to the MCP client, providing ske
| `ts_cpp_get_skeleton` | Returns C++ file skeleton (above + class methods, templates, namespaces) |
| `ts_c_get_code_outline` | Returns hierarchical C outline (functions, structs, enums, globals with line ranges) |
| `ts_cpp_get_code_outline` | Returns hierarchical C++ outline (above + classes, templates, namespaces) |
| `ts_c_get_definition` | Get full source code of a specific C function or struct |
| `ts_cpp_get_definition` | Get full source code of a specific C++ class, function, or method |
| `ts_c_get_signature` | Get only the signature part of a C function |
| `ts_cpp_get_signature` | Get only the signature part of a C++ function or method |
| `ts_c_update_definition` | Surgically replace the definition of a C function or struct |
| `ts_cpp_update_definition` | Surgically replace the definition of a C++ class, function, or method |
### Tool Output Format
Match existing Python tool formats for consistency:
- Skeleton: signatures + docstrings, bodies replaced with `...`
- Outline: hierarchical list with `[Class] name (Lines X-Y)` format
- Definition: raw source code of the identified range
- Signature: code from the start of the definition up to the start of the body block (or the terminating semicolon for declarations)
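For context, a minimal sketch of how the outline data behind this format could be pulled from tree-sitter; it assumes the py-tree-sitter >= 0.22 bindings and the `tree-sitter-c` grammar wheel, and `c_outline` is an illustrative helper, not the project's function.

```python
# Sketch: producing "[Function] name (Lines X-Y)" entries for top-level C functions.
import tree_sitter_c as tsc
from tree_sitter import Language, Parser

C_LANGUAGE = Language(tsc.language())
parser = Parser(C_LANGUAGE)

def c_outline(source: str) -> list[str]:
    tree = parser.parse(source.encode("utf-8"))
    entries = []
    for node in tree.root_node.children:
        if node.type == "function_definition":
            declarator = node.child_by_field_name("declarator")
            name = declarator.text.decode("utf-8") if declarator else "<anonymous>"
            start, end = node.start_point.row + 1, node.end_point.row + 1
            entries.append(f"[Function] {name} (Lines {start}-{end})")
    return entries

print(c_outline("int add(int a, int b) { return a + b; }"))
# -> ['[Function] add(int a, int b) (Lines 1-1)']
```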
## Non-Functional Requirements
@@ -79,4 +68,5 @@ Match existing Python tool formats for consistency:
- Cross-file symbol resolution (AI uses search tools for this)
- Template instantiation analysis
- Macro expansion
- gencpp integration (orchestrating gencpp logic itself is a separate track)
- gencpp integration (future separate track)
- Writing to C/C++ files (read-only for now)
@@ -2,29 +2,28 @@
This plan implements a robust undo/redo system focusing on text inputs, control states, and discussion structure.
## Phase 1: History Core Logic & State Management [checkpoint: 9a699a5]
- [x] Task: Design and implement a generic `HistoryManager` class to handle undo/redo stacks and state snapshots. 7743b15
- [x] Task: Write failing tests for the `HistoryManager` core logic, including capacity limits and basic undo/redo functionality. 7743b15
- [x] Task: Implement `HistoryManager` to pass tests, ensuring it correctly manages a fixed stack of 50-100 actions. 7743b15
- [x] Task: Conductor - User Manual Verification 'Phase 1: History Core Logic & State Management' (Protocol in workflow.md) 9a699a5
## Phase 1: History Core Logic & State Management
- [ ] Task: Design and implement a generic `HistoryManager` class to handle undo/redo stacks and state snapshots.
- [ ] Task: Write failing tests for the `HistoryManager` core logic, including capacity limits and basic undo/redo functionality.
- [ ] Task: Implement `HistoryManager` to pass tests, ensuring it correctly manages a fixed stack of 50-100 actions.
- [ ] Task: Conductor - User Manual Verification 'Phase 1: History Core Logic & State Management' (Protocol in workflow.md)
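A minimal sketch of the shape such a `HistoryManager` could take (bounded undo stack, redo branch discarded on new edits); the class name comes from the tasks above, but the snapshot type and method names are illustrative.

```python
# Illustrative HistoryManager sketch: bounded undo stack, redo cleared on new pushes.
from collections import deque
from typing import Any

class HistoryManager:
    def __init__(self, capacity: int = 100) -> None:
        self._undo: deque[Any] = deque(maxlen=capacity)  # oldest snapshots fall off
        self._redo: list[Any] = []

    def push(self, snapshot: Any) -> None:
        """Record a new state; any pending redo branch is discarded."""
        self._undo.append(snapshot)
        self._redo.clear()

    def undo(self, current: Any) -> Any | None:
        if not self._undo:
            return None
        self._redo.append(current)
        return self._undo.pop()

    def redo(self, current: Any) -> Any | None:
        if not self._redo:
            return None
        self._undo.append(current)
        return self._redo.pop()
```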
## Phase 2: Text Input & Control Undo/Redo [checkpoint: a02849b]
- [x] Task: Integrate `HistoryManager` with `src/gui_2.py` for system prompt and discussion entry text fields. 095368b
- [x] Task: Implement state snapshots for AI model parameter sliders (Temperature, Top-P) and checkboxes. 095368b
- [x] Task: Write simulation tests using `live_gui` to verify undo/redo for text edits and control changes. 095368b
- [x] Task: Conductor - User Manual Verification 'Phase 2: Text Input & Control Undo/Redo' (Protocol in workflow.md) a02849b
## Phase 2: Text Input & Control Undo/Redo
- [ ] Task: Integrate `HistoryManager` with `src/gui_2.py` for system prompt and discussion entry text fields.
- [ ] Task: Implement state snapshots for AI model parameter sliders (Temperature, Top-P) and checkboxes.
- [ ] Task: Write simulation tests using `live_gui` to verify undo/redo for text edits and control changes.
- [ ] Task: Conductor - User Manual Verification 'Phase 2: Text Input & Control Undo/Redo' (Protocol in workflow.md)
## Phase 3: Discussion & Context Structure Mutation [checkpoint: 0a5b90e]
- [x] Task: Implement undo/redo for adding, deleting, and reordering discussion entries in `src/app_controller.py`. 095368b
- [x] Task: Extend the history system to track context file and screenshot additions/removals in `src/aggregate.py`. 095368b
- [x] Task: Write failing tests for reverting and redoing complex discussion tree mutations. 095368b
- [x] Task: Implement mutation tracking and restoration logic to pass tests. 095368b
- [x] Task: Conductor - User Manual Verification 'Phase 3: Discussion & Context Structure Mutation' (Protocol in workflow.md) 0a5b90e
## Phase 4: UI Features - Hotkeys & History List [checkpoint: 446a587]
- [x] Task: Implement global hotkey handling for `Ctrl+Z` and `Ctrl+Y` / `Ctrl+Shift+Z` in the main GUI loop. 095368b
- [x] Task: Create a dedicated 'History List' panel in `src/gui_2.py` showing a scrollable list of recent actions. a3d7376
- [x] Task: Implement functionality to jump to a specific historical state via the History List. a3d7376
- [x] Task: Write final integration tests for the full undo/redo cycle across all supported areas. 7bed4a8
- [x] Task: Conductor - User Manual Verification 'Phase 4: UI Features - Hotkeys & History List' (Protocol in workflow.md) 446a587
## Phase 3: Discussion & Context Structure Mutation
- [ ] Task: Implement undo/redo for adding, deleting, and reordering discussion entries in `src/app_controller.py`.
- [ ] Task: Extend the history system to track context file and screenshot additions/removals in `src/aggregate.py`.
- [ ] Task: Write failing tests for reverting and redoing complex discussion tree mutations.
- [ ] Task: Implement mutation tracking and restoration logic to pass tests.
- [ ] Task: Conductor - User Manual Verification 'Phase 3: Discussion & Context Structure Mutation' (Protocol in workflow.md)
## Phase 4: UI Features - Hotkeys & History List
- [ ] Task: Implement global hotkey handling for `Ctrl+Z` and `Ctrl+Y` / `Ctrl+Shift+Z` in the main GUI loop.
- [ ] Task: Create a dedicated 'History List' panel in `src/gui_2.py` showing a scrollable list of recent actions.
- [ ] Task: Implement functionality to jump to a specific historical state via the History List.
- [ ] Task: Write final integration tests for the full undo/redo cycle across all supported areas.
- [ ] Task: Conductor - User Manual Verification 'Phase 4: UI Features - Hotkeys & History List' (Protocol in workflow.md)
@@ -1,25 +1,25 @@
# Implementation Plan: Advanced Workspace Docking & Layout Profiles
## Phase 1: Data Model & Persistence Engine
- [x] Task: Create a `WorkspaceProfile` dataclass in `src/models.py` to store INI string, `show_windows` dict, and panel states. [9840035]
- [x] Task: Implement `WorkspaceManager` (similar to `PresetManager`) to handle saving/loading profiles from `config.toml` and `project.toml`. [5b3173a]
- [x] Task: Write Tests: Verify the manager correctly merges global and project profiles and serializes the ImGui INI string properly. [b7ba7a1]
- [x] Task: Conductor - User Manual Verification 'Phase 1: Data Model & Persistence Engine' (Protocol in workflow.md)
- [ ] Task: Create a `WorkspaceProfile` dataclass in `src/models.py` to store INI string, `show_windows` dict, and panel states.
- [ ] Task: Implement `WorkspaceManager` (similar to `PresetManager`) to handle saving/loading profiles from `config.toml` and `project.toml`.
- [ ] Task: Write Tests: Verify the manager correctly merges global and project profiles and serializes the ImGui INI string properly.
- [ ] Task: Conductor - User Manual Verification 'Phase 1: Data Model & Persistence Engine' (Protocol in workflow.md)
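A guess at the shape of the profile described above, limited to the fields the task names (INI string, `show_windows`, panel states); everything beyond those fields is hypothetical.

```python
# Hypothetical WorkspaceProfile shape; only the listed fields come from the task text.
from dataclasses import dataclass, field

@dataclass
class WorkspaceProfile:
    name: str
    imgui_ini: str = ""  # raw ImGui layout INI text
    show_windows: dict[str, bool] = field(default_factory=dict)
    panel_states: dict[str, bool] = field(default_factory=dict)
    scope: str = "global"  # "global" (config.toml) or "project" (project.toml)
```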
## Phase 2: ImGui State Extraction & Restoration
- [x] Task: Implement methods in `src/gui_2.py` (or a helper module) to safely capture the current ImGui layout (`imgui.save_ini_settings_to_memory()`). [eab1945]
- [x] Task: Implement methods to safely restore layout (`imgui.load_ini_settings_from_memory()`) and apply the associated `show_windows` state. [eab1945]
- [x] Task: Write Tests: Verify using `live_gui` that saving a layout and loading it back does not cause crashes or assertion failures in the ImGui render loop. [41bc8bb]
- [x] Task: Conductor - User Manual Verification 'Phase 2: ImGui State Extraction & Restoration' (Protocol in workflow.md)
- [ ] Task: Implement methods in `src/gui_2.py` (or a helper module) to safely capture the current ImGui layout (`imgui.save_ini_settings_to_memory()`).
- [ ] Task: Implement methods to safely restore layout (`imgui.load_ini_settings_from_memory()`) and apply the associated `show_windows` state.
- [ ] Task: Write Tests: Verify using `live_gui` that saving a layout and loading it back does not cause crashes or assertion failures in the ImGui render loop.
- [ ] Task: Conductor - User Manual Verification 'Phase 2: ImGui State Extraction & Restoration' (Protocol in workflow.md)
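The capture/restore calls are the ones named in the tasks above; how they are wired to a profile object is a sketch under the `WorkspaceProfile` assumption sketched earlier.

```python
# Sketch of layout capture/restore using the ImGui calls named in the tasks above.
import imgui

def capture_layout(profile: "WorkspaceProfile", show_windows: dict[str, bool]) -> None:
    profile.imgui_ini = imgui.save_ini_settings_to_memory()
    profile.show_windows = dict(show_windows)

def restore_layout(profile: "WorkspaceProfile", show_windows: dict[str, bool]) -> None:
    # Apply the saved INI before the next frame submits its windows.
    imgui.load_ini_settings_from_memory(profile.imgui_ini)
    show_windows.update(profile.show_windows)
```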
## Phase 3: GUI Menu Integration [checkpoint: f22265b]
- [x] Task: Add a "Layout Profiles" menu under the main "Windows" or "View" menu bar in `src/gui_2.py`. [ded9f38]
- [x] Task: Implement "Save Current Layout" modal (prompting for name and scope: Global/Project). [ded9f38]
- [x] Task: Populate the menu with a dynamically generated list of available profiles to load. [ded9f38]
- [x] Task: Conductor - User Manual Verification 'Phase 3: GUI Menu Integration' (Protocol in workflow.md)
## Phase 3: GUI Menu Integration
- [ ] Task: Add a "Layout Profiles" menu under the main "Windows" or "View" menu bar in `src/gui_2.py`.
- [ ] Task: Implement "Save Current Layout" modal (prompting for name and scope: Global/Project).
- [ ] Task: Populate the menu with a dynamically generated list of available profiles to load.
- [ ] Task: Conductor - User Manual Verification 'Phase 3: GUI Menu Integration' (Protocol in workflow.md)
## Phase 4: Contextual Auto-Switch (Experimental) [checkpoint: 470b7b2]
- [x] Task: Add UI in "AI Settings" or "Operations Hub" to enable "Experimental: Auto-switch layout by Tier". [ecc5a66]
- [x] Task: Add UI to bind specific profiles to Tiers 1 through 4. [ecc5a66]
- [x] Task: Implement the event hook in `AppController` so that when the `active_tier` changes, the bound profile is automatically loaded if the feature is enabled. [ecc5a66]
- [x] Task: Conductor - User Manual Verification 'Phase 4: Contextual Auto-Switch (Experimental)' (Protocol in workflow.md) [470b7b2]
## Phase 4: Contextual Auto-Switch (Experimental)
- [ ] Task: Add UI in "AI Settings" or "Operations Hub" to enable "Experimental: Auto-switch layout by Tier".
- [ ] Task: Add UI to bind specific profiles to Tiers 1 through 4.
- [ ] Task: Implement the event hook in `AppController` so that when the `active_tier` changes, the bound profile is automatically loaded if the feature is enabled.
- [ ] Task: Conductor - User Manual Verification 'Phase 4: Contextual Auto-Switch (Experimental)' (Protocol in workflow.md)
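A hedged sketch of the tier auto-switch hook; the controller attributes used here (`auto_switch_enabled`, `tier_layout_bindings`, `workspace_manager`, `gui`) are placeholders, not the project's names.

```python
# Placeholder sketch: load the bound layout profile when the active tier changes.
def on_active_tier_changed(controller, new_tier: int) -> None:
    if not getattr(controller, "auto_switch_enabled", False):
        return
    profile_name = controller.tier_layout_bindings.get(new_tier)
    if not profile_name:
        return
    profile = controller.workspace_manager.get(profile_name)
    if profile is not None:
        controller.gui.restore_layout(profile)
```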
+22 -26
View File
@@ -7,14 +7,17 @@ max_tokens = 32000
history_trunc_limit = 900000
active_preset = "Default"
system_prompt = ""
base_system_prompt = ""
use_default_base_prompt = false
[projects]
paths = [
"C:/projects/gencpp/.ai/gencpp_sloppy.toml",
"C:/projects/gencpp/gencpp_sloppy.toml",
"C:\\projects\\manual_slop\\tests\\artifacts\\temp_livecontextsim.toml",
"C:\\projects\\manual_slop\\tests\\artifacts\\temp_liveaisettingssim.toml",
"C:\\projects\\manual_slop\\tests\\artifacts\\temp_livetoolssim.toml",
"C:\\projects\\manual_slop\\tests\\artifacts\\temp_liveexecutionsim.toml",
"C:\\projects\\manual_slop\\tests\\artifacts\\temp_project.toml",
]
active = "C:/projects/gencpp/.ai/gencpp_sloppy.toml"
active = "C:/projects/gencpp/gencpp_sloppy.toml"
[gui]
separate_message_panel = false
@@ -23,20 +26,20 @@ separate_tool_calls_panel = false
bg_shader_enabled = false
crt_filter_enabled = false
separate_task_dag = false
separate_usage_analytics = false
separate_usage_analytics = true
separate_tier1 = false
separate_tier2 = false
separate_tier3 = false
separate_tier4 = false
separate_external_tools = false
separate_external_tools = true
[gui.show_windows]
"Project Settings" = true
"Context Hub" = true
"Files & Media" = true
"AI Settings" = true
"MMA Dashboard" = true
"Task DAG" = false
"Usage Analytics" = false
"Usage Analytics" = true
"Tier 1" = false
"Tier 2" = false
"Tier 3" = false
@@ -48,22 +51,25 @@ separate_external_tools = false
"Discussion Hub" = true
"Operations Hub" = true
Message = false
Response = false
"Tool Calls" = false
Theme = false
Response = true
"Tool Calls" = true
Theme = true
"Log Management" = true
Diagnostics = false
"External Tools" = false
"Shader Editor" = false
"Undo/Redo History" = true
"Shader Editor" = true
[theme]
palette = "Nord Dark"
font_path = "fonts/Inter-Regular.ttf"
font_path = "C:/projects/manual_slop/assets/fonts/MapleMono-Regular.ttf"
font_size = 16.0
scale = 1.0
transparency = 1.0
child_transparency = 1.0
transparency = 0.699999988079071
child_transparency = 0.6899999976158142
frosted_blur_radius = 29.68400001525879
frosted_tint_intensity = 0.5659999847412109
frosted_opacity = 0.5389999747276306
frosted_glass_enabled = true
[mma]
max_workers = 4
@@ -75,13 +81,3 @@ api_key = "test-secret-key"
conductor_dir = "C:\\projects\\gencpp\\.ai\\conductor"
logs_dir = "C:\\projects\\manual_slop\\logs"
scripts_dir = "C:\\projects\\manual_slop\\scripts"
[rag]
enabled = false
embedding_provider = "gemini"
chunk_size = 1000
chunk_overlap = 200
[rag.vector_store]
provider = "mock"
collection_name = "manual_slop"
+76 -124
View File
@@ -44,20 +44,19 @@ Collapsed=0
DockId=0x00000005,0
[Window][Message]
Pos=475,163
Size=327,652
Pos=661,1321
Size=716,455
Collapsed=0
[Window][Response]
Pos=447,143
Size=1442,1129
Pos=2437,925
Size=1111,773
Collapsed=0
[Window][Tool Calls]
Pos=1028,1668
Size=1397,340
Pos=1039,464
Size=587,510
Collapsed=0
DockId=0x0000000E,0
[Window][Comms History]
ViewportPos=43,95
@@ -74,10 +73,10 @@ Collapsed=0
DockId=0xAFC85805,2
[Window][Theme]
Pos=0,975
Size=1010,730
Pos=2671,24
Size=1169,2136
Collapsed=0
DockId=0x00000007,0
DockId=0x00000002,1
[Window][Text Viewer - Entry #7]
Pos=379,324
@@ -85,15 +84,16 @@ Size=900,700
Collapsed=0
[Window][Diagnostics]
Pos=1945,734
Size=1211,713
Pos=1649,24
Size=580,1284
Collapsed=0
DockId=0x00000004,2
[Window][Context Hub]
Pos=0,975
Size=1010,730
Pos=0,1719
Size=999,441
Collapsed=0
DockId=0x00000007,0
DockId=0x00000006,0
[Window][AI Settings Hub]
Pos=406,17
@@ -102,26 +102,26 @@ Collapsed=0
DockId=0x0000000D,0
[Window][Discussion Hub]
Pos=87,24
Size=1593,1176
Pos=1762,24
Size=907,2136
Collapsed=0
DockId=0x00000006,0
DockId=0x00000011,0
[Window][Operations Hub]
Pos=0,24
Size=85,1176
Pos=1001,24
Size=759,2136
Collapsed=0
DockId=0x00000005,2
DockId=0x00000010,0
[Window][Files & Media]
Pos=87,24
Size=1593,1176
Pos=0,1719
Size=999,441
Collapsed=0
DockId=0x00000006,1
[Window][AI Settings]
Pos=0,24
Size=85,1176
Size=999,1693
Collapsed=0
DockId=0x00000005,0
@@ -131,16 +131,16 @@ Size=416,325
Collapsed=0
[Window][MMA Dashboard]
Pos=87,24
Size=1593,1176
Pos=2671,24
Size=1169,2136
Collapsed=0
DockId=0x00000006,2
DockId=0x00000002,0
[Window][Log Management]
Pos=87,24
Size=1593,1176
Pos=1931,24
Size=629,1416
Collapsed=0
DockId=0x00000006,3
DockId=0x00000002,1
[Window][Track Proposal]
Pos=709,326
@@ -166,7 +166,7 @@ Collapsed=0
Pos=2822,1717
Size=1018,420
Collapsed=0
DockId=0x0000000C,0
DockId=0x00000004,0
[Window][Approve PowerShell Command]
Pos=649,435
@@ -174,8 +174,8 @@ Size=381,329
Collapsed=0
[Window][Last Script Output]
Pos=310,166
Size=1085,1154
Pos=2810,265
Size=800,562
Collapsed=0
[Window][Text Viewer - Log Entry #1 (request)]
@@ -189,7 +189,7 @@ Size=1005,366
Collapsed=0
[Window][Text Viewer - Entry #11]
Pos=1010,564
Pos=60,60
Size=1529,925
Collapsed=0
@@ -209,7 +209,7 @@ Size=3840,32
Collapsed=0
[Window][Text Viewer - message]
Pos=568,1226
Pos=562,588
Size=900,700
Collapsed=0
@@ -219,13 +219,13 @@ Size=900,700
Collapsed=0
[Window][Text Viewer - text]
Pos=1297,550
Pos=555,644
Size=900,700
Collapsed=0
[Window][Text Viewer - system]
Pos=-1,263
Size=876,536
Pos=377,705
Size=900,340
Collapsed=0
[Window][Text Viewer - Entry #15]
@@ -239,8 +239,8 @@ Size=900,700
Collapsed=0
[Window][Text Viewer - tool_calls]
Pos=1106,942
Size=831,482
Pos=589,490
Size=900,700
Collapsed=0
[Window][Text Viewer - Tool Script #1]
@@ -284,7 +284,7 @@ Size=900,700
Collapsed=0
[Window][Text Viewer - Tool Call #1 Details]
Pos=963,716
Pos=165,1081
Size=727,725
Collapsed=0
@@ -329,18 +329,17 @@ Size=967,499
Collapsed=0
[Window][Usage Analytics]
Pos=518,24
Size=1162,1176
Pos=1702,689
Size=566,438
Collapsed=0
DockId=0x0000000F,0
[Window][Tool Preset Manager]
Pos=110,83
Pos=1301,302
Size=1469,1267
Collapsed=0
[Window][Persona Editor]
Pos=332,138
Pos=909,391
Size=1886,1234
Collapsed=0
@@ -365,7 +364,7 @@ Size=900,700
Collapsed=0
[Window][Text Viewer - Entry #4]
Pos=1165,782
Pos=828,397
Size=900,700
Collapsed=0
@@ -375,54 +374,16 @@ Size=1593,1240
Collapsed=0
[Window][Text Viewer - Entry #5]
Pos=989,778
Size=1366,1032
Collapsed=0
[Window][Shader Editor]
Pos=457,710
Size=573,280
Collapsed=0
[Window][Text Viewer - list_directory]
Pos=1376,796
Size=882,656
Collapsed=0
[Window][Text Viewer - Last Output]
Pos=60,60
Size=900,700
Collapsed=0
[Window][Text Viewer - Entry #2]
Pos=1518,488
Size=900,700
[Window][Shader Editor]
Pos=753,637
Size=493,487
Collapsed=0
[Window][Session Hub]
Pos=1163,24
Size=1234,1542
Collapsed=0
DockId=0x00000006,1
[Window][Project Settings]
Pos=0,24
Size=85,1176
Collapsed=0
DockId=0x00000005,1
[Window][Undo/Redo History]
Pos=1268,24
Size=1593,1754
Collapsed=0
DockId=0x00000006,4
[Window][Text Viewer - ts_cpp_get_skeleton]
Pos=60,58
Size=1422,1259
Collapsed=0
[Window][Text Viewer - ts_cpp_get_code_outline]
[Window][Text Viewer - list_directory]
Pos=60,60
Size=900,700
Collapsed=0
@@ -460,9 +421,9 @@ Column 4 Weight=1.0000
[Table][0x2A6000B6,4]
RefScale=16
Column 0 Width=48
Column 1 Width=67
Column 1 Width=68
Column 2 Weight=1.0000
Column 3 Width=243
Column 3 Width=120
[Table][0x8BCC69C7,6]
RefScale=13
@@ -474,17 +435,17 @@ Column 4 Weight=1.0000
Column 5 Width=50
[Table][0x3751446B,4]
RefScale=18
Column 0 Width=54
Column 1 Width=81
RefScale=16
Column 0 Width=48
Column 1 Width=72
Column 2 Weight=1.0000
Column 3 Width=135
Column 3 Width=120
[Table][0x2C515046,4]
RefScale=16
Column 0 Width=48
Column 1 Weight=1.0000
Column 2 Width=166
Column 2 Width=117
Column 3 Width=48
[Table][0xD99F45C5,4]
@@ -507,7 +468,7 @@ Column 2 Weight=1.0000
[Table][0xA02D8C87,3]
RefScale=16
Column 0 Width=179
Column 0 Width=180
Column 1 Width=120
Column 2 Weight=1.0000
@@ -523,12 +484,12 @@ Column 1 Weight=1.0000
[Table][0x8D8494AB,2]
RefScale=16
Column 0 Width=131
Column 0 Width=132
Column 1 Weight=1.0000
[Table][0x2C261E6E,2]
RefScale=16
Column 0 Width=98
Column 0 Width=99
Column 1 Weight=1.0000
[Table][0x9CB1E6FD,2]
@@ -536,32 +497,23 @@ RefScale=16
Column 0 Width=187
Column 1 Weight=1.0000
[Table][0x1DA1F4A6,2]
RefScale=16
Column 0 Weight=1.0000
Column 1 Width=120
[Table][0x5B562C13,3]
RefScale=16
Column 0 Weight=1.0000
Column 1 Width=80
Column 2 Width=150
[Docking][Data]
DockNode ID=0x00000008 Pos=3125,170 Size=593,1157 Split=Y
DockNode ID=0x00000009 Parent=0x00000008 SizeRef=1029,147 Selected=0x0469CA7A
DockNode ID=0x0000000A Parent=0x00000008 SizeRef=1029,145 Selected=0xDF822E02
DockSpace ID=0xAFC85805 Window=0x079D3A04 Pos=0,24 Size=1680,1176 Split=X
DockNode ID=0x00000003 Parent=0xAFC85805 SizeRef=2175,1183 Split=X
DockNode ID=0x0000000B Parent=0x00000003 SizeRef=404,1186 Split=X Selected=0xF4139CA2
DockNode ID=0x00000007 Parent=0x0000000B SizeRef=1512,858 Split=X Selected=0x8CA2375C
DockNode ID=0x00000005 Parent=0x00000007 SizeRef=1266,1681 CentralNode=1 Selected=0x418C7449
DockNode ID=0x00000006 Parent=0x00000007 SizeRef=1593,1681 Selected=0x2C0206CE
DockNode ID=0x0000000E Parent=0x0000000B SizeRef=1777,858 Selected=0x418C7449
DockNode ID=0x0000000D Parent=0x00000003 SizeRef=435,1186 Selected=0x363E93D6
DockNode ID=0x00000004 Parent=0xAFC85805 SizeRef=1162,1183 Split=X Selected=0x3AEC3498
DockNode ID=0x0000000C Parent=0x00000004 SizeRef=916,380 Selected=0x655BC6E9
DockNode ID=0x0000000F Parent=0x00000004 SizeRef=281,380 Selected=0xDEB547B6
DockNode ID=0x00000008 Pos=3125,170 Size=593,1157 Split=Y
DockNode ID=0x00000009 Parent=0x00000008 SizeRef=1029,147 Selected=0x0469CA7A
DockNode ID=0x0000000A Parent=0x00000008 SizeRef=1029,145 Selected=0xDF822E02
DockSpace ID=0xAFC85805 Window=0x079D3A04 Pos=0,24 Size=3840,2136 Split=X
DockNode ID=0x00000003 Parent=0xAFC85805 SizeRef=1617,1183 Split=X
DockNode ID=0x0000000B Parent=0x00000003 SizeRef=404,1186 Split=X Selected=0xF4139CA2
DockNode ID=0x00000007 Parent=0x0000000B SizeRef=999,858 Split=Y Selected=0x7BD57D6A
DockNode ID=0x00000005 Parent=0x00000007 SizeRef=639,904 CentralNode=1 Selected=0x7BD57D6A
DockNode ID=0x00000006 Parent=0x00000007 SizeRef=639,441 Selected=0x1DCB2623
DockNode ID=0x0000000E Parent=0x0000000B SizeRef=2839,858 Split=X Selected=0x418C7449
DockNode ID=0x00000001 Parent=0x0000000E SizeRef=1668,1288 Split=X Selected=0x6F2B5B04
DockNode ID=0x00000010 Parent=0x00000001 SizeRef=759,1416 Selected=0x418C7449
DockNode ID=0x00000011 Parent=0x00000001 SizeRef=907,1416 Selected=0x6F2B5B04
DockNode ID=0x00000002 Parent=0x0000000E SizeRef=1169,1288 Selected=0x8CA2375C
DockNode ID=0x0000000D Parent=0x00000003 SizeRef=435,1186 Selected=0x363E93D6
DockNode ID=0x00000004 Parent=0xAFC85805 SizeRef=511,1183 Selected=0x3AEC3498
;;;<<<Layout_655921752_Default>>>;;;
;;;<<<HelloImGui_Misc>>>;;;
File diff suppressed because it is too large
+1 -2
View File
@@ -71,6 +71,5 @@
"logs/**",
"*.log"
]
},
"plugin": ["superpowers@git+https://github.com/obra/superpowers.git"]
}
}
-2
View File
@@ -17,8 +17,6 @@ paths = []
base_dir = "."
paths = []
[context_presets]
[gemini_cli]
binary_path = "gemini"
+1 -1
View File
@@ -9,5 +9,5 @@ active = "main"
[discussions.main]
git_commit = ""
last_updated = "2026-05-02T14:52:30"
last_updated = "2026-03-12T20:34:43"
history = []
-4
View File
@@ -15,13 +15,9 @@ dependencies = [
"uvicorn",
"tree-sitter>=0.25.2",
"tree-sitter-python>=0.25.0",
"tree-sitter-c>=0.23.2",
"tree-sitter-cpp>=0.23.2",
"mcp>=1.0.0",
"pytest-timeout>=2.4.0",
"pyopengl>=3.1.10",
"chromadb>=1.5.8",
"sentence-transformers>=5.4.1",
]
[dependency-groups]
-69
View File
@@ -1,69 +0,0 @@
import os
import re
import ast
from collections import Counter
def audit_file(path):
    with open(path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    content = "".join(lines)
    findings = []
    # 1. Detect multiple identical import lines
    imports = [line.strip() for line in lines if line.strip().startswith('import ')]
    import_counts = Counter(imports)
    for imp, count in import_counts.items():
        if count > 1:
            findings.append(f"Duplicate import: '{imp}' ({count} times)")
    # 2. Detect multiple 'from X import Y' lines for the same module X and symbol Y
    from_imports = [line.strip() for line in lines if line.strip().startswith('from ')]
    from_counts = Counter(from_imports)
    for imp, count in from_counts.items():
        if count > 1:
            findings.append(f"Duplicate from-import: '{imp}' ({count} times)")
    # 3. Detect mixed indentation (look for 4-space blocks)
    four_spaces = "    "
    for i, line in enumerate(lines):
        if line.startswith(four_spaces):
            findings.append(f"Mixed indentation: 4-space block found at line {i+1}")
            break  # Only report once per file
    # 4. List all functions and classes that appear more than once
    try:
        tree = ast.parse(content)
        defs = []
        for node in ast.walk(tree):
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
                defs.append(node.name)
        def_counts = Counter(defs)
        for name, count in def_counts.items():
            if count > 1:
                findings.append(f"Duplicate definition: '{name}' ({count} times)")
    except Exception as e:
        findings.append(f"AST Parse Error: {e}")
    return findings

def main():
    src_dir = 'src'
    if not os.path.exists(src_dir):
        print(f"Directory {src_dir} not found.")
        return
    for root, dirs, files in os.walk(src_dir):
        for file in files:
            if file.endswith('.py'):
                path = os.path.join(root, file)
                findings = audit_file(path)
                if findings:
                    print(f"--- {path} ---")
                    for f in findings:
                        print(f"  {f}")
                    print()

if __name__ == "__main__":
    main()
-78
View File
@@ -1,78 +0,0 @@
import sys
import os
import time
from pathlib import Path
# Add project root to sys.path
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if project_root not in sys.path:
    sys.path.append(project_root)

from src.performance_monitor import get_monitor
from src.aggregate import build_file_items
from src.dag_engine import TrackDAG
from src.models import Ticket

def run_aggregation_profile():
    monitor = get_monitor()
    base_dir = Path(project_root)
    # Get 30 python files for profiling
    files = [str(p.relative_to(base_dir)) for p in base_dir.glob("src/*.py")][:30]
    with monitor.scope("aggregation"):
        build_file_items(base_dir, files)

def run_dag_profile():
    monitor = get_monitor()
    tickets = []
    # Create 60 tickets with multiple dependencies to simulate complexity
    for i in range(60):
        deps = []
        if i > 0: deps.append(f"ticket_{i-1}")
        if i > 5: deps.append(f"ticket_{i-5}")
        if i > 10: deps.append(f"ticket_{i-10}")
        tickets.append(Ticket(
            id=f"ticket_{i}",
            description=f"Ticket {i}",
            depends_on=deps
        ))
    dag = TrackDAG(tickets)
    with monitor.scope("dag_operations"):
        # Run operations 50 times per profile call
        for _ in range(50):
            dag.topological_sort()
            dag.has_cycle()

def print_metrics():
    monitor = get_monitor()
    metrics = monitor.get_metrics()
    print(f"{'Component':<30} | {'Avg (ms)':<12} | {'Count':<8} | {'Max (ms)':<12} | {'Min (ms)':<12}")
    print("-" * 85)
    # Collect all base component names
    components = set()
    for key in metrics.keys():
        if key.startswith("time_") and key.endswith("_ms") and not key.endswith("_avg"):
            components.add(key[5:-3])
    for comp in sorted(list(components)):
        avg = metrics.get(f"time_{comp}_ms_avg", 0.0)
        count = int(metrics.get(f"count_{comp}", 0))
        max_val = metrics.get(f"max_{comp}_ms", 0.0)
        min_val = metrics.get(f"min_{comp}_ms", 0.0)
        print(f"{comp:<30} | {avg:<12.4f} | {count:<8} | {max_val:<12.4f} | {min_val:<12.4f}")

def main():
    monitor = get_monitor()
    monitor.enabled = True
    print("Starting Profiling Baseline...")
    print("Running aggregation profile (5 iterations)...")
    for _ in range(5):
        run_aggregation_profile()
    print("Running DAG profile (5 iterations)...")
    for _ in range(5):
        run_dag_profile()
    print("\nBaseline Performance Metrics:")
    print_metrics()
    monitor.stop()

if __name__ == "__main__":
    main()
+13 -18
View File
@@ -4,11 +4,10 @@ from typing import Any, Callable
from src import ai_client
class UserSimAgent:
def __init__(self, hook_client: Any, model: str = "gemini-2.5-flash-lite", enable_delays: bool = True, batch_typing: bool = False) -> None:
def __init__(self, hook_client: Any, model: str = "gemini-2.5-flash-lite", enable_delays: bool = True) -> None:
self.hook_client = hook_client
self.model = model
self.enable_delays = enable_delays
self.batch_typing = batch_typing
self.system_prompt = (
"You are a software engineer testing an AI coding assistant called 'Manual Slop'. "
"You want to build a small Python project and verify the assistant's capabilities. "
@@ -31,22 +30,18 @@ class UserSimAgent:
delay = random.uniform(min_delay, max_delay)
time.sleep(delay)
def simulate_typing(self, text: str, jitter_range: tuple[float, float] = (0.01, 0.05), batch_typing: bool = False) -> None:
if not self.enable_delays:
return
if batch_typing or self.batch_typing:
time.sleep(0.01)
return
# Simulate typing by sleeping after chunks or characters to balance speed and realism
if len(text) > 200:
for i in range(0, len(text), 10):
time.sleep(random.uniform(jitter_range[0] * 3, jitter_range[1] * 3))
elif len(text) > 50:
for i in range(0, len(text), 3):
time.sleep(random.uniform(jitter_range[0] * 1.5, jitter_range[1] * 1.5))
else:
for char in text:
time.sleep(random.uniform(jitter_range[0], jitter_range[1]))
def simulate_typing(self, text: str, jitter_range: tuple[float, float] = (0.01, 0.05)) -> None:
if self.enable_delays:
# Simulate typing by sleeping after chunks or characters to balance speed and realism
if len(text) > 200:
for i in range(0, len(text), 10):
time.sleep(random.uniform(jitter_range[0] * 3, jitter_range[1] * 3))
elif len(text) > 50:
for i in range(0, len(text), 3):
time.sleep(random.uniform(jitter_range[0] * 1.5, jitter_range[1] * 1.5))
else:
for char in text:
time.sleep(random.uniform(jitter_range[0], jitter_range[1]))
def generate_response(self, conversation_history: list[dict]) -> str:
"""
+119 -161
View File
@@ -19,9 +19,7 @@ from pathlib import Path, PureWindowsPath
from typing import Any, cast
from src import summarize
from src import project_manager
from src import beads_client
from src.file_cache import ASTParser
from src.performance_monitor import get_monitor
def find_next_increment(output_dir: Path, namespace: str) -> int:
pattern = re.compile(rf"^{re.escape(namespace)}_(\d+)\.md$")
@@ -133,54 +131,52 @@ def build_file_items(base_dir: Path, files: list[str | dict[str, Any]]) -> list[
auto_aggregate : bool
force_full : bool
"""
with get_monitor().scope("build_file_items"):
items: list[dict[str, Any]] = []
for entry_raw in files:
if isinstance(entry_raw, dict):
entry = cast(str, entry_raw.get("path", ""))
tier = entry_raw.get("tier")
auto_aggregate = entry_raw.get("auto_aggregate", True)
force_full = entry_raw.get("force_full", False)
elif hasattr(entry_raw, "path"):
entry = entry_raw.path
tier = getattr(entry_raw, "tier", None)
auto_aggregate = getattr(entry_raw, "auto_aggregate", True)
force_full = getattr(entry_raw, "force_full", False)
else:
entry = entry_raw
tier = None
auto_aggregate = True
force_full = False
if not entry or not isinstance(entry, str):
continue
paths = resolve_paths(base_dir, entry)
if not paths:
items.append({"path": None, "entry": entry, "content": f"ERROR: no files matched: {entry}", "error": True, "mtime": 0.0, "tier": tier, "auto_aggregate": auto_aggregate, "force_full": force_full})
continue
for path in paths:
try:
content = path.read_text(encoding="utf-8")
mtime = path.stat().st_mtime
error = False
except FileNotFoundError:
content = f"ERROR: file not found: {path}"
mtime = 0.0
error = True
except Exception as e:
content = f"ERROR: {e}"
mtime = 0.0
error = True
items.append({"path": path, "entry": entry, "content": content, "error": error, "mtime": mtime, "tier": tier, "auto_aggregate": auto_aggregate, "force_full": force_full})
return items
items: list[dict[str, Any]] = []
for entry_raw in files:
if isinstance(entry_raw, dict):
entry = cast(str, entry_raw.get("path", ""))
tier = entry_raw.get("tier")
auto_aggregate = entry_raw.get("auto_aggregate", True)
force_full = entry_raw.get("force_full", False)
elif hasattr(entry_raw, "path"):
entry = entry_raw.path
tier = getattr(entry_raw, "tier", None)
auto_aggregate = getattr(entry_raw, "auto_aggregate", True)
force_full = getattr(entry_raw, "force_full", False)
else:
entry = entry_raw
tier = None
auto_aggregate = True
force_full = False
if not entry or not isinstance(entry, str):
continue
paths = resolve_paths(base_dir, entry)
if not paths:
items.append({"path": None, "entry": entry, "content": f"ERROR: no files matched: {entry}", "error": True, "mtime": 0.0, "tier": tier, "auto_aggregate": auto_aggregate, "force_full": force_full})
continue
for path in paths:
try:
content = path.read_text(encoding="utf-8")
mtime = path.stat().st_mtime
error = False
except FileNotFoundError:
content = f"ERROR: file not found: {path}"
mtime = 0.0
error = True
except Exception as e:
content = f"ERROR: {e}"
mtime = 0.0
error = True
items.append({"path": path, "entry": entry, "content": content, "error": error, "mtime": mtime, "tier": tier, "auto_aggregate": auto_aggregate, "force_full": force_full})
return items
def build_summary_section(base_dir: Path, files: list[str | dict[str, Any]]) -> str:
"""
Build a compact summary section using summarize.py — one short block per file.
Used as the initial <context> block instead of full file contents.
"""
with get_monitor().scope("build_summary_section"):
items = build_file_items(base_dir, files)
return summarize.build_summary_markdown(items)
items = build_file_items(base_dir, files)
return summarize.build_summary_markdown(items)
def _build_files_section_from_items(file_items: list[dict[str, Any]]) -> str:
"""Build the files markdown section from pre-read file items (avoids double I/O)."""
@@ -189,65 +185,37 @@ def _build_files_section_from_items(file_items: list[dict[str, Any]]) -> str:
if not item.get("auto_aggregate", True):
continue
path = item.get("path")
entry = item.get("entry", "unknown")
content = item.get("content", "")
entry = cast(str, item.get("entry", "unknown"))
content = cast(str, item.get("content", ""))
if path is None:
sections.append(f"### `{entry}`\n\n```text\n{content}\n```")
else:
suffix = path.suffix.lstrip(".") if path.suffix else "text"
original = entry if "*" not in entry else str(path)
sections.append(f"### `{original}`\n\n```{suffix}\n{content}\n```")
continue
p = cast(Path, path)
suffix = p.suffix.lstrip(".") if hasattr(p, "suffix") else "text"
lang = suffix if suffix else "text"
original = entry if "*" not in entry else str(p)
sections.append(f"### `{original}`\n\n```{lang}\n{content}\n```")
return "\n\n---\n\n".join(sections)
def build_beads_section(base_dir: Path) -> str:
client = beads_client.BeadsClient(base_dir)
if not client.is_initialized():
return ""
beads = client.list_beads()
if not beads:
return ""
active = [b for b in beads if b.status == "active"]
completed = [b for b in beads if b.status == "completed"]
parts = []
parts.append("## Beads Mode: Progress Track")
if completed:
parts.append("### Completed Beads")
comp_list = ", ".join([f"`{b.title}`" for b in completed])
parts.append(comp_list)
if active:
parts.append("### Active Beads")
for b in active:
parts.append(f"- **{b.title}** ({b.id}): {b.description}")
return "\n\n".join(parts)
def build_markdown_from_items(file_items: list[dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], history: list[str], summary_only: bool = False, aggregation_strategy: str = "auto", execution_mode: str = "standard", base_dir: Path | None = None) -> str:
def build_markdown_from_items(file_items: list[dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], history: list[str], summary_only: bool = False) -> str:
"""Build markdown from pre-read file items instead of re-reading from disk."""
parts = []
# STATIC PREFIX: Files and Screenshots must go first to maximize Cache Hits
if file_items:
if aggregation_strategy == "summarize":
if summary_only:
parts.append("## Files (Summary)\n\n" + summarize.build_summary_markdown(file_items))
elif aggregation_strategy == "full":
else:
parts.append("## Files\n\n" + _build_files_section_from_items(file_items))
else: # auto
if summary_only:
parts.append("## Files (Summary)\n\n" + summarize.build_summary_markdown(file_items))
else:
parts.append("## Files\n\n" + _build_files_section_from_items(file_items))
if screenshots:
parts.append("## Screenshots\n\n" + build_screenshots_section(screenshot_base_dir, screenshots))
if execution_mode == "beads" and base_dir:
beads_md = build_beads_section(base_dir)
if beads_md:
parts.append(beads_md)
# DYNAMIC SUFFIX: History changes every turn, must go last
# DYNAMIC SUFFIX: History changes every turn, must go last
if history:
parts.append("## Discussion History\n\n" + build_discussion_section(history))
return "\n\n---\n\n".join(parts)
def build_markdown_no_history(file_items: list[dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], summary_only: bool = False, aggregation_strategy: str = "auto") -> str:
def build_markdown_no_history(file_items: list[dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], summary_only: bool = False) -> str:
"""Build markdown with only files + screenshots (no history). Used for stable caching."""
return build_markdown_from_items(file_items, screenshot_base_dir, screenshots, history=[], summary_only=summary_only, aggregation_strategy=aggregation_strategy)
return build_markdown_from_items(file_items, screenshot_base_dir, screenshots, history=[], summary_only=summary_only)
def build_discussion_text(history: list[str]) -> str:
"""Build just the discussion history section text. Returns empty string if no history."""
@@ -261,24 +229,24 @@ def build_tier1_context(file_items: list[dict[str, Any]], screenshot_base_dir: P
Full content for core conductor files and files with tier=1, summaries for others.
"""
core_files = {"product.md", "tech-stack.md", "workflow.md", "tracks.md"}
sections = []
for item in file_items:
if not item.get("auto_aggregate", True):
continue
path = item.get("path")
if not path: continue
entry = item.get("entry")
display_name = entry or str(path)
tier = item.get("tier")
force_full = item.get("force_full")
content = item.get("content", "")
if path.name in core_files or tier == 1 or force_full:
suffix = path.suffix.lstrip(".") if path.suffix else "text"
sections.append(f"### `{display_name}`\n\n```{suffix}\n{content}\n```")
else:
sections.append(f"### `{display_name}`\n\n{summarize.summarise_file(path, content)}")
parts = []
if sections:
# Files section
if file_items:
sections = []
for item in file_items:
if not item.get("auto_aggregate", True):
continue
path = item.get("path")
name = path.name if path and isinstance(path, Path) else ""
if name in core_files or item.get("tier") == 1 or item.get("force_full"):
# Include in full
sections.append("### `" + (cast(str, item.get("entry")) or str(path)) + "`\n\n" +
f"```{path.suffix.lstrip('.') if path and isinstance(path, Path) and path.suffix else 'text'}\n{item.get('content', '')}\n```")
else:
# Summarize
if path and isinstance(path, Path):
sections.append("### `" + (cast(str, item.get("entry")) or str(path)) + "`\n\n" +
summarize.summarise_file(path, cast(str, item.get("content", ""))))
parts.append("## Files (Tier 1 - Mixed)\n\n" + "\n\n---\n\n".join(sections))
if screenshots:
parts.append("## Screenshots\n\n" + build_screenshots_section(screenshot_base_dir, screenshots))
@@ -298,69 +266,60 @@ def build_tier3_context(file_items: list[dict[str, Any]], screenshot_base_dir: P
Tier 3 Context: Execution/Worker.
Full content for focus_files and files with tier=3, summaries/skeletons for others.
"""
with get_monitor().scope("build_tier3_context"):
focus_set = set(focus_files)
parser = ASTParser("python")
parts = []
if file_items:
sections = []
for item in file_items:
if not item.get("auto_aggregate", True):
continue
path = item.get("path")
entry = item.get("entry", "")
path = cast(Path, item.get("path"))
entry = cast(str, item.get("entry", ""))
path_str = str(path) if path else ""
name = path.name if path else ""
tier = item.get("tier")
force_full = item.get("force_full")
content = item.get("content", "")
is_focus = entry in focus_set or (name and name in focus_set) or (path_str and path_str in focus_set)
if not is_focus and path_str:
for focus in focus_set:
if focus in path_str:
is_focus = True
break
display_name = entry or path_str
if is_focus or tier == 3 or force_full:
suffix = path.suffix.lstrip(".") if path and path.suffix else "text"
sections.append(f"### `{display_name}`\n\n```{suffix}\n{content}\n```")
elif path:
if path.suffix == ".py" and not item.get("error"):
try:
skeleton = parser.get_skeleton(content)
sections.append(f"### `{display_name}` (AST Skeleton)\n\n```python\n{skeleton}\n```")
except Exception:
sections.append(f"### `{display_name}`\n\n{summarize.summarise_file(path, content)}")
else:
sections.append(f"### `{display_name}`\n\n{summarize.summarise_file(path, content)}")
parts = []
if sections:
parts.append("## Files (Tier 3 - Focused)\n\n" + "\n\n---\n\n".join(sections))
if screenshots:
parts.append("## Screenshots\n\n" + build_screenshots_section(screenshot_base_dir, screenshots))
if history:
parts.append("## Discussion History\n\n" + build_discussion_section(history))
return "\n\n---\n\n".join(parts)
def build_markdown(base_dir: Path, files: list[str | dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], history: list[str], summary_only: bool = False, execution_mode: str = "standard") -> str:
with get_monitor().scope("build_markdown"):
parts = []
# STATIC PREFIX: Files and Screenshots must go first to maximize Cache Hits
if files:
if summary_only:
parts.append("## Files (Summary)\n\n" + build_summary_section(base_dir, files))
# Check if this file is in focus_files (by name or path)
is_focus = False
for focus in focus_files:
if focus == entry or (path and focus == path.name) or (path_str and focus in path_str):
is_focus = True
break
if is_focus or item.get("tier") == 3 or item.get("force_full"):
sections.append("### `" + (entry or path_str) + "`\n\n" +
f"```{path.suffix.lstrip('.') if path and path.suffix else 'text'}\n{item.get('content', '')}\n```")
else:
parts.append("## Files\n\n" + build_files_section(base_dir, files))
if screenshots:
parts.append("## Screenshots\n\n" + build_screenshots_section(screenshot_base_dir, screenshots))
if execution_mode == "beads":
beads_md = build_beads_section(base_dir)
if beads_md:
parts.append(beads_md)
# DYNAMIC SUFFIX: History changes every turn, must go last
if history:
parts.append("## Discussion History\n\n" + build_discussion_section(history))
return "\n\n---\n\n".join(parts)
content = cast(str, item.get("content", ""))
if path and path.suffix == ".py" and not item.get("error"):
try:
parser = ASTParser("python")
skeleton = parser.get_skeleton(content)
sections.append(f"### `{entry or path_str}` (AST Skeleton)\n\n```python\n{skeleton}\n```")
except Exception:
# Fallback to summary if AST parsing fails
sections.append(f"### `{entry or path_str}`\n\n" + summarize.summarise_file(path, content))
else:
if path:
sections.append(f"### `{entry or path_str}`\n\n" + summarize.summarise_file(path, content))
parts.append("## Files (Tier 3 - Focused)\n\n" + "\n\n---\n\n".join(sections))
if screenshots:
parts.append("## Screenshots\n\n" + build_screenshots_section(screenshot_base_dir, screenshots))
if history:
parts.append("## Discussion History\n\n" + build_discussion_section(history))
return "\n\n---\n\n".join(parts)
def run(config: dict[str, Any], aggregation_strategy: str = "auto") -> tuple[str, Path, list[dict[str, Any]]]:
def build_markdown(base_dir: Path, files: list[str | dict[str, Any]], screenshot_base_dir: Path, screenshots: list[str], history: list[str], summary_only: bool = False) -> str:
parts = []
# STATIC PREFIX: Files and Screenshots must go first to maximize Cache Hits
if files:
if summary_only:
parts.append("## Files (Summary)\n\n" + build_summary_section(base_dir, files))
else:
parts.append("## Files\n\n" + build_files_section(base_dir, files))
if screenshots:
parts.append("## Screenshots\n\n" + build_screenshots_section(screenshot_base_dir, screenshots))
# DYNAMIC SUFFIX: History changes every turn, must go last
if history:
parts.append("## Discussion History\n\n" + build_discussion_section(history))
return "\n\n---\n\n".join(parts)
def run(config: dict[str, Any]) -> tuple[str, Path, list[dict[str, Any]]]:
namespace = config.get("project", {}).get("name")
if not namespace:
namespace = config.get("output", {}).get("namespace", "project")
@@ -376,9 +335,8 @@ def run(config: dict[str, Any], aggregation_strategy: str = "auto") -> tuple[str
# Build file items once, then construct markdown from them (avoids double I/O)
file_items = build_file_items(base_dir, files)
summary_only = config.get("project", {}).get("summary_only", False)
execution_mode = config.get("project", {}).get("execution_mode", "standard")
markdown = build_markdown_from_items(file_items, screenshot_base_dir, screenshots, history,
summary_only=summary_only, aggregation_strategy=aggregation_strategy, execution_mode=execution_mode, base_dir=base_dir)
summary_only=summary_only)
output_file.write_text(markdown, encoding="utf-8")
return markdown, output_file, file_items
+19 -115
View File
@@ -51,7 +51,6 @@ _history_trunc_limit: int = 8000
events: EventEmitter = EventEmitter()
def set_model_params(temp: float, max_tok: int, trunc_limit: int = 8000, top_p: float = 1.0) -> None:
"""Sets global generation parameters like temperature and max tokens."""
global _temperature, _max_tokens, _history_trunc_limit, _top_p
_temperature = temp
_max_tokens = max_tok
@@ -143,48 +142,23 @@ _SYSTEM_PROMPT: str = (
)
_custom_system_prompt: str = ""
_base_system_prompt_override: str = ""
_use_default_base_system_prompt: bool = True
_project_context_marker: str = ""
def set_custom_system_prompt(prompt: str) -> None:
"""Sets a custom system prompt to be combined with the default instructions."""
global _custom_system_prompt
_custom_system_prompt = prompt
def set_base_system_prompt(prompt: str) -> None:
global _base_system_prompt_override
_base_system_prompt_override = prompt
def set_use_default_base_prompt(use_default: bool) -> None:
global _use_default_base_system_prompt
_use_default_base_system_prompt = use_default
def set_project_context_marker(marker: str) -> None:
global _project_context_marker
_project_context_marker = marker
def _get_context_marker() -> str:
return _project_context_marker if _project_context_marker.strip() else "[SYSTEM: FILES UPDATED]"
def _get_combined_system_prompt(preset: Optional[ToolPreset] = None, bias: Optional[BiasProfile] = None) -> str:
if preset is None: preset = _active_tool_preset
if bias is None: bias = _active_bias_profile
if _use_default_base_system_prompt:
base = _SYSTEM_PROMPT
else:
base = _base_system_prompt_override
base = _SYSTEM_PROMPT
if _custom_system_prompt.strip():
base = f"{base}\n\n[USER SYSTEM PROMPT]\n{_custom_system_prompt}"
base = f"{_SYSTEM_PROMPT}\n\n[USER SYSTEM PROMPT]\n{_custom_system_prompt}"
if preset and bias:
strategy = _BIAS_ENGINE.generate_tooling_strategy(preset, bias)
if strategy:
base += f"\n\n{strategy}"
return base
def get_combined_system_prompt(preset: Optional[ToolPreset] = None, bias: Optional[BiasProfile] = None) -> str:
return _get_combined_system_prompt(preset, bias)
from collections import deque
_comms_log: deque[dict[str, Any]] = deque(maxlen=1000)
@@ -368,7 +342,6 @@ def _classify_minimax_error(exc: Exception) -> ProviderError:
return ProviderError("unknown", "minimax", Exception(body))
def set_provider(provider: str, model: str) -> None:
"""Updates the active LLM provider and model name."""
global _provider, _model
_provider = provider
if provider == "gemini_cli":
@@ -387,11 +360,9 @@ def set_provider(provider: str, model: str) -> None:
_model = model
def get_provider() -> str:
"""Returns the current active provider name."""
return _provider
def cleanup() -> None:
"""Performs cleanup operations like deleting server-side Gemini caches."""
global _gemini_client, _gemini_cache, _gemini_cached_file_paths
if _gemini_client and _gemini_cache:
try:
@@ -401,13 +372,12 @@ def cleanup() -> None:
_gemini_cached_file_paths = []
def reset_session() -> None:
"""Clears conversation history and resets provider-specific session state."""
global _gemini_client, _gemini_chat, _gemini_cache
global _gemini_cache_md_hash, _gemini_cache_created_at, _gemini_cached_file_paths
global _anthropic_client, _anthropic_history
global _deepseek_client, _deepseek_history
global _minimax_client, _minimax_history
global _CACHED_ANTHROPIC_TOOLS, _CACHED_DEEPSEEK_TOOLS
global _CACHED_ANTHROPIC_TOOLS
global _gemini_cli_adapter
if _gemini_client and _gemini_cache:
try:
@@ -436,7 +406,6 @@ def reset_session() -> None:
with _minimax_history_lock:
_minimax_history = []
_CACHED_ANTHROPIC_TOOLS = None
_CACHED_DEEPSEEK_TOOLS = None
file_cache.reset_client()
def get_gemini_cache_stats() -> dict[str, Any]:
@@ -512,15 +481,13 @@ TOOL_NAME: str = "run_powershell"
_agent_tools: dict[str, bool] = {}
def set_agent_tools(tools: dict[str, bool]) -> None:
"""Configures which tools are enabled for the AI agent."""
global _agent_tools, _CACHED_ANTHROPIC_TOOLS, _CACHED_DEEPSEEK_TOOLS
global _agent_tools, _CACHED_ANTHROPIC_TOOLS
_agent_tools = tools
_CACHED_ANTHROPIC_TOOLS = None
_CACHED_DEEPSEEK_TOOLS = None
def set_tool_preset(preset_name: Optional[str]) -> None:
"""Loads a tool preset and applies it via set_agent_tools."""
global _agent_tools, _CACHED_ANTHROPIC_TOOLS, _CACHED_DEEPSEEK_TOOLS, _tool_approval_modes, _active_tool_preset
global _agent_tools, _CACHED_ANTHROPIC_TOOLS, _tool_approval_modes, _active_tool_preset
_tool_approval_modes = {}
if not preset_name or preset_name == "None":
# Enable all tools if no preset
@@ -547,10 +514,8 @@ def set_tool_preset(preset_name: Optional[str]) -> None:
sys.stderr.write(f"[ERROR] Failed to set tool preset '{preset_name}': {e}\n")
sys.stderr.flush()
_CACHED_ANTHROPIC_TOOLS = None
_CACHED_DEEPSEEK_TOOLS = None
def set_bias_profile(profile_name: Optional[str]) -> None:
"""Sets the active tool bias profile for tuning model behavior."""
global _active_bias_profile
if not profile_name or profile_name == "None":
_active_bias_profile = None
@@ -566,7 +531,6 @@ def set_bias_profile(profile_name: Optional[str]) -> None:
sys.stderr.flush()
def get_bias_profile() -> Optional[str]:
"""Returns the name of the currently active bias profile."""
return _active_bias_profile.name if _active_bias_profile else None
def _build_anthropic_tools() -> list[dict[str, Any]]:
@@ -1041,9 +1005,8 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str,
if isinstance(r, dict) and "output" in r:
val = r["output"]
if isinstance(val, str):
marker = _get_context_marker()
if marker in val:
val = val.split(marker)[0].strip()
if "[SYSTEM: FILES UPDATED]" in val:
val = val.split("[SYSTEM: FILES UPDATED]")[0].strip()
if _history_trunc_limit > 0 and len(val) > _history_trunc_limit:
val = val[:_history_trunc_limit] + "\n\n... [TRUNCATED BY SYSTEM TO SAVE TOKENS.]"
r["output"] = val
@@ -1145,7 +1108,7 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str,
file_items, changed = _reread_file_items(file_items)
ctx = _build_file_diff_text(changed)
if ctx:
out += f"\n\n{_get_context_marker()}\n\n{ctx}"
out += f"\n\n[SYSTEM: FILES UPDATED]\n\n{ctx}"
if r_idx == MAX_TOOL_ROUNDS: out += "\n\n[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]"
out = _truncate_tool_output(out)
@@ -1256,7 +1219,7 @@ def _send_gemini_cli(md_content: str, user_message: str, base_dir: str,
file_items, changed = _reread_file_items(file_items)
ctx = _build_file_diff_text(changed)
if ctx:
out += f"\n\n{_get_context_marker()}\n\n{ctx}"
out += f"\n\n[SYSTEM: FILES UPDATED]\n\n{ctx}"
if r_idx == MAX_TOOL_ROUNDS:
out += "\n\n[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]"
@@ -1282,7 +1245,7 @@ def _send_gemini_cli(md_content: str, user_message: str, base_dir: str,
_CHARS_PER_TOKEN: float = 3.5
_ANTHROPIC_MAX_PROMPT_TOKENS: int = 180_000
_GEMINI_MAX_INPUT_TOKENS: int = 900_000
_FILE_REFRESH_MARKER: str = _project_context_marker if _project_context_marker.strip() else "[SYSTEM: FILES UPDATED]"
_FILE_REFRESH_MARKER: str = "[FILES UPDATED"
def _estimate_message_tokens(msg: dict[str, Any]) -> int:
cached = msg.get("_est_tokens")
@@ -1589,12 +1552,13 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
refreshed_ctx = _build_file_diff_text(changed)
if refreshed_ctx:
tool_results.append({
"type": "text",
"text": (
f"{_get_context_marker()}\n\n"
+ refreshed_ctx
),
})
"type": "text",
"text": (
"[FILES UPDATED \u2014 current contents below. "
"Do NOT re-read these files with PowerShell.]\n\n"
+ refreshed_ctx
),
})
if round_idx == MAX_TOOL_ROUNDS:
tool_results.append({
"type": "text",
@@ -1878,7 +1842,7 @@ def _send_deepseek(md_content: str, user_message: str, base_dir: str,
file_items, changed = _reread_file_items(file_items)
ctx = _build_file_diff_text(changed)
if ctx:
out += f"\n\n{_get_context_marker()}\n\n{ctx}"
out += f"\n\n[SYSTEM: FILES UPDATED]\n\n{ctx}"
if round_idx == MAX_TOOL_ROUNDS:
out += "\n\n[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]"
@@ -2099,7 +2063,7 @@ def _send_minimax(md_content: str, user_message: str, base_dir: str,
file_items, changed = _reread_file_items(file_items)
ctx = _build_file_diff_text(changed)
if ctx:
out += f"\n\n{_get_context_marker()}\n\n{ctx}"
out += f"\n\n[SYSTEM: FILES UPDATED]\n\n{ctx}"
if round_idx == MAX_TOOL_ROUNDS:
out += "\n\n[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]"
@@ -2245,20 +2209,9 @@ def send(
enable_tools: bool = True,
stream_callback: Optional[Callable[[str], None]] = None,
patch_callback: Optional[Callable[[str, str], Optional[str]]] = None,
rag_engine: Optional[Any] = None,
) -> str:
monitor = performance_monitor.get_monitor()
if monitor.enabled: monitor.start_component("ai_client.send")
if rag_engine and getattr(rag_engine.config, "enabled", False) and "## Retrieved Context" not in user_message:
chunks = rag_engine.search(user_message)
if chunks:
context_block = "## Retrieved Context\n\n"
for i, chunk in enumerate(chunks):
path = chunk.get("metadata", {}).get("path", "unknown")
context_block += f"### Chunk {i+1} (Source: {path})\n{chunk.get('document', '')}\n\n"
user_message = context_block + user_message
_append_comms("OUT", "request", {"message": user_message, "system": _get_combined_system_prompt(_active_tool_preset, _active_bias_profile)})
with _send_lock:
if _provider == "gemini":
@@ -2461,52 +2414,3 @@ def get_history_bleed_stats(md_content: Optional[str] = None) -> dict[str, Any]:
"percentage": 0,
})
def run_subagent_summarization(file_path: str, content: str, is_code: bool, outline: str) -> str:
"""Performs a stateless summarization request using a sub-agent prompt."""
prompt_tmpl = mma_prompts.TIER4_SUMMARIZE_CODE_PROMPT if is_code else mma_prompts.TIER4_SUMMARIZE_TEXT_PROMPT
prompt = prompt_tmpl.format(file_path=file_path, outline=outline, content=content)
if _provider == "gemini":
_ensure_gemini_client()
if _gemini_client:
resp = _gemini_client.models.generate_content(
model=_model,
contents=prompt,
config=types.GenerateContentConfig(
temperature=0.0,
max_output_tokens=1024,
)
)
return resp.text or ""
elif _provider == "anthropic":
_ensure_anthropic_client()
if _anthropic_client:
resp = _anthropic_client.messages.create(
model=_model,
max_tokens=1024,
messages=[{"role": "user", "content": prompt}]
)
return "".join([b.text for b in resp.content if hasattr(b, "text") and b.text])
elif _provider == "deepseek":
creds = _load_credentials()
api_key = creds.get("deepseek", {}).get("api_key")
if not api_key: return "ERROR: DeepSeek API key missing"
headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
payload = {
"model": _model,
"messages": [{"role": "user", "content": prompt}],
"temperature": 0.0,
}
try:
r = requests.post("https://api.deepseek.com/chat/completions", headers=headers, json=payload, timeout=60)
r.raise_for_status()
return r.json()["choices"][0]["message"]["content"]
except Exception as e:
return f"ERROR: DeepSeek summarization failed: {e}"
elif _provider == "gemini_cli":
# Using the adapter for a one-off call
from src.gemini_cli_adapter import GeminiCliAdapter
adapter = GeminiCliAdapter(binary_path="gemini")
resp_data = adapter.send(prompt, model=_model)
return resp_data.get("text", "")
return "ERROR: Unsupported provider for sub-agent summarization"
-13
View File
@@ -83,7 +83,6 @@ class ApiHookClient:
return res
def post_project(self, project_data: dict) -> dict[str, Any]:
"""Updates the current project configuration."""
return self._make_request('POST', '/api/project', data=project_data) or {}
def get_project(self) -> dict[str, Any]:
@@ -99,12 +98,10 @@ class ApiHookClient:
return self._make_request('POST', '/api/session', data={"session": {"entries": session_entries}}) or {}
def get_events(self) -> list[dict[str, Any]]:
"""Retrieves any pending events from the API event queue."""
res = self._make_request('GET', '/api/events')
return res.get("events", []) if res else []
def clear_events(self) -> list[dict[str, Any]]:
"""Retrieves and clears the event queue."""
return self.get_events()
@@ -244,30 +241,20 @@ class ApiHookClient:
return self._make_request('GET', '/api/patch/status') or {}
def spawn_mma_worker(self, data: dict) -> dict:
"""Spawns a new MMA worker with the provided configuration."""
return self._make_request('POST', '/api/mma/workers/spawn', data=data) or {}
def kill_mma_worker(self, worker_id: str) -> dict:
"""Kills an active MMA worker by its ID."""
return self._make_request('POST', '/api/mma/workers/kill', data={"worker_id": worker_id}) or {}
def pause_mma_pipeline(self) -> dict:
"""Pauses the MMA execution pipeline."""
return self._make_request('POST', '/api/mma/pipeline/pause') or {}
def resume_mma_pipeline(self) -> dict:
"""Resumes the MMA execution pipeline."""
return self._make_request('POST', '/api/mma/pipeline/resume') or {}
def inject_context(self, data: dict) -> dict:
"""Injects custom file context into the application."""
return self._make_request('POST', '/api/context/inject', data=data) or {}
def mutate_mma_dag(self, data: dict) -> dict:
"""Mutates the MMA DAG (Directed Acyclic Graph) structure."""
return self._make_request('POST', '/api/mma/dag/mutate', data=data) or {}
def approve_mma_ticket(self, ticket_id: str) -> dict:
"""Manually approves a specific ticket for execution in Step Mode."""
return self._make_request('POST', '/api/mma/ticket/approve', data={"ticket_id": ticket_id}) or {}
+2 -28
View File
@@ -42,7 +42,6 @@ See Also:
"""
def _get_app_attr(app: Any, name: str, default: Any = None) -> Any:
"""Retrieves an attribute from the App or its Controller."""
if hasattr(app, name):
val = getattr(app, name)
return val
@@ -52,13 +51,11 @@ def _get_app_attr(app: Any, name: str, default: Any = None) -> Any:
return default
def _has_app_attr(app: Any, name: str) -> bool:
"""Checks if an attribute exists on the App or its Controller."""
if hasattr(app, name): return True
if hasattr(app, 'controller') and hasattr(app.controller, name): return True
return False
def _set_app_attr(app: Any, name: str, value: Any) -> None:
"""Sets an attribute on the App or its Controller."""
if hasattr(app, name):
setattr(app, name, value)
elif hasattr(app, 'controller'):
@@ -69,12 +66,10 @@ def _set_app_attr(app: Any, name: str, value: Any) -> None:
class HookServerInstance(ThreadingHTTPServer):
"""Custom HTTPServer that carries a reference to the main App instance."""
def __init__(self, server_address: tuple[str, int], RequestHandlerClass: type, app: Any) -> None:
"""Initializes the server instance with an app reference."""
super().__init__(server_address, RequestHandlerClass)
self.app = app
def _serialize_for_api(obj: Any) -> Any:
"""Serializes complex objects into API-friendly formats (dicts/lists)."""
if hasattr(obj, "to_dict"):
return obj.to_dict()
if isinstance(obj, list):
@@ -86,7 +81,6 @@ def _serialize_for_api(obj: Any) -> Any:
class HookHandler(BaseHTTPRequestHandler):
"""Handles incoming HTTP requests for the API hooks."""
def do_GET(self) -> None:
"""Handles GET requests by routing to the appropriate state provider."""
try:
app = self.server.app
session_logger.log_api_hook("GET", self.path, "")
@@ -231,9 +225,6 @@ class HookHandler(BaseHTTPRequestHandler):
for key, attr in gettable.items():
val = _get_app_attr(app, attr, None)
result[key] = _serialize_for_api(val)
result['show_text_viewer'] = _get_app_attr(app, 'show_text_viewer', False)
result['text_viewer_title'] = _get_app_attr(app, 'text_viewer_title', '')
result['text_viewer_type'] = _get_app_attr(app, 'text_viewer_type', 'markdown')
finally: event.set()
lock = _get_app_attr(app, "_pending_gui_tasks_lock")
tasks = _get_app_attr(app, "_pending_gui_tasks")
@@ -259,7 +250,7 @@ class HookHandler(BaseHTTPRequestHandler):
self.end_headers()
files = _get_app_attr(app, "files", [])
screenshots = _get_app_attr(app, "screenshots", [])
self.wfile.write(json.dumps({"files": _serialize_for_api(files), "screenshots": _serialize_for_api(screenshots)}).encode("utf-8"))
self.wfile.write(json.dumps({"files": files, "screenshots": screenshots}).encode("utf-8"))
elif self.path == "/api/metrics/financial":
self.send_response(200)
self.send_header("Content-Type", "application/json")
@@ -605,7 +596,7 @@ class HookHandler(BaseHTTPRequestHandler):
elif self.path == "/api/mma/dag/mutate":
def mutate_dag():
try:
func = _get_app_attr(app, "mutate_dag")
func = _get_app_attr(app, "_mutate_dag")
if func: func(data)
except Exception as e:
sys.stderr.write(f"[DEBUG] Hook API mutate_dag error: {e}\n")
@@ -618,23 +609,6 @@ class HookHandler(BaseHTTPRequestHandler):
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write(json.dumps({"status": "queued"}).encode("utf-8"))
elif self.path == "/api/mma/ticket/approve":
ticket_id = data.get("ticket_id")
def approve_ticket():
try:
func = _get_app_attr(app, "approve_ticket")
if func: func(ticket_id)
except Exception as e:
sys.stderr.write(f"[DEBUG] Hook API approve_ticket error: {e}\n")
sys.stderr.flush()
lock = _get_app_attr(app, "_pending_gui_tasks_lock")
tasks = _get_app_attr(app, "_pending_gui_tasks")
if lock and tasks is not None:
with lock: tasks.append({"action": "custom_callback", "callback": approve_ticket})
self.send_response(200)
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write(json.dumps({"status": "queued"}).encode("utf-8"))
else:
self.send_response(404)
self.end_headers()
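Both `/api/mma/dag/mutate` and the removed `/api/mma/ticket/approve` handler above follow the same handoff: the HTTP thread never touches app state directly, it appends a callback to `_pending_gui_tasks` under the lock and answers `{"status": "queued"}`; the GUI thread drains and runs the queue later. A minimal standalone sketch of that pattern (module-level names here are illustrative, not the app's actual attributes):
```python
# Illustrative sketch of the queue-and-drain handoff used by the handlers above.
import threading

pending_gui_tasks: list[dict] = []
pending_gui_tasks_lock = threading.Lock()

def enqueue_from_http_thread(callback) -> dict:
    """What the handler does: append a callback task and report it as queued."""
    with pending_gui_tasks_lock:
        pending_gui_tasks.append({"action": "custom_callback", "callback": callback})
    return {"status": "queued"}

def drain_on_gui_thread() -> None:
    """What the GUI loop does each frame: take the current batch and run it."""
    with pending_gui_tasks_lock:
        tasks = list(pending_gui_tasks)
        pending_gui_tasks.clear()
    for task in tasks:
        if task.get("action") == "custom_callback":
            task["callback"]()
```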
+46 -415
View File
@@ -6,7 +6,6 @@ import os
import re
from typing import Any, List, Dict, Optional, Callable
from pathlib import Path
from src import workspace_manager
import json
import uuid
import tomli_w
@@ -26,7 +25,6 @@ from src import project_manager
from src import performance_monitor
from src import models
from src import presets
from src import thinking_parser
from src.file_cache import ASTParser
from src import ai_client
from src import shell_runner
@@ -36,7 +34,6 @@ from src import orchestrator_pm
from src import conductor_tech_lead
from src import multi_agent_conductor
from src import tool_presets
from src import rag_engine
from src import theme_2 as theme
def hide_tk_root() -> Tk:
@@ -150,7 +147,6 @@ class AppController:
self.project_paths: List[str] = []
self.active_discussion: str = "main"
self.disc_entries: List[Dict[str, Any]] = []
self.ui_active_persona: str = ""
self.disc_roles: List[str] = []
self.files: List[str] = []
self.screenshots: List[str] = []
@@ -203,9 +199,6 @@ class AppController:
self._pending_actions: Dict[str, ConfirmDialog] = {}
self._pending_ask_dialog: bool = False
self.mcp_config: models.MCPConfiguration = models.MCPConfiguration()
self.rag_config: Optional[models.RAGConfig] = None
self.rag_engine: Optional[rag_engine.RAGEngine] = None
self.rag_status: str = 'idle'
# AI settings state
self._current_provider: str = "gemini"
self._current_model: str = "gemini-2.5-flash-lite"
@@ -234,16 +227,12 @@ class AppController:
self.ui_project_git_dir: str = ""
self.ui_project_main_context: str = ""
self.ui_project_system_prompt: str = ""
self.ui_project_execution_mode: str = "native"
self.ui_gemini_cli_path: str = "gemini"
self.ui_word_wrap: bool = True
self.ui_summary_only: bool = False
self.ui_auto_add_history: bool = False
self.ui_active_tool_preset: str | None = None
self.ui_global_system_prompt: str = ""
self.ui_base_system_prompt: str = ""
self.ui_use_default_base_prompt: bool = True
self._show_base_prompt_diff_modal: bool = False
self.ui_project_context_marker: str = ""
self.ui_agent_tools: Dict[str, bool] = {}
self.available_models: List[str] = []
self.all_available_models: Dict[str, List[str]] = {} # provider -> list of models
@@ -253,8 +242,6 @@ class AppController:
self.ai_status: str = 'idle'
self.ai_response: str = ''
self.last_md: str = ''
self.last_aggregate_markdown: str = ''
self.last_resolved_system_prompt: str = ''
self.last_md_path: Optional[Path] = None
self.last_file_items: List[Any] = []
self.send_thread: Optional[threading.Thread] = None
@@ -264,7 +251,6 @@ class AppController:
self.show_text_viewer: bool = False
self.text_viewer_title: str = ''
self.text_viewer_content: str = ''
self.text_viewer_type: str = 'text'
self._pending_comms: List[Dict[str, Any]] = []
self._pending_tool_calls: List[Dict[str, Any]] = []
self._pending_history_adds: List[Dict[str, Any]] = []
@@ -347,8 +333,6 @@ class AppController:
'gcli_path': 'ui_gemini_cli_path',
'output_dir': 'ui_output_dir',
'files_base_dir': 'ui_files_base_dir',
'files': 'files',
'screenshots': 'screenshots',
'ai_status': 'ai_status',
'ai_response': 'ai_response',
'active_discussion': 'active_discussion',
@@ -360,24 +344,12 @@ class AppController:
'show_confirm_modal': 'show_confirm_modal',
'mma_epic_input': 'ui_epic_input',
'mma_status': 'mma_status',
'rag_status': 'rag_status',
'rag_enabled': 'rag_enabled',
'rag_source': 'rag_source',
'rag_emb_provider': 'rag_emb_provider',
'rag_mcp_server': 'rag_mcp_server',
'rag_mcp_tool': 'rag_mcp_tool',
'rag_chunk_size': 'rag_chunk_size',
'rag_chunk_overlap': 'rag_chunk_overlap',
'mcp_config_json': 'mcp_config_json',
'mma_active_tier': 'active_tier',
'ui_new_track_name': 'ui_new_track_name',
'ui_new_track_desc': 'ui_new_track_desc',
'manual_approve': 'ui_manual_approve',
'global_system_prompt': 'ui_global_system_prompt',
'project_system_prompt': 'ui_project_system_prompt',
'base_system_prompt': 'ui_base_system_prompt',
'use_default_base_prompt': 'ui_use_default_base_prompt',
'show_base_prompt_diff_modal': '_show_base_prompt_diff_modal',
'global_preset_name': 'ui_global_preset_name',
'project_preset_name': 'ui_project_preset_name',
'ui_active_tool_preset': 'ui_active_tool_preset',
@@ -402,14 +374,7 @@ class AppController:
'ui_separate_tier1': 'ui_separate_tier1',
'ui_separate_tier2': 'ui_separate_tier2',
'ui_separate_tier3': 'ui_separate_tier3',
'ui_separate_tier4': 'ui_separate_tier4',
'show_text_viewer': 'show_text_viewer',
'text_viewer_title': 'text_viewer_title',
'text_viewer_type': 'text_viewer_type',
'disc_entries': 'disc_entries',
'ui_file_paths': 'ui_file_paths',
'ui_auto_switch_layout': 'ui_auto_switch_layout',
'ui_tier_layout_bindings': 'ui_tier_layout_bindings'
'ui_separate_tier4': 'ui_separate_tier4'
}
self._gettable_fields = dict(self._settable_fields)
self._gettable_fields.update({
@@ -436,9 +401,6 @@ class AppController:
'bg_shader_enabled': 'bg_shader_enabled',
'global_system_prompt': 'ui_global_system_prompt',
'project_system_prompt': 'ui_project_system_prompt',
'base_system_prompt': 'ui_base_system_prompt',
'use_default_base_prompt': 'ui_use_default_base_prompt',
'show_base_prompt_diff_modal': '_show_base_prompt_diff_modal',
'global_preset_name': 'ui_global_preset_name',
'project_preset_name': 'ui_project_preset_name',
'ui_active_tool_preset': 'ui_active_tool_preset',
@@ -459,10 +421,7 @@ class AppController:
'ui_separate_tier1': 'ui_separate_tier1',
'ui_separate_tier2': 'ui_separate_tier2',
'ui_separate_tier3': 'ui_separate_tier3',
'ui_separate_tier4': 'ui_separate_tier4',
'show_text_viewer': 'show_text_viewer',
'text_viewer_title': 'text_viewer_title',
'text_viewer_type': 'text_viewer_type'
'ui_separate_tier4': 'ui_separate_tier4'
})
self.perf_monitor = performance_monitor.get_monitor()
self._perf_profiling_enabled = False
@@ -514,91 +473,6 @@ class AppController:
def thinking_indicator(self) -> bool:
return self.ai_status in ("sending...", "streaming...")
@property
def rag_enabled(self) -> bool:
return self.rag_config.enabled if self.rag_config else False
@rag_enabled.setter
def rag_enabled(self, value: bool) -> None:
if self.rag_config:
self.rag_config.enabled = value
self.rag_engine = rag_engine.RAGEngine(self.rag_config, self.active_project_root)
@property
def rag_source(self) -> str:
return self.rag_config.vector_store.provider if self.rag_config else 'mock'
@rag_source.setter
def rag_source(self, value: str) -> None:
if self.rag_config:
self.rag_config.vector_store.provider = value
if self.rag_engine: self.rag_engine = rag_engine.RAGEngine(self.rag_config, self.active_project_root)
@property
def rag_emb_provider(self) -> str:
return self.rag_config.embedding_provider if self.rag_config else 'gemini'
@rag_emb_provider.setter
def rag_emb_provider(self, value: str) -> None:
if self.rag_config:
self.rag_config.embedding_provider = value
if self.rag_engine: self.rag_engine = rag_engine.RAGEngine(self.rag_config, self.active_project_root)
if self.rag_engine: self.rag_engine = rag_engine.RAGEngine(self.rag_config, self.active_project_root)
@property
def rag_chunk_size(self) -> int:
return self.rag_config.chunk_size if self.rag_config else 1000
@rag_chunk_size.setter
def rag_chunk_size(self, value: int) -> None:
if self.rag_config: self.rag_config.chunk_size = value
@property
def rag_chunk_overlap(self) -> int:
return self.rag_config.chunk_overlap if self.rag_config else 200
@rag_chunk_overlap.setter
def rag_chunk_overlap(self, value: int) -> None:
if self.rag_config: self.rag_config.chunk_overlap = value
@property
def rag_mcp_server(self) -> str:
return self.rag_config.vector_store.mcp_server or "" if self.rag_config else ""
@rag_mcp_server.setter
def rag_mcp_server(self, value: str) -> None:
if self.rag_config: self.rag_config.vector_store.mcp_server = value
@property
def rag_mcp_tool(self) -> str:
return self.rag_config.vector_store.mcp_tool or "" if self.rag_config else ""
@rag_mcp_tool.setter
def rag_mcp_tool(self, value: str) -> None:
if self.rag_config: self.rag_config.vector_store.mcp_tool = value
@property
def mcp_config_json(self) -> str:
return json.dumps(self.mcp_config.to_dict()) if self.mcp_config else "{}"
@mcp_config_json.setter
def mcp_config_json(self, value: str) -> None:
try:
data = json.loads(value)
self.mcp_config = models.MCPConfiguration.from_dict(data)
except:
pass
@property
def ui_file_paths(self) -> list[str]:
return [f.path if hasattr(f, 'path') else str(f) for f in self.files]
@ui_file_paths.setter
def ui_file_paths(self, value: list[str]) -> None:
old_files = {f.path: f for f in self.files if hasattr(f, 'path')}
new_files = []
import time
now = time.time()
for p in value:
if p in old_files:
new_files.append(old_files[p])
else:
from src import models
new_files.append(models.FileItem(path=p, injected_at=now))
self.files = new_files
@property
def operations_live_indicator(self) -> bool:
return not self.is_viewing_prior_session
@@ -625,9 +499,6 @@ class AppController:
'btn_approve_mma_step': lambda: self._handle_mma_respond(approved=True),
'btn_approve_spawn': lambda: self._handle_mma_respond(approved=True),
'btn_prune_logs': self.cb_prune_logs,
'btn_reset_base_prompt': self._cb_reset_base_prompt,
'btn_show_base_prompt_diff': self._cb_show_base_prompt_diff,
'btn_rebuild_rag_index': self._rebuild_rag_index,
}
self._predefined_callbacks: dict[str, Callable[..., Any]] = {
'_test_callback_func_write_to_file': self._test_callback_func_write_to_file,
@@ -639,10 +510,7 @@ class AppController:
'_cb_save_tool_preset': self._cb_save_tool_preset,
'_cb_delete_tool_preset': self._cb_delete_tool_preset,
'_switch_project': self._switch_project,
'_refresh_from_project': self._refresh_from_project,
'save_workspace_profile': self._cb_save_workspace_profile,
'load_workspace_profile': self._cb_load_workspace_profile,
'delete_workspace_profile': self._cb_delete_workspace_profile,
'_refresh_from_project': self._refresh_from_project
}
def _update_gcli_adapter(self, path: str) -> None:
@@ -669,46 +537,6 @@ class AppController:
"payload": status
})
def _set_rag_status(self, status: str) -> None:
"""Thread-safe update of rag_status via the GUI task queue."""
with self._pending_gui_tasks_lock:
self._pending_gui_tasks.append({
"action": "set_value",
"item": "rag_status",
"value": status
})
def _rebuild_rag_index(self) -> None:
"""Background thread that re-indexes all files in the current project."""
if not self.rag_config or not self.rag_config.enabled or not self.rag_engine:
return
def _run():
try:
self._set_rag_status("indexing...")
import concurrent.futures
# 1. Incremental indexing of current files in parallel
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
futures = []
for f in self.files:
path = f.path if hasattr(f, "path") else str(f)
futures.append(executor.submit(self.rag_engine.index_file, path))
concurrent.futures.wait(futures)
# 2. Cleanup stale entries (files no longer tracked)
indexed_paths = self.rag_engine.get_all_indexed_paths()
current_paths = {f.path if hasattr(f, "path") else str(f) for f in self.files}
stale_paths = [p for p in indexed_paths if p not in current_paths]
if stale_paths:
self.rag_engine.delete_documents_by_path(stale_paths)
self._set_rag_status("ready")
except Exception as e:
self._set_rag_status(f"error: {e}")
threading.Thread(target=_run, daemon=True).start()
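The removed `_rebuild_rag_index` above does two things: parallel incremental indexing of the tracked files, then cleanup of index entries whose files are no longer tracked. A condensed standalone sketch of that flow, assuming an `engine` object with the same `index_file` / `get_all_indexed_paths` / `delete_documents_by_path` methods the diff calls on `rag_engine`:
```python
# Condensed sketch of the removed rebuild logic; `engine` stands in for rag_engine.
import concurrent.futures

def rebuild_index(engine, current_paths: list[str]) -> None:
    # 1. Incremental indexing of the currently tracked files in parallel
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(engine.index_file, p) for p in current_paths]
        concurrent.futures.wait(futures)
    # 2. Cleanup of stale entries (files no longer tracked)
    tracked = set(current_paths)
    stale = [p for p in engine.get_all_indexed_paths() if p not in tracked]
    if stale:
        engine.delete_documents_by_path(stale)
```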
def _trigger_gui_refresh(self):
with self._pending_gui_tasks_lock:
self._pending_gui_tasks.append({'action': 'set_comms_dirty'})
@@ -782,6 +610,16 @@ class AppController:
self._token_stats_dirty = True
if not is_streaming:
self._autofocus_response_tab = True
# ONLY add to history when turn is complete
if self.ui_auto_add_history and not stream_id and not is_streaming:
role = payload.get("role", "AI")
with self._pending_history_adds_lock:
self._pending_history_adds.append({
"role": role,
"content": self.ai_response,
"collapsed": True,
"ts": project_manager.now_ts()
})
elif action in ("mma_stream", "mma_stream_append"):
# Some events might have these at top level, some in a 'payload' dict
stream_id = task.get("stream_id") or task.get("payload", {}).get("stream_id")
@@ -803,18 +641,8 @@ class AppController:
sys.stderr.flush()
self.mma_status = p.get("status", self.mma_status)
old_tier = self.active_tier
self.active_tier = p.get("active_tier", self.active_tier)
if getattr(self, "ui_auto_switch_layout", False) and self.active_tier and self.active_tier != old_tier:
for tier_prefix in ["Tier 1", "Tier 2", "Tier 3", "Tier 4"]:
if self.active_tier.startswith(tier_prefix):
bound_profile = getattr(self, "ui_tier_layout_bindings", {}).get(tier_prefix)
if bound_profile:
self._cb_load_workspace_profile(bound_profile)
break
# Preserve existing model/provider config if not explicitly in payload
new_usage = p.get("tier_usage", {})
for tier, data in new_usage.items():
@@ -955,25 +783,6 @@ class AppController:
elapsed = end_time - start_time
self._completed_ticket_count += 1
self._avg_ticket_time = ((self._avg_ticket_time * (self._completed_ticket_count - 1)) + elapsed) / self._completed_ticket_count
elif action == "bead_updated":
payload = task.get("payload", {})
bid = payload.get("bead_id")
status = payload.get("status")
if bid and status:
stream_id = "Tier 2"
msg = f"\n[BEAD UPDATE] {bid} -> status: {status}\n"
if stream_id not in self.mma_streams:
self.mma_streams[stream_id] = ""
self.mma_streams[stream_id] += msg
elif action == "bead_updated":
payload = task.get("payload", {})
bead_id = payload.get("bead_id")
status = payload.get("status")
stream_id = "Tier 2 (Tech Lead)"
if stream_id not in self.mma_streams:
self.mma_streams[stream_id] = ""
self.mma_streams[stream_id] += f"[BEAD UPDATE] {bead_id} -> status: {status}\n"
except Exception as e:
import traceback
sys.stderr.write(f"[DEBUG] Error executing GUI task: {e}\n{traceback.format_exc()}\n")
@@ -1074,8 +883,6 @@ class AppController:
self.project_paths = list(projects_cfg.get("paths", []))
self.active_project_path = projects_cfg.get("active", "")
self._load_active_project()
self.workspace_manager = workspace_manager.WorkspaceManager(project_root=Path(self.active_project_path).parent if self.active_project_path else None)
self.workspace_profiles = self.workspace_manager.load_all_profiles()
# Deserialize FileItems in files.paths
raw_paths = self.project.get("files", {}).get("paths", [])
self.files = []
@@ -1105,11 +912,9 @@ class AppController:
self.ui_gemini_cli_path = self.project.get("gemini_cli", {}).get("binary_path", "gemini")
self._update_gcli_adapter(self.ui_gemini_cli_path)
self.ui_word_wrap = proj_meta.get("word_wrap", True)
self.ui_summary_only = proj_meta.get("summary_only", False)
self.ui_auto_add_history = disc_sec.get("auto_add", False)
self.ui_global_system_prompt = self.config.get("ai", {}).get("system_prompt", "")
self.ui_base_system_prompt = self.config.get("ai", {}).get("base_system_prompt", "")
self.ui_use_default_base_prompt = self.config.get("ai", {}).get("use_default_base_prompt", True)
self.ui_project_context_marker = proj_meta.get("context_marker", "")
self.preset_manager = presets.PresetManager(Path(self.active_project_path).parent if self.active_project_path else None)
self.presets = self.preset_manager.load_all()
@@ -1129,16 +934,6 @@ class AppController:
else:
self.mcp_config = models.MCPConfiguration()
rag_data = self.config.get('rag')
if rag_data:
self.rag_config = models.RAGConfig.from_dict(rag_data)
else:
self.rag_config = models.RAGConfig()
self.rag_engine = rag_engine.RAGEngine(self.rag_config, self.active_project_root)
if self.rag_config.enabled and self.rag_engine.is_empty():
self._rebuild_rag_index()
from src.personas import PersonaManager
self.persona_manager = PersonaManager(Path(self.active_project_path).parent if self.active_project_path else None)
self.personas = self.persona_manager.load_all()
@@ -1153,13 +948,11 @@ class AppController:
self.ui_project_preset_name = proj_meta.get("active_preset")
gui_cfg = self.config.get("gui", {})
self.ui_auto_switch_layout = gui_cfg.get("auto_switch_layout", False)
self.ui_tier_layout_bindings = gui_cfg.get("tier_layout_bindings", {"Tier 1": "", "Tier 2": "", "Tier 3": "", "Tier 4": ""})
from src import bg_shader
bg_shader.get_bg().enabled = gui_cfg.get("bg_shader_enabled", False)
_default_windows = {
"Project Settings": True,
"Context Hub": True,
"Files & Media": True,
"AI Settings": True,
"MMA Dashboard": True,
@@ -1612,14 +1405,10 @@ class AppController:
# Clear response area for new turn
self.ai_response = ""
csp = filter(bool, [self.ui_global_system_prompt.strip(), self.ui_project_system_prompt.strip()])
custom_prompt = "\n\n".join(csp)
ai_client.set_custom_system_prompt(custom_prompt)
ai_client.set_base_system_prompt(self.ui_base_system_prompt)
ai_client.set_use_default_base_prompt(self.ui_use_default_base_prompt)
ai_client.set_project_context_marker(self.ui_project_context_marker)
self.last_resolved_system_prompt = ai_client.get_combined_system_prompt()
ai_client.set_custom_system_prompt("\n\n".join(csp))
ai_client.set_model_params(self.temperature, self.max_tokens, self.history_trunc_limit, self.top_p)
ai_client.set_agent_tools(self.ui_agent_tools) # Force update adapter path right before send to bypass potential duplication issues
ai_client.set_agent_tools(self.ui_agent_tools)
# Force update adapter path right before send to bypass potential duplication issues
self._update_gcli_adapter(self.ui_gemini_cli_path)
sys.stderr.write(f"[DEBUG] Calling ai_client.send with provider={ai_client.get_provider()}, model={self.current_model}, gcli_path={self.ui_gemini_cli_path}\n")
sys.stderr.flush()
@@ -1634,8 +1423,7 @@ class AppController:
stream_callback=lambda text: self._on_ai_stream(text),
pre_tool_callback=self._confirm_and_run,
qa_callback=ai_client.run_tier4_analysis,
patch_callback=ai_client.run_tier4_patch_callback,
rag_engine=self.rag_engine
patch_callback=ai_client.run_tier4_patch_callback
)
self.event_queue.put("response", {"text": resp, "status": "done", "role": "AI"})
except ai_client.ProviderError as e:
@@ -1679,24 +1467,11 @@ class AppController:
if kind == "response" and "usage" in payload:
u = payload["usage"]
inp = u.get("input_tokens") or u.get("prompt_tokens") or 0
out = u.get("output_tokens") or u.get("completion_tokens") or 0
cache_read = u.get("cache_read_input_tokens") or 0
cache_create = u.get("cache_creation_input_tokens") or 0
total = u.get("total_tokens") or 0
# Store normalized usage back in payload for history rendering
u["input_tokens"] = inp
u["output_tokens"] = out
u["cache_read_input_tokens"] = cache_read
self.session_usage["input_tokens"] += inp
self.session_usage["output_tokens"] += out
self.session_usage["cache_read_input_tokens"] += cache_read
self.session_usage["cache_creation_input_tokens"] += cache_create
self.session_usage["total_tokens"] += total
input_t = u.get("input_tokens") or 0
output_t = u.get("output_tokens") or 0
for k in ["input_tokens", "output_tokens", "cache_read_input_tokens", "cache_creation_input_tokens", "total_tokens"]:
if k in u:
self.session_usage[k] += u.get(k, 0) or 0
input_t = u.get("input_tokens", 0)
output_t = u.get("output_tokens", 0)
model = payload.get("model", "unknown")
self._token_history.append({
"time": time.time(),
@@ -1715,42 +1490,22 @@ class AppController:
"ts": entry.get("ts", project_manager.now_ts())
})
if kind == "response":
if self.ui_auto_add_history:
role = payload.get("role", "AI")
text_content = payload.get("text", "")
if text_content.strip():
segments, parsed_response = thinking_parser.parse_thinking_trace(text_content)
entry_obj = {
if kind in ("tool_result", "tool_call"):
role = "Tool" if kind == "tool_result" else "Vendor API"
content = ""
if kind == "tool_result":
content = payload.get("output", "")
else:
content = payload.get("script") or payload.get("args") or payload.get("message", "")
if isinstance(content, dict):
content = json.dumps(content, indent=1)
with self._pending_history_adds_lock:
self._pending_history_adds.append({
"role": role,
"content": parsed_response.strip() if parsed_response else "",
"content": f"[{kind.upper().replace('_', ' ')}]\n{content}",
"collapsed": True,
"ts": entry.get("ts", project_manager.now_ts())
}
if segments:
entry_obj["thinking_segments"] = [{"content": s.content, "marker": s.marker} for s in segments]
if entry_obj["content"] or segments:
with self._pending_history_adds_lock:
self._pending_history_adds.append(entry_obj)
if kind in ("tool_result", "tool_call"):
if self.ui_auto_add_history:
role = "Tool" if kind == "tool_result" else "Vendor API"
content = ""
if kind == "tool_result":
content = payload.get("output", "")
else:
content = payload.get("script") or payload.get("args") or payload.get("message", "")
if isinstance(content, dict):
content = json.dumps(content, indent=1)
with self._pending_history_adds_lock:
self._pending_history_adds.append({
"role": role,
"content": f"[{kind.upper().replace('_', ' ')}]\n{content}",
"collapsed": True,
"ts": entry.get("ts", project_manager.now_ts())
})
})
if kind == "history_add":
payload = entry.get("payload", {})
with self._pending_history_adds_lock:
@@ -2037,9 +1792,6 @@ class AppController:
base_dir = self.active_project_root
csp = filter(bool, [self.ui_global_system_prompt.strip(), self.ui_project_system_prompt.strip()])
ai_client.set_custom_system_prompt("\n\n".join(csp))
ai_client.set_base_system_prompt(self.ui_base_system_prompt)
ai_client.set_use_default_base_prompt(self.ui_use_default_base_prompt)
ai_client.set_project_context_marker(self.ui_project_context_marker)
temp = req.temperature if req.temperature is not None else self.temperature
top_p = req.top_p if req.top_p is not None else self.top_p
tokens = req.max_tokens if req.max_tokens is not None else self.max_tokens
@@ -2054,7 +1806,7 @@ class AppController:
"ts": project_manager.now_ts()
})
try:
resp = ai_client.send(stable_md, user_msg, base_dir, self.last_file_items, disc_text, rag_engine=self.rag_engine)
resp = ai_client.send(stable_md, user_msg, base_dir, self.last_file_items, disc_text)
if req.auto_add_history:
with self._pending_history_adds_lock:
self._pending_history_adds.append({
@@ -2173,18 +1925,6 @@ class AppController:
models.save_config(self.config)
self._set_status("config saved")
def _cb_reset_base_prompt(self, user_data=None) -> None:
self.ui_base_system_prompt = ai_client._SYSTEM_PROMPT
self.ui_use_default_base_prompt = False
def _cb_clear_summary_cache(self, user_data=None) -> None:
from src import summarize
summarize._summary_cache.clear()
self._push_mma_state_update()
def _cb_show_base_prompt_diff(self, user_data=None) -> None:
self._show_base_prompt_diff_modal = True
def _cb_disc_create(self) -> None:
nm = self.ui_disc_new_name_input.strip()
if nm:
@@ -2211,17 +1951,7 @@ class AppController:
self._set_status(f"switched to: {Path(path).stem}")
def _refresh_from_project(self) -> None:
# Deserialize FileItems in files.paths
raw_paths = self.project.get("files", {}).get("paths", [])
self.files = []
for p in raw_paths:
if isinstance(p, models.FileItem):
self.files.append(p)
elif isinstance(p, dict):
self.files.append(models.FileItem.from_dict(p))
else:
self.files.append(models.FileItem(path=str(p)))
self.files = list(self.project.get("files", {}).get("paths", []))
self.screenshots = list(self.project.get("screenshots", {}).get("paths", []))
disc_sec = self.project.get("discussion", {})
self.disc_roles = list(disc_sec.get("roles", ["User", "AI", "Vendor API", "System"]))
@@ -2243,6 +1973,7 @@ class AppController:
self.ui_auto_scroll_comms = proj.get("project", {}).get("auto_scroll_comms", True)
self.ui_auto_scroll_tool_calls = proj.get("project", {}).get("auto_scroll_tool_calls", True)
self.ui_word_wrap = proj.get("project", {}).get("word_wrap", True)
self.ui_summary_only = proj.get("project", {}).get("summary_only", False)
agent_tools_cfg = proj.get("agent", {}).get("tools", {})
self.ui_agent_tools = {t: agent_tools_cfg.get(t, True) for t in models.AGENT_TOOL_NAMES}
# MMA Tracks
@@ -2287,29 +2018,6 @@ class AppController:
self.tool_presets = self.tool_preset_manager.load_all_presets()
self.bias_profiles = self.tool_preset_manager.load_all_bias_profiles()
if self.rag_config and self.rag_config.enabled:
self._rebuild_rag_index()
def _cb_save_workspace_profile(self, name: str, scope: str = 'project') -> None:
if not hasattr(self, '_app') or not self._app:
return
profile = self._app._capture_workspace_profile(name)
self.workspace_manager.save_profile(profile, scope=scope)
self.workspace_profiles = self.workspace_manager.load_all_profiles()
self._app.workspace_profiles = self.workspace_profiles
def _cb_delete_workspace_profile(self, name: str, scope: str = 'project') -> None:
self.workspace_manager.delete_profile(name, scope=scope)
self.workspace_profiles = self.workspace_manager.load_all_profiles()
if hasattr(self, '_app') and self._app:
self._app.workspace_profiles = self.workspace_profiles
def _cb_load_workspace_profile(self, name: str) -> None:
if name in self.workspace_profiles:
profile = self.workspace_profiles[name]
if hasattr(self, '_app') and self._app:
self._app._apply_workspace_profile(profile)
def _apply_preset(self, name: str, scope: str) -> None:
print(f"[DEBUG] _apply_preset: name={name}, scope={scope}")
if name == "None":
@@ -2387,8 +2095,8 @@ class AppController:
description=state.metadata.name,
tickets=tickets
)
# Keep dicts for UI table
self._load_active_tickets()
# Keep dicts for UI table (or convert models.Ticket objects back to dicts if needed)
self.active_tickets = [asdict(t) if not isinstance(t, dict) else t for t in tickets]
# Load track-scoped history
history = project_manager.load_track_history(track_id, self.active_project_root)
with self._disc_entries_lock:
@@ -2450,20 +2158,6 @@ class AppController:
discussions[name] = project_manager.default_discussion()
self._switch_discussion(name)
def _branch_discussion(self, index: int) -> None:
self._flush_disc_entries_to_project()
# Generate a unique branch name
base_name = self.active_discussion.split("_take_")[0]
counter = 1
new_name = f"{base_name}_take_{counter}"
disc_sec = self.project.get("discussion", {})
discussions = disc_sec.get("discussions", {})
while new_name in discussions:
counter += 1
new_name = f"{base_name}_take_{counter}"
project_manager.branch_discussion(self.project, self.active_discussion, new_name, index)
self._switch_discussion(new_name)
def _rename_discussion(self, old_name: str, new_name: str) -> None:
disc_sec = self.project.get("discussion", {})
discussions = disc_sec.get("discussions", {})
@@ -2609,16 +2303,6 @@ class AppController:
self._set_status("sending...")
user_msg = self.ui_ai_input
# RAG Retrieval
if self.rag_engine and self.rag_config and self.rag_config.enabled:
chunks = self.rag_engine.search(user_msg)
if chunks:
context_block = "## Retrieved Context\n\n"
for i, chunk in enumerate(chunks):
path = chunk.get("metadata", {}).get("path", "unknown")
context_block += f"### Chunk {i+1} (Source: {path})\n{chunk.get('document', '')}\n\n"
user_msg = context_block + user_msg
symbols = parse_symbols(user_msg)
file_paths = [f['path'] for f in file_items]
for symbol in symbols:
@@ -2727,6 +2411,7 @@ class AppController:
proj["project"]["main_context"] = self.ui_project_main_context
proj["project"]["active_preset"] = self.ui_project_preset_name
proj["project"]["word_wrap"] = self.ui_word_wrap
proj["project"]["summary_only"] = self.ui_summary_only
proj["project"]["auto_scroll_comms"] = self.ui_auto_scroll_comms
proj["project"]["auto_scroll_tool_calls"] = self.ui_auto_scroll_tool_calls
proj.setdefault("gemini_cli", {})["binary_path"] = self.ui_gemini_cli_path
@@ -2761,12 +2446,6 @@ class AppController:
"active_preset": self.ui_global_preset_name,
}
self.config["ai"]["system_prompt"] = self.ui_global_system_prompt
self.config["ai"]["base_system_prompt"] = self.ui_base_system_prompt
self.config["ai"]["use_default_base_prompt"] = self.ui_use_default_base_prompt
if self.rag_config:
self.config["rag"] = self.rag_config.to_dict()
self.config["projects"] = {"paths": self.project_paths, "active": self.active_project_path}
from src import bg_shader
# Update gui section while preserving other keys like bg_shader_enabled
@@ -2797,28 +2476,15 @@ class AppController:
models.save_config(self.config)
track_id = self.active_track.id if self.active_track else None
flat = project_manager.flat_config(self.project, self.active_discussion, track_id=track_id)
persona = self.personas.get(self.ui_active_persona)
strategy = persona.aggregation_strategy if persona else "auto"
full_md, path, file_items = aggregate.run(flat, aggregation_strategy=strategy)
full_md, path, file_items = aggregate.run(flat)
# Build stable markdown (no history) for Gemini caching
screenshot_base_dir = Path(flat.get("screenshots", {}).get("base_dir", "."))
screenshots = flat.get("screenshots", {}).get("paths", [])
summary_only = flat.get("project", {}).get("summary_only", False)
stable_md = aggregate.build_markdown_no_history(file_items, screenshot_base_dir, screenshots, summary_only=summary_only, aggregation_strategy=strategy)
stable_md = aggregate.build_markdown_no_history(file_items, screenshot_base_dir, screenshots, summary_only=summary_only)
# Build discussion history text separately
history = flat.get("discussion", {}).get("history", [])
discussion_text = aggregate.build_discussion_text(history)
csp = filter(bool, [self.ui_global_system_prompt.strip(), self.ui_project_system_prompt.strip()])
ai_client.set_custom_system_prompt("\n\n".join(csp))
ai_client.set_base_system_prompt(self.ui_base_system_prompt)
ai_client.set_use_default_base_prompt(self.ui_use_default_base_prompt)
ai_client.set_project_context_marker(self.ui_project_context_marker)
self.last_resolved_system_prompt = ai_client.get_combined_system_prompt()
self.last_aggregate_markdown = full_md
return full_md, path, file_items, stable_md, discussion_text
def _cb_plan_epic(self) -> None:
@@ -3058,18 +2724,6 @@ class AppController:
self.files.append(item)
self._refresh_from_project()
def approve_ticket(self, ticket_id: str) -> None:
"""Manually approves a ticket for execution."""
if self.engine and self.engine.engine:
self.engine.engine.approve_task(ticket_id)
else:
# Fallback if engine not running
for t in self.active_tickets:
if t.get('id') == ticket_id:
t['status'] = 'in_progress'
break
self._push_mma_state_update()
def mutate_dag(self, data: dict) -> None:
"""Modifies task dependencies."""
ticket_id = data.get("ticket_id")
@@ -3163,26 +2817,3 @@ class AppController:
)
project_manager.save_track_state(self.active_track.id, state, self.active_project_root)
def _load_active_tickets(self) -> None:
"""Populates self.active_tickets based on the current execution mode."""
if getattr(self, "ui_project_execution_mode", "native") == "beads":
from src import beads_client
bclient = beads_client.BeadsClient(Path(self.active_project_root))
beads = bclient.list_beads()
self.active_tickets = []
for b in beads:
self.active_tickets.append({
"id": b.id,
"title": b.title,
"description": b.description,
"status": b.status,
"assigned_to": "tier3-worker",
"target_file": "",
"depends_on": []
})
else:
if self.active_track:
self.active_tickets = [asdict(t) if not isinstance(t, dict) else t for t in self.active_track.tickets]
else:
self.active_tickets = []
-58
View File
@@ -1,58 +0,0 @@
from dataclasses import dataclass
from typing import List, Optional
from pathlib import Path
import json
@dataclass
class Bead:
id: str
title: str
description: str
status: str = "active"
class BeadsClient:
def __init__(self, working_dir: Path):
self.working_dir = Path(working_dir)
self.repo_dir = self.working_dir / ".beads_mock"
self.beads_file = self.repo_dir / "beads.json"
def init_repo(self) -> None:
"""Initialize the mock repository."""
self.repo_dir.mkdir(parents=True, exist_ok=True)
if not self.beads_file.exists():
self.beads_file.write_text("[]", encoding="utf-8")
def is_initialized(self) -> bool:
"""Check if the repository is initialized."""
return self.beads_file.exists()
def create_bead(self, title: str, description: str) -> str:
"""Create a new bead and return its ID."""
beads = self._read_beads()
bead_id = f"bead-{len(beads) + 1}"
bead = {"id": bead_id, "title": title, "description": description, "status": "active"}
beads.append(bead)
self._write_beads(beads)
return bead_id
def update_bead(self, bead_id: str, status: str) -> bool:
"""Update the status of an existing bead."""
beads = self._read_beads()
for bead in beads:
if bead["id"] == bead_id:
bead["status"] = status
self._write_beads(beads)
return True
return False
def list_beads(self) -> List[Bead]:
"""List all beads."""
return [Bead(**b) for b in self._read_beads()]
def _read_beads(self) -> List[dict]:
if not self.beads_file.exists():
return []
return json.loads(self.beads_file.read_text(encoding="utf-8"))
def _write_beads(self, beads: List[dict]) -> None:
self.beads_file.write_text(json.dumps(beads, indent=1), encoding="utf-8")
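For context on what was deleted: `BeadsClient` was a file-backed mock that persisted beads as JSON under `.beads_mock/`. A short usage sketch of the removed API (the path and bead text below are illustrative):
```python
# Usage sketch of the removed mock client above, run against a throwaway directory.
from pathlib import Path

client = BeadsClient(Path("/tmp/beads_demo"))  # illustrative path
client.init_repo()
bead_id = client.create_bead("Wire up API", "Expose /api/mma endpoints")
client.update_bead(bead_id, "done")
for bead in client.list_beads():
    print(bead.id, bead.status)  # e.g. "bead-1 done"
```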
+76 -87
View File
@@ -28,7 +28,6 @@ See Also:
"""
from typing import List
from src.models import Ticket
from src.performance_monitor import get_monitor
class TrackDAG:
"""
@@ -48,37 +47,19 @@ class TrackDAG:
def cascade_blocks(self) -> None:
"""
Transitively marks `todo` tickets as `blocked` if any dependency is `blocked`.
Propagates 'blocked' status from initially blocked nodes to their dependents.
Runs until stable (handles multi-hop chains: A→B→C, where a blocked A cascades to B and then to C).
"""
with get_monitor().scope("dag_cascade_blocks"):
# Build adjacency list of dependents using object references to avoid lookups
dependents = {t.id: [] for t in self.tickets}
for t in self.tickets:
for dep_id in t.depends_on:
if dep_id in dependents:
dependents[dep_id].append(t)
# Use a queue-based propagation (BFS) from all currently blocked tickets
queue = [t for t in self.tickets if t.status == 'blocked']
idx = 0
while idx < len(queue):
curr = queue[idx]
idx += 1
for dep_ticket in dependents.get(curr.id, []):
if dep_ticket.status == 'todo':
dep_ticket.status = 'blocked'
# Optional: preserve the reason for blocking
if not dep_ticket.blocked_reason:
dep_ticket.blocked_reason = f"Dependency {curr.id} is blocked."
queue.append(dep_ticket)
def is_ticket_ready(self, ticket: Ticket) -> bool:
"""Returns True if all dependencies of the ticket are completed."""
for dep_id in ticket.depends_on:
dep = self.ticket_map.get(dep_id)
if not dep or dep.status != 'completed':
return False
return True
changed = True
while changed:
changed = False
for ticket in self.tickets:
if ticket.status == 'todo':
for dep_id in ticket.depends_on:
dep = self.ticket_map.get(dep_id)
if dep and dep.status == 'blocked':
ticket.status = 'blocked'
changed = True
break
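The rewritten `cascade_blocks` above replaces the fixed-point `while changed` loop with a single BFS from the initially blocked tickets, which is what makes the A→B→C chain converge in one pass. A hedged demo on a three-ticket chain; the `TrackDAG(tickets)` constructor call and the `_T` stand-in dataclass are assumptions, only the field names come from the diff:
```python
# Tiny stand-in demo of the propagation above. _T mimics just the Ticket
# fields the method touches (id, depends_on, status, blocked_reason).
from dataclasses import dataclass, field

@dataclass
class _T:
    id: str
    depends_on: list[str] = field(default_factory=list)
    status: str = "todo"
    blocked_reason: str = ""

tickets = [_T("A", status="blocked"), _T("B", ["A"]), _T("C", ["B"])]
dag = TrackDAG(tickets)  # assumes the constructor builds ticket_map itself
dag.cascade_blocks()
print([t.status for t in tickets])  # expected: ['blocked', 'blocked', 'blocked']
```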
def get_ready_tasks(self) -> List[Ticket]:
"""
@@ -88,74 +69,74 @@ class TrackDAG:
"""
ready = []
for ticket in self.tickets:
if ticket.status == 'todo' and self.is_ticket_ready(ticket):
ready.append(ticket)
if ticket.status == 'todo':
# Check if all dependencies exist and are completed
all_done = True
for dep_id in ticket.depends_on:
dep = self.ticket_map.get(dep_id)
if not dep or dep.status != 'completed':
all_done = False
break
if all_done:
ready.append(ticket)
return ready
def has_cycle(self) -> bool:
"""
Performs an iterative Depth-First Search to detect cycles in the dependency graph.
Performs a Depth-First Search to detect cycles in the dependency graph.
Returns:
True if a cycle is detected, False otherwise.
"""
with get_monitor().scope("dag_has_cycle"):
visited = set()
for start_ticket in self.tickets:
if start_ticket.id in visited:
continue
stack = [(start_ticket.id, False)] # (id, is_backtracking)
path = set()
while stack:
node_id, is_backtracking = stack.pop()
if is_backtracking:
path.remove(node_id)
continue
if node_id in path:
visited = set()
rec_stack = set()
def is_cyclic(ticket_id: str) -> bool:
"""Internal recursive helper for cycle detection."""
if ticket_id in rec_stack:
return True
if ticket_id in visited:
return False
visited.add(ticket_id)
rec_stack.add(ticket_id)
ticket = self.ticket_map.get(ticket_id)
if ticket:
for neighbor in ticket.depends_on:
if is_cyclic(neighbor):
return True
if node_id in visited:
continue
visited.add(node_id)
path.add(node_id)
stack.append((node_id, True))
ticket = self.ticket_map.get(node_id)
if ticket:
for neighbor_id in ticket.depends_on:
stack.append((neighbor_id, False))
rec_stack.remove(ticket_id)
return False
for ticket in self.tickets:
if ticket.id not in visited:
if is_cyclic(ticket.id):
return True
return False
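The new `has_cycle` above avoids recursion by pushing each node twice: once to enter it (adding it to the current path) and once with a backtracking flag to remove it again, so meeting a node that is still on the path means a cycle. A self-contained sketch of the same technique over a plain dependency dict (this helper is illustrative, not the class method):
```python
# Iterative cycle detection with explicit backtracking markers, as in the diff above.
def has_cycle(deps: dict[str, list[str]]) -> bool:
    visited: set[str] = set()
    for start in deps:
        if start in visited:
            continue
        stack = [(start, False)]  # (node, is_backtracking)
        path: set[str] = set()
        while stack:
            node, backtracking = stack.pop()
            if backtracking:
                path.discard(node)  # leaving the node: drop it from the current path
                continue
            if node in path:
                return True         # back-edge onto the current path: cycle
            if node in visited:
                continue
            visited.add(node)
            path.add(node)
            stack.append((node, True))
            for dep in deps.get(node, []):
                stack.append((dep, False))
    return False

assert has_cycle({"A": ["B"], "B": ["A"]}) is True
assert has_cycle({"A": ["B"], "B": []}) is False
```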
def topological_sort(self) -> List[str]:
"""
Returns a list of ticket IDs in topological order (dependencies before dependents).
Uses Kahn's algorithm for efficient O(V+E) sorting and cycle detection.
Returns:
A list of ticket ID strings.
Raises:
ValueError: If a dependency cycle is detected.
"""
with get_monitor().scope("dag_topological_sort"):
in_degree = {t.id: len(t.depends_on) for t in self.tickets}
dependents = {t.id: [] for t in self.tickets}
for t in self.tickets:
for dep_id in t.depends_on:
if dep_id in dependents:
dependents[dep_id].append(t.id)
# Queue starts with nodes having no dependencies
queue = [t.id for t in self.tickets if in_degree[t.id] == 0]
result = []
idx = 0
while idx < len(queue):
u = queue[idx]
idx += 1
result.append(u)
for v_id in dependents.get(u, []):
in_degree[v_id] -= 1
if in_degree[v_id] == 0:
queue.append(v_id)
if len(result) < len(self.tickets):
raise ValueError("Dependency cycle detected")
return result
if self.has_cycle():
raise ValueError("Dependency cycle detected")
visited = set()
stack = []
def visit(ticket_id: str) -> None:
"""Internal recursive helper for topological sorting."""
if ticket_id in visited:
return
visited.add(ticket_id)
ticket = self.ticket_map.get(ticket_id)
if ticket:
for dep_id in ticket.depends_on:
visit(dep_id)
stack.append(ticket_id)
for ticket in self.tickets:
visit(ticket.id)
return stack
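The new `topological_sort` above is Kahn's algorithm: ordering and cycle detection in one O(V+E) pass, instead of running `has_cycle` first and then a recursive visit. A standalone sketch over a plain dependency dict (illustrative helper, not the class method):
```python
# Kahn's algorithm: repeatedly emit nodes whose remaining in-degree is zero;
# any leftover nodes imply a dependency cycle.
def topo_sort(deps: dict[str, list[str]]) -> list[str]:
    in_degree = {node: len(d) for node, d in deps.items()}
    dependents: dict[str, list[str]] = {node: [] for node in deps}
    for node, d in deps.items():
        for dep in d:
            if dep in dependents:
                dependents[dep].append(node)
    queue = [node for node, deg in in_degree.items() if deg == 0]
    order: list[str] = []
    i = 0
    while i < len(queue):
        u = queue[i]
        i += 1
        order.append(u)
        for v in dependents[u]:
            in_degree[v] -= 1
            if in_degree[v] == 0:
                queue.append(v)
    if len(order) < len(deps):
        raise ValueError("Dependency cycle detected")
    return order

print(topo_sort({"api": ["models"], "models": [], "ui": ["api", "models"]}))
# expected: ['models', 'api', 'ui']
```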
class ExecutionEngine:
"""
@@ -180,10 +161,9 @@ class ExecutionEngine:
Returns:
A list of ready Ticket objects.
"""
with get_monitor().scope("dag_tick"):
self.dag.cascade_blocks()
ready = self.dag.get_ready_tasks()
return ready
self.dag.cascade_blocks()
ready = self.dag.get_ready_tasks()
return ready
def approve_task(self, task_id: str) -> None:
"""
@@ -192,8 +172,16 @@ class ExecutionEngine:
task_id: The ID of the task to approve.
"""
ticket = self.dag.ticket_map.get(task_id)
if ticket and ticket.status == "todo" and self.dag.is_ticket_ready(ticket):
ticket.status = "in_progress"
if ticket and ticket.status == "todo":
# Check if dependencies are met first
all_done = True
for dep_id in ticket.depends_on:
dep = self.dag.ticket_map.get(dep_id)
if not dep or dep.status != "completed":
all_done = False
break
if all_done:
ticket.status = "in_progress"
def update_task_status(self, task_id: str, status: str) -> None:
"""
@@ -205,3 +193,4 @@ class ExecutionEngine:
ticket = self.dag.ticket_map.get(task_id)
if ticket:
ticket.status = status
+3 -20
View File
@@ -91,14 +91,7 @@ class AsyncEventQueue:
"""
self._queue.put((event_name, payload))
if self.websocket_server:
# Ensure payload is JSON serializable for websocket broadcast
serializable_payload = payload
if hasattr(payload, 'to_dict'):
serializable_payload = payload.to_dict()
elif hasattr(payload, '__dict__'):
serializable_payload = vars(payload)
self.websocket_server.broadcast("events", {"event": event_name, "payload": serializable_payload})
self.websocket_server.broadcast("events", {"event": event_name, "payload": payload})
def get(self) -> Tuple[str, Any]:
"""
@@ -142,20 +135,10 @@ class UserRequestEvent:
self.base_dir = base_dir
def to_dict(self) -> Dict[str, Any]:
# Ensure all file items and base_dir are JSON serializable
serializable_files = []
for f in self.file_items:
if hasattr(f, 'to_dict'):
serializable_files.append(f.to_dict())
elif isinstance(f, (str, dict, list, int, float, bool, type(None))):
serializable_files.append(f)
else:
serializable_files.append(str(f))
return {
"prompt": self.prompt,
"stable_md": self.stable_md,
"file_items": serializable_files,
"file_items": self.file_items,
"disc_text": self.disc_text,
"base_dir": str(self.base_dir)
"base_dir": self.base_dir
}
+27 -328
View File
@@ -38,8 +38,6 @@ from pathlib import Path
from typing import Optional, Any, List, Tuple, Dict
import tree_sitter
import tree_sitter_python
import tree_sitter_cpp
import tree_sitter_c
import re
_ast_cache: Dict[str, Tuple[float, tree_sitter.Tree]] = {}
@@ -51,16 +49,11 @@ class ASTParser:
"""
def __init__(self, language: str) -> None:
if language not in ("python", "cpp", "c"):
if language != "python":
raise ValueError(f"Language '{language}' not supported yet.")
self.language_name = language
# Load the tree-sitter language grammar
if language == "python":
self.language = tree_sitter.Language(tree_sitter_python.language())
elif language == "cpp":
self.language = tree_sitter.Language(tree_sitter_cpp.language())
elif language == "c":
self.language = tree_sitter.Language(tree_sitter_c.language())
self.language = tree_sitter.Language(tree_sitter_python.language())
self.parser = tree_sitter.Parser(self.language)
def parse(self, code: str) -> tree_sitter.Tree:
@@ -94,44 +87,10 @@ class ASTParser:
_ast_cache[path] = (mtime, tree)
return tree
def _get_name(self, node: tree_sitter.Node, code_bytes: bytes) -> str:
name_node = node.child_by_field_name("name")
if name_node:
return code_bytes[name_node.start_byte:name_node.end_byte].decode("utf8", errors="replace")
if node.type in ("function_definition", "field_declaration"):
def find_id(n: tree_sitter.Node) -> str:
if n.type in ("identifier", "field_identifier", "qualified_identifier", "destructor_name"):
return code_bytes[n.start_byte:n.end_byte].decode("utf8", errors="replace")
# Try field name 'declarator' first
d = n.child_by_field_name("declarator")
if d:
res = find_id(d)
if res: return res
# Fallback to all children
for child in n.children:
if child.type == "compound_statement": continue # Don't look in body
res = find_id(child)
if res: return res
return ""
return find_id(node)
if node.type == "template_declaration":
for child in node.children:
if child.type in ("function_definition", "class_definition", "class_specifier", "struct_specifier", "enum_specifier", "enum_definition", "field_declaration"):
return self._get_name(child, code_bytes)
if node.type in ("struct_specifier", "class_specifier", "class_definition", "enum_specifier", "enum_definition", "namespace_definition"):
for child in node.children:
if child.type in ("type_identifier", "identifier", "namespace_identifier"):
return code_bytes[child.start_byte:child.end_byte].decode("utf8", errors="replace")
return ""
def get_skeleton(self, code: str, path: Optional[str] = None) -> str:
"""
Returns a skeleton of a Python file (preserving docstrings, stripping function bodies).
"""
code_bytes = code.encode("utf8")
tree = self.get_cached_tree(path, code)
edits: List[Tuple[int, int, str]] = []
@@ -144,37 +103,31 @@ class ASTParser:
def walk(node: tree_sitter.Node) -> None:
if node.type == "function_definition":
body = node.child_by_field_name("body")
if body and body.type in ("block", "compound_statement"):
if body and body.type == "block":
indent = " " * body.start_point.column
first_stmt = None
for child in body.children:
if child.type != "comment":
first_stmt = child
break
initializer = None
for child in node.children:
if child.type == "field_initializer_list":
initializer = child
break
if first_stmt and is_docstring(first_stmt):
start_byte = first_stmt.end_byte
end_byte = body.end_byte
if end_byte > start_byte:
edits.append((start_byte, end_byte, f"\n{indent}..."))
else:
start_byte = initializer.start_byte if initializer else body.start_byte
start_byte = body.start_byte
end_byte = body.end_byte
repl = "..."
edits.append((start_byte, end_byte, repl))
edits.append((start_byte, end_byte, "..."))
for child in node.children:
walk(child)
walk(tree.root_node)
# Apply edits in reverse to maintain byte offsets
edits.sort(key=lambda x: x[0], reverse=True)
code_bytearray = bytearray(code_bytes)
code_bytes = bytearray(code, "utf8")
for start, end, replacement in edits:
code_bytearray[start:end] = bytes(replacement, "utf8")
return code_bytearray.decode("utf8")
code_bytes[start:end] = bytes(replacement, "utf8")
return code_bytes.decode("utf8")
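A quick usage sketch for `get_skeleton` above, showing the intended effect on a small function; the output is an approximate shape, since exact whitespace depends on the edit replacement:
```python
# Usage sketch: bodies collapse to "..." while a leading docstring is kept.
src = '''
def add(a, b):
    """Return the sum."""
    total = a + b
    return total
'''

parser = ASTParser("python")
print(parser.get_skeleton(src))
# expected shape:
# def add(a, b):
#     """Return the sum."""
#     ...
```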
def get_curated_view(self, code: str, path: Optional[str] = None) -> str:
"""
@@ -182,7 +135,6 @@ class ASTParser:
Preserves function bodies if they have @core_logic decorator or # [HOT] comment.
Otherwise strips bodies but preserves docstrings.
"""
code_bytes = code.encode("utf8")
tree = self.get_cached_tree(path, code)
edits: List[Tuple[int, int, str]] = []
@@ -199,7 +151,7 @@ class ASTParser:
for child in parent.children:
if child.type == "decorator":
# decorator -> ( '@', identifier ) or ( '@', call )
if b"@core_logic" in code_bytes[child.start_byte:child.end_byte]:
if "@core_logic" in code[child.start_byte:child.end_byte]:
return True
return False
@@ -209,8 +161,8 @@ class ASTParser:
while stack:
curr = stack.pop()
if curr.type == "comment":
comment_bytes = code_bytes[curr.start_byte:curr.end_byte]
if b"[HOT]" in comment_bytes:
comment_text = code[curr.start_byte:curr.end_byte]
if "[HOT]" in comment_text:
return True
for child in curr.children:
stack.append(child)
@@ -219,7 +171,7 @@ class ASTParser:
def walk(node: tree_sitter.Node) -> None:
if node.type == "function_definition":
body = node.child_by_field_name("body")
if body and body.type in ("block", "compound_statement"):
if body and body.type == "block":
# Check if we should preserve it
preserve = has_core_logic_decorator(node) or has_hot_comment(node)
if not preserve:
@@ -243,17 +195,16 @@ class ASTParser:
walk(tree.root_node)
# Apply edits in reverse to maintain byte offsets
edits.sort(key=lambda x: x[0], reverse=True)
code_bytearray = bytearray(code_bytes)
code_bytes = bytearray(code, "utf8")
for start, end, replacement in edits:
code_bytearray[start:end] = bytes(replacement, "utf8")
return code_bytearray.decode("utf8")
code_bytes[start:end] = bytes(replacement, "utf8")
return code_bytes.decode("utf8")
def get_targeted_view(self, code: str, function_names: List[str], path: Optional[str] = None) -> str:
"""
Returns a targeted view of the code including only the specified functions
and their dependencies up to depth 2.
"""
code_bytes = code.encode("utf8")
tree = self.get_cached_tree(path, code)
all_functions = {}
@@ -261,13 +212,13 @@ class ASTParser:
if node.type == "function_definition":
name_node = node.child_by_field_name("name")
if name_node:
func_name = code_bytes[name_node.start_byte:name_node.end_byte].decode("utf8", errors="replace")
func_name = code[name_node.start_byte:name_node.end_byte]
full_name = f"{class_name}.{func_name}" if class_name else func_name
all_functions[full_name] = node
elif node.type == "class_definition":
name_node = node.child_by_field_name("name")
if name_node:
cname = code_bytes[name_node.start_byte:name_node.end_byte].decode("utf8", errors="replace")
cname = code[name_node.start_byte:name_node.end_byte]
full_cname = f"{class_name}.{cname}" if class_name else cname
body = node.child_by_field_name("body")
if body:
@@ -285,11 +236,11 @@ class ASTParser:
func_node = n.child_by_field_name("function")
if func_node:
if func_node.type == "identifier":
calls.add(code_bytes[func_node.start_byte:func_node.end_byte].decode("utf8", errors="replace"))
calls.add(code[func_node.start_byte:func_node.end_byte])
elif func_node.type == "attribute":
attr_node = func_node.child_by_field_name("attribute")
if attr_node:
calls.add(code_bytes[attr_node.start_byte:attr_node.end_byte].decode("utf8", errors="replace"))
calls.add(code[attr_node.start_byte:attr_node.end_byte])
for child in n.children:
walk_calls(child)
walk_calls(node)
@@ -332,12 +283,12 @@ class ASTParser:
def check_for_targeted(node, parent_class=None):
if node.type == "function_definition":
name_node = node.child_by_field_name("name")
fname = code_bytes[name_node.start_byte:name_node.end_byte].decode("utf8", errors="replace") if name_node else ""
fname = code[name_node.start_byte:name_node.end_byte] if name_node else ""
fullname = f"{parent_class}.{fname}" if parent_class else fname
return fullname in all_found
if node.type == "class_definition":
name_node = node.child_by_field_name("name")
cname = code_bytes[name_node.start_byte:name_node.end_byte].decode("utf8", errors="replace") if name_node else ""
cname = code[name_node.start_byte:name_node.end_byte] if name_node else ""
full_cname = f"{parent_class}.{cname}" if parent_class else cname
body = node.child_by_field_name("body")
if body:
@@ -353,11 +304,11 @@ class ASTParser:
def walk_edits(node, parent_class=None):
if node.type == "function_definition":
name_node = node.child_by_field_name("name")
fname = code_bytes[name_node.start_byte:name_node.end_byte].decode("utf8", errors="replace") if name_node else ""
fname = code[name_node.start_byte:name_node.end_byte] if name_node else ""
fullname = f"{parent_class}.{fname}" if parent_class else fname
if fullname in all_found:
body = node.child_by_field_name("body")
if body and body.type in ("block", "compound_statement"):
if body and body.type == "block":
indent = " " * body.start_point.column
first_stmt = None
for child in body.children:
@@ -379,7 +330,7 @@ class ASTParser:
if node.type == "class_definition":
if check_for_targeted(node, parent_class):
name_node = node.child_by_field_name("name")
cname = code_bytes[name_node.start_byte:name_node.end_byte].decode("utf8", errors="replace") if name_node else ""
cname = code[name_node.start_byte:name_node.end_byte] if name_node else ""
full_cname = f"{parent_class}.{cname}" if parent_class else cname
body = node.child_by_field_name("body")
if body:
@@ -403,265 +354,13 @@ class ASTParser:
walk_edits(tree.root_node)
edits.sort(key=lambda x: x[0], reverse=True)
code_bytearray = bytearray(code_bytes)
code_bytes = bytearray(code, "utf8")
for start, end, replacement in edits:
code_bytearray[start:end] = bytes(replacement, "utf8")
result = code_bytearray.decode("utf8")
code_bytes[start:end] = bytes(replacement, "utf8")
result = code_bytes.decode("utf8")
result = re.sub(r'\n\s*\n\s*\n+', '\n\n', result)
return result.strip() + "\n"
def get_definition(self, code: str, name: str, path: Optional[str] = None) -> str:
"""
Returns the full source code for a specific definition by name.
Supports 'ClassName::method' or 'method' for C++.
"""
code_bytes = code.encode("utf8")
tree = self.get_cached_tree(path, code)
parts = re.split(r'::|\.', name)
def walk(node: tree_sitter.Node, target_parts: List[str]) -> Optional[tree_sitter.Node]:
if not target_parts:
return None
target = target_parts[0]
for child in node.children:
# If it's a field_declaration, it might wrap a class/struct/enum definition
check_node = child
if child.type == "field_declaration":
for sub in child.children:
if sub.type in ("class_specifier", "struct_specifier", "enum_specifier"):
check_node = sub
break
is_interesting = check_node.type in ("function_definition", "class_definition", "class_specifier", "struct_specifier", "enum_specifier", "enum_definition", "namespace_definition", "template_declaration", "field_declaration")
if is_interesting:
node_name = self._get_name(check_node, code_bytes)
if node_name == target:
if len(target_parts) == 1:
return check_node if child.type != "field_declaration" else child
next_parts = target_parts[1:]
else:
next_parts = target_parts
body = check_node.child_by_field_name("body")
if not body and check_node.type == "template_declaration":
for sub in check_node.children:
if sub.type in ("function_definition", "class_definition", "class_specifier", "struct_specifier", "enum_specifier", "enum_definition"):
body = sub.child_by_field_name("body")
break
if body:
found = walk(body, next_parts)
if found: return found
for sub in check_node.children:
if sub.type in ("field_declaration_list", "class_body", "declaration_list", "enum_body"):
found = walk(sub, next_parts)
if found: return found
elif child.type in ("module", "translation_unit", "namespace_definition", "declaration_list", "field_declaration_list", "class_body"):
found = walk(child, target_parts)
if found: return found
return None
def deep_search(node: tree_sitter.Node, target: str) -> Optional[tree_sitter.Node]:
if node.type in ("function_definition", "class_definition", "class_specifier", "struct_specifier", "enum_specifier", "enum_definition", "namespace_definition", "template_declaration"):
if self._get_name(node, code_bytes) == target:
return node
for child in node.children:
res = deep_search(child, target)
if res: return res
return None
found_node = walk(tree.root_node, parts)
if not found_node:
found_node = deep_search(tree.root_node, name)
if found_node:
return code_bytes[found_node.start_byte:found_node.end_byte].decode("utf8", errors="replace")
return f"ERROR: definition '{name}' not found"
def get_signature(self, code: str, name: str, path: Optional[str] = None) -> str:
"""
Returns only the signature part of a function or method.
For C/C++, this is the code from the start of the definition until the block start '{'.
"""
code_bytes = code.encode("utf8")
tree = self.get_cached_tree(path, code)
parts = re.split(r'::|\.', name)
def walk(node: tree_sitter.Node, target_parts: List[str]) -> Optional[tree_sitter.Node]:
if not target_parts:
return None
target = target_parts[0]
for child in node.children:
# If it's a field_declaration, it might wrap a class/struct/enum definition
check_node = child
if child.type == "field_declaration":
for sub in child.children:
if sub.type in ("class_specifier", "struct_specifier", "enum_specifier"):
check_node = sub
break
is_interesting = check_node.type in ("function_definition", "class_definition", "class_specifier", "struct_specifier", "enum_specifier", "enum_definition", "namespace_definition", "template_declaration", "field_declaration")
if is_interesting:
node_name = self._get_name(check_node, code_bytes)
if node_name == target:
if len(target_parts) == 1:
return check_node if child.type != "field_declaration" else child
next_parts = target_parts[1:]
else:
next_parts = target_parts
body = check_node.child_by_field_name("body")
if not body and check_node.type == "template_declaration":
for sub in check_node.children:
if sub.type in ("function_definition", "class_definition", "class_specifier", "struct_specifier", "enum_specifier", "enum_definition"):
body = sub.child_by_field_name("body")
break
if body:
found = walk(body, next_parts)
if found: return found
for sub in check_node.children:
if sub.type in ("field_declaration_list", "class_body", "declaration_list", "enum_body"):
found = walk(sub, next_parts)
if found: return found
elif child.type in ("module", "translation_unit", "namespace_definition", "declaration_list", "field_declaration_list", "class_body"):
found = walk(child, target_parts)
if found: return found
return None
def deep_search(node: tree_sitter.Node, target: str) -> Optional[tree_sitter.Node]:
if node.type in ("function_definition", "template_declaration"):
if self._get_name(node, code_bytes) == target:
return node
for child in node.children:
res = deep_search(child, target)
if res: return res
return None
found_node = walk(tree.root_node, parts)
if not found_node:
found_node = deep_search(tree.root_node, name)
if found_node:
target_node = found_node
if found_node.type == "template_declaration":
for child in found_node.children:
if child.type in ("function_definition", "class_definition", "class_specifier", "struct_specifier"):
target_node = child
break
body = target_node.child_by_field_name("body")
if body:
return code_bytes[found_node.start_byte:body.start_byte].decode("utf8", errors="replace").strip()
return code_bytes[found_node.start_byte:found_node.end_byte].decode("utf8", errors="replace").strip()
return f"ERROR: signature for '{name}' not found"
def get_code_outline(self, code: str, path: Optional[str] = None) -> str:
"""
Returns a hierarchical outline of the code (classes, structs, functions, methods).
"""
code_bytes = code.encode("utf8")
tree = self.get_cached_tree(path, code)
output = []
def walk(node: tree_sitter.Node, indent: int = 0) -> None:
ntype = node.type
label = ""
if ntype in ("class_definition", "class_specifier"):
label = "[Class]"
elif ntype == "struct_specifier":
label = "[Struct]"
elif ntype == "function_definition":
label = "[Method]" if indent > 0 else "[Func]"
if label:
name = self._get_name(node, code_bytes)
if name:
start = node.start_point.row + 1
end = node.end_point.row + 1
output.append(f"{' ' * indent}{label} {name} (Lines {start}-{end})")
body = node.child_by_field_name("body")
if body:
for child in body.children:
walk(child, indent + 1)
return
for child in node.children:
walk(child, indent)
walk(tree.root_node)
return "\n".join(output)
def update_definition(self, code: str, name: str, new_content: str, path: Optional[str] = None) -> str:
"""
Surgically replace the definition of a class or function by name.
"""
code_bytes = code.encode("utf8")
tree = self.get_cached_tree(path, code)
parts = re.split(r'::|\.', name)
def walk(node: tree_sitter.Node, target_parts: List[str]) -> Optional[tree_sitter.Node]:
if not target_parts:
return None
target = target_parts[0]
for child in node.children:
# If it's a field_declaration, it might wrap a class/struct/enum definition
check_node = child
if child.type == "field_declaration":
for sub in child.children:
if sub.type in ("class_specifier", "struct_specifier", "enum_specifier"):
check_node = sub
break
is_interesting = check_node.type in ("function_definition", "class_definition", "class_specifier", "struct_specifier", "enum_specifier", "enum_definition", "namespace_definition", "template_declaration", "field_declaration")
if is_interesting:
node_name = self._get_name(check_node, code_bytes)
if node_name == target:
if len(target_parts) == 1:
return check_node if child.type != "field_declaration" else child
next_parts = target_parts[1:]
else:
next_parts = target_parts
body = check_node.child_by_field_name("body")
if not body and check_node.type == "template_declaration":
for sub in check_node.children:
if sub.type in ("function_definition", "class_definition", "class_specifier", "struct_specifier", "enum_specifier", "enum_definition"):
body = sub.child_by_field_name("body")
break
if body:
found = walk(body, next_parts)
if found: return found
for sub in check_node.children:
if sub.type in ("field_declaration_list", "class_body", "declaration_list", "enum_body"):
found = walk(sub, next_parts)
if found: return found
elif child.type in ("module", "translation_unit", "namespace_definition", "declaration_list", "field_declaration_list", "class_body"):
found = walk(child, target_parts)
if found: return found
return None
def deep_search(node: tree_sitter.Node, target: str) -> Optional[tree_sitter.Node]:
if node.type in ("function_definition", "class_definition", "class_specifier", "struct_specifier", "enum_specifier", "enum_definition", "namespace_definition", "template_declaration"):
if self._get_name(node, code_bytes) == target:
return node
for child in node.children:
res = deep_search(child, target)
if res: return res
return None
found_node = walk(tree.root_node, parts)
if not found_node:
found_node = deep_search(tree.root_node, name)
if found_node:
code_bytearray = bytearray(code_bytes)
code_bytearray[found_node.start_byte:found_node.end_byte] = bytes(new_content, "utf8")
return code_bytearray.decode("utf8")
return f"ERROR: definition '{name}' not found"
def reset_client() -> None:
pass
-1
View File
@@ -47,7 +47,6 @@ class GeminiCliAdapter:
Adapter for the Gemini CLI that parses streaming JSON output.
"""
def __init__(self, binary_path: str = "gemini"):
"""Initializes the adapter with the path to the gemini CLI executable."""
self.binary_path = binary_path
self.session_id: Optional[str] = None
self.last_usage: Optional[dict[str, Any]] = None
+327 -1236
View File
File diff suppressed because it is too large
-131
View File
@@ -1,131 +0,0 @@
import typing
import time
from dataclasses import dataclass, field
@dataclass
class UISnapshot:
"""Capture of restorable UI state."""
ai_input: str
project_system_prompt: str
global_system_prompt: str
base_system_prompt: str
use_default_base_prompt: bool
temperature: float
top_p: float
max_tokens: int
auto_add_history: bool
disc_entries: list[dict]
files: list[dict]
screenshots: list[str]
def to_dict(self) -> dict:
return {
"ai_input": self.ai_input,
"project_system_prompt": self.project_system_prompt,
"global_system_prompt": self.global_system_prompt,
"base_system_prompt": self.base_system_prompt,
"use_default_base_prompt": self.use_default_base_prompt,
"temperature": self.temperature,
"top_p": self.top_p,
"max_tokens": self.max_tokens,
"auto_add_history": self.auto_add_history,
"disc_entries": self.disc_entries,
"files": self.files,
"screenshots": self.screenshots
}
@classmethod
def from_dict(cls, data: dict) -> "UISnapshot":
return cls(
ai_input=data.get("ai_input", ""),
project_system_prompt=data.get("project_system_prompt", ""),
global_system_prompt=data.get("global_system_prompt", ""),
base_system_prompt=data.get("base_system_prompt", ""),
use_default_base_prompt=data.get("use_default_base_prompt", True),
temperature=data.get("temperature", 0.0),
top_p=data.get("top_p", 1.0),
max_tokens=data.get("max_tokens", 4096),
auto_add_history=data.get("auto_add_history", False),
disc_entries=data.get("disc_entries", []),
files=data.get("files", []),
screenshots=data.get("screenshots", [])
)
@dataclass
class HistoryEntry:
state: typing.Any
description: str
timestamp: float = field(default_factory=lambda: time.time())
class HistoryManager:
def __init__(self, max_capacity: int = 100):
self.max_capacity = max_capacity
self._undo_stack: typing.List[HistoryEntry] = []
self._redo_stack: typing.List[HistoryEntry] = []
def push(self, state: typing.Any, description: str) -> None:
"""
Pushes a new state to the undo stack and clears the redo stack.
If the undo stack exceeds max_capacity, the oldest state is removed.
"""
entry = HistoryEntry(state=state, description=description)
self._undo_stack.append(entry)
self._redo_stack.clear()
if len(self._undo_stack) > self.max_capacity:
self._undo_stack.pop(0)
def undo(self, current_state: typing.Any, current_description: str = "Current State") -> typing.Optional[HistoryEntry]:
"""
Undoes the last action by moving the current_state to the redo stack
and returning the top of the undo stack.
"""
if not self._undo_stack:
return None
redo_entry = HistoryEntry(state=current_state, description=current_description)
self._redo_stack.append(redo_entry)
return self._undo_stack.pop()
def redo(self, current_state: typing.Any, current_description: str = "Current State") -> typing.Optional[HistoryEntry]:
"""
Redoes the last undone action by moving the current_state to the undo stack
and returning the top of the redo stack.
"""
if not self._redo_stack:
return None
undo_entry = HistoryEntry(state=current_state, description=current_description)
self._undo_stack.append(undo_entry)
return self._redo_stack.pop()
@property
def can_undo(self) -> bool:
return len(self._undo_stack) > 0
@property
def can_redo(self) -> bool:
return len(self._redo_stack) > 0
def get_history(self) -> typing.List[typing.Dict[str, typing.Any]]:
"""Returns a list of descriptions and timestamps for the undo stack."""
return [
{"description": e.description, "timestamp": e.timestamp}
for e in self._undo_stack
]
def jump_to_undo(self, index: int, current_state: typing.Any, current_description: str = "Before Jump") -> typing.Optional[HistoryEntry]:
"""
Jumps to a specific state in the undo stack by moving subsequent states
and the current_state to the redo stack.
"""
if index < 0 or index >= len(self._undo_stack):
return None
# Move current state to redo
self._redo_stack.append(HistoryEntry(state=current_state, description=current_description))
# Move states between index and top of undo to redo
while len(self._undo_stack) > index + 1:
self._redo_stack.append(self._undo_stack.pop())
return self._undo_stack.pop()
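Since the whole undo/redo API is visible above, a short usage sketch may help; the dict states stand in for whatever snapshot object the GUI actually stores:

```
manager = HistoryManager(max_capacity=100)

# Push the state *before* each change so it can be restored later.
manager.push({"text": ""}, "Initial state")
manager.push({"text": "draft"}, "Typed draft")

current = {"text": "draft v2"}
entry = manager.undo(current, "Before undo")     # pops the "Typed draft" entry
if entry is not None:
    current = entry.state                        # -> {"text": "draft"}

entry = manager.redo(current, "Before redo")     # restores {"text": "draft v2"}
if entry is not None:
    current = entry.state

print(manager.can_undo, manager.can_redo)        # True False
print(manager.get_history())                     # descriptions + timestamps of the undo stack
```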
+2 -442
View File
@@ -62,7 +62,6 @@ import ast
import subprocess
from src import summarize
from src import outline_tool
from src import beads_client
import urllib.request
import urllib.parse
from html.parser import HTMLParser
@@ -78,8 +77,6 @@ MUTATING_TOOLS: frozenset[str] = frozenset({
"py_set_signature",
"py_set_var_declaration",
"edit_file",
"ts_c_update_definition",
"ts_cpp_update_definition",
})
# ------------------------------------------------------------------ state
@@ -299,34 +296,6 @@ def py_get_skeleton(path: str) -> str:
except Exception as e:
return f"ERROR generating skeleton for '{path}': {e}"
def ts_c_get_skeleton(path: str) -> str:
"""Returns a skeleton of a C file."""
p, err = _resolve_and_check(path)
if err: return err
assert p is not None
if not p.exists(): return f"ERROR: file not found: {path}"
try:
from src.file_cache import ASTParser
code = p.read_text(encoding="utf-8")
parser = ASTParser("c")
return parser.get_skeleton(code, path=str(p))
except Exception as e:
return f"ERROR generating skeleton for '{path}': {e}"
def ts_cpp_get_skeleton(path: str) -> str:
"""Returns a skeleton of a C++ file."""
p, err = _resolve_and_check(path)
if err: return err
assert p is not None
if not p.exists(): return f"ERROR: file not found: {path}"
try:
from src.file_cache import ASTParser
code = p.read_text(encoding="utf-8")
parser = ASTParser("cpp")
return parser.get_skeleton(code, path=str(p))
except Exception as e:
return f"ERROR generating skeleton for '{path}': {e}"
def py_get_code_outline(path: str) -> str:
"""
Returns a hierarchical outline of a code file (classes, functions, methods with line ranges).
@@ -345,126 +314,6 @@ def py_get_code_outline(path: str) -> str:
except Exception as e:
return f"ERROR generating outline for '{path}': {e}"
def ts_c_get_code_outline(path: str) -> str:
"""Returns a hierarchical outline of a C file."""
p, err = _resolve_and_check(path)
if err: return err
assert p is not None
if not p.exists(): return f"ERROR: file not found: {path}"
try:
from src.file_cache import ASTParser
code = p.read_text(encoding="utf-8")
parser = ASTParser("c")
return parser.get_code_outline(code, path=str(p))
except Exception as e:
return f"ERROR generating outline for '{path}': {e}"
def ts_cpp_get_code_outline(path: str) -> str:
"""Returns a hierarchical outline of a C++ file."""
p, err = _resolve_and_check(path)
if err: return err
assert p is not None
if not p.exists(): return f"ERROR: file not found: {path}"
try:
from src.file_cache import ASTParser
code = p.read_text(encoding="utf-8")
parser = ASTParser("cpp")
return parser.get_code_outline(code, path=str(p))
except Exception as e:
return f"ERROR generating outline for '{path}': {e}"
def ts_c_get_definition(path: str, name: str) -> str:
"""Returns the source code for a specific definition in a C file."""
p, err = _resolve_and_check(path)
if err: return err
assert p is not None
if not p.exists(): return f"ERROR: file not found: {path}"
try:
from src.file_cache import ASTParser
code = p.read_text(encoding="utf-8")
parser = ASTParser("c")
return parser.get_definition(code, name, path=str(p))
except Exception as e:
return f"ERROR retrieving definition '{name}' from '{path}': {e}"
def ts_cpp_get_definition(path: str, name: str) -> str:
"""Returns the source code for a specific definition in a C++ file."""
p, err = _resolve_and_check(path)
if err: return err
assert p is not None
if not p.exists(): return f"ERROR: file not found: {path}"
try:
from src.file_cache import ASTParser
code = p.read_text(encoding="utf-8")
parser = ASTParser("cpp")
return parser.get_definition(code, name, path=str(p))
except Exception as e:
return f"ERROR retrieving definition '{name}' from '{path}': {e}"
def ts_c_get_signature(path: str, name: str) -> str:
"""Returns the signature part of a function in a C file."""
p, err = _resolve_and_check(path)
if err: return err
assert p is not None
if not p.exists(): return f"ERROR: file not found: {path}"
try:
from src.file_cache import ASTParser
code = p.read_text(encoding="utf-8")
parser = ASTParser("c")
return parser.get_signature(code, name, path=str(p))
except Exception as e:
return f"ERROR retrieving signature '{name}' from '{path}': {e}"
def ts_cpp_get_signature(path: str, name: str) -> str:
"""Returns the signature part of a function or method in a C++ file."""
p, err = _resolve_and_check(path)
if err: return err
assert p is not None
if not p.exists(): return f"ERROR: file not found: {path}"
try:
from src.file_cache import ASTParser
code = p.read_text(encoding="utf-8")
parser = ASTParser("cpp")
return parser.get_signature(code, name, path=str(p))
except Exception as e:
return f"ERROR retrieving signature '{name}' from '{path}': {e}"
def ts_c_update_definition(path: str, name: str, new_content: str) -> str:
"""Surgically replace the definition of a function in a C file."""
p, err = _resolve_and_check(path)
if err: return err
assert p is not None
if not p.exists(): return f"ERROR: file not found: {path}"
try:
from src.file_cache import ASTParser
code = p.read_text(encoding="utf-8")
parser = ASTParser("c")
updated_code = parser.update_definition(code, name, new_content, path=str(p))
if updated_code.startswith("ERROR:"):
return updated_code
p.write_text(updated_code, encoding="utf-8")
return f"Successfully updated definition '{name}' in {path}"
except Exception as e:
return f"ERROR updating definition '{name}' in '{path}': {e}"
def ts_cpp_update_definition(path: str, name: str, new_content: str) -> str:
"""Surgically replace the definition of a class or function in a C++ file."""
p, err = _resolve_and_check(path)
if err: return err
assert p is not None
if not p.exists(): return f"ERROR: file not found: {path}"
try:
from src.file_cache import ASTParser
code = p.read_text(encoding="utf-8")
parser = ASTParser("cpp")
updated_code = parser.update_definition(code, name, new_content, path=str(p))
if updated_code.startswith("ERROR:"):
return updated_code
p.write_text(updated_code, encoding="utf-8")
return f"Successfully updated definition '{name}' in {path}"
except Exception as e:
return f"ERROR updating definition '{name}' in '{path}': {e}"
def get_file_slice(path: str, start_line: int, end_line: int) -> str:
"""Return a specific line range from a file."""
p, err = _resolve_and_check(path)
@@ -1150,13 +999,10 @@ class StdioMCPServer:
return str(result)
class ExternalMCPManager:
"""Manages external MCP servers using the StdioMCPServer class."""
def __init__(self):
"""Initialize the manager with an empty server registry."""
self.servers = {}
async def add_server(self, config: models.MCPServerConfig):
"""Add and start a new MCP server from a configuration object."""
if config.url:
# RemoteMCPServer placeholder
return
@@ -1165,13 +1011,11 @@ class ExternalMCPManager:
self.servers[config.name] = server
async def stop_all(self):
"""Stop all managed MCP servers and clear the registry."""
for server in self.servers.values():
await server.stop()
self.servers = {}
def get_all_tools(self) -> dict:
"""Retrieve a dictionary of all tools available across all managed servers."""
all_tools = {}
for sname, server in self.servers.items():
for tname, tool in server.tools.items():
@@ -1179,11 +1023,9 @@ class ExternalMCPManager:
return all_tools
def get_servers_status(self) -> dict[str, str]:
"""Get the current operational status of all managed servers."""
return {name: server.status for name, server in self.servers.items()}
async def async_dispatch(self, tool_name: str, tool_input: dict) -> str:
"""Dispatch a tool call to the appropriate external MCP server asynchronously."""
for server in self.servers.values():
if tool_name in server.tools:
return await server.call_tool(tool_name, tool_input)
@@ -1192,10 +1034,11 @@ class ExternalMCPManager:
_external_mcp_manager = ExternalMCPManager()
def get_external_mcp_manager() -> ExternalMCPManager:
"""Retrieve the global ExternalMCPManager instance."""
global _external_mcp_manager
return _external_mcp_manager
TOOL_NAMES: set[str] = {"read_file", "list_directory", "search_files", "get_file_summary", "py_get_skeleton", "py_get_code_outline", "py_get_definition", "get_git_diff", "web_search", "fetch_url", "get_ui_performance", "get_file_slice", "set_file_slice", "edit_file", "py_update_definition", "py_get_signature", "py_set_signature", "py_get_class_summary", "py_get_var_declaration", "py_set_var_declaration", "py_find_usages", "py_get_imports", "py_check_syntax", "py_get_hierarchy", "py_get_docstring", "get_tree"}
def dispatch(tool_name: str, tool_input: dict[str, Any]) -> str:
"""
Dispatch an MCP tool call by name. Returns the result as a string.
@@ -1212,28 +1055,8 @@ def dispatch(tool_name: str, tool_input: dict[str, Any]) -> str:
return get_file_summary(path)
if tool_name == "py_get_skeleton":
return py_get_skeleton(path)
if tool_name == "ts_c_get_skeleton":
return ts_c_get_skeleton(path)
if tool_name == "ts_cpp_get_skeleton":
return ts_cpp_get_skeleton(path)
if tool_name == "py_get_code_outline":
return py_get_code_outline(path)
if tool_name == "ts_c_get_code_outline":
return ts_c_get_code_outline(path)
if tool_name == "ts_cpp_get_code_outline":
return ts_cpp_get_code_outline(path)
if tool_name == "ts_c_get_definition":
return ts_c_get_definition(path, str(tool_input.get("name", "")))
if tool_name == "ts_cpp_get_definition":
return ts_cpp_get_definition(path, str(tool_input.get("name", "")))
if tool_name == "ts_c_get_signature":
return ts_c_get_signature(path, str(tool_input.get("name", "")))
if tool_name == "ts_cpp_get_signature":
return ts_cpp_get_signature(path, str(tool_input.get("name", "")))
if tool_name == "ts_c_update_definition":
return ts_c_update_definition(path, str(tool_input.get("name", "")), str(tool_input.get("new_content", "")))
if tool_name == "ts_cpp_update_definition":
return ts_cpp_update_definition(path, str(tool_input.get("name", "")), str(tool_input.get("new_content", "")))
if tool_name == "py_get_definition":
return py_get_definition(path, str(tool_input.get("name", "")))
if tool_name == "py_update_definition":
@@ -1283,31 +1106,6 @@ def dispatch(tool_name: str, tool_input: dict[str, Any]) -> str:
return py_get_docstring(path, str(tool_input.get("name", "")))
if tool_name == "get_tree":
return get_tree(path, int(tool_input.get("max_depth", 2)))
# Beads tools
if tool_name.startswith("bd_"):
if not _primary_base_dir:
return "ERROR: no active workspace to run beads tools."
bclient = beads_client.BeadsClient(_primary_base_dir)
if tool_name == "bd_list":
beads = bclient.list_beads()
if not beads:
return "No beads found."
return "\n".join([f"ID: {b.id}, Status: {b.status}, Title: {b.title}" for b in beads])
elif tool_name == "bd_create":
title = str(tool_input.get("title", ""))
desc = str(tool_input.get("description", ""))
bid = bclient.create_bead(title, desc)
return f"Created bead: {bid}"
elif tool_name == "bd_update":
bid = str(tool_input.get("bead_id", ""))
status = str(tool_input.get("status", ""))
if bclient.update_bead(bid, status):
return f"Updated {bid} to status {status}"
return f"ERROR: bead {bid} not found."
elif tool_name == "bd_ready":
return "READY" if bclient.is_initialized() else "NOT_INITIALIZED"
return f"ERROR: unknown MCP tool '{tool_name}'"
async def async_dispatch(tool_name: str, tool_input: dict[str, Any]) -> str:
@@ -1452,202 +1250,6 @@ MCP_TOOL_SPECS: list[dict[str, Any]] = [
"required": ["path"],
},
},
{
"name": "ts_c_get_skeleton",
"description": (
"Get a skeleton view of a C file. "
"This returns all function signatures and structs, "
"but replaces function bodies with '...'. "
"Use this to understand C interfaces without reading the full implementation."
),
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the C file.",
}
},
"required": ["path"],
},
},
{
"name": "ts_cpp_get_skeleton",
"description": (
"Get a skeleton view of a C++ file. "
"This returns all classes, structs and function signatures, "
"but replaces function bodies with '...'. "
"Use this to understand C++ interfaces without reading the full implementation."
),
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the C++ file.",
}
},
"required": ["path"],
},
},
{
"name": "ts_c_get_code_outline",
"description": (
"Get a hierarchical outline of a C file. "
"This returns structs and functions with their line ranges. "
"Use this to quickly map out a file's structure before reading specific sections."
),
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the C file.",
}
},
"required": ["path"],
},
},
{
"name": "ts_cpp_get_code_outline",
"description": (
"Get a hierarchical outline of a C++ file. "
"This returns classes, structs and functions with their line ranges. "
"Use this to quickly map out a file's structure before reading specific sections."
),
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the C++ file.",
}
},
"required": ["path"],
},
},
{
"name": "ts_c_get_definition",
"description": (
"Get the full source code of a specific function or struct definition in a C file. "
"This is more efficient than reading the whole file if you know what you're looking for."
),
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the C file.",
},
"name": {
"type": "string",
"description": "The name of the function or struct to retrieve.",
}
},
"required": ["path", "name"],
},
},
{
"name": "ts_cpp_get_definition",
"description": (
"Get the full source code of a specific class, function, or method definition in a C++ file. "
"This is more efficient than reading the whole file if you know what you're looking for."
),
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the C++ file.",
},
"name": {
"type": "string",
"description": "The name of the class or function to retrieve. Use 'ClassName::method_name' for methods.",
}
},
"required": ["path", "name"],
},
},
{
"name": "ts_c_get_signature",
"description": "Get only the signature part of a C function.",
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the C file."
},
"name": {
"type": "string",
"description": "Name of the function."
}
},
"required": ["path", "name"]
}
},
{
"name": "ts_cpp_get_signature",
"description": "Get only the signature part of a C++ function or method.",
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the C++ file."
},
"name": {
"type": "string",
"description": "Name of the function/method (e.g. 'ClassName::method_name')."
}
},
"required": ["path", "name"]
}
},
{
"name": "ts_c_update_definition",
"description": "Surgically replace the definition of a function in a C file using AST to find line ranges.",
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the C file."
},
"name": {
"type": "string",
"description": "Name of function."
},
"new_content": {
"type": "string",
"description": "Complete new source for the definition."
}
},
"required": ["path", "name", "new_content"]
}
},
{
"name": "ts_cpp_update_definition",
"description": "Surgically replace the definition of a class or function in a C++ file using AST to find line ranges.",
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the C++ file."
},
"name": {
"type": "string",
"description": "Name of class/function/method."
},
"new_content": {
"type": "string",
"description": "Complete new source for the definition."
}
},
"required": ["path", "name", "new_content"]
}
},
{
"name": "get_file_slice",
"description": "Read a specific line range from a file. Useful for reading parts of very large files.",
@@ -1993,51 +1595,9 @@ MCP_TOOL_SPECS: list[dict[str, Any]] = [
},
"required": ["path"]
}
},
{
"name": "bd_create",
"description": "Create a new Bead in the active Beads repository.",
"parameters": {
"type": "object",
"properties": {
"title": { "type": "string", "description": "Title of the Bead." },
"description": { "type": "string", "description": "Description of the Bead." }
},
"required": ["title", "description"]
}
},
{
"name": "bd_update",
"description": "Update an existing Bead.",
"parameters": {
"type": "object",
"properties": {
"bead_id": { "type": "string", "description": "ID of the Bead to update." },
"status": { "type": "string", "description": "New status for the Bead." }
},
"required": ["bead_id", "status"]
}
},
{
"name": "bd_list",
"description": "List all Beads in the active Beads repository.",
"parameters": {
"type": "object",
"properties": {}
}
},
{
"name": "bd_ready",
"description": "Check if the Beads repository is initialized in the current workspace.",
"parameters": {
"type": "object",
"properties": {}
}
}
]
TOOL_NAMES: set[str] = {t['name'] for t in MCP_TOOL_SPECS}
-29
View File
@@ -179,32 +179,3 @@ RULES:
Analyze this error and generate the patch:
"""
TIER4_SUMMARIZE_CODE_PROMPT: str = """You are a Tier 4 QA Agent specializing in code summarization.
Your goal is to provide a concise, high-signal summary of the provided code file.
Focus on the primary responsibility of the module and its key architectural components.
INPUT:
- File Path: {file_path}
- Heuristic Outline: {outline}
- Raw Content:
{content}
OUTPUT REQUIREMENT:
Provide a 1-2 sentence high-level summary followed by a brief bulleted list of key features or responsibilities.
Keep it extremely concise. Do NOT repeat the outline.
"""
TIER4_SUMMARIZE_TEXT_PROMPT: str = """You are a Tier 4 QA Agent specializing in document summarization.
Your goal is to provide a concise, high-signal summary of the provided text/markdown file.
INPUT:
- File Path: {file_path}
- Heuristic Outline: {outline}
- Raw Content:
{content}
OUTPUT REQUIREMENT:
Provide a 1-2 sentence high-level summary of the document's purpose and key takeaways.
Keep it extremely concise.
"""
+7 -131
View File
@@ -86,11 +86,7 @@ AGENT_TOOL_NAMES = [
"py_find_usages",
"py_get_imports",
"py_check_syntax",
"py_get_hierarchy",
"ts_c_get_skeleton",
"ts_cpp_get_skeleton",
"ts_c_get_code_outline",
"ts_cpp_get_code_outline"
"py_get_hierarchy"
]
DEFAULT_TOOL_CATEGORIES: Dict[str, List[str]] = {
@@ -110,24 +106,11 @@ DEFAULT_TOOL_CATEGORIES: Dict[str, List[str]] = {
"Surgical": ["get_file_slice", "set_file_slice", "edit_file"],
"Web": ["web_search", "fetch_url"],
"Analysis": ["py_find_usages", "py_get_imports", "py_check_syntax", "py_get_hierarchy"],
"C/C++": [
"ts_c_get_skeleton",
"ts_cpp_get_skeleton",
"ts_c_get_code_outline",
"ts_cpp_get_code_outline",
"ts_c_get_definition",
"ts_cpp_get_definition",
"ts_c_get_signature",
"ts_cpp_get_signature",
"ts_c_update_definition",
"ts_cpp_update_definition"
],
"Runtime": ["run_powershell", "get_ui_performance"]
}
def parse_history_entries(history_strings: list[str], roles: list[str]) -> list[dict[str, Any]]:
import re
from src import thinking_parser
entries = []
for raw in history_strings:
ts = ""
@@ -145,30 +128,11 @@ def parse_history_entries(history_strings: list[str], roles: list[str]) -> list[
content = rest[match.end():].strip()
else:
content = rest
entry_obj = {"role": role, "content": content, "collapsed": True, "ts": ts}
if role == "AI" and ("<thinking>" in content or "<thought>" in content or "Thinking:" in content):
segments, parsed_content = thinking_parser.parse_thinking_trace(content)
if segments:
entry_obj["content"] = parsed_content
entry_obj["thinking_segments"] = [{"content": s.content, "marker": s.marker} for s in segments]
entries.append(entry_obj)
entries.append({"role": role, "content": content, "collapsed": True, "ts": ts})
return entries
@dataclass
class ThinkingSegment:
content: str
marker: str # 'thinking', 'thought', or 'Thinking:'
def to_dict(self) -> Dict[str, Any]:
return {"content": self.content, "marker": self.marker}
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "ThinkingSegment":
return cls(content=data["content"], marker=data["marker"])
@dataclass
@dataclass
class Ticket:
id: str
@@ -275,6 +239,8 @@ class Track:
)
@dataclass
@dataclass
@dataclass
class WorkerContext:
ticket_id: str
@@ -373,14 +339,12 @@ class FileItem:
path: str
auto_aggregate: bool = True
force_full: bool = False
injected_at: Optional[float] = None
def to_dict(self) -> Dict[str, Any]:
return {
"path": self.path,
"auto_aggregate": self.auto_aggregate,
"force_full": self.force_full,
"injected_at": self.injected_at,
}
@classmethod
@@ -389,7 +353,6 @@ class FileItem:
path=data["path"],
auto_aggregate=data.get("auto_aggregate", True),
force_full=data.get("force_full", False),
injected_at=data.get("injected_at"),
)
@dataclass
@@ -485,8 +448,6 @@ class Persona:
system_prompt: str = ''
tool_preset: Optional[str] = None
bias_profile: Optional[str] = None
context_preset: Optional[str] = None
aggregation_strategy: Optional[str] = None
@property
def provider(self) -> Optional[str]:
@@ -529,10 +490,6 @@ class Persona:
res["tool_preset"] = self.tool_preset
if self.bias_profile is not None:
res["bias_profile"] = self.bias_profile
if self.context_preset is not None:
res["context_preset"] = self.context_preset
if self.aggregation_strategy is not None:
res["aggregation_strategy"] = self.aggregation_strategy
return res
@classmethod
@@ -550,7 +507,7 @@ class Persona:
for k in ["provider", "model", "temperature", "top_p", "max_output_tokens"]:
if data.get(k) is not None:
legacy[k] = data[k]
if legacy:
if not parsed_models:
parsed_models.append(legacy)
@@ -566,9 +523,8 @@ class Persona:
system_prompt=data.get("system_prompt", ""),
tool_preset=data.get("tool_preset"),
bias_profile=data.get("bias_profile"),
context_preset=data.get("context_preset"),
aggregation_strategy=data.get("aggregation_strategy"),
)
@dataclass
class MCPServerConfig:
name: str
@@ -612,86 +568,6 @@ class MCPConfiguration:
}
return cls(mcpServers=parsed_servers)
@dataclass
class VectorStoreConfig:
provider: str # 'chroma', 'qdrant', 'mock', 'mcp'
url: Optional[str] = None
api_key: Optional[str] = None
collection_name: str = 'manual_slop'
mcp_server: Optional[str] = None
mcp_tool: Optional[str] = None
def to_dict(self) -> Dict[str, Any]:
return {
"provider": self.provider,
"url": self.url,
"api_key": self.api_key,
"collection_name": self.collection_name,
"mcp_server": self.mcp_server,
"mcp_tool": self.mcp_tool,
}
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "VectorStoreConfig":
return cls(
provider=data["provider"],
url=data.get("url"),
api_key=data.get("api_key"),
collection_name=data.get("collection_name", "manual_slop"),
mcp_server=data.get("mcp_server"),
mcp_tool=data.get("mcp_tool"),
)
@dataclass
class RAGConfig:
enabled: bool = False
vector_store: VectorStoreConfig = field(default_factory=lambda: VectorStoreConfig(provider='mock'))
embedding_provider: str = 'gemini'
chunk_size: int = 1000
chunk_overlap: int = 200
def to_dict(self) -> Dict[str, Any]:
return {
"enabled": self.enabled,
"vector_store": self.vector_store.to_dict(),
"embedding_provider": self.embedding_provider,
"chunk_size": self.chunk_size,
"chunk_overlap": self.chunk_overlap,
}
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "RAGConfig":
return cls(
enabled=data.get("enabled", False),
vector_store=VectorStoreConfig.from_dict(data.get("vector_store", {"provider": "mock"})),
embedding_provider=data.get("embedding_provider", "gemini"),
chunk_size=data.get("chunk_size", 1000),
chunk_overlap=data.get("chunk_overlap", 200),
)
@dataclass
class WorkspaceProfile:
name: str
ini_content: str
show_windows: Dict[str, bool]
panel_states: Dict[str, Any]
def to_dict(self) -> Dict[str, Any]:
return {
"ini_content": self.ini_content,
"show_windows": self.show_windows,
"panel_states": self.panel_states,
}
@classmethod
def from_dict(cls, name: str, data: Dict[str, Any]) -> "WorkspaceProfile":
return cls(
name=name,
ini_content=data.get("ini_content", ""),
show_windows=data.get("show_windows", {}),
panel_states=data.get("panel_states", {}),
)
def load_mcp_config(path: str) -> MCPConfiguration:
if not os.path.exists(path):
return MCPConfiguration()
+40 -37
View File
@@ -27,8 +27,35 @@ See Also:
- src/dag_engine.py for TrackDAG and ExecutionEngine
- src/models.py for Ticket, Track, WorkerContext
"""
"""
Multi-Agent Conductor - MMA 4-Tier orchestration engine.
This module provides the ConductorEngine and WorkerPool for orchestrating
the execution of implementation tickets within a Track using the DAG engine
and the bounded concurrent worker pool with abort event propagation.
Key Components:
- ConductorEngine: Tier 2 orchestrator that owns the execution loop
- WorkerPool: Bounded concurrent worker pool with semaphore gating
- run_worker_lifecycle: Stateless Tier 3 worker execution with context amnesia
Thread Safety:
- All state mutations use locks (_workers_lock, _tier_usage_lock)
- Worker threads are daemon threads that clean up on exit
- Abort events enable per-ticket cancellation
Integration:
- Uses AsyncEventQueue for state updates to the GUI
- Uses ai_client.send() for LLM communication
- Uses mcp_client for tool dispatch
See Also:
- docs/guide_mma.md for MMA orchestration documentation
- src/dag_engine.py for TrackDAG and ExecutionEngine
- src/ai_client.py for multi-provider LLM abstraction
- src/models.py for Ticket, Track, WorkerContext data structures
"""
from src import ai_client
from src import summarize
import json
import threading
import time
@@ -40,8 +67,6 @@ from src import models
from src.models import Ticket, Track, WorkerContext
from src.file_cache import ASTParser
from pathlib import Path
from src.personas import PersonaManager
from src import paths
from src.dag_engine import TrackDAG, ExecutionEngine
@@ -124,10 +149,8 @@ class ConductorEngine:
self._abort_events: dict[str, threading.Event] = {}
self._pause_event: threading.Event = threading.Event()
self._tier_usage_lock = threading.Lock()
self._dirty: bool = True
def update_usage(self, tier: str, input_tokens: int, output_tokens: int) -> None:
"""Updates token usage for a specific tier."""
with self._tier_usage_lock:
if tier in self.tier_usage:
self.tier_usage[tier]["input"] += input_tokens
@@ -141,16 +164,6 @@ class ConductorEngine:
"""Resumes the pipeline execution."""
self._pause_event.clear()
def approve_task(self, task_id: str) -> None:
"""Manually transition todo to in_progress and mark engine dirty."""
self.engine.approve_task(task_id)
self._dirty = True
def update_task_status(self, task_id: str, status: str) -> None:
"""Force-update ticket status and mark engine dirty."""
self.engine.update_task_status(task_id, status)
self._dirty = True
def kill_worker(self, ticket_id: str) -> None:
"""Sets the abort event for a worker and attempts to join its thread."""
if ticket_id in self._abort_events:
@@ -167,7 +180,6 @@ class ConductorEngine:
self._active_workers.pop(ticket_id, None)
def _push_state(self, status: str = "running", active_tier: str = None) -> None:
"""Pushes the current engine state to the GUI."""
if not self.event_queue:
return
payload = {
@@ -229,14 +241,10 @@ class ConductorEngine:
if max_ticks is not None and tick_count >= max_ticks:
break
tick_count += 1
# 1. Identify ready tasks
if self._dirty:
self._ready_tasks = self.engine.tick()
self._dirty = False
ready_tasks = self._ready_tasks
# 1. Identify ready tasks
ready_tasks = self.engine.tick()
# 2. Check for completion or blockage
if not ready_tasks:
all_done = all(t.status == "completed" for t in self.track.tickets)
if all_done:
@@ -281,6 +289,7 @@ class ConductorEngine:
model_name = ticket.model_override
else:
# Check if ticket has a persona with preferred_models
models_list = ["gemini-2.5-flash-lite", "gemini-2.5-flash", "gemini-3.1-pro-preview"]
if ticket.persona_id:
# Try to load preferred_models from persona
try:
@@ -419,8 +428,9 @@ def run_worker_lifecycle(ticket: Ticket, context: WorkerContext, context_files:
# Apply Persona if specified
preferred_models = []
persona_tool_preset = None
persona = None
if context.persona_id:
from src.personas import PersonaManager
from src import paths
pm = PersonaManager(Path(paths.get_project_personas_path(Path.cwd())) if paths.get_project_personas_path(Path.cwd()).exists() else None)
try:
personas = pm.load_all()
@@ -460,7 +470,6 @@ def run_worker_lifecycle(ticket: Ticket, context: WorkerContext, context_files:
if context_files:
parser = ASTParser(language="python")
strategy = getattr(persona, "aggregation_strategy", "auto") if persona else "auto"
for i, file_path in enumerate(context_files):
try:
Path(file_path)
@@ -470,17 +479,12 @@ def run_worker_lifecycle(ticket: Ticket, context: WorkerContext, context_files:
tokens_before += _count_tokens(content)
if strategy == "summarize":
view = summarize.summarise_file(Path(file_path), content)
elif strategy == "full":
view = content
else: # auto or skeleton
if i == 0:
view = parser.get_curated_view(content, path=file_path)
elif ticket.target_file and Path(file_path).resolve() == Path(ticket.target_file).resolve() and ticket.target_symbols:
view = parser.get_targeted_view(content, ticket.target_symbols, path=file_path)
else:
view = parser.get_skeleton(content, path=file_path)
if i == 0:
view = parser.get_curated_view(content, path=file_path)
elif ticket.target_file and Path(file_path).resolve() == Path(ticket.target_file).resolve() and ticket.target_symbols:
view = parser.get_targeted_view(content, ticket.target_symbols, path=file_path)
else:
view = parser.get_skeleton(content, path=file_path)
tokens_after += _count_tokens(view)
context_injection += f"\nFile: {file_path}\n{view}\n"
@@ -602,7 +606,6 @@ def run_worker_lifecycle(ticket: Ticket, context: WorkerContext, context_files:
_in_tokens = sum(e.get("payload", {}).get("usage", {}).get("input_tokens", 0) for e in _resp_entries)
_out_tokens = sum(e.get("payload", {}).get("usage", {}).get("output_tokens", 0) for e in _resp_entries)
engine.update_usage("Tier 3", _in_tokens, _out_tokens)
engine._dirty = True
if "BLOCKED" in response.upper():
ticket.mark_blocked(response)
else:
+6 -14
View File
@@ -2,23 +2,19 @@ from pathlib import Path
from typing import Optional
import json
import re
from src import paths
def read_plan(track_id: str, base_dir: str = ".") -> str:
"""Reads the implementation plan (plan.md) for a track."""
plan_path = paths.get_track_state_dir(track_id, base_dir) / "plan.md"
plan_path = Path(base_dir) / "conductor" / "tracks" / track_id / "plan.md"
if not plan_path.exists():
return ""
return plan_path.read_text(encoding="utf-8")
def write_plan(track_id: str, content: str, base_dir: str = ".") -> None:
"""Writes the implementation plan (plan.md) for a track."""
plan_path = paths.get_track_state_dir(track_id, base_dir) / "plan.md"
plan_path = Path(base_dir) / "conductor" / "tracks" / track_id / "plan.md"
plan_path.parent.mkdir(parents=True, exist_ok=True)
plan_path.write_text(content, encoding="utf-8")
def parse_plan_tasks(content: str) -> list[dict[str, str]]:
"""Parses the tasks from a plan.md file."""
tasks = []
for line in content.split("\n"):
stripped = line.strip()
@@ -29,25 +25,21 @@ def parse_plan_tasks(content: str) -> list[dict[str, str]]:
return tasks
def read_metadata(track_id: str, base_dir: str = ".") -> dict:
"""Reads the metadata (metadata.json) for a track."""
meta_path = paths.get_track_state_dir(track_id, base_dir) / "metadata.json"
meta_path = Path(base_dir) / "conductor" / "tracks" / track_id / "metadata.json"
if not meta_path.exists():
return {}
return json.loads(meta_path.read_text(encoding="utf-8"))
def write_metadata(track_id: str, data: dict, base_dir: str = ".") -> None:
"""Writes the metadata (metadata.json) for a track."""
meta_path = paths.get_track_state_dir(track_id, base_dir) / "metadata.json"
meta_path = Path(base_dir) / "conductor" / "tracks" / track_id / "metadata.json"
meta_path.parent.mkdir(parents=True, exist_ok=True)
meta_path.write_text(json.dumps(data, indent=2), encoding="utf-8")
def get_track_dir(track_id: str, base_dir: str = ".") -> Path:
"""Returns the state directory for a specific track."""
return paths.get_track_state_dir(track_id, base_dir)
return Path(base_dir) / "conductor" / "tracks" / track_id
def get_archive_dir(base_dir: str = ".") -> Path:
"""Returns the central archive directory for completed tracks."""
return paths.get_archive_dir(base_dir)
return Path(base_dir) / "conductor" / "archive"
class NativeOrchestrator:
def __init__(self, base_dir: str = "."):
-7
View File
@@ -72,13 +72,6 @@ def get_global_personas_path() -> Path:
def get_project_personas_path(project_root: Path) -> Path:
return project_root / "project_personas.toml"
def get_global_workspace_profiles_path() -> Path:
root_dir = Path(__file__).resolve().parent.parent
return Path(os.environ.get("SLOP_GLOBAL_WORKSPACE_PROFILES", root_dir / "workspace_profiles.toml"))
def get_project_workspace_profiles_path(project_root: Path) -> Path:
return project_root / ".ai" / "workspace_profiles.toml"
def _resolve_path(env_var: str, config_key: str, default: str) -> Path:
root_dir = Path(__file__).resolve().parent.parent
p = None
+4 -34
View File
@@ -62,18 +62,6 @@ from collections import deque
_instance: Optional[PerformanceMonitor] = None
class PerformanceScope:
"""Helper class for PerformanceMonitor.scope() context manager."""
def __init__(self, monitor: PerformanceMonitor, name: str) -> None:
self.monitor = monitor
self.name = name
def __enter__(self) -> PerformanceScope:
self.monitor.start_component(self.name)
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.monitor.end_component(self.name)
def get_monitor() -> PerformanceMonitor:
global _instance
if _instance is None:
@@ -102,9 +90,6 @@ class PerformanceMonitor:
self._component_starts: dict[str, float] = {}
self._component_timings: dict[str, float] = {}
self._component_counts: dict[str, int] = {}
self._component_max: dict[str, float] = {}
self._component_min: dict[str, float] = {}
# Rolling history and running sums for O(1) average calculation
# deques are thread-safe for appends and pops.
@@ -154,7 +139,7 @@ class PerformanceMonitor:
return self._history_sums[key] / len(h)
def start_frame(self) -> None:
now = time.perf_counter()
now = time.time()
with self._lock:
if self._last_frame_start_time > 0:
dt = now - self._last_frame_start_time
@@ -167,7 +152,7 @@ class PerformanceMonitor:
def end_frame(self) -> None:
if self._start_time is None:
return
now = time.perf_counter()
now = time.time()
elapsed = now - self._start_time
frame_time_ms = elapsed * 1000
@@ -194,24 +179,19 @@ class PerformanceMonitor:
def start_component(self, name: str) -> None:
if not self.enabled: return
now = time.perf_counter()
now = time.time()
with self._lock:
self._component_starts[name] = now
def end_component(self, name: str) -> None:
if not self.enabled: return
now = time.perf_counter()
now = time.time()
with self._lock:
start = self._component_starts.pop(name, None)
if start is not None:
elapsed = (now - start) * 1000
with self._lock:
self._component_timings[name] = elapsed
self._component_counts[name] = self._component_counts.get(name, 0) + 1
if name not in self._component_max or elapsed > self._component_max[name]:
self._component_max[name] = elapsed
if name not in self._component_min or elapsed < self._component_min[name]:
self._component_min[name] = elapsed
self._add_to_history(f'comp_{name}', elapsed)
def get_metrics(self) -> dict[str, float]:
@@ -223,9 +203,6 @@ class PerformanceMonitor:
ilag = self._input_lag_ms
last_calc_fps = self._last_calculated_fps
timings_snapshot = dict(self._component_timings)
counts_snapshot = dict(self._component_counts)
max_snapshot = dict(self._component_max)
min_snapshot = dict(self._component_min)
metrics = {
'fps': fps,
@@ -240,9 +217,6 @@ class PerformanceMonitor:
for name, elapsed in timings_snapshot.items():
metrics[f'time_{name}_ms'] = elapsed
metrics[f'time_{name}_ms_avg'] = self._get_avg(f'comp_{name}')
metrics[f'count_{name}'] = float(counts_snapshot.get(name, 0))
metrics[f'max_{name}_ms'] = max_snapshot.get(name, 0.0)
metrics[f'min_{name}_ms'] = min_snapshot.get(name, 0.0)
return metrics
def get_history(self, key: str) -> List[float]:
@@ -254,10 +228,6 @@ class PerformanceMonitor:
return list(self._history[f'comp_{key}'])
return []
def scope(self, name: str) -> PerformanceScope:
"""Returns a context manager for timing a component."""
return PerformanceScope(self, name)
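A minimal sketch of the `scope()` helper in use; the component name is arbitrary, and it assumes `enabled` is set so timings are recorded:

```
import time

monitor = get_monitor()                  # module-level singleton defined above
monitor.enabled = True                   # assumption: timing is gated on this flag

with monitor.scope("demo_component"):
    time.sleep(0.01)                     # stand-in for real work

metrics = monitor.get_metrics()
print(metrics.get("time_demo_component_ms"),
      metrics.get("time_demo_component_ms_avg"))
```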
def stop(self) -> None:
self._stop_event.set()
if self._cpu_thread.is_alive():
+6 -70
View File
@@ -33,14 +33,6 @@ def entry_to_str(entry: dict[str, Any]) -> str:
ts = entry.get("ts", "")
role = entry.get("role", "User")
content = entry.get("content", "")
segments = entry.get("thinking_segments")
if segments:
for s in segments:
marker = s.get("marker", "thinking")
s_content = s.get("content", "")
content = f"<{marker}>\n{s_content}\n</{marker}>\n{content}"
if ts:
return f"@{ts}\n{role}:\n{content}"
return f"{role}:\n{content}"
@@ -97,11 +89,10 @@ def default_discussion() -> dict[str, Any]:
def default_project(name: str = "unnamed") -> dict[str, Any]:
return {
"project": {"name": name, "git_dir": "", "system_prompt": "", "main_context": "", "execution_mode": "native"},
"project": {"name": name, "git_dir": "", "system_prompt": "", "main_context": ""},
"output": {"output_dir": "./md_gen"},
"files": {"base_dir": ".", "paths": [], "tier_assignments": {}},
"screenshots": {"base_dir": ".", "paths": []},
"context_presets": {},
"gemini_cli": {"binary_path": "gemini"},
"deepseek": {"reasoning_effort": "medium"},
"agent": {
@@ -240,37 +231,15 @@ def flat_config(proj: dict[str, Any], disc_name: Optional[str] = None, track_id:
disc_data = disc_sec.get("discussions", {}).get(name, {})
history = disc_data.get("history", [])
return {
"project": proj.get("project", {}),
"output": proj.get("output", {}),
"files": proj.get("files", {}),
"screenshots": proj.get("screenshots", {}),
"context_presets": proj.get("context_presets", {}),
"discussion": {
"project": proj.get("project", {}),
"output": proj.get("output", {}),
"files": proj.get("files", {}),
"screenshots": proj.get("screenshots", {}),
"discussion": {
"roles": disc_sec.get("roles", []),
"history": history,
},
}
# ── context presets ──────────────────────────────────────────────────────────
def save_context_preset(project_dict: dict, preset_name: str, files: list[str], screenshots: list[str]) -> None:
"""Save a named context preset (files + screenshots) into the project dict."""
if "context_presets" not in project_dict:
project_dict["context_presets"] = {}
project_dict["context_presets"][preset_name] = {
"files": files,
"screenshots": screenshots
}
def load_context_preset(project_dict: dict, preset_name: str) -> dict:
"""Return the files and screenshots for a named preset."""
if "context_presets" not in project_dict or preset_name not in project_dict["context_presets"]:
raise KeyError(f"Preset '{preset_name}' not found in project context_presets.")
return project_dict["context_presets"][preset_name]
def delete_context_preset(project_dict: dict, preset_name: str) -> None:
"""Remove a named preset if it exists."""
if "context_presets" in project_dict:
project_dict["context_presets"].pop(preset_name, None)
# ── track state persistence ─────────────────────────────────────────────────
def save_track_state(track_id: str, state: 'TrackState', base_dir: Union[str, Path] = ".") -> None:
@@ -424,36 +393,3 @@ def calculate_track_progress(tickets: list) -> dict:
"todo": todo
}
def branch_discussion(project_dict: dict, source_id: str, new_id: str, message_index: int) -> None:
"""
Creates a new discussion in project_dict['discussion']['discussions'] by copying
the history from source_id up to (and including) message_index, and sets active to new_id.
"""
if "discussion" not in project_dict or "discussions" not in project_dict["discussion"]:
return
if source_id not in project_dict["discussion"]["discussions"]:
return
source_disc = project_dict["discussion"]["discussions"][source_id]
new_disc = default_discussion()
new_disc["git_commit"] = source_disc.get("git_commit", "")
# Copy history up to and including message_index
new_disc["history"] = source_disc["history"][:message_index + 1]
project_dict["discussion"]["discussions"][new_id] = new_disc
project_dict["discussion"]["active"] = new_id
def promote_take(project_dict: dict, take_id: str, new_id: str) -> None:
"""Renames a take_id to new_id in the discussions dict."""
if "discussion" not in project_dict or "discussions" not in project_dict["discussion"]:
return
if take_id not in project_dict["discussion"]["discussions"]:
return
disc = project_dict["discussion"]["discussions"].pop(take_id)
project_dict["discussion"]["discussions"][new_id] = disc
# If the take was active, update the active pointer
if project_dict["discussion"].get("active") == take_id:
project_dict["discussion"]["active"] = new_id
-246
View File
@@ -1,246 +0,0 @@
import os
import sys
import asyncio
import json
from typing import List, Dict, Any, Optional
import chromadb
from chromadb.config import Settings
from src import models
from src import mcp_client
try:
from sentence_transformers import SentenceTransformer
except ImportError:
SentenceTransformer = None
from google import genai
from google.genai import types
from src import ai_client
class BaseEmbeddingProvider:
def embed(self, texts: List[str]) -> List[List[float]]:
raise NotImplementedError()
class LocalEmbeddingProvider(BaseEmbeddingProvider):
def __init__(self, model_name: str = 'all-MiniLM-L6-v2'):
if SentenceTransformer is None:
raise ImportError("sentence-transformers is not installed")
self.model = SentenceTransformer(model_name)
def embed(self, texts: List[str]) -> List[List[float]]:
embeddings = self.model.encode(texts)
return embeddings.tolist()
class GeminiEmbeddingProvider(BaseEmbeddingProvider):
def __init__(self, model_name: str = 'text-embedding-004'):
self.model_name = model_name
def embed(self, texts: List[str]) -> List[List[float]]:
ai_client._ensure_gemini_client()
client = ai_client._gemini_client
if not client:
raise ValueError("Gemini client not initialized")
# For text-embedding-004, we can embed a batch
res = client.models.embed_content(
model=self.model_name,
contents=texts,
config=types.EmbedContentConfig(task_type="RETRIEVAL_DOCUMENT")
)
return [e.values for e in res.embeddings]
class RAGEngine:
def __init__(self, config: models.RAGConfig, base_dir: str = "."):
self.config = config
self.base_dir = base_dir
self.client = None
self.collection = None
self.embedding_provider = None
if not self.config.enabled:
return
self._init_embedding_provider()
self._init_vector_store()
def _init_embedding_provider(self):
if self.config.embedding_provider == 'gemini':
self.embedding_provider = GeminiEmbeddingProvider()
elif self.config.embedding_provider == 'local':
self.embedding_provider = LocalEmbeddingProvider()
else:
raise ValueError(f"Unknown embedding provider: {self.config.embedding_provider}")
def _init_vector_store(self):
vs_config = self.config.vector_store
if vs_config.provider == 'chroma':
db_path = os.path.join(self.base_dir, ".slop_cache", "chroma_db")
os.makedirs(db_path, exist_ok=True)
self.client = chromadb.PersistentClient(path=db_path)
self.collection = self.client.get_or_create_collection(name=vs_config.collection_name)
elif vs_config.provider == 'mock':
self.client = "mock"
self.collection = "mock"
else:
raise ValueError(f"Unknown vector store provider: {vs_config.provider}")
def is_empty(self) -> bool:
if not self.config.enabled:
return True
if self.config.vector_store.provider == 'mock' or self.collection == "mock":
return True
if self.collection is None:
return True
return self.collection.count() == 0
def add_documents(self, ids: List[str], texts: List[str], metadatas: Optional[List[Dict[str, Any]]] = None):
if not self.config.enabled or self.collection == "mock":
return
embeddings = self.embedding_provider.embed(texts)
self.collection.upsert(
ids=ids,
embeddings=embeddings,
documents=texts,
metadatas=metadatas
)
def _chunk_text(self, content: str) -> List[str]:
"""Character-based chunking with overlap."""
chunks = []
if not content:
return chunks
chunk_size = self.config.chunk_size
overlap = self.config.chunk_overlap
start = 0
while start < len(content):
end = start + chunk_size
chunks.append(content[start:end])
if end >= len(content):
break
start += (chunk_size - overlap)
return chunks
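To make the overlap arithmetic concrete: with the defaults `chunk_size=1000` and `chunk_overlap=200`, the window start advances by 800 characters, so each chunk repeats the last 200 characters of its predecessor. The same loop as a standalone sketch:

```
def chunk_text(content: str, chunk_size: int = 1000, overlap: int = 200) -> list[str]:
    # Same sliding-window logic as _chunk_text above, pulled out for illustration.
    chunks: list[str] = []
    start = 0
    while start < len(content):
        chunks.append(content[start:start + chunk_size])
        if start + chunk_size >= len(content):
            break
        start += chunk_size - overlap
    return chunks

print([len(c) for c in chunk_text("x" * 2500)])   # [1000, 1000, 900]
```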
def _chunk_code(self, content: str, file_path: str) -> List[str]:
"""AST-aware chunking for Python code."""
try:
from src.file_cache import ASTParser
parser = ASTParser("python")
tree = parser.parse(content)
chunks = []
# Capture classes and top-level functions
for node in tree.root_node.children:
if node.type in ("function_definition", "class_definition"):
chunks.append(content[node.start_byte:node.end_byte])
# Fallback if no structural chunks found or if file is small
if not chunks or len(content) < self.config.chunk_size:
return self._chunk_text(content)
return chunks
except Exception:
return self._chunk_text(content)
def index_file(self, file_path: str):
"""Reads, chunks, and indexes a file into the vector store."""
if not self.config.enabled or self.collection == "mock":
return
full_path = os.path.join(self.base_dir, file_path)
if not os.path.exists(full_path):
return
try:
mtime = os.path.getmtime(full_path)
except Exception:
return
# Incremental check: see if we already have this file with the same mtime
try:
res = self.collection.get(where={"path": file_path}, limit=1, include=["metadatas"])
if res and res["metadatas"] and res["metadatas"][0]:
if res["metadatas"][0].get("mtime") == mtime:
return
except Exception:
pass
try:
with open(full_path, "r", encoding="utf-8", errors="ignore") as f:
content = f.read()
except Exception:
return
# Remove old entries for this file
self.collection.delete(where={"path": file_path})
if file_path.lower().endswith(".py"):
chunks = self._chunk_code(content, file_path)
else:
chunks = self._chunk_text(content)
if not chunks:
return
ids = [f"{file_path}_{i}" for i in range(len(chunks))]
metadatas = [{"path": file_path, "chunk": i, "mtime": mtime} for i in range(len(chunks))]
self.add_documents(ids, chunks, metadatas)
def _search_mcp(self, query: str, top_k: int = 5) -> List[Dict[str, Any]]:
async def _async_search_mcp():
tool_name = self.config.vector_store.mcp_tool or "rag_search"
args = {"query": query, "top_k": top_k}
res_str = await mcp_client.async_dispatch(tool_name, args)
try:
data = json.loads(res_str)
if isinstance(data, list):
return data
elif isinstance(data, dict) and "results" in data:
return data["results"]
return []
except:
return []
return asyncio.run(_async_search_mcp())
def search(self, query: str, top_k: int = 5) -> List[Dict[str, Any]]:
if not self.config.enabled:
return []
if self.config.vector_store.provider == 'mcp':
return self._search_mcp(query, top_k)
if self.collection == "mock":
return []
query_embedding = self.embedding_provider.embed([query])[0]
results = self.collection.query(
query_embeddings=[query_embedding],
n_results=top_k
)
ret = []
if results and results["ids"] and results["ids"][0]:
for i in range(len(results["ids"][0])):
ret.append({
"id": results["ids"][0][i],
"document": results["documents"][0][i],
"metadata": results["metadatas"][0][i] if results["metadatas"] else {},
"distance": results["distances"][0][i] if "distances" in results and results["distances"] else 0.0
})
return ret
def delete_documents(self, ids: List[str]):
if not self.config.enabled or self.collection == "mock":
return
self.collection.delete(ids=ids)
def get_all_indexed_paths(self) -> List[str]:
if not self.config.enabled or self.collection == "mock":
return []
res = self.collection.get(include=["metadatas"])
if not res or not res["metadatas"]:
return []
return list(set(m.get("path") for m in res["metadatas"] if m.get("path")))
def delete_documents_by_path(self, file_paths: List[str]):
if not self.config.enabled or self.collection == "mock":
return
for path in file_paths:
self.collection.delete(where={"path": path})
+252 -1
View File
@@ -5,6 +5,113 @@ class ShaderManager:
self.program = None
self.bg_program = None
self.pp_program = None
self.blur_h_program = None
self.blur_v_program = None
self.blur_fbo = None
self.scene_fbo = None
self.temp_fbo = None
self.scene_tex = None
self.blur_tex = None
self.temp_tex = None
self.fbo_width = 0
self.fbo_height = 0
self._vao = None
def _ensure_vao(self):
if self._vao is None:
try:
import sys
if sys.platform == "win32":
self._vao = gl.glGenVertexArrays(1)
else:
# Some non-win32 environments might not support VAOs or need different handling
self._vao = gl.glGenVertexArrays(1)
except Exception:
pass
if self._vao is not None:
gl.glBindVertexArray(self._vao)
def setup_capture_fbo(self, width, height):
if self.blur_fbo is not None:
gl.glDeleteFramebuffers(1, [self.blur_fbo])
if self.scene_fbo is not None:
gl.glDeleteFramebuffers(1, [self.scene_fbo])
if self.temp_fbo is not None:
gl.glDeleteFramebuffers(1, [self.temp_fbo])
if self.scene_tex is not None:
gl.glDeleteTextures(1, [self.scene_tex])
if self.blur_tex is not None:
gl.glDeleteTextures(1, [self.blur_tex])
if self.temp_tex is not None:
gl.glDeleteTextures(1, [self.temp_tex])
self.scene_tex = gl.glGenTextures(1)
gl.glBindTexture(gl.GL_TEXTURE_2D, self.scene_tex)
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA, width, height, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, None)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE)
self.scene_fbo = gl.glGenFramebuffers(1)
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.scene_fbo)
gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.scene_tex, 0)
if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
raise RuntimeError("Scene Framebuffer not complete")
self.temp_tex = gl.glGenTextures(1)
gl.glBindTexture(gl.GL_TEXTURE_2D, self.temp_tex)
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA, width, height, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, None)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE)
self.temp_fbo = gl.glGenFramebuffers(1)
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.temp_fbo)
gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.temp_tex, 0)
if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
raise RuntimeError("Temp Framebuffer not complete")
self.blur_tex = gl.glGenTextures(1)
gl.glBindTexture(gl.GL_TEXTURE_2D, self.blur_tex)
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA, width, height, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, None)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE)
self.blur_fbo = gl.glGenFramebuffers(1)
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.blur_fbo)
gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.blur_tex, 0)
if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
raise RuntimeError("Blur Framebuffer not complete")
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
self.fbo_width = width
self.fbo_height = height
def render_background_to_fbo(self, width, height, time):
if self.scene_fbo is None or self.fbo_width != width or self.fbo_height != height:
self.setup_capture_fbo(width, height)
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.scene_fbo)
gl.glViewport(0, 0, width, height)
self.render_background(width, height, time)
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
def prepare_global_blur(self, width, height, radius, tint, opacity, time):
self.render_background_to_fbo(width, height, time)
self.render_blur(self.scene_tex, width, height, radius, tint, opacity)
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
def capture_begin(self, width, height):
if self.blur_fbo is None or self.fbo_width != width or self.fbo_height != height:
self.setup_capture_fbo(width, height)
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.blur_fbo)
gl.glViewport(0, 0, width, height)
def capture_end(self):
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
def compile_shader(self, vertex_src: str, fragment_src: str) -> int:
program = gl.glCreateProgram()
@@ -79,9 +186,44 @@ void main() {
uniform float u_time;
uniform vec2 u_resolution;
out vec4 FragColor;
float hash(vec2 p) {
return fract(sin(dot(p, vec2(127.1, 311.7))) * 43758.5453123);
}
float noise(vec2 p) {
vec2 i = floor(p);
vec2 f = fract(p);
vec2 u = f * f * (3.0 - 2.0 * f);
return mix(mix(hash(i + vec2(0.0, 0.0)), hash(i + vec2(1.0, 0.0)), u.x),
mix(hash(i + vec2(0.0, 1.0)), hash(i + vec2(1.0, 1.0)), u.x), u.y);
}
void main() {
vec2 uv = gl_FragCoord.xy / u_resolution.xy;
vec3 col = 0.5 + 0.5 * cos(u_time + uv.xyx + vec3(0, 2, 4));
vec2 p = uv * 2.0 - 1.0;
p.x *= u_resolution.x / u_resolution.y;
// Deep sea background gradient (dark blue)
vec3 col = mix(vec3(0.01, 0.03, 0.08), vec3(0.0, 0.08, 0.15), uv.y);
// Moving blobs / caustics
float n = 0.0;
float t = u_time * 0.15;
n += noise(p * 1.2 + vec2(t * 0.8, t * 0.5)) * 0.4;
n += noise(p * 2.5 - vec2(t * 0.4, t * 0.9)) * 0.2;
col += vec3(0.05, 0.12, 0.22) * n;
// Bright highlights (caustics approximation)
float c = 0.0;
for(int i=0; i<3; i++) {
vec2 p2 = p * (float(i) + 1.0) * 0.4;
p2 += vec2(sin(t + p2.y * 1.5), cos(t + p2.x * 1.5));
c += abs(0.015 / (length(p2) - 0.4));
}
col += vec3(0.1, 0.25, 0.45) * c * 0.12;
FragColor = vec4(col, 1.0);
}
"""
@@ -97,6 +239,7 @@ void main() {
u_res_loc = gl.glGetUniformLocation(self.bg_program, "u_resolution")
if u_res_loc != -1:
gl.glUniform2f(u_res_loc, float(width), float(height))
self._ensure_vao()
gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, 4)
gl.glUseProgram(0)
@@ -148,6 +291,114 @@ void main() {
u_time_loc = gl.glGetUniformLocation(self.pp_program, "u_time")
if u_time_loc != -1:
gl.glUniform1f(u_time_loc, float(time))
self._ensure_vao()
gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, 4)
gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
gl.glUseProgram(0)
def setup_frosted_glass_shader(self):
vertex_src = """
#version 330 core
const vec2 positions[4] = vec2[](
vec2(-1.0, -1.0),
vec2( 1.0, -1.0),
vec2(-1.0, 1.0),
vec2( 1.0, 1.0)
);
const vec2 uvs[4] = vec2[](
vec2(0.0, 0.0),
vec2(1.0, 0.0),
vec2(0.0, 1.0),
vec2(1.0, 1.0)
);
out vec2 v_uv;
void main() {
gl_Position = vec4(positions[gl_VertexID], 0.0, 1.0);
v_uv = uvs[gl_VertexID];
}
"""
fragment_src_h = """
#version 330 core
in vec2 v_uv;
uniform sampler2D u_texture;
uniform float u_blur_radius;
uniform vec2 u_direction;
out vec4 FragColor;
void main() {
float weight[5] = float[](0.227027, 0.1945946, 0.1216216, 0.054054, 0.016216);
vec2 res = vec2(textureSize(u_texture, 0));
vec2 tex_offset = (u_blur_radius / res) * u_direction * 2.5; // Multiplied by 2.5 for milky effect
vec4 result = texture(u_texture, v_uv) * weight[0];
for(int i = 1; i < 5; ++i) {
result += texture(u_texture, v_uv + tex_offset * float(i)) * weight[i];
result += texture(u_texture, v_uv - tex_offset * float(i)) * weight[i];
}
FragColor = result;
}
"""
fragment_src_v = """
#version 330 core
in vec2 v_uv;
uniform sampler2D u_texture;
uniform float u_blur_radius;
uniform vec2 u_direction;
uniform float u_tint_intensity;
uniform float u_opacity;
out vec4 FragColor;
void main() {
float weight[5] = float[](0.227027, 0.1945946, 0.1216216, 0.054054, 0.016216);
vec2 res = vec2(textureSize(u_texture, 0));
vec2 tex_offset = (u_blur_radius / res) * u_direction * 2.5; // Multiplied by 2.5 for milky effect
vec4 result = texture(u_texture, v_uv) * weight[0];
for(int i = 1; i < 5; ++i) {
result += texture(u_texture, v_uv + tex_offset * float(i)) * weight[i];
result += texture(u_texture, v_uv - tex_offset * float(i)) * weight[i];
}
vec3 tint_color = vec3(0.05, 0.07, 0.12); // Slightly deeper tint
vec3 tinted = mix(result.rgb, tint_color, u_tint_intensity);
FragColor = vec4(tinted, result.a * u_opacity);
}
"""
self.blur_h_program = self.compile_shader(vertex_src, fragment_src_h)
self.blur_v_program = self.compile_shader(vertex_src, fragment_src_v)
def render_blur(self, texture_id, width, height, radius, tint, opacity):
if not self.blur_h_program or not self.blur_v_program:
return
self._ensure_vao()
# Pass 1: Horizontal blur to temp_fbo
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.temp_fbo)
gl.glViewport(0, 0, width, height)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
gl.glUseProgram(self.blur_h_program)
gl.glActiveTexture(gl.GL_TEXTURE0)
gl.glBindTexture(gl.GL_TEXTURE_2D, texture_id)
gl.glUniform1i(gl.glGetUniformLocation(self.blur_h_program, "u_texture"), 0)
gl.glUniform1f(gl.glGetUniformLocation(self.blur_h_program, "u_blur_radius"), float(radius))
gl.glUniform2f(gl.glGetUniformLocation(self.blur_h_program, "u_direction"), 1.0, 0.0)
gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, 4)
# Pass 2: Vertical blur to blur_fbo
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.blur_fbo)
gl.glViewport(0, 0, width, height)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
gl.glUseProgram(self.blur_v_program)
gl.glActiveTexture(gl.GL_TEXTURE0)
gl.glBindTexture(gl.GL_TEXTURE_2D, self.temp_tex)
gl.glUniform1i(gl.glGetUniformLocation(self.blur_v_program, "u_texture"), 0)
gl.glUniform1f(gl.glGetUniformLocation(self.blur_v_program, "u_blur_radius"), float(radius))
gl.glUniform2f(gl.glGetUniformLocation(self.blur_v_program, "u_direction"), 0.0, 1.0)
gl.glUniform1f(gl.glGetUniformLocation(self.blur_v_program, "u_tint_intensity"), float(tint))
gl.glUniform1f(gl.glGetUniformLocation(self.blur_v_program, "u_opacity"), float(opacity))
gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, 4)
gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
gl.glUseProgram(0)
+2 -9
View File
@@ -1,11 +1,4 @@
# shell_runner.py
"""
Shell Runner - Execution engine for PowerShell scripts.
This module provides utilities to run PowerShell scripts in a subprocess,
configuring the environment via mcp_env.toml. It handles timeouts,
logging, and optional QA/patch callbacks for error recovery.
"""
import os
import subprocess
import shutil
@@ -21,7 +14,7 @@ TIMEOUT_SECONDS: int = 60
_ENV_CONFIG: dict = {}
def _load_env_config() -> dict:
"""Load mcp_env.toml from project root or environment variable."""
"""Load mcp_env.toml from project root (sibling of this file or parent dir)."""
env_path = os.environ.get("SLOP_MCP_ENV")
if env_path and Path(env_path).exists():
with open(env_path, "rb") as f:
@@ -37,7 +30,7 @@ def _load_env_config() -> dict:
return {}
def _build_subprocess_env() -> dict[str, str]:
"""Build environment dictionary for subprocess with overrides from mcp_env.toml."""
"""Build env dict for subprocess: current env + mcp_env.toml overrides."""
global _ENV_CONFIG
if not _ENV_CONFIG:
_ENV_CONFIG = _load_env_config()
+4 -32
View File
@@ -27,9 +27,6 @@ import ast
import re
from pathlib import Path
from typing import Callable, Any
from src.summary_cache import SummaryCache, get_file_hash
_summary_cache = SummaryCache()
# ------------------------------------------------------------------ per-type extractors
@@ -153,38 +150,13 @@ _SUMMARISERS: dict[str, Callable[[Path, str], str]] = {
def summarise_file(path: Path, content: str) -> str:
"""
Return a compact markdown summary string for a single file.
`content` is the already-read file text (or an error string).
"""
content_hash = get_file_hash(content)
cached = _summary_cache.get_summary(str(path), content_hash)
if cached:
return cached
Return a compact markdown summary string for a single file.
`content` is the already-read file text (or an error string).
"""
suffix = path.suffix.lower() if hasattr(path, "suffix") else ""
fn = _SUMMARISERS.get(suffix, _summarise_generic)
try:
heuristic_outline = fn(path, content)
# Smart AI Summarization
is_code = suffix in [".py", ".ps1", ".js", ".ts", ".cpp", ".c", ".h", ".cs", ".go", ".rs", ".lua"]
try:
from src import ai_client
smart_summary = ai_client.run_subagent_summarization(
file_path=str(path),
content=content[:10000], # Cap content to 10k chars for summarization
is_code=is_code,
outline=heuristic_outline
)
if smart_summary and not smart_summary.startswith("ERROR:"):
summary = f"{smart_summary}\n\n**Outline:**\n{heuristic_outline}"
else:
summary = heuristic_outline
except Exception:
summary = heuristic_outline # Fallback
_summary_cache.set_summary(str(path), content_hash, summary)
return summary
return fn(path, content)
except Exception as e:
return f"_Summariser error: {e}_"
-88
View File
@@ -1,88 +0,0 @@
import hashlib
import json
from pathlib import Path
from typing import Optional, Dict
def get_file_hash(content: str) -> str:
"""Returns SHA256 hash of the content."""
return hashlib.sha256(content.encode("utf-8")).hexdigest()
class SummaryCache:
"""
A hash-based cache for file summaries to avoid redundant processing.
Invalidates when content hash changes.
"""
def __init__(self, cache_file: Optional[str] = None, max_entries: int = 1000):
if cache_file:
self.cache_file = Path(cache_file)
else:
# Default relative to current working directory
self.cache_file = Path(".slop_cache/summary_cache.json")
self.max_entries = max_entries
self.cache: Dict[str, Dict[str, str]] = {}
self.load()
def load(self) -> None:
"""Loads cache from disk."""
if self.cache_file.exists():
try:
with open(self.cache_file, "r", encoding="utf-8") as f:
self.cache = json.load(f)
except Exception:
self.cache = {}
def save(self) -> None:
"""Saves cache to disk."""
try:
self.cache_file.parent.mkdir(parents=True, exist_ok=True)
with open(self.cache_file, "w", encoding="utf-8") as f:
json.dump(self.cache, f, indent=1)
except Exception:
pass
def get_summary(self, file_path: str, content_hash: str) -> Optional[str]:
"""Returns cached summary if hash matches, otherwise None."""
entry = self.cache.get(file_path)
if entry and entry.get("hash") == content_hash:
# LRU: move to end
val = self.cache.pop(file_path)
self.cache[file_path] = val
return val.get("summary")
return None
def set_summary(self, file_path: str, content_hash: str, summary: str) -> None:
"""Stores summary in cache and saves to disk."""
if file_path in self.cache:
self.cache.pop(file_path)
self.cache[file_path] = {
"hash": content_hash,
"summary": summary
}
# Enforce LRU size limit
while len(self.cache) > self.max_entries:
# pop first item (oldest)
first_key = next(iter(self.cache))
self.cache.pop(first_key)
self.save()
def clear(self) -> None:
"""Clears the cache both in-memory and on disk."""
self.cache.clear()
if self.cache_file.exists():
try:
self.cache_file.unlink()
except Exception:
pass
def get_stats(self) -> dict:
"""Returns dictionary of cache statistics."""
size_bytes = 0
if self.cache_file.exists():
try:
size_bytes = self.cache_file.stat().st_size
except Exception:
pass
return {
"entries": len(self.cache),
"size_bytes": size_bytes
}
-42
View File
@@ -1,42 +0,0 @@
def format_takes_diff(takes: dict[str, list[dict]]) -> str:
if not takes:
return ""
histories = list(takes.values())
if not histories:
return ""
min_len = min(len(h) for h in histories)
common_prefix_len = 0
for i in range(min_len):
first_msg = histories[0][i]
if all(h[i] == first_msg for h in histories):
common_prefix_len += 1
else:
break
shared_lines = []
for i in range(common_prefix_len):
msg = histories[0][i]
shared_lines.append(f"{msg.get('role', 'unknown')}: {msg.get('content', '')}")
shared_text = "=== Shared History ==="
if shared_lines:
shared_text += "\n" + "\n".join(shared_lines)
variation_lines = []
if len(takes) > 1:
for take_name, history in takes.items():
if len(history) > common_prefix_len:
variation_lines.append(f"[{take_name}]")
for i in range(common_prefix_len, len(history)):
msg = history[i]
variation_lines.append(f"{msg.get('role', 'unknown')}: {msg.get('content', '')}")
variation_lines.append("")
else:
# Single take case
pass
variations_text = "=== Variations ===\n" + "\n".join(variation_lines)
return shared_text + "\n\n" + variations_text
+41 -1
View File
@@ -235,6 +235,10 @@ _current_font_size: float = 16.0
_current_scale: float = 1.0
_transparency: float = 1.0
_child_transparency: float = 1.0
_frosted_glass_enabled: bool = False
_frosted_blur_radius: float = 8.0
_frosted_tint_intensity: float = 0.1
_frosted_opacity: float = 1.0
# ------------------------------------------------------------------ public API
@@ -269,6 +273,34 @@ def set_child_transparency(val: float) -> None:
_child_transparency = val
apply(_current_palette)
def get_frosted_glass_enabled() -> bool:
return _frosted_glass_enabled
def set_frosted_glass_enabled(val: bool) -> None:
global _frosted_glass_enabled
_frosted_glass_enabled = val
def get_frosted_blur_radius() -> float:
return _frosted_blur_radius
def set_frosted_blur_radius(val: float) -> None:
global _frosted_blur_radius
_frosted_blur_radius = val
def get_frosted_tint_intensity() -> float:
return _frosted_tint_intensity
def set_frosted_tint_intensity(val: float) -> None:
global _frosted_tint_intensity
_frosted_tint_intensity = val
def get_frosted_opacity() -> float:
return _frosted_opacity
def set_frosted_opacity(val: float) -> None:
global _frosted_opacity
_frosted_opacity = val
def apply(palette_name: str) -> None:
"""
Apply a named palette by setting all ImGui style colors and applying global professional styling.
@@ -350,13 +382,17 @@ def save_to_config(config: dict) -> None:
config["theme"]["scale"] = _current_scale
config["theme"]["transparency"] = _transparency
config["theme"]["child_transparency"] = _child_transparency
config["theme"]["frosted_glass_enabled"] = _frosted_glass_enabled
config["theme"]["frosted_blur_radius"] = _frosted_blur_radius
config["theme"]["frosted_tint_intensity"] = _frosted_tint_intensity
config["theme"]["frosted_opacity"] = _frosted_opacity
sys.stderr.write(f"[DEBUG theme_2] save_to_config: palette={_current_palette}, transparency={_transparency}\n")
sys.stderr.flush()
def load_from_config(config: dict) -> None:
"""Read [theme] from config. Font is handled separately at startup."""
import sys
global _current_font_path, _current_font_size, _current_scale, _current_palette, _transparency, _child_transparency
global _current_font_path, _current_font_size, _current_scale, _current_palette, _transparency, _child_transparency, _frosted_glass_enabled, _frosted_blur_radius, _frosted_tint_intensity, _frosted_opacity
t = config.get("theme", {})
sys.stderr.write(f"[DEBUG theme_2] load_from_config raw: {t}\n")
sys.stderr.flush()
@@ -369,6 +405,10 @@ def load_from_config(config: dict) -> None:
_current_scale = float(t.get("scale", 1.0))
_transparency = float(t.get("transparency", 1.0))
_child_transparency = float(t.get("child_transparency", 1.0))
_frosted_glass_enabled = bool(t.get("frosted_glass_enabled", False))
_frosted_blur_radius = float(t.get("frosted_blur_radius", 8.0))
_frosted_tint_intensity = float(t.get("frosted_tint_intensity", 0.1))
_frosted_opacity = float(t.get("frosted_opacity", 1.0))
sys.stderr.write(f"[DEBUG theme_2] load_from_config effective: palette={_current_palette}, transparency={_transparency}\n")
sys.stderr.flush()
-53
View File
@@ -1,53 +0,0 @@
import re
from typing import List, Tuple
from src.models import ThinkingSegment
def parse_thinking_trace(text: str) -> Tuple[List[ThinkingSegment], str]:
"""
Parses thinking segments from text and returns (segments, response_content).
Support extraction of thinking traces from <thinking>...</thinking>, <thought>...</thought>,
and blocks prefixed with Thinking:.
"""
segments = []
# 1. Extract <thinking> and <thought> tags
current_text = text
# Combined pattern for tags
tag_pattern = re.compile(r'<(thinking|thought)>(.*?)</\1>', re.DOTALL | re.IGNORECASE)
def extract_tags(txt: str) -> Tuple[List[ThinkingSegment], str]:
found_segments = []
def replace_func(match):
marker = match.group(1).lower()
content = match.group(2).strip()
found_segments.append(ThinkingSegment(content=content, marker=marker))
return ""
remaining = tag_pattern.sub(replace_func, txt)
return found_segments, remaining
tag_segments, remaining = extract_tags(current_text)
segments.extend(tag_segments)
# 2. Extract Thinking: prefix
# This usually appears at the start of a block and ends with a double newline or a response marker.
thinking_colon_pattern = re.compile(r'(?:^|\n)Thinking:\s*(.*?)(?:\n\n|\nResponse:|\nAnswer:|$)', re.DOTALL | re.IGNORECASE)
def extract_colon_blocks(txt: str) -> Tuple[List[ThinkingSegment], str]:
found_segments = []
def replace_func(match):
content = match.group(1).strip()
if content:
found_segments.append(ThinkingSegment(content=content, marker="Thinking:"))
return "\n\n"
res = thinking_colon_pattern.sub(replace_func, txt)
return found_segments, res
colon_segments, final_remaining = extract_colon_blocks(remaining)
segments.extend(colon_segments)
return segments, final_remaining.strip()
-72
View File
@@ -1,72 +0,0 @@
import tomllib
import tomli_w
from pathlib import Path
from typing import Dict, Any, Optional, Union
from src.models import WorkspaceProfile
from src import paths
class WorkspaceManager:
"""Manages Workspace profiles across global and project-specific files."""
def __init__(self, project_root: Optional[Union[str, Path]] = None):
if project_root:
self.project_root = Path(project_root).resolve()
else:
self.project_root = None
def _get_path(self, scope: str) -> Path:
if scope == "global":
return paths.get_global_workspace_profiles_path()
elif scope == "project":
if not self.project_root:
raise ValueError("Project root is not set, cannot resolve project scope.")
return paths.get_project_workspace_profiles_path(self.project_root)
else:
raise ValueError("Invalid scope, must be 'global' or 'project'")
def load_all_profiles(self) -> Dict[str, WorkspaceProfile]:
"""Merges global and project profiles into a single dictionary."""
profiles = {}
global_path = paths.get_global_workspace_profiles_path()
global_data = self._load_file(global_path)
for name, data in global_data.get("profiles", {}).items():
profiles[name] = WorkspaceProfile.from_dict(name, data)
if self.project_root:
project_path = paths.get_project_workspace_profiles_path(self.project_root)
project_data = self._load_file(project_path)
for name, data in project_data.get("profiles", {}).items():
profiles[name] = WorkspaceProfile.from_dict(name, data)
return profiles
def save_profile(self, profile: WorkspaceProfile, scope: str = "project") -> None:
path = self._get_path(scope)
data = self._load_file(path)
if "profiles" not in data:
data["profiles"] = {}
data["profiles"][profile.name] = profile.to_dict()
self._save_file(path, data)
def delete_profile(self, name: str, scope: str = "project") -> None:
path = self._get_path(scope)
data = self._load_file(path)
if "profiles" in data and name in data["profiles"]:
del data["profiles"][name]
self._save_file(path, data)
def _load_file(self, path: Path) -> Dict[str, Any]:
if not path.exists():
return {}
try:
with open(path, "rb") as f:
return tomllib.load(f)
except Exception:
return {}
def _save_file(self, path: Path, data: Dict[str, Any]) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
with open(path, "wb") as f:
tomli_w.dump(data, f)
BIN
View File
Binary file not shown.
-42
View File
@@ -1,42 +0,0 @@
#pragma once
#include <string>
#include <memory>
#include <vector>
namespace gencpp {
namespace core {
/**
* @brief Base class for all components in the system.
*/
template <typename T>
class BaseComponent {
public:
virtual ~BaseComponent() = default;
virtual void Initialize() = 0;
virtual void Shutdown() = 0;
virtual const std::string& GetName() const = 0;
struct Config {
std::string name;
int priority;
bool enabled;
class Metadata {
public:
std::string author;
std::string version;
};
Metadata metadata;
};
protected:
BaseComponent(const Config& config) : m_config(config) {}
Config m_config;
};
} // namespace core
} // namespace gencpp
@@ -1,54 +0,0 @@
#pragma once
#include <tuple>
#include <utility>
namespace gencpp {
namespace util {
/**
* @brief A complex template class demonstrating variadic templates.
*/
template <typename... Args>
class MultiBuffer {
public:
void SetData(Args... args) {
m_data = std::make_tuple(args...);
}
template <size_t I>
auto Get() const -> const typename std::tuple_element<I, std::tuple<Args...>>::type& {
return std::get<I>(m_data);
}
private:
std::tuple<Args...> m_data;
};
/**
* @brief Template specialization example.
*/
template <typename T>
struct TypeTraits {
static constexpr bool IsPointer = false;
};
template <typename T>
struct TypeTraits<T*> {
static constexpr bool IsPointer = true;
};
/**
* @brief Nested template class.
*/
template <typename Outer>
struct Container {
template <typename Inner>
struct Wrapper {
Inner value;
Outer context;
};
};
} // namespace util
} // namespace gencpp
@@ -1,26 +0,0 @@
#include "component_registry.h"
#include <iostream>
namespace gencpp {
namespace registry {
ComponentRegistry& ComponentRegistry::Instance() {
static ComponentRegistry instance;
return instance;
}
void ComponentRegistry::Register(const std::string& type, ComponentCreator creator) {
std::cout << "Registering component type: " << type << std::endl;
m_creators[type] = creator;
}
std::unique_ptr<core::BaseComponent<void*>> ComponentRegistry::Create(const std::string& type) {
auto it = m_creators.find(type);
if (it != m_creators.end()) {
return it->second();
}
return nullptr;
}
} // namespace registry
} // namespace gencpp
@@ -1,43 +0,0 @@
#pragma once
#include "base_component.h"
#include <map>
#include <string>
#include <functional>
namespace gencpp {
namespace registry {
class ComponentRegistry {
public:
using ComponentCreator = std::function<std::unique_ptr<core::BaseComponent<void*>>()>;
static ComponentRegistry& Instance();
void Register(const std::string& type, ComponentCreator creator);
std::unique_ptr<core::BaseComponent<void*>> Create(const std::string& type);
class Iterator {
public:
using MapIterator = std::map<std::string, ComponentCreator>::iterator;
Iterator(MapIterator it) : m_it(it) {}
bool operator!=(const Iterator& other) const { return m_it != other.m_it; }
void operator++() { ++m_it; }
const std::string& GetType() const { return m_it->first; }
private:
MapIterator m_it;
};
Iterator Begin() { return Iterator(m_creators.begin()); }
Iterator End() { return Iterator(m_creators.end()); }
private:
ComponentRegistry() = default;
std::map<std::string, ComponentCreator> m_creators;
};
} // namespace registry
} // namespace gencpp
-457
View File
@@ -1,457 +0,0 @@
#ifdef INTELLISENSE_DIRECTIVES
#pragma once
#include "parser_types.hpp"
#endif
/*
______ ______ ________ __ __ ______ __
/ \ / \| \ | \ | \ / \ | \
| \ \\ | \ | | \ ______ ____| ______
| __| ___\ | | \| | \/ \ / / \
| \ \ | | \ | | \ \
| _\\ | | \ | __| | |
| | \__| | | \ | __/ \ __/ __|
| | \ | | \ \ \ \ \ \
\ \ \ \ \ \ \ \ \ \
*/
struct AST;
struct AST_Body;
struct AST_Attributes;
struct AST_Comment;
struct AST_Constructor;
// struct AST_BaseClass;
struct AST_Class;
struct AST_Define;
struct AST_DefineParams;
struct AST_Destructor;
struct AST_Enum;
struct AST_Exec;
struct AST_Extern;
struct AST_Include;
struct AST_Friend;
struct AST_Fn;
struct AST_Module;
struct AST_NS;
struct AST_Operator;
struct AST_OpCast;
struct AST_Params;
struct AST_Pragma;
struct AST_PreprocessCond;
struct AST_Specifiers;
#ifdef GEN_EXECUTION_EXPRESSION_SUPPORT
struct AST_Expr;
struct AST_Expr_Assign;
struct AST_Expr_Alignof;
struct AST_Expr_Binary;
struct AST_Expr_CStyleCast;
struct AST_Expr_FunctionalCast;
struct AST_Expr_CppCast;
struct AST_Expr_ProcCall;
struct AST_Expr_Decltype;
struct AST_Expr_Comma; // TODO(Ed) : This is a binary op not sure if it needs its own AST...
struct AST_Expr_AMS; // Access Member Symbol
struct AST_Expr_Sizeof;
struct AST_Expr_Subscript;
struct AST_Expr_Ternary;
struct AST_Expr_UnaryPrefix;
struct AST_Expr_UnaryPostfix;
struct AST_Expr_Element;
struct AST_Stmt;
struct AST_Stmt_Break;
struct AST_Stmt_Case;
struct AST_Stmt_Continue;
struct AST_Stmt_Decl;
struct AST_Stmt_Do;
struct AST_Stmt_Expr; // TODO(Ed) : Is this distinction needed? (Should it be a flag instead?)
struct AST_Stmt_Else;
struct AST_Stmt_If;
struct AST_Stmt_For;
struct AST_Stmt_Goto;
struct AST_Stmt_Label;
struct AST_Stmt_Switch;
struct AST_Stmt_While;
#endif
struct AST_Struct;
struct AST_Template;
struct AST_Typename;
struct AST_Typedef;
struct AST_Union;
struct AST_Using;
struct AST_Var;
#if GEN_COMPILER_C
typedef AST* Code;
#else
struct Code;
#endif
#if GEN_COMPILER_C
typedef AST_Body* CodeBody;
typedef AST_Attributes* CodeAttributes;
typedef AST_Comment* CodeComment;
typedef AST_Class* CodeClass;
typedef AST_Constructor* CodeConstructor;
typedef AST_Define* CodeDefine;
typedef AST_DefineParams* CodeDefineParams;
typedef AST_Destructor* CodeDestructor;
typedef AST_Enum* CodeEnum;
typedef AST_Exec* CodeExec;
typedef AST_Extern* CodeExtern;
typedef AST_Include* CodeInclude;
typedef AST_Friend* CodeFriend;
typedef AST_Fn* CodeFn;
typedef AST_Module* CodeModule;
typedef AST_NS* CodeNS;
typedef AST_Operator* CodeOperator;
typedef AST_OpCast* CodeOpCast;
typedef AST_Params* CodeParams;
typedef AST_PreprocessCond* CodePreprocessCond;
typedef AST_Pragma* CodePragma;
typedef AST_Specifiers* CodeSpecifiers;
#else
struct CodeBody;
struct CodeAttributes;
struct CodeComment;
struct CodeClass;
struct CodeConstructor;
struct CodeDefine;
struct CodeDefineParams;
struct CodeDestructor;
struct CodeEnum;
struct CodeExec;
struct CodeExtern;
struct CodeInclude;
struct CodeFriend;
struct CodeFn;
struct CodeModule;
struct CodeNS;
struct CodeOperator;
struct CodeOpCast;
struct CodeParams;
struct CodePreprocessCond;
struct CodePragma;
struct CodeSpecifiers;
#endif
#ifdef GEN_EXECUTION_EXPRESSION_SUPPORT
#if GEN_COMPILER_C
typedef AST_Expr* CodeExpr;
typedef AST_Expr_Assign* CodeExpr_Assign;
typedef AST_Expr_Alignof* CodeExpr_Alignof;
typedef AST_Expr_Binary* CodeExpr_Binary;
typedef AST_Expr_CStyleCast* CodeExpr_CStyleCast;
typedef AST_Expr_FunctionalCast* CodeExpr_FunctionalCast;
typedef AST_Expr_CppCast* CodeExpr_CppCast;
typedef AST_Expr_Element* CodeExpr_Element;
typedef AST_Expr_ProcCall* CodeExpr_ProcCall;
typedef AST_Expr_Decltype* CodeExpr_Decltype;
typedef AST_Expr_Comma* CodeExpr_Comma;
typedef AST_Expr_AMS* CodeExpr_AMS; // Access Member Symbol
typedef AST_Expr_Sizeof* CodeExpr_Sizeof;
typedef AST_Expr_Subscript* CodeExpr_Subscript;
typedef AST_Expr_Ternary* CodeExpr_Ternary;
typedef AST_Expr_UnaryPrefix* CodeExpr_UnaryPrefix;
typedef AST_Expr_UnaryPostfix* CodeExpr_UnaryPostfix;
#else
struct CodeExpr;
struct CodeExpr_Assign;
struct CodeExpr_Alignof;
struct CodeExpr_Binary;
struct CodeExpr_CStyleCast;
struct CodeExpr_FunctionalCast;
struct CodeExpr_CppCast;
struct CodeExpr_Element;
struct CodeExpr_ProcCall;
struct CodeExpr_Decltype;
struct CodeExpr_Comma;
struct CodeExpr_AMS; // Access Member Symbol
struct CodeExpr_Sizeof;
struct CodeExpr_Subscript;
struct CodeExpr_Ternary;
struct CodeExpr_UnaryPrefix;
struct CodeExpr_UnaryPostfix;
#endif
#if GEN_COMPILER_C
typedef AST_Stmt* CodeStmt;
typedef AST_Stmt_Break* CodeStmt_Break;
typedef AST_Stmt_Case* CodeStmt_Case;
typedef AST_Stmt_Continue* CodeStmt_Continue;
typedef AST_Stmt_Decl* CodeStmt_Decl;
typedef AST_Stmt_Do* CodeStmt_Do;
typedef AST_Stmt_Expr* CodeStmt_Expr;
typedef AST_Stmt_Else* CodeStmt_Else;
typedef AST_Stmt_If* CodeStmt_If;
typedef AST_Stmt_For* CodeStmt_For;
typedef AST_Stmt_Goto* CodeStmt_Goto;
typedef AST_Stmt_Label* CodeStmt_Label;
typedef AST_Stmt_Lambda* CodeStmt_Lambda;
typedef AST_Stmt_Switch* CodeStmt_Switch;
typedef AST_Stmt_While* CodeStmt_While;
#else
struct CodeStmt;
struct CodeStmt_Break;
struct CodeStmt_Case;
struct CodeStmt_Continue;
struct CodeStmt_Decl;
struct CodeStmt_Do;
struct CodeStmt_Expr;
struct CodeStmt_Else;
struct CodeStmt_If;
struct CodeStmt_For;
struct CodeStmt_Goto;
struct CodeStmt_Label;
struct CodeStmt_Lambda;
struct CodeStmt_Switch;
struct CodeStmt_While;
#endif
// GEN_EXECUTION_EXPRESSION_SUPPORT
#endif
#if GEN_COMPILER_C
typedef AST_Struct* CodeStruct;
typedef AST_Template* CodeTemplate;
typedef AST_Typename* CodeTypename;
typedef AST_Typedef* CodeTypedef;
typedef AST_Union* CodeUnion;
typedef AST_Using* CodeUsing;
typedef AST_Var* CodeVar;
#else
struct CodeStruct;
struct CodeTemplate;
struct CodeTypename;
struct CodeTypedef;
struct CodeUnion;
struct CodeUsing;
struct CodeVar;
#endif
#if GEN_COMPILER_CPP
template< class Type> forceinline Type tmpl_cast( Code self ) { return * rcast( Type*, & self ); }
#endif
#pragma region Code C-Interface
void code_append (Code code, Code other );
GEN_API Str code_debug_str (Code code);
GEN_API Code code_duplicate (Code code);
Code* code_entry (Code code, u32 idx );
bool code_has_entries (Code code);
bool code_is_body (Code code);
GEN_API bool code_is_equal (Code code, Code other);
bool code_is_valid (Code code);
void code_set_global (Code code);
GEN_API StrBuilder code_to_strbuilder (Code self );
GEN_API void code_to_strbuilder_ref(Code self, StrBuilder* result );
Str code_type_str (Code self );
GEN_API bool code_validate_body (Code self );
#pragma endregion Code C-Interface
#if GEN_COMPILER_CPP
/*
AST* wrapper
- Not constantly have to append the '*' as this is written often..
- Allows for implicit conversion to any of the ASTs (raw or filtered).
*/
struct Code
{
AST* ast;
# define Using_Code( Typename ) \
forceinline Str debug_str() { return code_debug_str(* this); } \
forceinline Code duplicate() { return code_duplicate(* this); } \
forceinline bool is_equal( Code other ) { return code_is_equal(* this, other); } \
forceinline bool is_body() { return code_is_body(* this); } \
forceinline bool is_valid() { return code_is_valid(* this); } \
forceinline void set_global() { return code_set_global(* this); }
# define Using_CodeOps( Typename ) \
forceinline Typename& operator = ( Code other ); \
forceinline bool operator ==( Code other ) { return (AST*)ast == other.ast; } \
forceinline bool operator !=( Code other ) { return (AST*)ast != other.ast; } \
forceinline bool operator ==(std::nullptr_t) const { return ast == nullptr; } \
forceinline bool operator !=(std::nullptr_t) const { return ast != nullptr; } \
operator bool();
#if ! GEN_C_LIKE_CPP
Using_Code( Code );
forceinline void append(Code other) { return code_append(* this, other); }
forceinline Code* entry(u32 idx) { return code_entry(* this, idx); }
forceinline bool has_entries() { return code_has_entries(* this); }
forceinline StrBuilder to_strbuilder() { return code_to_strbuilder(* this); }
forceinline void to_strbuilder(StrBuilder& result) { return code_to_strbuilder_ref(* this, & result); }
forceinline Str type_str() { return code_type_str(* this); }
forceinline bool validate_body() { return code_validate_body(*this); }
#endif
Using_CodeOps( Code );
forceinline Code operator *() { return * this; } // Required to support for-range iteration.
forceinline AST* operator ->() { return ast; }
Code& operator ++();
#ifdef GEN_ENFORCE_STRONG_CODE_TYPES
# define operator explicit operator
#endif
operator CodeBody() const;
operator CodeAttributes() const;
// operator CodeBaseClass() const;
operator CodeComment() const;
operator CodeClass() const;
operator CodeConstructor() const;
operator CodeDefine() const;
operator CodeDefineParams() const;
operator CodeDestructor() const;
operator CodeExec() const;
operator CodeEnum() const;
operator CodeExtern() const;
operator CodeInclude() const;
operator CodeFriend() const;
operator CodeFn() const;
operator CodeModule() const;
operator CodeNS() const;
operator CodeOperator() const;
operator CodeOpCast() const;
operator CodeParams() const;
operator CodePragma() const;
operator CodePreprocessCond() const;
operator CodeSpecifiers() const;
operator CodeStruct() const;
operator CodeTemplate() const;
operator CodeTypename() const;
operator CodeTypedef() const;
operator CodeUnion() const;
operator CodeUsing() const;
operator CodeVar() const;
#undef operator
};
#endif
#pragma region Statics
// Used to identify ASTs that should always be duplicated. (Global constant ASTs)
GEN_API extern Code Code_Global;
// Used to identify invalid generated code.
GEN_API extern Code Code_Invalid;
#pragma endregion Statics
struct Code_POD
{
AST* ast;
};
static_assert( sizeof(Code) == sizeof(Code_POD), "ERROR: Code is not POD" );
// Desired width of the AST data structure.
constexpr int const AST_POD_Size = 128;
constexpr static
int AST_ArrSpecs_Cap =
(
AST_POD_Size
- sizeof(Code)
- sizeof(StrCached)
- sizeof(Code) * 2
- sizeof(Token*)
- sizeof(Code)
- sizeof(CodeType)
- sizeof(ModuleFlag)
- sizeof(u32)
)
/ sizeof(Specifier) - 1;
/*
Simple AST POD with functionality to seralize into C++ syntax.
TODO(Ed): Eventually haven't a transparent AST like this will longer be viable once statements & expressions are in (most likely....)
*/
struct AST
{
union {
struct
{
Code InlineCmt; // Class, Constructor, Destructor, Enum, Friend, Functon, Operator, OpCast, Struct, Typedef, Using, Variable
Code Attributes; // Class, Enum, Function, Struct, Typedef, Union, Using, Variable // TODO(Ed): Parameters can have attributes
Code Specs; // Class, Destructor, Function, Operator, Struct, Typename, Variable
union {
Code InitializerList; // Constructor
Code ParentType; // Class, Struct, ParentType->Next has a possible list of interfaces.
Code ReturnType; // Function, Operator, Typename
Code UnderlyingType; // Enum, Typedef
Code ValueType; // Parameter, Variable
};
union {
Code Macro; // Parameter
Code BitfieldSize; // Variable (Class/Struct Data Member)
Code Params; // Constructor, Define, Function, Operator, Template, Typename
Code UnderlyingTypeMacro; // Enum
};
union {
Code ArrExpr; // Typename
Code Body; // Class, Constructor, Define, Destructor, Enum, Friend, Function, Namespace, Struct, Union
Code Declaration; // Friend, Template
Code Value; // Parameter, Variable
};
union {
Code NextVar; // Variable
Code SuffixSpecs; // Typename, Function (Thanks Unreal)
Code PostNameMacro; // Only used with parameters for specifically UE_REQUIRES (Thanks Unreal)
};
};
StrCached Content; // Attributes, Comment, Execution, Include
TokenSlice ContentToks; // TODO(Ed): Use a token slice for content
struct {
Specifier ArrSpecs[AST_ArrSpecs_Cap]; // Specifiers
Code NextSpecs; // Specifiers; If ArrSpecs is full, then NextSpecs is used.
};
};
StrCached Name;
union {
Code Prev;
Code Front;
Code Last;
};
union {
Code Next;
Code Back;
};
Token* Token; // Reference to starting token, only available if it was derived from parsing. // TODO(Ed): Change this to a token slice.
Code Parent;
CodeType Type;
// CodeFlag CodeFlags;
ModuleFlag ModuleFlags;
union {
b32 IsFunction; // Used by typedef to not serialize the name field.
struct {
b16 IsParamPack; // Used by typename to know if type should be considered a parameter pack.
ETypenameTag TypeTag; // Used by typename to keep track of explicitly declared tags for the identifier (enum, struct, union)
};
Operator Op;
AccessSpec ParentAccess;
s32 NumEntries;
s32 VarParenthesizedInit; // Used by variables to know that initialization is using a constructor expression instead of an assignment expression.
};
};
static_assert( sizeof(AST) == AST_POD_Size, "ERROR: AST is not size of AST_POD_Size" );
#if GEN_COMPILER_CPP
// Uses an implicitly overloaded cast from the AST to the desired code type.
// Necessary if the user wants GEN_ENFORCE_STRONG_CODE_TYPES
struct InvalidCode_ImplictCaster;
#define InvalidCode (InvalidCode_ImplictCaster{})
#else
#define InvalidCode (void*){ (void*)Code_Invalid }
#endif
#if GEN_COMPILER_CPP
struct NullCode_ImplicitCaster;
// Used when the its desired when omission is allowed in a definition.
#define NullCode (NullCode_ImplicitCaster{})
#else
#define NullCode nullptr
#endif

Some files were not shown because too many files have changed in this diff Show More