Compare commits
2 Commits
7f748b8eb9
...
5c6e93e1dd
| Author | SHA1 | Date | |
|---|---|---|---|
| 5c6e93e1dd | |||
| 72000c18d5 |
@@ -0,0 +1,42 @@
|
||||
# Track Debrief: Tech Debt & Test Discipline Cleanup (tech_debt_and_test_cleanup_20260302)
|
||||
|
||||
## Status: Botched / Partially Resolved
|
||||
**CRITICAL NOTE:** This track was initialized with a flawed specification and executed with insufficient validation rigor. While some deduplication goals were achieved, it introduced significant regressions and left the test suite in a fractured state.
|
||||
|
||||
### 1. Specification Failures
|
||||
- **Incorrect "Dead Code" Identification:** The spec incorrectly marked essential FastAPI endpoints (Remote Confirmation Protocol) as "leftovers." Removing them broke `test_headless_service.py` and the application's documented headless features. These had to be re-added mid-track.
|
||||
- **Underestimated Dependency Complexity:** The spec assumed `app_instance` could be globally centralized without accounting for unique patching requirements in several files (e.g., `test_gui2_events.py`, `test_mma_dashboard_refresh.py`).
|
||||
|
||||
### 2. Removed / Modified Tests
|
||||
- **Deleted:** `tests/test_ast_parser_curated.py` (Confirmed as a duplicate of `tests/test_ast_parser.py`).
|
||||
- **Fixture Removal:** Local `app_instance` and `mock_app` fixtures were removed from the following files, now resolving from `tests/conftest.py`:
|
||||
- `tests/test_gui2_layout.py`
|
||||
- `tests/test_gui2_mcp.py`
|
||||
- `tests/test_gui_phase3.py`
|
||||
- `tests/test_gui_phase4.py`
|
||||
- `tests/test_gui_streaming.py`
|
||||
- `tests/test_live_gui_integration.py`
|
||||
- `tests/test_mma_agent_focus_phase1.py`
|
||||
- `tests/test_mma_agent_focus_phase3.py`
|
||||
- `tests/test_mma_orchestration_gui.py`
|
||||
- `tests/test_mma_ticket_actions.py`
|
||||
- `tests/test_token_viz.py`
|
||||
|
||||
### 3. Exposed Zero-Assertion Tests (Marked with `pytest.fail`)
|
||||
The following tests now fail loudly to prevent false-positive coverage:
|
||||
- `tests/test_agent_capabilities.py`
|
||||
- `tests/test_agent_tools_wiring.py`
|
||||
- `tests/test_api_events.py::test_send_emits_events`
|
||||
- `tests/test_execution_engine.py::test_execution_engine_update_nonexistent_task`
|
||||
- `tests/test_token_usage.py`
|
||||
- `tests/test_vlogger_availability.py`
|
||||
|
||||
### 4. Known Regressions / Unresolved Issues
|
||||
- **Simulation Failures:** `test_extended_sims.py::test_context_sim_live` fails with `AssertionError: Expected at least 2 entries, found 0`.
|
||||
- **Asyncio RuntimeErrors:** Widespread `RuntimeError: Event loop is closed` warnings and potential hangs in `test_spawn_interception.py` (partially addressed but not fully stable).
|
||||
- **Broken Logic:** The centralization of fixtures may have masked subtle timing issues in UI event processing that were previously "fixed" by local, idiosyncratic patches.
|
||||
|
||||
### 5. Guidance for Tier 1 / Next Track
|
||||
- **Immediate Priority:** The next track MUST focus on stabilizing and repairing the testing suite. Do not attempt further feature implementation until the `Event loop is closed` errors and simulation failures are resolved.
|
||||
- **Audit Requirement:** Re-audit all files where fixtures were removed to ensure no side-effect-heavy patches were lost.
|
||||
- **Validation Mandate:** Future Tech Lead agents MUST be forbidden from claiming "passed perfectly" without a verifiable, green `pytest` output for the full suite.
|
||||
@@ -11,7 +11,7 @@ This file tracks all major tracks for the project. Each track has its own detail
|
||||
## Completed / Archived
|
||||
|
||||
- [x] **Track: Tech Debt & Test Discipline Cleanup**
|
||||
*Link: [./tracks/tech_debt_and_test_cleanup_20260302/](./tracks/tech_debt_and_test_cleanup_20260302/)*
|
||||
*Link: [./archive/tech_debt_and_test_cleanup_20260302/](./archive/tech_debt_and_test_cleanup_20260302/)*
|
||||
|
||||
- [x] **Track: Conductor Workflow Improvements**
|
||||
*Link: [./archive/conductor_workflow_improvements_20260302/](./archive/conductor_workflow_improvements_20260302/)*
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
discussion = []
|
||||
|
||||
[metadata]
|
||||
id = "track-1"
|
||||
name = "Test Track"
|
||||
status = "idle"
|
||||
created_at = "2026-03-02T21:03:18.362973"
|
||||
updated_at = "2026-03-02T21:04:22.986795"
|
||||
|
||||
[[tasks]]
|
||||
id = "T-001"
|
||||
description = "desc"
|
||||
status = "todo"
|
||||
assigned_to = "tier3-worker"
|
||||
context_requirements = []
|
||||
depends_on = []
|
||||
step_mode = false
|
||||
retry_count = 0
|
||||
@@ -1,6 +1,6 @@
|
||||
[ai]
|
||||
provider = "gemini_cli"
|
||||
model = "gemini-2.0-flash"
|
||||
model = "gemini-2.5-flash-lite"
|
||||
temperature = 0.0
|
||||
max_tokens = 8192
|
||||
history_trunc_limit = 8000
|
||||
@@ -15,7 +15,7 @@ paths = [
|
||||
"C:\\projects\\manual_slop\\tests\\artifacts\\temp_livetoolssim.toml",
|
||||
"C:\\projects\\manual_slop\\tests\\artifacts\\temp_liveexecutionsim.toml",
|
||||
]
|
||||
active = "C:\\projects\\manual_slop\\tests\\artifacts\\temp_project.toml"
|
||||
active = "C:\\projects\\manual_slop\\tests\\artifacts\\temp_livetoolssim.toml"
|
||||
|
||||
[gui.show_windows]
|
||||
"Context Hub" = true
|
||||
|
||||
90
gui_2.py
90
gui_2.py
@@ -161,6 +161,7 @@ class GenerateRequest(BaseModel):
|
||||
|
||||
class ConfirmRequest(BaseModel):
    """Request body for POST /api/v1/confirm/{action_id}.

    Carries the caller's verdict on a pending PowerShell action, plus an
    optional replacement script.
    """

    # True approves the pending action; False rejects it.
    approved: bool
    # When provided, overrides the pending action's script before it runs.
    script: Optional[str] = None
|
||||
|
||||
class App:
|
||||
"""The main ImGui interface orchestrator for Manual Slop."""
|
||||
@@ -460,6 +461,21 @@ class App:
|
||||
return header_key
|
||||
raise HTTPException(status_code=403, detail="Could not validate API Key")
|
||||
|
||||
@api.get("/health")
def health() -> dict[str, str]:
    """Liveness probe for the API.

    Deliberately has no auth dependency so monitors can reach it
    without an API key.
    """
    payload = {"status": "ok"}
    return payload
|
||||
|
||||
@api.get("/status", dependencies=[Depends(get_api_key)])
def status() -> dict[str, Any]:
    """Report the active AI provider/model, AI status, and session usage."""
    snapshot: dict[str, Any] = {
        "provider": self.current_provider,
        "model": self.current_model,
        "status": self.ai_status,
        "usage": self.session_usage,
    }
    return snapshot
|
||||
|
||||
@api.post("/api/v1/generate", dependencies=[Depends(get_api_key)])
|
||||
def generate(req: GenerateRequest) -> dict[str, Any]:
|
||||
"""Triggers an AI generation request using the current project context."""
|
||||
@@ -523,6 +539,77 @@ class App:
|
||||
"""Placeholder for streaming AI generation responses (Not yet implemented)."""
|
||||
raise HTTPException(status_code=501, detail="Streaming endpoint (/api/v1/stream) is not yet supported in this version.")
|
||||
|
||||
@api.get("/api/v1/pending_actions", dependencies=[Depends(get_api_key)])
def pending_actions() -> list[dict[str, Any]]:
    """List every PowerShell script currently awaiting user confirmation."""
    # Build the snapshot while holding the lock so the GUI thread cannot
    # mutate the pending map mid-iteration.
    with self._pending_dialog_lock:
        entries: list[dict[str, Any]] = []
        for uid, diag in self._pending_actions.items():
            entries.append(
                {
                    "action_id": uid,
                    "script": diag._script,
                    "base_dir": diag._base_dir,
                }
            )
        return entries
|
||||
|
||||
@api.post("/api/v1/confirm/{action_id}", dependencies=[Depends(get_api_key)])
def confirm_action(action_id: str, req: ConfirmRequest) -> dict[str, str]:
    """Approve or reject a pending action and wake its waiting dialog.

    Raises:
        HTTPException: 404 when no pending action matches ``action_id``.
    """
    # Remove the dialog from the pending map under the map's own lock.
    with self._pending_dialog_lock:
        if action_id not in self._pending_actions:
            raise HTTPException(status_code=404, detail="Action not found")
        dialog = self._pending_actions.pop(action_id)
    # Caller may substitute an edited script before the action proceeds.
    if req.script is not None:
        dialog._script = req.script
    # Signal the thread blocked on the dialog's condition variable.
    # NOTE(review): lock ordering (pending lock, then condition) is
    # reconstructed from the visible statement order — confirm against
    # the dialog implementation.
    with dialog._condition:
        dialog._approved = req.approved
        dialog._done = True
        dialog._condition.notify_all()
    verdict = "confirmed" if req.approved else "rejected"
    return {"status": verdict}
|
||||
|
||||
@api.get("/api/v1/sessions", dependencies=[Depends(get_api_key)])
def list_sessions() -> list[str]:
    """Return the file names of all session logs under ./logs."""
    log_dir = Path("logs")
    if log_dir.exists():
        # Only *.log files count as sessions.
        return [entry.name for entry in log_dir.glob("*.log")]
    # No logs directory yet: nothing has been recorded.
    return []
|
||||
|
||||
@api.get("/api/v1/sessions/{session_id}", dependencies=[Depends(get_api_key)])
def get_session(session_id: str) -> dict[str, Any]:
    """Return the content of a specific session log.

    Args:
        session_id: Bare file name of the log, as returned by /api/v1/sessions.

    Raises:
        HTTPException: 404 when the name is not a plain file name or the
            log does not exist.
    """
    # Security: reject anything that is not a bare file name. Without this,
    # a crafted session_id (e.g. "..\\..\\secret" on Windows) joined onto
    # Path("logs") escapes the logs directory (path traversal).
    if not session_id or Path(session_id).name != session_id or session_id in (".", ".."):
        raise HTTPException(status_code=404, detail="Session log not found")
    log_path = Path("logs") / session_id
    if not log_path.exists():
        raise HTTPException(status_code=404, detail="Session log not found")
    # errors="replace" keeps partially corrupt logs readable instead of raising.
    return {"id": session_id, "content": log_path.read_text(encoding="utf-8", errors="replace")}
|
||||
|
||||
@api.delete("/api/v1/sessions/{session_id}", dependencies=[Depends(get_api_key)])
def delete_session(session_id: str) -> dict[str, str]:
    """Delete a specific session log.

    Args:
        session_id: Bare file name of the log, as returned by /api/v1/sessions.

    Raises:
        HTTPException: 404 when the name is not a plain file name or the
            log does not exist.
    """
    # Security: this endpoint unlinks a file, so path traversal here is
    # destructive. Reject any session_id that is not a bare file name
    # (e.g. "..\\..\\something" would escape Path("logs") on Windows).
    if not session_id or Path(session_id).name != session_id or session_id in (".", ".."):
        raise HTTPException(status_code=404, detail="Session log not found")
    log_path = Path("logs") / session_id
    if not log_path.exists():
        raise HTTPException(status_code=404, detail="Session log not found")
    log_path.unlink()
    return {"status": "deleted"}
|
||||
|
||||
@api.get("/api/v1/context", dependencies=[Depends(get_api_key)])
def get_context() -> dict[str, Any]:
    """Return the current aggregated project context.

    Raises:
        HTTPException: 500 when context aggregation fails for any reason.
    """
    try:
        md, path, file_items, stable_md, disc_text = self._do_generate()
        # Screenshots are optional in the project config; default to none.
        screenshots = self.project.get("screenshots", {}).get("paths", [])
        # File entries may be dicts (with a "path" key) or plain values.
        file_names = [
            item.get("path") if isinstance(item, dict) else str(item)
            for item in file_items
        ]
        return {
            "files": file_names,
            "screenshots": screenshots,
            "files_base_dir": self.ui_files_base_dir,
            "markdown": md,
            "discussion": disc_text,
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Context aggregation failure: {e}")
|
||||
|
||||
@api.get("/api/v1/token_stats", dependencies=[Depends(get_api_key)])
def token_stats() -> dict[str, Any]:
    """Expose the most recently computed token usage / budget statistics."""
    stats = self._token_stats
    return stats
|
||||
|
||||
return api
|
||||
# ---------------------------------------------------------------- project loading
|
||||
|
||||
@@ -1290,6 +1377,9 @@ class App:
|
||||
self.session_usage["last_latency"] = payload["latency"]
|
||||
self._recalculate_session_usage()
|
||||
|
||||
if md_content is not None:
|
||||
self._token_stats = ai_client.get_token_stats(md_content)
|
||||
|
||||
cache_stats = payload.get("cache_stats")
|
||||
if cache_stats:
|
||||
count = cache_stats.get("cache_count", 0)
|
||||
|
||||
@@ -8,5 +8,5 @@ active = "main"
|
||||
|
||||
[discussions.main]
|
||||
git_commit = ""
|
||||
last_updated = "2026-03-02T21:32:02"
|
||||
last_updated = "2026-03-02T21:58:42"
|
||||
history = []
|
||||
|
||||
Reference in New Issue
Block a user