chore(cleanup): Remove unused scripts and artifacts from project root

This commit is contained in:
2026-03-04 09:51:51 -05:00
parent e8cd3e5e87
commit 1eb9d2923f
16 changed files with 4 additions and 7522 deletions

View File

@@ -8,7 +8,7 @@ This file tracks all major tracks for the project. Each track has its own detail
*The following tracks MUST be executed in this exact order to safely resolve tech debt before feature development.*
1. [ ] **Track: Codebase Migration to `src` & Cleanup**
1. [~] **Track: Codebase Migration to `src` & Cleanup**
*Link: [./tracks/codebase_migration_20260302/](./tracks/codebase_migration_20260302/)*
2. [ ] **Track: GUI Decoupling & Controller Architecture**

View File

@@ -1,17 +1,9 @@
# Implementation Plan: Codebase Migration to `src` & Cleanup (codebase_migration_20260302)
## Phase 1: Unused File Identification & Removal
- [ ] Task: Initialize MMA Environment `activate_skill mma-orchestrator`
- [ ] Task: Audit Codebase for Dead Files
- [ ] WHERE: Project root
- [ ] WHAT: Run `py_find_usages` or grep on suspected unused files to verify they are not referenced by `gui_2.py`, `tests/`, `simulation/`, or core config files.
- [ ] HOW: Gather a list of unused files.
- [ ] SAFETY: Do not delete files referenced in `.toml` files or Github action workflows.
- [ ] Task: Delete Unused Files
- [ ] WHERE: Project root
- [ ] WHAT: Use `run_powershell` with `Remove-Item` to delete the identified unused files.
- [ ] HOW: Explicitly list and delete them.
- [ ] SAFETY: Stage deletions to Git carefully.
- [x] Task: Initialize MMA Environment `activate_skill mma-orchestrator`
- [x] Task: Audit Codebase for Dead Files
- [x] Task: Delete Unused Files
- [ ] Task: Conductor - User Manual Verification 'Phase 1: Unused File Identification & Removal' (Protocol in workflow.md)
## Phase 2: Directory Restructuring & Migration

View File

@@ -1,17 +0,0 @@
role = "tier3-worker"
prompt = """FIX DeepSeek implementation in ai_client.py.
CONTEXT:
Several tests in @tests/test_deepseek_provider.py are failing (returning '(No text returned by the model)') because the current implementation of '_send_deepseek' in @ai_client.py forces 'stream=True' and expects SSE format, but the test mocks provide standard JSON responses.
TASK:
1. Modify '_send_deepseek' in @ai_client.py to handle the response correctly whether it is a stream or a standard JSON response.
- You should probably determine this based on the 'stream' value in the payload (which is currently hardcoded to True, but the implementation should be flexible).
- If 'stream' is True, use the iter_lines() logic to aggregate chunks.
- If 'stream' is False, use resp.json() to get the content.
2. Fix the 'NameError: name 'data' is not defined' and ensure 'usage' is correctly extracted.
3. Ensure 'full_content', 'full_reasoning' (thinking tags), and 'tool_calls' are correctly captured and added to the conversation history in both modes.
4. Ensure all tests in @tests/test_deepseek_provider.py pass.
OUTPUT: Provide the raw Python code for the modified '_send_deepseek' function."""
docs = ["ai_client.py", "tests/test_deepseek_provider.py"]

File diff suppressed because it is too large Load Diff

View File

@@ -1,35 +0,0 @@
# gemini.py
from __future__ import annotations
import tomllib
from typing import Any
from google import genai
# Lazily-initialized module-level singletons: the shared genai client and the
# active chat session. Both start as None, are created on first use by
# _ensure_client / _ensure_chat, and are discarded by reset_session().
_client: genai.Client | None = None
_chat: Any = None
def _load_key() -> str:
    """Read the Gemini API key from credentials.toml in the working directory."""
    with open("credentials.toml", "rb") as fh:
        credentials = tomllib.load(fh)
    return credentials["gemini"]["api_key"]
def _ensure_client() -> None:
    """Create the shared genai client on first call; later calls are no-ops."""
    global _client
    if _client is not None:
        return
    _client = genai.Client(api_key=_load_key())
def _ensure_chat() -> None:
    """Create the shared chat session (and the client it needs) on first call."""
    global _chat
    if _chat is not None:
        return
    _ensure_client()
    _chat = _client.chats.create(model="gemini-2.0-flash")
def send(md_content: str, user_message: str) -> str:
    """Send user_message to the chat, with md_content wrapped as <context>.

    Returns the model's reply text. Reuses (or lazily creates) the shared
    chat session, so conversation history accumulates across calls.
    """
    global _chat
    _ensure_chat()
    wrapped = f"<context>\n{md_content}\n</context>\n\n{user_message}"
    return _chat.send_message(wrapped).text
def reset_session() -> None:
    """Forget the cached client and chat so the next send() starts fresh."""
    global _client, _chat
    _client, _chat = None, None

View File

@@ -1,2 +0,0 @@
@echo off
REM Convenience wrapper: run the get_file_summary tool through the
REM uv-managed Python environment.
uv run python scripts/tool_call.py get_file_summary

Binary file not shown.

Binary file not shown.

View File

@@ -1,10 +0,0 @@
role = "tier3-worker"
prompt = """Implement strict type hints for ALL functions and methods in @gui_2.py.
1. Use specific types (e.g., dict[str, Any], list[str], Union[str, Path], etc.) for arguments and returns.
2. Maintain the 'AI-Optimized' style: 1-space indentation, NO blank lines within function bodies, and maximum 1 blank line between definitions.
3. Since this file is very large, you MUST use surgical tools (discovered_tool_py_update_definition, discovered_tool_py_set_signature, discovered_tool_py_set_var_declaration) to apply changes. Do NOT try to overwrite the entire file at once.
4. Do NOT change any logic.
5. Use discovered_tool_py_check_syntax after each major change to verify syntax.
6. Ensure 'from typing import Any, Union, Optional, Callable' etc. are present as needed (note: 'dict' and 'list' are NOT importable from typing — use the builtin generics dict[str, Any] and list[str] directly).
7. Focus on completing the task efficiently without hitting timeouts."""
docs = ["gui_2.py", "conductor/workflow.md"]

View File

@@ -1,21 +0,0 @@
import subprocess
import sys
def test_type_hints() -> None:
    """Run the type-hint scanner over key modules and exit nonzero on problems.

    Exits 0 only when every scanned file produces no scanner output AND the
    scanner process itself succeeded. A scanner crash is reported as a
    failure instead of being silently treated as "no missing hints".
    """
    files = ["project_manager.py", "session_logger.py"]
    all_missing: list[str] = []
    for f in files:
        print(f"Scanning {f}...")
        result = subprocess.run(
            ["uv", "run", "python", "scripts/type_hint_scanner.py", f],
            capture_output=True,
            text=True,
        )
        if result.returncode != 0:
            # Bug fix: previously a failed scanner run (nonzero exit, empty
            # stdout) looked identical to a clean file and produced SUCCESS.
            print(f"Scanner failed for {f} (exit {result.returncode}):\n{result.stderr}")
            all_missing.append(f)
        elif result.stdout.strip():
            print(f"Missing hints in {f}:\n{result.stdout}")
            all_missing.append(f)
    if all_missing:
        print(f"FAILURE: Missing type hints in: {', '.join(all_missing)}")
        sys.exit(1)
    else:
        print("SUCCESS: All functions have type hints.")
        sys.exit(0)
if __name__ == "__main__":
    test_type_hints()

View File

@@ -1,113 +0,0 @@
import argparse
import sys
import tomllib
import pytest
from typing import Dict, List, Any
def load_manifest(path: str) -> Dict[str, Any]:
"""
Loads a manifest file (expected to be in TOML format) from the given path.
Args:
path: The path to the TOML manifest file.
Returns:
A dictionary representing the loaded manifest.
Raises:
FileNotFoundError: If the manifest file does not exist.
tomllib.TOMLDecodeError: If the manifest file is not valid TOML.
"""
try:
with open(path, 'rb') as f:
return tomllib.load(f)
except FileNotFoundError:
print(f"Error: Manifest file not found at {path}", file=sys.stderr)
raise
except tomllib.TOMLDecodeError:
print(f"Error: Could not decode TOML from {path}", file=sys.stderr)
raise
def get_test_files(manifest: Dict[str, Any], category: str) -> List[str]:
    """Return the list of test files for *category*, or [] when absent.

    Missing 'categories' tables, unknown categories, and categories without
    a 'files' key all yield an empty list rather than raising.
    """
    print(f"DEBUG: Looking for category '{category}' in manifest.", file=sys.stderr)
    categories = manifest.get("categories", {})
    files = categories.get(category, {}).get("files", [])
    print(f"DEBUG: Found test files for category '{category}': {files}", file=sys.stderr)
    return files
def main() -> None:
    """CLI entry point: select test files via a TOML manifest/category and run pytest.

    Exits with pytest's return code on a normal run, or 1 on argument or
    manifest errors. Unrecognized arguments are forwarded to pytest verbatim.
    """
    parser = argparse.ArgumentParser(
        description="Run tests with optional manifest and category filtering, passing additional pytest arguments.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""\
Example usage:
python run_tests.py --manifest tests.toml --category unit -- --verbose --cov=my_module
python run_tests.py --manifest tests.toml --category integration
python run_tests.py --manifest tests.toml --category core
python run_tests.py --manifest tests.toml # Runs tests from default_categories
python run_tests.py -- --capture=no # Runs all tests with pytest args
"""
    )
    parser.add_argument(
        "--manifest",
        type=str,
        help="Path to the TOML manifest file containing test configurations."
    )
    parser.add_argument(
        "--category",
        type=str,
        help="Category of tests to run (e.g., 'unit', 'integration')."
    )
    # Parse known arguments for the script itself, then parse remaining args for pytest
    args, remaining_pytest_args = parser.parse_known_args(sys.argv[1:])
    selected_test_files = []
    manifest_data = None
    if args.manifest:
        try:
            manifest_data = load_manifest(args.manifest)
        except (FileNotFoundError, tomllib.TOMLDecodeError):
            # Error message already printed by load_manifest
            sys.exit(1)
        if args.category:
            # Case 1: --manifest and --category provided
            files = get_test_files(manifest_data, args.category)
            selected_test_files.extend(files)
        else:
            # Case 2: --manifest provided, but no --category
            # Load default categories from manifest['execution']['default_categories']
            default_categories = manifest_data.get("execution", {}).get("default_categories", [])
            if not default_categories:
                print(f"Error: --manifest provided without --category, and no 'default_categories' found in manifest '{args.manifest}'.", file=sys.stderr)
                parser.print_help(sys.stderr)
                sys.exit(1)
            print(f"DEBUG: Using default categories from manifest '{args.manifest}': {default_categories}", file=sys.stderr)
            for cat in default_categories:
                files = get_test_files(manifest_data, cat)
                selected_test_files.extend(files)
    elif args.category:
        # Case 3: --category provided without --manifest
        print("Error: --category requires --manifest to be specified.", file=sys.stderr)
        parser.print_help(sys.stderr)
        sys.exit(1)
    # Combine selected test files with any remaining pytest arguments that were not parsed by this script.
    # We also filter out the literal '--' if it was passed by the user to avoid pytest errors if it appears multiple times.
    pytest_command_args = selected_test_files + [arg for arg in remaining_pytest_args if arg != '--']
    # Filter out any empty strings that might have been included.
    final_pytest_args = [arg for arg in pytest_command_args if arg]
    # If no specific tests were selected from manifest/category and no manifest was provided,
    # and no other pytest args were given, pytest.main([]) runs default test discovery.
    print(f"Running pytest with arguments: {final_pytest_args}", file=sys.stderr)
    sys.exit(pytest.main(final_pytest_args))
if __name__ == "__main__":
    main()

View File

@@ -1,3 +0,0 @@
role = "tier3-worker"
prompt = "Read @ai_client.py and describe the current placeholder implementation of _send_deepseek. Just a one-sentence summary."
docs = ["ai_client.py"]

View File

@@ -1,31 +0,0 @@
Files with untyped items: 25
File NoRet Params Vars Total
-------------------------------------------------------------------------------------
./debug_ast.py 1 2 4 7
./tests/visual_mma_verification.py 0 0 4 4
./debug_ast_2.py 0 0 3 3
./scripts/cli_tool_bridge.py 1 0 1 2
./scripts/mcp_server.py 0 0 2 2
./tests/test_gui_diagnostics.py 0 0 2 2
./tests/test_gui_updates.py 0 0 2 2
./tests/test_layout_reorganization.py 0 0 2 2
./scripts/check_hints.py 0 0 1 1
./scripts/check_hints_v2.py 0 0 1 1
./scripts/claude_tool_bridge.py 0 0 1 1
./scripts/type_hint_scanner.py 1 0 0 1
./tests/mock_alias_tool.py 0 0 1 1
./tests/test_gemini_cli_adapter_parity.py 0 0 1 1
./tests/test_gui2_parity.py 0 0 1 1
./tests/test_gui2_performance.py 0 0 1 1
./tests/test_gui_performance_requirements.py 0 1 0 1
./tests/test_gui_stress_performance.py 0 1 0 1
./tests/test_hooks.py 0 1 0 1
./tests/test_live_workflow.py 0 1 0 1
./tests/test_track_state_persistence.py 0 1 0 1
./tests/verify_mma_gui_robust.py 0 0 1 1
./tests/visual_diag.py 0 0 1 1
./tests/visual_orchestration_verification.py 0 1 0 1
./tests/visual_sim_mma_v2.py 0 1 0 1
-------------------------------------------------------------------------------------
TOTAL 41

View File

@@ -1,17 +0,0 @@
role = "tier3-worker"
prompt = """TASK: Implement streaming support for the DeepSeek provider in ai_client.py and add failing tests.
INSTRUCTIONS:
1. In @tests/test_deepseek_provider.py:
- Add a test function 'test_deepseek_streaming' that mocks a streaming API response using 'requests.post(..., stream=True)'.
- Use 'mock_response.iter_lines()' to simulate chunks of data.
- Assert that 'ai_client.send()' correctly aggregates these chunks into a single string.
2. In @ai_client.py:
- Modify the '_send_deepseek' function to use 'requests.post(..., stream=True)'.
- Implement a loop to iterate over the response lines using 'iter_lines()'.
- Aggregate the content from each chunk.
- Ensure the aggregated content is added to the history and returned by the function.
OUTPUT: Provide the raw Python code for the modified sections or the full files. No pleasantries."""
docs = ["conductor/workflow.md", "ai_client.py", "tests/test_deepseek_provider.py", "mcp_client.py"]

View File

@@ -1,30 +0,0 @@
import unittest
from pathlib import Path
import project_manager
class TestMMAPersistence(unittest.TestCase):
    """Verifies the 'mma' section survives project creation and save/load."""

    def test_default_project_has_mma(self) -> None:
        # A freshly-created project must ship an empty MMA scaffold.
        project = project_manager.default_project("test")
        self.assertIn("mma", project)
        self.assertEqual(
            project["mma"], {"epic": "", "active_track_id": "", "tracks": []}
        )

    def test_save_load_mma(self) -> None:
        # Round-trip a project carrying MMA data through save/load.
        project = project_manager.default_project("test")
        project["mma"] = {"epic": "Test Epic", "tracks": [{"id": "track_1"}]}
        project_path = Path("test_mma_proj.toml")
        try:
            project_manager.save_project(project, project_path)
            reloaded = project_manager.load_project(project_path)
            self.assertIn("mma", reloaded)
            self.assertEqual(reloaded["mma"]["epic"], "Test Epic")
            self.assertEqual(len(reloaded["mma"]["tracks"]), 1)
        finally:
            # Remove the project file and the history sidecar the save creates.
            for leftover in (project_path, Path("test_mma_proj_history.toml")):
                if leftover.exists():
                    leftover.unlink()
if __name__ == "__main__":
    unittest.main()

View File