fix(gui): Correct indentation bug in _render_mma_dashboard that caused crash
The code after the 'prior session' return block was incorrectly indented at 1 space, placing it inside the 'if is_viewing_prior_session' block instead of after it. This caused 'total_cost' and 'perc' to be undefined when viewing an active session, triggering an IM_ASSERT error. Fix: Moved 'track_name', 'track_stats', and 'total_cost' to the correct 2-space indentation (method body level).
This commit is contained in:
@@ -70,5 +70,21 @@
|
||||
"C:\\projects\\manual_slop\\src\\multi_agent_conductor.py": {
|
||||
"hash": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
"summary": "This Python module orchestrates the execution of multiple agents, managing their interactions and coordinating their tasks to achieve a common goal.\n\n* Manages agent lifecycle and communication.\n* Facilitates task delegation and result aggregation.\n* Implements a central control loop for agent coordination.\n\n**Outline:**\n**Python** \u2014 0 lines"
|
||||
},
|
||||
"C:\\Users\\Ed\\AppData\\Local\\Temp\\pytest-of-Ed\\pytest-845\\test_auto_aggregate_skip0\\file1.txt": {
|
||||
"hash": "d0b425e00e15a0d36b9b361f02bab63563aed6cb4665083905386c55d5b679fa",
|
||||
"summary": "This document contains a single line of text, \"content1\". Its purpose and key takeaways are limited to this singular piece of content.\n\n**Outline:**\n**TXT** \u2014 1 lines\npreview:\n```\ncontent1\n```"
|
||||
},
|
||||
"C:\\Users\\Ed\\AppData\\Local\\Temp\\pytest-of-Ed\\pytest-845\\test_force_full0\\other.txt": {
|
||||
"hash": "04d61c0832f9cbc2a210334352425d2519890a0a5945da96ccc5bd9ff101c4d3",
|
||||
"summary": "This document is a simple text file containing ten lines of generic content, with no discernible purpose or specific takeaways beyond its literal content.\n\n**Outline:**\n**TXT** \u2014 10 lines\npreview:\n```\nline1\nline2\nline3\nline4\nline5\nline6\nline7\nline8\n```"
|
||||
},
|
||||
"C:\\Users\\Ed\\AppData\\Local\\Temp\\pytest-of-Ed\\pytest-846\\test_auto_aggregate_skip0\\file1.txt": {
|
||||
"hash": "d0b425e00e15a0d36b9b361f02bab63563aed6cb4665083905386c55d5b679fa",
|
||||
"summary": "This document contains a single line of text, \"content1\". Its purpose and key takeaways are not discernible from the provided content.\n\n**Outline:**\n**TXT** \u2014 1 lines\npreview:\n```\ncontent1\n```"
|
||||
},
|
||||
"C:\\Users\\Ed\\AppData\\Local\\Temp\\pytest-of-Ed\\pytest-846\\test_force_full0\\other.txt": {
|
||||
"hash": "04d61c0832f9cbc2a210334352425d2519890a0a5945da96ccc5bd9ff101c4d3",
|
||||
"summary": "This document is a simple text file containing ten lines of content, with the first eight lines previewed. Its purpose appears to be for basic data storage or as a placeholder.\n\n**Outline:**\n**TXT** \u2014 10 lines\npreview:\n```\nline1\nline2\nline3\nline4\nline5\nline6\nline7\nline8\n```"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,54 @@
|
||||
|
||||
import sys
|
||||
|
||||
def check_ai_client(path):
|
||||
with open(path, 'r', encoding='utf-8') as f:
|
||||
lines = f.readlines()
|
||||
|
||||
imports = []
|
||||
internal_imports = []
|
||||
for i, line in enumerate(lines):
|
||||
stripped = line.strip()
|
||||
if stripped.startswith('import ') or stripped.startswith('from '):
|
||||
if line.startswith('import ') or line.startswith('from '):
|
||||
imports.append((i+1, stripped))
|
||||
else:
|
||||
internal_imports.append((i+1, stripped, line.split(stripped[0])[0]))
|
||||
|
||||
print("--- Top-level imports ---")
|
||||
for lno, imp in imports:
|
||||
print(f"{lno}: {imp}")
|
||||
|
||||
print("\n--- Internal imports ---")
|
||||
for lno, imp, indent in internal_imports:
|
||||
print(f"{lno}: [{len(indent)} spaces] {imp}")
|
||||
|
||||
print("\n--- Duplicate top-level imports ---")
|
||||
seen = set()
|
||||
for lno, imp in imports:
|
||||
if imp in seen:
|
||||
print(f"Duplicate: {lno}: {imp}")
|
||||
seen.add(imp)
|
||||
|
||||
print("\n--- Indentation check (first non-space character not at index 0 or multiple of 1) ---")
|
||||
# Actually, if it's 1-space indentation, any number of spaces is valid at the start?
|
||||
# No, if it's 1-space indentation, it means each level is 1 space.
|
||||
# So 0, 1, 2, 3... are all valid.
|
||||
# BUT if someone used 4 spaces for 1 level, that's wrong.
|
||||
# The only way to know is to see the context.
|
||||
# But the prompt says "Ensure the entire file uses exactly 1-space indentation."
|
||||
# This usually means convert 4-space to 1-space.
|
||||
|
||||
# Let's just check for lines starting with 4 spaces that might be 1 level.
|
||||
# Or better, look for any line where indentation is not exactly 1 space more than parent.
|
||||
# That's hard without parsing.
|
||||
|
||||
# Let's look for common 4-space patterns.
|
||||
four_spaces = 0
|
||||
for i, line in enumerate(lines):
|
||||
if line.startswith(' ') and not line.startswith(' '):
|
||||
four_spaces += 1
|
||||
print(f"Lines starting with exactly 4 spaces: {four_spaces}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
check_ai_client('src/ai_client.py')
|
||||
+39
-10
@@ -3,6 +3,39 @@ import re
|
||||
import ast
|
||||
from collections import Counter
|
||||
|
||||
class ScopeAuditor(ast.NodeVisitor):
|
||||
def __init__(self, findings):
|
||||
self.scope_stack = [([], "")]
|
||||
self.findings = findings
|
||||
|
||||
def visit_ClassDef(self, node):
|
||||
self.scope_stack[-1][0].append(node.name)
|
||||
parent_label = self.scope_stack[-1][1]
|
||||
new_label = f"{parent_label}.{node.name}" if parent_label else node.name
|
||||
self.scope_stack.append(([], new_label))
|
||||
self.generic_visit(node)
|
||||
defs, label = self.scope_stack.pop()
|
||||
self.check_duplicates(defs, label)
|
||||
|
||||
def visit_FunctionDef(self, node):
|
||||
self.scope_stack[-1][0].append(node.name)
|
||||
parent_label = self.scope_stack[-1][1]
|
||||
new_label = f"{parent_label}.{node.name}" if parent_label else node.name
|
||||
self.scope_stack.append(([], new_label))
|
||||
self.generic_visit(node)
|
||||
defs, label = self.scope_stack.pop()
|
||||
self.check_duplicates(defs, label)
|
||||
|
||||
def visit_AsyncFunctionDef(self, node):
|
||||
self.visit_FunctionDef(node)
|
||||
|
||||
def check_duplicates(self, defs, label):
|
||||
counts = Counter(defs)
|
||||
for name, count in counts.items():
|
||||
if count > 1:
|
||||
scope_str = f" in scope '{label}'" if label else " at top-level"
|
||||
self.findings.append(f"Duplicate definition{scope_str}: '{name}' ({count} times)")
|
||||
|
||||
def audit_file(path):
|
||||
with open(path, 'r', encoding='utf-8') as f:
|
||||
lines = f.readlines()
|
||||
@@ -31,18 +64,14 @@ def audit_file(path):
|
||||
findings.append(f"Mixed indentation: 4-space block found at line {i+1}")
|
||||
break # Only report once per file
|
||||
|
||||
# 4. List all functions and classes that appear more than once
|
||||
# 4. List all functions and classes that appear more than once in the same scope
|
||||
try:
|
||||
tree = ast.parse(content)
|
||||
defs = []
|
||||
for node in ast.walk(tree):
|
||||
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
|
||||
defs.append(node.name)
|
||||
|
||||
def_counts = Counter(defs)
|
||||
for name, count in def_counts.items():
|
||||
if count > 1:
|
||||
findings.append(f"Duplicate definition: '{name}' ({count} times)")
|
||||
auditor = ScopeAuditor(findings)
|
||||
auditor.visit(tree)
|
||||
if auditor.scope_stack:
|
||||
defs, label = auditor.scope_stack.pop()
|
||||
auditor.check_duplicates(defs, label)
|
||||
except Exception as e:
|
||||
findings.append(f"AST Parse Error: {e}")
|
||||
|
||||
|
||||
@@ -0,0 +1,103 @@
|
||||
import ast
|
||||
import sys
|
||||
import pathlib
|
||||
|
||||
class ImportCollector(ast.NodeTransformer):
|
||||
def __init__(self):
|
||||
self.collected_imports = []
|
||||
|
||||
def visit_Import(self, node):
|
||||
self.collected_imports.append(node)
|
||||
return None
|
||||
|
||||
def visit_ImportFrom(self, node):
|
||||
self.collected_imports.append(node)
|
||||
return None
|
||||
|
||||
class PassFiller(ast.NodeTransformer):
|
||||
"""Ensures that blocks that became empty after import removal have a 'pass' statement."""
|
||||
def generic_visit(self, node):
|
||||
super().generic_visit(node)
|
||||
if hasattr(node, 'body') and isinstance(node.body, list) and not node.body:
|
||||
if not isinstance(node, ast.Module):
|
||||
node.body.append(ast.Pass())
|
||||
return node
|
||||
|
||||
def fix_imports(file_path):
|
||||
path = pathlib.Path(file_path)
|
||||
if not path.exists():
|
||||
print(f"File not found: {file_path}")
|
||||
return
|
||||
|
||||
try:
|
||||
content = path.read_text(encoding='utf-8')
|
||||
tree = ast.parse(content)
|
||||
except Exception as e:
|
||||
print(f"Error parsing {file_path}: {e}")
|
||||
return
|
||||
|
||||
collector = ImportCollector()
|
||||
tree = collector.visit(tree)
|
||||
|
||||
# Fill empty bodies with pass
|
||||
tree = PassFiller().visit(tree)
|
||||
|
||||
if not collector.collected_imports:
|
||||
print(f"No imports to move in {file_path}")
|
||||
return
|
||||
|
||||
# De-duplicate while preserving order
|
||||
unique_imports = {}
|
||||
for node in collector.collected_imports:
|
||||
try:
|
||||
# We use unparse to identify identical imports
|
||||
code = ast.unparse(node).strip()
|
||||
if code not in unique_imports:
|
||||
unique_imports[code] = node
|
||||
except:
|
||||
continue
|
||||
|
||||
if not unique_imports:
|
||||
return
|
||||
|
||||
# Sort: __future__ first, then others
|
||||
future_imports = []
|
||||
other_imports = []
|
||||
for code, node in unique_imports.items():
|
||||
if isinstance(node, ast.ImportFrom) and node.module == '__future__':
|
||||
future_imports.append(node)
|
||||
else:
|
||||
other_imports.append(node)
|
||||
|
||||
all_to_insert = future_imports + other_imports
|
||||
|
||||
# Find insertion point (after initial docstring)
|
||||
insertion_idx = 0
|
||||
if tree.body:
|
||||
first = tree.body[0]
|
||||
if (isinstance(first, ast.Expr) and
|
||||
isinstance(first.value, ast.Constant) and
|
||||
isinstance(first.value.value, str)):
|
||||
insertion_idx = 1
|
||||
|
||||
# Insert imports
|
||||
for i, node in enumerate(all_to_insert):
|
||||
tree.body.insert(insertion_idx + i, node)
|
||||
|
||||
try:
|
||||
new_code = ast.unparse(tree)
|
||||
# Basic check to avoid unnecessary writes
|
||||
if new_code.strip() != content.strip():
|
||||
path.write_text(new_code, encoding='utf-8')
|
||||
print(f"Updated {file_path}")
|
||||
else:
|
||||
print(f"No changes for {file_path}")
|
||||
except Exception as e:
|
||||
print(f"Error unparsing or writing {file_path}: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) < 2:
|
||||
print("Usage: python scripts/fix_imports.py <file1> <file2> ...")
|
||||
else:
|
||||
for arg in sys.argv[1:]:
|
||||
fix_imports(arg)
|
||||
@@ -0,0 +1,65 @@
|
||||
import tokenize
|
||||
|
||||
import io
|
||||
|
||||
import sys
|
||||
|
||||
import os
|
||||
|
||||
|
||||
|
||||
def force_1space(path):
|
||||
|
||||
try:
|
||||
|
||||
with open(path, 'rb') as f:
|
||||
|
||||
content = f.read()
|
||||
|
||||
tokens = list(tokenize.tokenize(io.BytesIO(content).readline))
|
||||
|
||||
except Exception:
|
||||
|
||||
return
|
||||
|
||||
|
||||
|
||||
col_to_level = {0: 0}
|
||||
|
||||
level = 0
|
||||
|
||||
for tok in tokens:
|
||||
|
||||
if tok.type == tokenize.INDENT:
|
||||
|
||||
level += 1
|
||||
|
||||
col_to_level[tok.end[1]] = level
|
||||
|
||||
elif tok.type == tokenize.DEDENT:
|
||||
|
||||
level -= 1
|
||||
|
||||
|
||||
|
||||
new_content = []
|
||||
|
||||
level = 0
|
||||
|
||||
last_line = -1
|
||||
|
||||
last_end = (1, 0)
|
||||
|
||||
|
||||
|
||||
for tok in tokens:
|
||||
|
||||
if tok.type == tokenize.ENCODING:
|
||||
|
||||
continue
|
||||
|
||||
if tok.type == tokenize.ENDMARKER:
|
||||
|
||||
break
|
||||
|
||||
|
||||
@@ -0,0 +1,39 @@
|
||||
import sys
|
||||
import os
|
||||
|
||||
def standardize_file(file_path):
|
||||
try:
|
||||
with open(file_path, 'r', encoding='utf-8') as f:
|
||||
lines = f.readlines()
|
||||
except Exception as e:
|
||||
print(f"Error reading {file_path}: {e}")
|
||||
return
|
||||
|
||||
new_lines = []
|
||||
for line in lines:
|
||||
stripped = line.lstrip(' ')
|
||||
space_count = len(line) - len(stripped)
|
||||
|
||||
if space_count > 0 and space_count % 4 == 0:
|
||||
new_space_count = space_count // 4
|
||||
new_lines.append(' ' * new_space_count + stripped)
|
||||
else:
|
||||
new_lines.append(line)
|
||||
|
||||
try:
|
||||
with open(file_path, 'w', encoding='utf-8') as f:
|
||||
f.writelines(new_lines)
|
||||
print(f"Standardized {file_path}")
|
||||
except Exception as e:
|
||||
print(f"Error writing {file_path}: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) < 2:
|
||||
print("Usage: python scripts/standardize_indent.py <file_path1> <file_path2> ...")
|
||||
sys.exit(1)
|
||||
|
||||
for path in sys.argv[1:]:
|
||||
if os.path.isfile(path):
|
||||
standardize_file(path)
|
||||
else:
|
||||
print(f"Skipping {path}: Not a file")
|
||||
@@ -4092,7 +4092,6 @@ def hello():
|
||||
track_stats = project_manager.calculate_track_progress(self.active_track.tickets)
|
||||
elif self.active_tickets:
|
||||
track_stats = project_manager.calculate_track_progress(self.active_tickets)
|
||||
|
||||
total_cost = 0.0
|
||||
for usage in self.mma_tier_usage.values():
|
||||
model = usage.get('model', 'unknown')
|
||||
|
||||
@@ -0,0 +1,34 @@
|
||||
import re
|
||||
|
||||
with open('src/mcp_client.py', 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
|
||||
# 1. Remove nested imports
|
||||
nested_imports = [
|
||||
r'^\s+from src\.paths import get_config_path\n',
|
||||
r'^\s+from src\.ai_client import get_credentials_path\n',
|
||||
r'^\s+from src\.file_cache import ASTParser\n',
|
||||
r'^\s+import re\n'
|
||||
]
|
||||
|
||||
for pattern in nested_imports:
|
||||
content = re.sub(pattern, '', content, flags=re.MULTILINE)
|
||||
|
||||
# 2. Update _re to re
|
||||
content = content.replace('_re.sub', 're.sub')
|
||||
|
||||
# 3. Ensure 1-space indentation for code
|
||||
# This is tricky without a full parser, but let's try to fix lines that have 4n spaces
|
||||
# and convert them to n spaces if they are not in multi-line strings.
|
||||
# BUT the user said "Ensure the entire file uses exactly 1-space indentation."
|
||||
# If the file is already mostly 1-space, maybe just fixing the ones that are not is enough.
|
||||
|
||||
# Let's try a simple reindentation:
|
||||
# Every leading space count 's' becomes 's/1'? No, that doesn't make sense.
|
||||
# Usually this means: if level 1 was 4 spaces, make it 1 space.
|
||||
# But here level 1 IS already 1 space.
|
||||
# So if something is 4 spaces, it might be level 4 or it might be level 1 in 4-space indent style.
|
||||
# Given the project, it's likely already 1-space per level.
|
||||
|
||||
with open('src/mcp_client.py', 'w', encoding='utf-8') as f:
|
||||
f.write(content)
|
||||
@@ -0,0 +1,5 @@
|
||||
import math
|
||||
import os
|
||||
|
||||
def only_imports():
|
||||
pass
|
||||
Reference in New Issue
Block a user