fix(gui): Correct indentation bug in _render_mma_dashboard that caused crash

The code after the 'prior session' return block was incorrectly indented
at 1 space, placing it inside the 'if is_viewing_prior_session' block
instead of after it. This caused 'total_cost' and 'perc' to be undefined
when viewing an active session, triggering an IM_ASSERT error.

Fix: Moved 'track_name', 'track_stats', and 'total_cost' to the
correct 2-space indentation (method body level).
This commit is contained in:
2026-05-06 19:41:22 -04:00
parent 6bd052efc5
commit f6feab9243
9 changed files with 356 additions and 12 deletions
+39 -10
View File
@@ -3,6 +3,39 @@ import re
import ast
from collections import Counter
class ScopeAuditor(ast.NodeVisitor):
    """AST visitor that records definition names per lexical scope and
    reports names defined more than once within the same scope.

    Findings are appended as strings to the list passed to __init__.
    After visiting a tree, the caller must pop the remaining root scope
    entry and run check_duplicates on it to catch top-level duplicates
    (as audit_file does).
    """

    def __init__(self, findings):
        # Stack of (names_defined_in_scope, dotted_scope_label); the
        # root entry's empty label means "module top level".
        self.scope_stack = [([], "")]
        self.findings = findings

    def _enter_scope(self, node):
        # Shared scope bookkeeping for classes and (async) functions:
        # record the definition in the enclosing scope, push a new scope
        # labeled with the dotted path, recurse, then pop and check it.
        self.scope_stack[-1][0].append(node.name)
        parent_label = self.scope_stack[-1][1]
        new_label = f"{parent_label}.{node.name}" if parent_label else node.name
        self.scope_stack.append(([], new_label))
        self.generic_visit(node)
        defs, label = self.scope_stack.pop()
        self.check_duplicates(defs, label)

    def visit_ClassDef(self, node):
        self._enter_scope(node)

    def visit_FunctionDef(self, node):
        self._enter_scope(node)

    def visit_AsyncFunctionDef(self, node):
        # Async defs scope identically to regular functions.
        self._enter_scope(node)

    def check_duplicates(self, defs, label):
        """Append a finding for every name appearing more than once in *defs*."""
        counts = Counter(defs)
        for name, count in counts.items():
            if count > 1:
                scope_str = f" in scope '{label}'" if label else " at top-level"
                self.findings.append(f"Duplicate definition{scope_str}: '{name}' ({count} times)")
def audit_file(path):
with open(path, 'r', encoding='utf-8') as f:
lines = f.readlines()
@@ -31,18 +64,14 @@ def audit_file(path):
findings.append(f"Mixed indentation: 4-space block found at line {i+1}")
break # Only report once per file
# 4. List all functions and classes that appear more than once
# 4. List all functions and classes that appear more than once in the same scope
try:
tree = ast.parse(content)
defs = []
for node in ast.walk(tree):
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
defs.append(node.name)
def_counts = Counter(defs)
for name, count in def_counts.items():
if count > 1:
findings.append(f"Duplicate definition: '{name}' ({count} times)")
auditor = ScopeAuditor(findings)
auditor.visit(tree)
if auditor.scope_stack:
defs, label = auditor.scope_stack.pop()
auditor.check_duplicates(defs, label)
except Exception as e:
findings.append(f"AST Parse Error: {e}")
+103
View File
@@ -0,0 +1,103 @@
import ast
import sys
import pathlib
class ImportCollector(ast.NodeTransformer):
    """Strips every import statement from a tree while keeping a record of it.

    Returning None from a visit method removes the node from the tree;
    the removed nodes are stashed in ``collected_imports`` in source order.
    """

    def __init__(self):
        self.collected_imports = []

    def _take(self, node):
        # Remember the node, then drop it from the tree.
        self.collected_imports.append(node)
        return None

    # Plain and from-imports are handled identically.
    visit_Import = _take
    visit_ImportFrom = _take
class PassFiller(ast.NodeTransformer):
    """Ensures that blocks that became empty after import removal have a 'pass' statement."""

    def generic_visit(self, node):
        super().generic_visit(node)
        body = getattr(node, "body", None)
        # A module is allowed to be empty; any other node with an emptied
        # statement list needs a `pass` to stay syntactically valid.
        needs_pass = (
            isinstance(body, list)
            and not body
            and not isinstance(node, ast.Module)
        )
        if needs_pass:
            body.append(ast.Pass())
        return node
def fix_imports(file_path):
    """Hoist all import statements in *file_path* to the top of the module.

    Imports found anywhere in the file (including inside functions and
    classes) are removed, de-duplicated, ordered with ``__future__`` imports
    first, and re-inserted at module level just after the module docstring.
    Blocks emptied by the removal get a ``pass`` so the file stays valid.
    The file is rewritten in place only when the result actually differs;
    progress and errors are reported via print.
    """
    path = pathlib.Path(file_path)
    if not path.exists():
        print(f"File not found: {file_path}")
        return
    try:
        content = path.read_text(encoding='utf-8')
        tree = ast.parse(content)
    except Exception as e:
        print(f"Error parsing {file_path}: {e}")
        return
    collector = ImportCollector()
    tree = collector.visit(tree)
    # Fill empty bodies with pass
    tree = PassFiller().visit(tree)
    if not collector.collected_imports:
        print(f"No imports to move in {file_path}")
        return
    # De-duplicate while preserving order
    unique_imports = {}
    for node in collector.collected_imports:
        try:
            # We use unparse to identify identical imports
            code = ast.unparse(node).strip()
            if code not in unique_imports:
                unique_imports[code] = node
        except Exception:
            # Best effort: skip nodes that cannot be unparsed rather than
            # abort. (Was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.)
            continue
    if not unique_imports:
        return
    # Sort: __future__ first (they must precede all other statements),
    # then everything else in original order.
    future_imports = []
    other_imports = []
    for code, node in unique_imports.items():
        if isinstance(node, ast.ImportFrom) and node.module == '__future__':
            future_imports.append(node)
        else:
            other_imports.append(node)
    all_to_insert = future_imports + other_imports
    # Find insertion point (after initial docstring)
    insertion_idx = 0
    if tree.body:
        first = tree.body[0]
        if (isinstance(first, ast.Expr) and
                isinstance(first.value, ast.Constant) and
                isinstance(first.value.value, str)):
            insertion_idx = 1
    # Insert imports
    for i, node in enumerate(all_to_insert):
        tree.body.insert(insertion_idx + i, node)
    try:
        new_code = ast.unparse(tree)
        # Basic check to avoid unnecessary writes
        if new_code.strip() != content.strip():
            path.write_text(new_code, encoding='utf-8')
            print(f"Updated {file_path}")
        else:
            print(f"No changes for {file_path}")
    except Exception as e:
        print(f"Error unparsing or writing {file_path}: {e}")
if __name__ == "__main__":
    # Every CLI argument is a path to rewrite in place.
    args = sys.argv[1:]
    if not args:
        print("Usage: python scripts/fix_imports.py <file1> <file2> ...")
    else:
        for target in args:
            fix_imports(target)
+65
View File
@@ -0,0 +1,65 @@
import tokenize
import io
import sys
import os
def force_1space(path):
try:
with open(path, 'rb') as f:
content = f.read()
tokens = list(tokenize.tokenize(io.BytesIO(content).readline))
except Exception:
return
col_to_level = {0: 0}
level = 0
for tok in tokens:
if tok.type == tokenize.INDENT:
level += 1
col_to_level[tok.end[1]] = level
elif tok.type == tokenize.DEDENT:
level -= 1
new_content = []
level = 0
last_line = -1
last_end = (1, 0)
for tok in tokens:
if tok.type == tokenize.ENCODING:
continue
if tok.type == tokenize.ENDMARKER:
break
+39
View File
@@ -0,0 +1,39 @@
import sys
import os
def standardize_file(file_path):
    """Rewrite *file_path* so each 4-space indent level becomes 1 space.

    Lines whose leading-space count is not a positive multiple of four
    (including blank and unindented lines) are copied through unchanged.
    Read/write failures are reported via print instead of raising.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as fh:
            original = fh.readlines()
    except Exception as err:
        print(f"Error reading {file_path}: {err}")
        return

    converted = []
    for raw in original:
        body = raw.lstrip(' ')
        indent = len(raw) - len(body)
        if indent and indent % 4 == 0:
            # One output space per 4-space indent level.
            converted.append(' ' * (indent // 4) + body)
        else:
            converted.append(raw)

    try:
        with open(file_path, 'w', encoding='utf-8') as fh:
            fh.writelines(converted)
    except Exception as err:
        print(f"Error writing {file_path}: {err}")
    else:
        print(f"Standardized {file_path}")
if __name__ == "__main__":
    paths = sys.argv[1:]
    if not paths:
        print("Usage: python scripts/standardize_indent.py <file_path1> <file_path2> ...")
        sys.exit(1)
    for candidate in paths:
        # Only regular files are rewritten; anything else is reported.
        if os.path.isfile(candidate):
            standardize_file(candidate)
        else:
            print(f"Skipping {candidate}: Not a file")