chore: apply ruff auto-fixes and remove dead AST scripts
This commit is contained in:
1
.gitignore
vendored
1
.gitignore
vendored
@@ -13,3 +13,4 @@ dpg_layout.ini
|
||||
.env
|
||||
.coverage
|
||||
tests/temp_workspace
|
||||
.mypy_cache
|
||||
|
||||
@@ -256,7 +256,7 @@ def build_tier3_context(file_items: list[dict[str, Any]], screenshot_base_dir: P
|
||||
parser = ASTParser("python")
|
||||
skeleton = parser.get_skeleton(content)
|
||||
sections.append(f"### `{entry or path_str}` (AST Skeleton)\n\n```python\n{skeleton}\n```")
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
# Fallback to summary if AST parsing fails
|
||||
sections.append(f"### `{entry or path_str}`\n\n" + summarize.summarise_file(path, content))
|
||||
else:
|
||||
|
||||
10
ai_client.py
10
ai_client.py
@@ -21,7 +21,6 @@ import hashlib
|
||||
import difflib
|
||||
import threading
|
||||
import requests
|
||||
from pathlib import Path
|
||||
from typing import Optional, Callable, Any
|
||||
import os
|
||||
import project_manager
|
||||
@@ -1416,7 +1415,7 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
|
||||
def _ensure_deepseek_client() -> None:
|
||||
global _deepseek_client
|
||||
if _deepseek_client is None:
|
||||
creds = _load_credentials()
|
||||
_load_credentials()
|
||||
# Placeholder for Dedicated DeepSeek SDK instantiation
|
||||
# import deepseek
|
||||
# _deepseek_client = deepseek.DeepSeek(api_key=creds["deepseek"]["api_key"])
|
||||
@@ -1672,7 +1671,6 @@ def run_tier4_analysis(stderr: str) -> str:
|
||||
return f"[QA ANALYSIS FAILED] {e}"
|
||||
# ------------------------------------------------------------------ unified send
|
||||
|
||||
import json
|
||||
|
||||
|
||||
def send(
|
||||
@@ -1737,7 +1735,7 @@ def get_history_bleed_stats(md_content: str | None = None) -> dict[str, Any]:
|
||||
# For Anthropic, we have a robust estimator
|
||||
with _anthropic_history_lock:
|
||||
history_snapshot = list(_anthropic_history)
|
||||
hist_only = _estimate_prompt_tokens([], history_snapshot) - 2500 # subtract fixed tools
|
||||
_estimate_prompt_tokens([], history_snapshot) - 2500 # subtract fixed tools
|
||||
sys_tok = max(1, int(len(md_content) / _CHARS_PER_TOKEN)) if md_content else 0
|
||||
current_tokens = _estimate_prompt_tokens([], history_snapshot)
|
||||
if md_content:
|
||||
@@ -1784,7 +1782,7 @@ def get_history_bleed_stats(md_content: str | None = None) -> dict[str, Any]:
|
||||
"current": current_tokens,
|
||||
"percentage": percentage,
|
||||
}, sys_tok=0, tool_tok=0)
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
pass
|
||||
elif md_content:
|
||||
try:
|
||||
@@ -1801,7 +1799,7 @@ def get_history_bleed_stats(md_content: str | None = None) -> dict[str, Any]:
|
||||
"current": current_tokens,
|
||||
"percentage": percentage,
|
||||
})
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
pass
|
||||
return _add_bleed_derived({
|
||||
"provider": "gemini",
|
||||
|
||||
@@ -1,583 +0,0 @@
|
||||
|
||||
import os
|
||||
|
||||
path = 'ai_client.py'
|
||||
with open(path, 'r', encoding='utf-8') as f:
|
||||
lines = f.readlines()
|
||||
|
||||
# Very basic cleanup: remove lines after the first 'def get_history_bleed_stats'
|
||||
# or other markers of duplication if they exist.
|
||||
# Actually, I'll just rewrite the relevant functions and clean up the end of the file.
|
||||
|
||||
new_lines = []
|
||||
skip = False
|
||||
for line in lines:
|
||||
if 'def _send_gemini(' in line and 'stream_callback' in line:
|
||||
# This is my partially applied change, I'll keep it but fix it.
|
||||
pass
|
||||
if 'def send(' in line and 'import json' in lines[lines.index(line)-1]:
|
||||
# This looks like the duplicated send at the end
|
||||
skip = True
|
||||
if not skip:
|
||||
new_lines.append(line)
|
||||
if skip and 'return {' in line and 'percentage' in line:
|
||||
# End of duplicated get_history_bleed_stats
|
||||
# skip = False # actually just keep skipping till the end
|
||||
pass
|
||||
|
||||
# It's better to just surgically fix the file content in memory.
|
||||
content = "".join(new_lines)
|
||||
|
||||
# I'll use a more robust approach: I'll define the final versions of the functions I want to change.
|
||||
|
||||
_SEND_GEMINI_NEW = '''def _send_gemini(md_content: str, user_message: str, base_dir: str,
|
||||
file_items: list[dict[str, Any]] | None = None,
|
||||
discussion_history: str = "",
|
||||
pre_tool_callback: Optional[Callable[[str], bool]] = None,
|
||||
qa_callback: Optional[Callable[[str], str]] = None,
|
||||
enable_tools: bool = True,
|
||||
stream_callback: Optional[Callable[[str], None]] = None) -> str:
|
||||
global _gemini_chat, _gemini_cache, _gemini_cache_md_hash, _gemini_cache_created_at
|
||||
try:
|
||||
_ensure_gemini_client(); mcp_client.configure(file_items or [], [base_dir])
|
||||
# Only stable content (files + screenshots) goes in the cached system instruction.
|
||||
# Discussion history is sent as conversation messages so the cache isn't invalidated every turn.
|
||||
sys_instr = f"{_get_combined_system_prompt()}
|
||||
|
||||
<context>
|
||||
{md_content}
|
||||
</context>"
|
||||
td = _gemini_tool_declaration() if enable_tools else None
|
||||
tools_decl = [td] if td else None
|
||||
# DYNAMIC CONTEXT: Check if files/context changed mid-session
|
||||
current_md_hash = hashlib.md5(md_content.encode()).hexdigest()
|
||||
old_history = None
|
||||
if _gemini_chat and _gemini_cache_md_hash != current_md_hash:
|
||||
old_history = list(_get_gemini_history_list(_gemini_chat)) if _get_gemini_history_list(_gemini_chat) else []
|
||||
if _gemini_cache:
|
||||
try: _gemini_client.caches.delete(name=_gemini_cache.name)
|
||||
except Exception as e: _append_comms("OUT", "request", {"message": f"[CACHE DELETE WARN] {e}"})
|
||||
_gemini_chat = None
|
||||
_gemini_cache = None
|
||||
_gemini_cache_created_at = None
|
||||
_append_comms("OUT", "request", {"message": "[CONTEXT CHANGED] Rebuilding cache and chat session..."})
|
||||
if _gemini_chat and _gemini_cache and _gemini_cache_created_at:
|
||||
elapsed = time.time() - _gemini_cache_created_at
|
||||
if elapsed > _GEMINI_CACHE_TTL * 0.9:
|
||||
old_history = list(_get_gemini_history_list(_gemini_chat)) if _get_gemini_history_list(_get_gemini_history_list(_gemini_chat)) else []
|
||||
try: _gemini_client.caches.delete(name=_gemini_cache.name)
|
||||
except Exception as e: _append_comms("OUT", "request", {"message": f"[CACHE DELETE WARN] {e}"})
|
||||
_gemini_chat = None
|
||||
_gemini_cache = None
|
||||
_gemini_cache_created_at = None
|
||||
_append_comms("OUT", "request", {"message": f"[CACHE TTL] Rebuilding cache (expired after {int(elapsed)}s)..."})
|
||||
if not _gemini_chat:
|
||||
chat_config = types.GenerateContentConfig(
|
||||
system_instruction=sys_instr,
|
||||
tools=tools_decl,
|
||||
temperature=_temperature,
|
||||
max_output_tokens=_max_tokens,
|
||||
safety_settings=[types.SafetySetting(category="HARM_CATEGORY_DANGEROUS_CONTENT", threshold="BLOCK_ONLY_HIGH")]
|
||||
)
|
||||
should_cache = False
|
||||
try:
|
||||
count_resp = _gemini_client.models.count_tokens(model=_model, contents=[sys_instr])
|
||||
if count_resp.total_tokens >= 2048:
|
||||
should_cache = True
|
||||
else:
|
||||
_append_comms("OUT", "request", {"message": f"[CACHING SKIPPED] Context too small ({count_resp.total_tokens} tokens < 2048)"})
|
||||
except Exception as e:
|
||||
_append_comms("OUT", "request", {"message": f"[COUNT FAILED] {e}"})
|
||||
if should_cache:
|
||||
try:
|
||||
_gemini_cache = _gemini_client.caches.create(
|
||||
model=_model,
|
||||
config=types.CreateCachedContentConfig(
|
||||
system_instruction=sys_instr,
|
||||
tools=tools_decl,
|
||||
ttl=f"{_GEMINI_CACHE_TTL}s",
|
||||
)
|
||||
)
|
||||
_gemini_cache_created_at = time.time()
|
||||
chat_config = types.GenerateContentConfig(
|
||||
cached_content=_gemini_cache.name,
|
||||
temperature=_temperature,
|
||||
max_output_tokens=_max_tokens,
|
||||
safety_settings=[types.SafetySetting(category="HARM_CATEGORY_DANGEROUS_CONTENT", threshold="BLOCK_ONLY_HIGH")]
|
||||
)
|
||||
_append_comms("OUT", "request", {"message": f"[CACHE CREATED] {_gemini_cache.name}"})
|
||||
except Exception as e:
|
||||
_gemini_cache = None
|
||||
_gemini_cache_created_at = None
|
||||
_append_comms("OUT", "request", {"message": f"[CACHE FAILED] {type(e).__name__}: {e} \u2014 falling back to inline system_instruction"})
|
||||
kwargs = {"model": _model, "config": chat_config}
|
||||
if old_history:
|
||||
kwargs["history"] = old_history
|
||||
_gemini_chat = _gemini_client.chats.create(**kwargs)
|
||||
_gemini_cache_md_hash = current_md_hash
|
||||
if discussion_history and not old_history:
|
||||
_gemini_chat.send_message(f"[DISCUSSION HISTORY]
|
||||
|
||||
{discussion_history}")
|
||||
_append_comms("OUT", "request", {"message": f"[HISTORY INJECTED] {len(discussion_history)} chars"})
|
||||
_append_comms("OUT", "request", {"message": f"[ctx {len(md_content)} + msg {len(user_message)}]"})
|
||||
payload: str | list[types.Part] = user_message
|
||||
all_text: list[str] = []
|
||||
_cumulative_tool_bytes = 0
|
||||
if _gemini_chat and _get_gemini_history_list(_gemini_chat):
|
||||
for msg in _get_gemini_history_list(_gemini_chat):
|
||||
if msg.role == "user" and hasattr(msg, "parts"):
|
||||
for p in msg.parts:
|
||||
if hasattr(p, "function_response") and p.function_response and hasattr(p.function_response, "response"):
|
||||
r = p.function_response.response
|
||||
if isinstance(r, dict) and "output" in r:
|
||||
val = r["output"]
|
||||
if isinstance(val, str):
|
||||
if "[SYSTEM: FILES UPDATED]" in val:
|
||||
val = val.split("[SYSTEM: FILES UPDATED]")[0].strip()
|
||||
if _history_trunc_limit > 0 and len(val) > _history_trunc_limit:
|
||||
val = val[:_history_trunc_limit] + "
|
||||
|
||||
... [TRUNCATED BY SYSTEM TO SAVE TOKENS.]"
|
||||
r["output"] = val
|
||||
for r_idx in range(MAX_TOOL_ROUNDS + 2):
|
||||
events.emit("request_start", payload={"provider": "gemini", "model": _model, "round": r_idx})
|
||||
if stream_callback:
|
||||
resp = _gemini_chat.send_message_stream(payload)
|
||||
txt_chunks = []
|
||||
for chunk in resp:
|
||||
c_txt = chunk.text
|
||||
if c_txt:
|
||||
txt_chunks.append(c_txt)
|
||||
stream_callback(c_txt)
|
||||
txt = "".join(txt_chunks)
|
||||
calls = [p.function_call for c in resp.candidates if getattr(c, "content", None) for p in c.content.parts if hasattr(p, "function_call") and p.function_call]
|
||||
usage = {"input_tokens": getattr(resp.usage_metadata, "prompt_token_count", 0), "output_tokens": getattr(resp.usage_metadata, "candidates_token_count", 0)}
|
||||
cached_tokens = getattr(resp.usage_metadata, "cached_content_token_count", None)
|
||||
if cached_tokens: usage["cache_read_input_tokens"] = cached_tokens
|
||||
else:
|
||||
resp = _gemini_chat.send_message(payload)
|
||||
txt = "
|
||||
".join(p.text for c in resp.candidates if getattr(c, "content", None) for p in c.content.parts if hasattr(p, "text") and p.text)
|
||||
calls = [p.function_call for c in resp.candidates if getattr(c, "content", None) for p in c.content.parts if hasattr(p, "function_call") and p.function_call]
|
||||
usage = {"input_tokens": getattr(resp.usage_metadata, "prompt_token_count", 0), "output_tokens": getattr(resp.usage_metadata, "candidates_token_count", 0)}
|
||||
cached_tokens = getattr(resp.usage_metadata, "cached_content_token_count", None)
|
||||
if cached_tokens: usage["cache_read_input_tokens"] = cached_tokens
|
||||
if txt: all_text.append(txt)
|
||||
events.emit("response_received", payload={"provider": "gemini", "model": _model, "usage": usage, "round": r_idx})
|
||||
reason = resp.candidates[0].finish_reason.name if resp.candidates and hasattr(resp.candidates[0], "finish_reason") else "STOP"
|
||||
_append_comms("IN", "response", {"round": r_idx, "stop_reason": reason, "text": txt, "tool_calls": [{"name": c.name, "args": dict(c.args)} for c in calls], "usage": usage})
|
||||
total_in = usage.get("input_tokens", 0)
|
||||
if total_in > _GEMINI_MAX_INPUT_TOKENS * 0.4 and _gemini_chat and _get_gemini_history_list(_gemini_chat):
|
||||
hist = _get_gemini_history_list(_gemini_chat)
|
||||
dropped = 0
|
||||
while len(hist) > 4 and total_in > _GEMINI_MAX_INPUT_TOKENS * 0.3:
|
||||
saved = 0
|
||||
for _ in range(2):
|
||||
if not hist: break
|
||||
for p in hist[0].parts:
|
||||
if hasattr(p, "text") and p.text: saved += int(len(p.text) / _CHARS_PER_TOKEN)
|
||||
elif hasattr(p, "function_response") and p.function_response:
|
||||
r = getattr(p.function_response, "response", {})
|
||||
if isinstance(r, dict): saved += int(len(str(r.get("output", ""))) / _CHARS_PER_TOKEN)
|
||||
hist.pop(0)
|
||||
dropped += 1
|
||||
total_in -= max(saved, 200)
|
||||
if dropped > 0: _append_comms("OUT", "request", {"message": f"[GEMINI HISTORY TRIMMED: dropped {dropped} old entries]"})
|
||||
if not calls or r_idx > MAX_TOOL_ROUNDS: break
|
||||
f_resps: list[types.Part] = []
|
||||
log: list[dict[str, Any]] = []
|
||||
for i, fc in enumerate(calls):
|
||||
name, args = fc.name, dict(fc.args)
|
||||
if pre_tool_callback:
|
||||
payload_str = json.dumps({"tool": name, "args": args})
|
||||
if not pre_tool_callback(payload_str):
|
||||
out = "USER REJECTED: tool execution cancelled"
|
||||
f_resps.append(types.Part.from_function_response(name=name, response={"output": out}))
|
||||
log.append({"tool_use_id": name, "content": out})
|
||||
continue
|
||||
events.emit("tool_execution", payload={"status": "started", "tool": name, "args": args, "round": r_idx})
|
||||
if name in mcp_client.TOOL_NAMES:
|
||||
_append_comms("OUT", "tool_call", {"name": name, "args": args})
|
||||
out = mcp_client.dispatch(name, args)
|
||||
elif name == TOOL_NAME:
|
||||
scr = args.get("script", "")
|
||||
_append_comms("OUT", "tool_call", {"name": TOOL_NAME, "script": scr})
|
||||
out = _run_script(scr, base_dir, qa_callback)
|
||||
else: out = f"ERROR: unknown tool '{name}'"
|
||||
if i == len(calls) - 1:
|
||||
if file_items:
|
||||
file_items, changed = _reread_file_items(file_items)
|
||||
ctx = _build_file_diff_text(changed)
|
||||
if ctx: out += f"
|
||||
|
||||
[SYSTEM: FILES UPDATED]
|
||||
|
||||
{ctx}"
|
||||
if r_idx == MAX_TOOL_ROUNDS: out += "
|
||||
|
||||
[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]"
|
||||
out = _truncate_tool_output(out)
|
||||
_cumulative_tool_bytes += len(out)
|
||||
f_resps.append(types.Part.from_function_response(name=name, response={"output": out}))
|
||||
log.append({"tool_use_id": name, "content": out})
|
||||
events.emit("tool_execution", payload={"status": "completed", "tool": name, "result": out, "round": r_idx})
|
||||
if _cumulative_tool_bytes > _MAX_TOOL_OUTPUT_BYTES:
|
||||
f_resps.append(types.Part.from_text(f"SYSTEM WARNING: Cumulative tool output exceeded {_MAX_TOOL_OUTPUT_BYTES // 1000}KB budget."))
|
||||
_append_comms("OUT", "request", {"message": f"[TOOL OUTPUT BUDGET EXCEEDED: {_cumulative_tool_bytes} bytes]"})
|
||||
_append_comms("OUT", "tool_result_send", {"results": log})
|
||||
payload = f_resps
|
||||
return "
|
||||
|
||||
".join(all_text) if all_text else "(No text returned)"
|
||||
except Exception as e: raise _classify_gemini_error(e) from e
|
||||
'''
|
||||
|
||||
_SEND_ANTHROPIC_NEW = '''def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_items: list[dict[str, Any]] | None = None, discussion_history: str = "", pre_tool_callback: Optional[Callable[[str], bool]] = None, qa_callback: Optional[Callable[[str], str]] = None, stream_callback: Optional[Callable[[str], None]] = None) -> str:
|
||||
try:
|
||||
_ensure_anthropic_client()
|
||||
mcp_client.configure(file_items or [], [base_dir])
|
||||
stable_prompt = _get_combined_system_prompt()
|
||||
stable_blocks = [{"type": "text", "text": stable_prompt, "cache_control": {"type": "ephemeral"}}]
|
||||
context_text = f"
|
||||
|
||||
<context>
|
||||
{md_content}
|
||||
</context>"
|
||||
context_blocks = _build_chunked_context_blocks(context_text)
|
||||
system_blocks = stable_blocks + context_blocks
|
||||
if discussion_history and not _anthropic_history:
|
||||
user_content: list[dict[str, Any]] = [{"type": "text", "text": f"[DISCUSSION HISTORY]
|
||||
|
||||
{discussion_history}
|
||||
|
||||
---
|
||||
|
||||
{user_message}"}]
|
||||
else:
|
||||
user_content = [{"type": "text", "text": user_message}]
|
||||
for msg in _anthropic_history:
|
||||
if msg.get("role") == "user" and isinstance(msg.get("content"), list):
|
||||
modified = False
|
||||
for block in msg["content"]:
|
||||
if isinstance(block, dict) and block.get("type") == "tool_result":
|
||||
t_content = block.get("content", "")
|
||||
if _history_trunc_limit > 0 and isinstance(t_content, str) and len(t_content) > _history_trunc_limit:
|
||||
block["content"] = t_content[:_history_trunc_limit] + "
|
||||
|
||||
... [TRUNCATED BY SYSTEM]"
|
||||
modified = True
|
||||
if modified: _invalidate_token_estimate(msg)
|
||||
_strip_cache_controls(_anthropic_history)
|
||||
_repair_anthropic_history(_anthropic_history)
|
||||
_anthropic_history.append({"role": "user", "content": user_content})
|
||||
_add_history_cache_breakpoint(_anthropic_history)
|
||||
all_text_parts: list[str] = []
|
||||
_cumulative_tool_bytes = 0
|
||||
def _strip_private_keys(history: list[dict[str, Any]]) -> list[dict[str, Any]]:
|
||||
return [{k: v for k, v in m.items() if not k.startswith("_")} for m in history]
|
||||
for round_idx in range(MAX_TOOL_ROUNDS + 2):
|
||||
dropped = _trim_anthropic_history(system_blocks, _anthropic_history)
|
||||
if dropped > 0:
|
||||
est_tokens = _estimate_prompt_tokens(system_blocks, _anthropic_history)
|
||||
_append_comms("OUT", "request", {"message": f"[HISTORY TRIMMED: dropped {dropped} old messages]"})
|
||||
events.emit("request_start", payload={"provider": "anthropic", "model": _model, "round": round_idx})
|
||||
if stream_callback:
|
||||
with _anthropic_client.messages.stream(
|
||||
model=_model,
|
||||
max_tokens=_max_tokens,
|
||||
temperature=_temperature,
|
||||
system=system_blocks,
|
||||
tools=_get_anthropic_tools(),
|
||||
messages=_strip_private_keys(_anthropic_history),
|
||||
) as stream:
|
||||
for event in stream:
|
||||
if event.type == "content_block_delta" and event.delta.type == "text_delta":
|
||||
stream_callback(event.delta.text)
|
||||
response = stream.get_final_message()
|
||||
else:
|
||||
response = _anthropic_client.messages.create(
|
||||
model=_model,
|
||||
max_tokens=_max_tokens,
|
||||
temperature=_temperature,
|
||||
system=system_blocks,
|
||||
tools=_get_anthropic_tools(),
|
||||
messages=_strip_private_keys(_anthropic_history),
|
||||
)
|
||||
serialised_content = [_content_block_to_dict(b) for b in response.content]
|
||||
_anthropic_history.append({"role": "assistant", "content": serialised_content})
|
||||
text_blocks = [b.text for b in response.content if hasattr(b, "text") and b.text]
|
||||
if text_blocks: all_text_parts.append("
|
||||
".join(text_blocks))
|
||||
tool_use_blocks = [{"id": b.id, "name": b.name, "input": b.input} for b in response.content if getattr(b, "type", None) == "tool_use"]
|
||||
usage_dict: dict[str, Any] = {}
|
||||
if response.usage:
|
||||
usage_dict["input_tokens"] = response.usage.input_tokens
|
||||
usage_dict["output_tokens"] = response.usage.output_tokens
|
||||
for k in ["cache_creation_input_tokens", "cache_read_input_tokens"]:
|
||||
val = getattr(response.usage, k, None)
|
||||
if val is not None: usage_dict[k] = val
|
||||
events.emit("response_received", payload={"provider": "anthropic", "model": _model, "usage": usage_dict, "round": round_idx})
|
||||
_append_comms("IN", "response", {"round": round_idx, "stop_reason": response.stop_reason, "text": "
|
||||
".join(text_blocks), "tool_calls": tool_use_blocks, "usage": usage_dict})
|
||||
if response.stop_reason != "tool_use" or not tool_use_blocks: break
|
||||
if round_idx > MAX_TOOL_ROUNDS: break
|
||||
tool_results: list[dict[str, Any]] = []
|
||||
for block in response.content:
|
||||
if getattr(block, "type", None) != "tool_use": continue
|
||||
b_name, b_id, b_input = block.name, block.id, block.input
|
||||
if pre_tool_callback:
|
||||
if not pre_tool_callback(json.dumps({"tool": b_name, "args": b_input})):
|
||||
tool_results.append({"type": "tool_result", "tool_use_id": b_id, "content": "USER REJECTED: tool execution cancelled"})
|
||||
continue
|
||||
events.emit("tool_execution", payload={"status": "started", "tool": b_name, "args": b_input, "round": round_idx})
|
||||
if b_name in mcp_client.TOOL_NAMES:
|
||||
_append_comms("OUT", "tool_call", {"name": b_name, "id": b_id, "args": b_input})
|
||||
output = mcp_client.dispatch(b_name, b_input)
|
||||
elif b_name == TOOL_NAME:
|
||||
scr = b_input.get("script", "")
|
||||
_append_comms("OUT", "tool_call", {"name": TOOL_NAME, "id": b_id, "script": scr})
|
||||
output = _run_script(scr, base_dir, qa_callback)
|
||||
else: output = f"ERROR: unknown tool '{b_name}'"
|
||||
truncated = _truncate_tool_output(output)
|
||||
_cumulative_tool_bytes += len(truncated)
|
||||
tool_results.append({"type": "tool_result", "tool_use_id": b_id, "content": truncated})
|
||||
_append_comms("IN", "tool_result", {"name": b_name, "id": b_id, "output": output})
|
||||
events.emit("tool_execution", payload={"status": "completed", "tool": b_name, "result": output, "round": round_idx})
|
||||
if _cumulative_tool_bytes > _MAX_TOOL_OUTPUT_BYTES:
|
||||
tool_results.append({"type": "text", "text": "SYSTEM WARNING: Cumulative tool output exceeded budget."})
|
||||
if file_items:
|
||||
file_items, changed = _reread_file_items(file_items)
|
||||
refreshed_ctx = _build_file_diff_text(changed)
|
||||
if refreshed_ctx: tool_results.append({"type": "text", "text": f"[FILES UPDATED]
|
||||
|
||||
{refreshed_ctx}"})
|
||||
if round_idx == MAX_TOOL_ROUNDS: tool_results.append({"type": "text", "text": "SYSTEM WARNING: MAX TOOL ROUNDS REACHED."})
|
||||
_anthropic_history.append({"role": "user", "content": tool_results})
|
||||
_append_comms("OUT", "tool_result_send", {"results": [{"tool_use_id": r["tool_use_id"], "content": r["content"]} for r in tool_results if r.get("type") == "tool_result"]})
|
||||
return "
|
||||
|
||||
".join(all_text_parts) if all_text_parts else "(No text returned)"
|
||||
except Exception as exc: raise _classify_anthropic_error(exc) from exc
|
||||
'''
|
||||
|
||||
_SEND_DEEPSEEK_NEW = '''def _send_deepseek(md_content: str, user_message: str, base_dir: str,
|
||||
file_items: list[dict[str, Any]] | None = None,
|
||||
discussion_history: str = "",
|
||||
stream: bool = False,
|
||||
pre_tool_callback: Optional[Callable[[str], bool]] = None,
|
||||
qa_callback: Optional[Callable[[str], str]] = None,
|
||||
stream_callback: Optional[Callable[[str], None]] = None) -> str:
|
||||
try:
|
||||
mcp_client.configure(file_items or [], [base_dir])
|
||||
creds = _load_credentials()
|
||||
api_key = creds.get("deepseek", {}).get("api_key")
|
||||
if not api_key: raise ValueError("DeepSeek API key not found")
|
||||
api_url = "https://api.deepseek.com/chat/completions"
|
||||
headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
|
||||
current_api_messages: list[dict[str, Any]] = []
|
||||
with _deepseek_history_lock:
|
||||
for msg in _deepseek_history: current_api_messages.append(msg)
|
||||
initial_user_message_content = user_message
|
||||
if discussion_history: initial_user_message_content = f"[DISCUSSION HISTORY]
|
||||
|
||||
{discussion_history}
|
||||
|
||||
---
|
||||
|
||||
{user_message}"
|
||||
current_api_messages.append({"role": "user", "content": initial_user_message_content})
|
||||
request_payload: dict[str, Any] = {"model": _model, "messages": current_api_messages, "temperature": _temperature, "max_tokens": _max_tokens, "stream": stream}
|
||||
sys_msg = {"role": "system", "content": f"{_get_combined_system_prompt()}
|
||||
|
||||
<context>
|
||||
{md_content}
|
||||
</context>"}
|
||||
request_payload["messages"].insert(0, sys_msg)
|
||||
all_text_parts: list[str] = []
|
||||
_cumulative_tool_bytes = 0
|
||||
round_idx = 0
|
||||
while round_idx <= MAX_TOOL_ROUNDS + 1:
|
||||
events.emit("request_start", payload={"provider": "deepseek", "model": _model, "round": round_idx, "streaming": stream})
|
||||
try:
|
||||
response = requests.post(api_url, headers=headers, json=request_payload, timeout=60, stream=stream)
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.RequestException as e: raise _classify_deepseek_error(e) from e
|
||||
if stream:
|
||||
aggregated_content, aggregated_tool_calls, aggregated_reasoning = "", [], ""
|
||||
current_usage, final_finish_reason = {}, "stop"
|
||||
for line in response.iter_lines():
|
||||
if not line: continue
|
||||
decoded = line.decode('utf-8')
|
||||
if decoded.startswith('data: '):
|
||||
chunk_str = decoded[len('data: '):]
|
||||
if chunk_str.strip() == '[DONE]': continue
|
||||
try:
|
||||
chunk = json.loads(chunk_str)
|
||||
delta = chunk.get("choices", [{}])[0].get("delta", {})
|
||||
if delta.get("content"):
|
||||
aggregated_content += delta["content"]
|
||||
if stream_callback: stream_callback(delta["content"])
|
||||
if delta.get("reasoning_content"): aggregated_reasoning += delta["reasoning_content"]
|
||||
if delta.get("tool_calls"):
|
||||
for tc_delta in delta["tool_calls"]:
|
||||
idx = tc_delta.get("index", 0)
|
||||
while len(aggregated_tool_calls) <= idx: aggregated_tool_calls.append({"id": "", "type": "function", "function": {"name": "", "arguments": ""}})
|
||||
target = aggregated_tool_calls[idx]
|
||||
if tc_delta.get("id"): target["id"] = tc_delta["id"]
|
||||
if tc_delta.get("function", {}).get("name"): target["function"]["name"] += tc_delta["function"]["name"]
|
||||
if tc_delta.get("function", {}).get("arguments"): target["function"]["arguments"] += tc_delta["function"]["arguments"]
|
||||
if chunk.get("choices", [{}])[0].get("finish_reason"): final_finish_reason = chunk["choices"][0]["finish_reason"]
|
||||
if chunk.get("usage"): current_usage = chunk["usage"]
|
||||
except json.JSONDecodeError: continue
|
||||
assistant_text, tool_calls_raw, reasoning_content, finish_reason, usage = aggregated_content, aggregated_tool_calls, aggregated_reasoning, final_finish_reason, current_usage
|
||||
else:
|
||||
response_data = response.json()
|
||||
choices = response_data.get("choices", [])
|
||||
if not choices: break
|
||||
choice = choices[0]
|
||||
message = choice.get("message", {})
|
||||
assistant_text, tool_calls_raw, reasoning_content, finish_reason, usage = message.get("content", ""), message.get("tool_calls", []), message.get("reasoning_content", ""), choice.get("finish_reason", "stop"), response_data.get("usage", {})
|
||||
full_assistant_text = (f"<thinking>
|
||||
{reasoning_content}
|
||||
</thinking>
|
||||
" if reasoning_content else "") + assistant_text
|
||||
with _deepseek_history_lock:
|
||||
msg_to_store = {"role": "assistant", "content": assistant_text}
|
||||
if reasoning_content: msg_to_store["reasoning_content"] = reasoning_content
|
||||
if tool_calls_raw: msg_to_store["tool_calls"] = tool_calls_raw
|
||||
_deepseek_history.append(msg_to_store)
|
||||
if full_assistant_text: all_text_parts.append(full_assistant_text)
|
||||
_append_comms("IN", "response", {"round": round_idx, "stop_reason": finish_reason, "text": full_assistant_text, "tool_calls": tool_calls_raw, "usage": usage, "streaming": stream})
|
||||
if finish_reason != "tool_calls" and not tool_calls_raw: break
|
||||
if round_idx > MAX_TOOL_ROUNDS: break
|
||||
tool_results_for_history: list[dict[str, Any]] = []
|
||||
for i, tc_raw in enumerate(tool_calls_raw):
|
||||
tool_info = tc_raw.get("function", {})
|
||||
tool_name, tool_args_str, tool_id = tool_info.get("name"), tool_info.get("arguments", "{}"), tc_raw.get("id")
|
||||
try: tool_args = json.loads(tool_args_str)
|
||||
except: tool_args = {}
|
||||
if pre_tool_callback:
|
||||
if not pre_tool_callback(json.dumps({"tool": tool_name, "args": tool_args})):
|
||||
tool_output = "USER REJECTED: tool execution cancelled"
|
||||
tool_results_for_history.append({"role": "tool", "tool_call_id": tool_id, "content": tool_output})
|
||||
continue
|
||||
events.emit("tool_execution", payload={"status": "started", "tool": tool_name, "args": tool_args, "round": round_idx})
|
||||
if tool_name in mcp_client.TOOL_NAMES:
|
||||
_append_comms("OUT", "tool_call", {"name": tool_name, "id": tool_id, "args": tool_args})
|
||||
tool_output = mcp_client.dispatch(tool_name, tool_args)
|
||||
elif tool_name == TOOL_NAME:
|
||||
script = tool_args.get("script", "")
|
||||
_append_comms("OUT", "tool_call", {"name": TOOL_NAME, "id": tool_id, "script": script})
|
||||
tool_output = _run_script(script, base_dir, qa_callback)
|
||||
else: tool_output = f"ERROR: unknown tool '{tool_name}'"
|
||||
if i == len(tool_calls_raw) - 1:
|
||||
if file_items:
|
||||
file_items, changed = _reread_file_items(file_items)
|
||||
ctx = _build_file_diff_text(changed)
|
||||
if ctx: tool_output += f"
|
||||
|
||||
[SYSTEM: FILES UPDATED]
|
||||
|
||||
{ctx}"
|
||||
if round_idx == MAX_TOOL_ROUNDS: tool_output += "
|
||||
|
||||
[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]"
|
||||
tool_output = _truncate_tool_output(tool_output)
|
||||
_cumulative_tool_bytes += len(tool_output)
|
||||
tool_results_for_history.append({"role": "tool", "tool_call_id": tool_id, "content": tool_output})
|
||||
_append_comms("IN", "tool_result", {"name": tool_name, "id": tool_id, "output": tool_output})
|
||||
events.emit("tool_execution", payload={"status": "completed", "tool": tool_name, "result": tool_output, "round": round_idx})
|
||||
if _cumulative_tool_bytes > _MAX_TOOL_OUTPUT_BYTES:
|
||||
tool_results_for_history.append({"role": "user", "content": "SYSTEM WARNING: Cumulative tool output exceeded budget."})
|
||||
with _deepseek_history_lock:
|
||||
for tr in tool_results_for_history: _deepseek_history.append(tr)
|
||||
next_messages: list[dict[str, Any]] = []
|
||||
with _deepseek_history_lock:
|
||||
for msg in _deepseek_history: next_messages.append(msg)
|
||||
next_messages.insert(0, sys_msg)
|
||||
request_payload["messages"] = next_messages
|
||||
round_idx += 1
|
||||
return "
|
||||
|
||||
".join(all_text_parts) if all_text_parts else "(No text returned)"
|
||||
except Exception as e: raise _classify_deepseek_error(e) from e
|
||||
'''
|
||||
|
||||
_SEND_NEW = '''def send(
|
||||
md_content: str,
|
||||
user_message: str,
|
||||
base_dir: str = ".",
|
||||
file_items: list[dict[str, Any]] | None = None,
|
||||
discussion_history: str = "",
|
||||
stream: bool = False,
|
||||
pre_tool_callback: Optional[Callable[[str], bool]] = None,
|
||||
qa_callback: Optional[Callable[[str], str]] = None,
|
||||
enable_tools: bool = True,
|
||||
stream_callback: Optional[Callable[[str], None]] = None,
|
||||
) -> str:
|
||||
"""
|
||||
Sends a prompt with the full markdown context to the current AI provider.
|
||||
Returns the final text response.
|
||||
"""
|
||||
with _send_lock:
|
||||
if _provider == "gemini":
|
||||
return _send_gemini(
|
||||
md_content, user_message, base_dir, file_items, discussion_history,
|
||||
pre_tool_callback, qa_callback, enable_tools, stream_callback
|
||||
)
|
||||
elif _provider == "gemini_cli":
|
||||
return _send_gemini_cli(
|
||||
md_content, user_message, base_dir, file_items, discussion_history,
|
||||
pre_tool_callback, qa_callback
|
||||
)
|
||||
elif _provider == "anthropic":
|
||||
return _send_anthropic(
|
||||
md_content, user_message, base_dir, file_items, discussion_history,
|
||||
pre_tool_callback, qa_callback, stream_callback=stream_callback
|
||||
)
|
||||
elif _provider == "deepseek":
|
||||
return _send_deepseek(
|
||||
md_content, user_message, base_dir, file_items, discussion_history,
|
||||
stream, pre_tool_callback, qa_callback, stream_callback
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Unknown provider: {_provider}")
|
||||
'''
|
||||
|
||||
# Use regex or simple string replacement to replace the old functions with new ones.
|
||||
import re
|
||||
|
||||
def replace_func(content, func_name, new_body):
|
||||
# This is tricky because functions can be complex.
|
||||
# I'll just use a marker based approach for this specific file.
|
||||
start_marker = f'def {func_name}('
|
||||
# Find the next 'def ' or end of file
|
||||
start_idx = content.find(start_marker)
|
||||
if start_idx == -1: return content
|
||||
|
||||
# Find the end of the function (rough estimation based on next def at column 0)
|
||||
next_def = re.search(r'
|
||||
|
||||
def ', content[start_idx+1:])
|
||||
if next_def:
|
||||
end_idx = start_idx + 1 + next_def.start()
|
||||
else:
|
||||
end_idx = len(content)
|
||||
|
||||
return content[:start_idx] + new_body + content[end_idx:]
|
||||
|
||||
# Final content construction
|
||||
content = replace_func(content, '_send_gemini', _SEND_GEMINI_NEW)
|
||||
content = replace_func(content, '_send_anthropic', _SEND_ANTHROPIC_NEW)
|
||||
content = replace_func(content, '_send_deepseek', _SEND_DEEPSEEK_NEW)
|
||||
content = replace_func(content, 'send', _SEND_NEW)
|
||||
|
||||
# Remove the duplicated parts at the end if any
|
||||
marker = 'import json
|
||||
from typing import Any, Callable, Optional, List'
|
||||
if marker in content:
|
||||
content = content[:content.find(marker)]
|
||||
|
||||
with open(path, 'w', encoding='utf-8') as f:
|
||||
f.write(content)
|
||||
@@ -1,6 +1,5 @@
|
||||
import subprocess
|
||||
import sys
|
||||
import os
|
||||
|
||||
def run_diag(role: str, prompt: str) -> str:
|
||||
print(f"--- Running Diag for {role} ---")
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
import subprocess
|
||||
import pytest
|
||||
import os
|
||||
|
||||
def run_ps_script(role: str, prompt: str) -> subprocess.CompletedProcess:
|
||||
"""Helper to run the run_subagent.ps1 script."""
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[ai]
|
||||
provider = "gemini"
|
||||
model = "gemini-2.5-flash-lite"
|
||||
provider = "gemini_cli"
|
||||
model = "gemini-2.0-flash"
|
||||
temperature = 0.0
|
||||
max_tokens = 8192
|
||||
history_trunc_limit = 8000
|
||||
@@ -15,7 +15,7 @@ paths = [
|
||||
"C:\\projects\\manual_slop\\tests\\artifacts\\temp_livetoolssim.toml",
|
||||
"C:\\projects\\manual_slop\\tests\\artifacts\\temp_liveexecutionsim.toml",
|
||||
]
|
||||
active = "C:\\projects\\manual_slop\\tests\\artifacts\\temp_livecontextsim.toml"
|
||||
active = "C:\\projects\\manual_slop\\tests\\artifacts\\temp_project.toml"
|
||||
|
||||
[gui.show_windows]
|
||||
"Context Hub" = true
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from typing import List, Optional
|
||||
from typing import List
|
||||
from models import Ticket
|
||||
|
||||
class TrackDAG:
|
||||
|
||||
18
debug_ast.py
18
debug_ast.py
@@ -1,18 +0,0 @@
|
||||
import tree_sitter
|
||||
import tree_sitter_python
|
||||
|
||||
code = """def hot_func():
|
||||
# [HOT]
|
||||
print(1)"""
|
||||
|
||||
PY_LANGUAGE = tree_sitter.Language(tree_sitter_python.language())
|
||||
parser = tree_sitter.Parser(PY_LANGUAGE)
|
||||
tree = parser.parse(bytes(code, "utf8"))
|
||||
|
||||
def walk(node, indent=0):
|
||||
content = code[node.start_byte:node.end_byte].strip()
|
||||
print(f"{' ' * indent}{node.type} ({node.start_byte}-{node.end_byte}): {content[:20]}")
|
||||
for child in node.children:
|
||||
walk(child, indent + 1)
|
||||
|
||||
walk(tree.root_node)
|
||||
@@ -1,98 +0,0 @@
|
||||
import tree_sitter
|
||||
import tree_sitter_python
|
||||
|
||||
class ASTParser:
|
||||
def __init__(self, language: str) -> None:
|
||||
self.language = tree_sitter.Language(tree_sitter_python.language())
|
||||
self.parser = tree_sitter.Parser(self.language)
|
||||
|
||||
def parse(self, code: str) -> tree_sitter.Tree:
|
||||
return self.parser.parse(bytes(code, "utf8"))
|
||||
|
||||
def get_curated_view(self, code: str) -> str:
|
||||
tree = self.parse(code)
|
||||
edits = []
|
||||
|
||||
def is_docstring(node):
|
||||
if node.type == "expression_statement" and node.child_count > 0:
|
||||
if node.children[0].type == "string":
|
||||
return True
|
||||
return False
|
||||
|
||||
def has_core_logic_decorator(node):
|
||||
parent = node.parent
|
||||
if parent and parent.type == "decorated_definition":
|
||||
for child in parent.children:
|
||||
if child.type == "decorator":
|
||||
if "@core_logic" in code[child.start_byte:child.end_byte]:
|
||||
return True
|
||||
return False
|
||||
|
||||
def has_hot_comment(func_node):
|
||||
print(f"Checking {code[func_node.start_byte:func_node.start_byte+20].strip()}...")
|
||||
stack = [func_node]
|
||||
while stack:
|
||||
curr = stack.pop()
|
||||
if curr.type == "comment":
|
||||
comment_text = code[curr.start_byte:curr.end_byte]
|
||||
print(f" Found comment: {comment_text}")
|
||||
if "[HOT]" in comment_text:
|
||||
print(" [HOT] FOUND!")
|
||||
return True
|
||||
for child in curr.children:
|
||||
stack.append(child)
|
||||
return False
|
||||
|
||||
def walk(node):
|
||||
if node.type == "function_definition":
|
||||
body = node.child_by_field_name("body")
|
||||
if body and body.type == "block":
|
||||
preserve = has_core_logic_decorator(node) or has_hot_comment(node)
|
||||
print(f"Function {code[node.start_byte:node.start_byte+20].strip()}, preserve={preserve}")
|
||||
if not preserve:
|
||||
indent = " " * body.start_point.column
|
||||
first_stmt = None
|
||||
for child in body.children:
|
||||
if child.type != "comment":
|
||||
first_stmt = child
|
||||
break
|
||||
if first_stmt and is_docstring(first_stmt):
|
||||
start_byte = first_stmt.end_byte
|
||||
end_byte = body.end_byte
|
||||
if end_byte > start_byte:
|
||||
edits.append((start_byte, end_byte, "\\n" + indent + "..."))
|
||||
else:
|
||||
start_byte = body.start_byte
|
||||
end_byte = body.end_byte
|
||||
edits.append((start_byte, end_byte, "..."))
|
||||
for child in node.children:
|
||||
walk(child)
|
||||
walk(tree.root_node)
|
||||
edits.sort(key=lambda x: x[0], reverse=True)
|
||||
code_bytes = bytearray(code, "utf8")
|
||||
for start, end, replacement in edits:
|
||||
code_bytes[start:end] = bytes(replacement, "utf8")
|
||||
return code_bytes.decode("utf8")
|
||||
|
||||
parser = ASTParser("python")
|
||||
code = '''
|
||||
@core_logic
|
||||
def core_func():
|
||||
"""Core logic doc."""
|
||||
print("this should be preserved")
|
||||
return True
|
||||
|
||||
def hot_func():
|
||||
# [HOT]
|
||||
print("this should also be preserved")
|
||||
return 42
|
||||
|
||||
def normal_func():
|
||||
"""Normal doc."""
|
||||
print("this should be stripped")
|
||||
return None
|
||||
'''
|
||||
|
||||
result = parser.get_curated_view(code)
|
||||
print("\\n--- RESULT ---\\n")
|
||||
print(result)
|
||||
@@ -1,10 +1,8 @@
|
||||
# gemini.py
|
||||
from __future__ import annotations
|
||||
import tomllib
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from google import genai
|
||||
from google.genai import types
|
||||
|
||||
_client: genai.Client | None = None
|
||||
_chat: Any = None
|
||||
|
||||
@@ -38,7 +38,6 @@ class GeminiCliAdapter:
|
||||
accumulated_text = ""
|
||||
tool_calls = []
|
||||
stdout_content = []
|
||||
stderr_content = []
|
||||
|
||||
env = os.environ.copy()
|
||||
env["GEMINI_CLI_HOOK_CONTEXT"] = "manual_slop"
|
||||
|
||||
13
gui_2.py
13
gui_2.py
@@ -12,7 +12,7 @@ import uuid
|
||||
import requests
|
||||
from pathlib import Path
|
||||
from tkinter import filedialog, Tk
|
||||
from typing import Optional, Callable, Any, Dict, List, Tuple, Union
|
||||
from typing import Optional, Callable, Any
|
||||
import aggregate
|
||||
import ai_client
|
||||
import cost_tracker
|
||||
@@ -35,7 +35,7 @@ import multi_agent_conductor
|
||||
from models import Track, Ticket
|
||||
from file_cache import ASTParser
|
||||
|
||||
from fastapi import FastAPI, Depends, HTTPException, Security
|
||||
from fastapi import FastAPI, Depends, HTTPException
|
||||
from fastapi.security.api_key import APIKeyHeader
|
||||
from pydantic import BaseModel
|
||||
from imgui_bundle import imgui, hello_imgui, immapp
|
||||
@@ -431,7 +431,6 @@ class App:
|
||||
'btn_mma_start_track': self._cb_start_track,
|
||||
'btn_mma_create_track': lambda: self._cb_create_track(self.ui_new_track_name, self.ui_new_track_desc, self.ui_new_track_type),
|
||||
'btn_approve_tool': self._handle_approve_tool,
|
||||
'btn_approve_script': self._handle_approve_script,
|
||||
'btn_approve_mma_step': self._handle_approve_mma_step,
|
||||
'btn_approve_spawn': self._handle_approve_spawn,
|
||||
}
|
||||
@@ -1070,7 +1069,7 @@ class App:
|
||||
return
|
||||
self._scroll_disc_to_bottom = True
|
||||
for item in items:
|
||||
role = item.get("role", "unknown")
|
||||
item.get("role", "unknown")
|
||||
if item.get("role") and item["role"] not in self.disc_roles:
|
||||
self.disc_roles.append(item["role"])
|
||||
disc_sec = self.project.get("discussion", {})
|
||||
@@ -3389,7 +3388,7 @@ class App:
|
||||
imgui.begin_tooltip()
|
||||
imgui.text_colored(C_KEY, f"ID: {tid}")
|
||||
imgui.text_colored(C_LBL, f"Target: {target}")
|
||||
imgui.text_colored(C_LBL, f"Description:")
|
||||
imgui.text_colored(C_LBL, "Description:")
|
||||
imgui.same_line()
|
||||
imgui.text_wrapped(ticket.get('description', 'N/A'))
|
||||
deps = ticket.get('depends_on', [])
|
||||
@@ -3481,8 +3480,8 @@ class App:
|
||||
imgui.push_id(f"comms_{idx}")
|
||||
if blink_alpha > 0:
|
||||
# Draw a background highlight for the entry
|
||||
draw_list = imgui.get_window_draw_list()
|
||||
p_min = imgui.get_cursor_screen_pos()
|
||||
imgui.get_window_draw_list()
|
||||
imgui.get_cursor_screen_pos()
|
||||
# Estimate height or just use a fixed height for the background
|
||||
# It's better to wrap the entry in a group or just use separators
|
||||
# For now, let's just use the style color push if we are sure we pop it
|
||||
|
||||
@@ -1020,7 +1020,6 @@ class App:
|
||||
dpg.add_button(
|
||||
label="x", width=24, callback=self._make_remove_project_cb(i)
|
||||
)
|
||||
name_color = (140, 255, 160) if is_active else (200, 200, 200)
|
||||
marker = " *" if is_active else ""
|
||||
dpg.add_button(
|
||||
label=f"{Path(pp).stem}{marker}",
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
|
||||
import tree_sitter
|
||||
import tree_sitter_python
|
||||
|
||||
language = tree_sitter.Language(tree_sitter_python.language())
|
||||
parser = tree_sitter.Parser(language)
|
||||
|
||||
code = """
|
||||
@core_logic
|
||||
def decorated_func():
|
||||
"""Docstring."""
|
||||
print("core logic here")
|
||||
|
||||
def hot_func():
|
||||
# [HOT]
|
||||
print("hot logic here")
|
||||
|
||||
def normal_func():
|
||||
print("normal logic here")
|
||||
"""
|
||||
|
||||
tree = parser.parse(bytes(code, "utf8"))
|
||||
|
||||
def print_node(node, indent=0):
|
||||
print(" " * indent + f"{node.type} [{node.start_byte}-{node.end_byte}] " + (f"'{code[node.start_byte:node.end_byte]}'" if node.type in ["decorator", "comment", "identifier"] else ""))
|
||||
for child in node.children:
|
||||
print_node(child, indent + 1)
|
||||
|
||||
print_node(tree.root_node)
|
||||
@@ -39,7 +39,7 @@ class LogPruner:
|
||||
old_sessions_to_check = self.log_registry.get_old_non_whitelisted_sessions(cutoff_time)
|
||||
# Prune sessions if their size is less than 2048 bytes
|
||||
for session_info in old_sessions_to_check:
|
||||
session_id = session_info['session_id']
|
||||
session_info['session_id']
|
||||
session_path = session_info['path']
|
||||
if not session_path or not os.path.isdir(session_path):
|
||||
continue
|
||||
|
||||
@@ -414,7 +414,7 @@ def py_set_signature(path: str, name: str, new_signature: str) -> str:
|
||||
try:
|
||||
import ast
|
||||
code = p.read_text(encoding="utf-8").lstrip(chr(0xFEFF))
|
||||
lines = code.splitlines(keepends=True)
|
||||
code.splitlines(keepends=True)
|
||||
tree = ast.parse(code)
|
||||
node = _get_symbol_node(tree, name)
|
||||
if not node or not isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import ai_client
|
||||
import json
|
||||
import asyncio
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from typing import List, Optional, Tuple
|
||||
@@ -242,7 +241,7 @@ def run_worker_lifecycle(ticket: Ticket, context: WorkerContext, context_files:
|
||||
parser = ASTParser(language="python")
|
||||
for i, file_path in enumerate(context_files):
|
||||
try:
|
||||
abs_path = Path(file_path)
|
||||
Path(file_path)
|
||||
# (This is a bit simplified, but helps)
|
||||
with open(file_path, 'r', encoding='utf-8') as f:
|
||||
content = f.read()
|
||||
|
||||
@@ -8,5 +8,5 @@ active = "main"
|
||||
|
||||
[discussions.main]
|
||||
git_commit = ""
|
||||
last_updated = "2026-03-01T22:58:49"
|
||||
last_updated = "2026-03-02T13:16:02"
|
||||
history = []
|
||||
|
||||
@@ -242,7 +242,6 @@ def load_track_history(track_id: str, base_dir: Union[str, Path] = ".") -> list[
|
||||
Loads the discussion history for a specific track from its state.toml.
|
||||
Returns a list of entry strings formatted with @timestamp.
|
||||
"""
|
||||
from models import TrackState
|
||||
state = load_track_state(track_id, base_dir)
|
||||
if not state:
|
||||
return []
|
||||
@@ -260,7 +259,6 @@ def save_track_history(track_id: str, history: list[str], base_dir: Union[str, P
|
||||
Saves the discussion history for a specific track to its state.toml.
|
||||
'history' is expected to be a list of formatted strings.
|
||||
"""
|
||||
from models import TrackState
|
||||
state = load_track_state(track_id, base_dir)
|
||||
if not state:
|
||||
return
|
||||
@@ -277,7 +275,6 @@ def get_all_tracks(base_dir: Union[str, Path] = ".") -> list[dict[str, Any]]:
|
||||
Handles missing or malformed metadata.json or state.toml by falling back
|
||||
to available info or defaults.
|
||||
"""
|
||||
from models import TrackState
|
||||
tracks_dir = Path(base_dir) / "conductor" / "tracks"
|
||||
if not tracks_dir.exists():
|
||||
return []
|
||||
|
||||
@@ -9,7 +9,7 @@ import ast
|
||||
import re
|
||||
import sys
|
||||
import os
|
||||
from typing import Any, Callable
|
||||
from typing import Any
|
||||
|
||||
BASE: str = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
||||
stats: dict[str, Any] = {"auto_none": 0, "manual_sig": 0, "vars": 0, "errors": []}
|
||||
@@ -332,7 +332,7 @@ if __name__ == "__main__":
|
||||
print(f" {f}: {r}")
|
||||
if "Error" in r:
|
||||
all_ok = False
|
||||
print(f"\n=== Summary ===")
|
||||
print("\n=== Summary ===")
|
||||
print(f" Auto -> None: {stats['auto_none']}")
|
||||
print(f" Manual sigs: {stats['manual_sig']}")
|
||||
print(f" Variables: {stats['vars']}")
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import re
|
||||
import sys
|
||||
|
||||
files = ['ai_client.py', 'aggregate.py', 'mcp_client.py', 'shell_runner.py']
|
||||
for file_path in files:
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import re
|
||||
import sys
|
||||
|
||||
files = ['ai_client.py', 'aggregate.py', 'mcp_client.py', 'shell_runner.py']
|
||||
for file_path in files:
|
||||
|
||||
@@ -97,7 +97,7 @@ def main():
|
||||
}))
|
||||
return
|
||||
if hook_context == "mma_headless":
|
||||
logging.debug(f"GEMINI_CLI_HOOK_CONTEXT is 'mma_headless'. Allowing execution for sub-agent.")
|
||||
logging.debug("GEMINI_CLI_HOOK_CONTEXT is 'mma_headless'. Allowing execution for sub-agent.")
|
||||
print(json.dumps({
|
||||
"decision": "allow",
|
||||
"reason": "Sub-agent headless mode (MMA)."
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import os
|
||||
import re
|
||||
|
||||
with open('mcp_client.py', 'r', encoding='utf-8') as f:
|
||||
|
||||
@@ -211,19 +211,19 @@ def execute_agent(role: str, prompt: str, docs: list[str], debug: bool = False,
|
||||
env = os.environ.copy()
|
||||
env["GEMINI_CLI_HOOK_CONTEXT"] = "mma_headless"
|
||||
if debug:
|
||||
print(f"--- MMA DEBUG ---")
|
||||
print("--- MMA DEBUG ---")
|
||||
print(f"Executing Command: {cmd}")
|
||||
print(f"Relevant Environment Variables:")
|
||||
print("Relevant Environment Variables:")
|
||||
for key, value in env.items():
|
||||
if key.startswith("GEMINI_CLI_"):
|
||||
print(f" {key}={value}")
|
||||
process = subprocess.run(cmd, input=command_text, capture_output=True, text=True, encoding='utf-8', env=env)
|
||||
if debug:
|
||||
print(f"Subprocess Result:")
|
||||
print("Subprocess Result:")
|
||||
print(f" Return Code: {process.returncode}")
|
||||
print(f" Stdout: {process.stdout[:1000]}...")
|
||||
print(f" Stderr: {process.stderr}")
|
||||
print(f"--- END DEBUG ---")
|
||||
print("--- END DEBUG ---")
|
||||
result = process.stdout
|
||||
if not process.stdout and process.stderr:
|
||||
result = f"Error: {process.stderr}"
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
"""Scan all .py files for missing type hints. Writes scan_report.txt."""
|
||||
import ast, os
|
||||
import ast
|
||||
import os
|
||||
|
||||
SKIP: set[str] = {'.git', '__pycache__', '.venv', 'venv', 'node_modules', '.claude', '.gemini'}
|
||||
BASE: str = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
# shell_runner.py
|
||||
import os, subprocess, shutil
|
||||
import os
|
||||
import subprocess
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import Callable, Optional
|
||||
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import random
|
||||
from api_hook_client import ApiHookClient
|
||||
from simulation.workflow_sim import WorkflowSimulator
|
||||
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
from simulation.sim_base import BaseSimulation, run_sim
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import pytest
|
||||
from typing import Any, Optional
|
||||
from api_hook_client import ApiHookClient
|
||||
from simulation.workflow_sim import WorkflowSimulator
|
||||
@@ -19,7 +18,7 @@ class BaseSimulation:
|
||||
self.project_path = None
|
||||
|
||||
def setup(self, project_name: str = "SimProject") -> None:
|
||||
print(f"\n[BaseSim] Connecting to GUI...")
|
||||
print("\n[BaseSim] Connecting to GUI...")
|
||||
if not self.client.wait_for_server(timeout=5):
|
||||
raise RuntimeError("Could not connect to GUI. Ensure it is running with --enable-test-hooks")
|
||||
self.client.set_value("auto_add_history", True)
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
from simulation.sim_base import BaseSimulation, run_sim
|
||||
@@ -31,7 +30,7 @@ class ContextSimulation(BaseSimulation):
|
||||
self.client.click("btn_md_only")
|
||||
time.sleep(5)
|
||||
# Verify status
|
||||
proj_updated = self.client.get_project()
|
||||
self.client.get_project()
|
||||
status = self.client.get_value("ai_status")
|
||||
print(f"[Sim] Status: {status}")
|
||||
assert "md written" in status, f"Expected 'md written' in status, got {status}"
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
from simulation.sim_base import BaseSimulation, run_sim
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
from simulation.sim_base import BaseSimulation, run_sim
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import time
|
||||
import os
|
||||
from api_hook_client import ApiHookClient
|
||||
from simulation.user_agent import UserSimAgent
|
||||
|
||||
@@ -84,7 +83,7 @@ class WorkflowSimulator:
|
||||
content = last_entry.get('content')
|
||||
print(f"\n[AI]: {content[:100]}...")
|
||||
if "error" in content.lower() or "blocked" in content.lower():
|
||||
print(f"[WARN] AI response appears to contain an error message.")
|
||||
print("[WARN] AI response appears to contain an error message.")
|
||||
return last_entry
|
||||
print("\nTimeout waiting for AI")
|
||||
active_disc = self.client.get_value("active_discussion")
|
||||
|
||||
@@ -171,7 +171,7 @@ def summarise_items(file_items: list[dict]) -> list[dict]:
|
||||
content = item.get("content", "")
|
||||
error = item.get("error", False)
|
||||
if error or path is None:
|
||||
summary = f"_Error reading file_"
|
||||
summary = "_Error reading file_"
|
||||
else:
|
||||
p = Path(path) if not isinstance(path, Path) else path
|
||||
summary = summarise_file(p, content)
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
|
||||
import os
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
import project_manager
|
||||
from models import Track, Ticket
|
||||
|
||||
class TestMMAPersistence(unittest.TestCase):
|
||||
def test_default_project_has_mma(self) -> None:
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import sys
|
||||
import json
|
||||
import subprocess
|
||||
import os
|
||||
|
||||
def main() -> None:
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
import pytest
|
||||
import sys
|
||||
import os
|
||||
|
||||
# Ensure project root is in path
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
import ai_client
|
||||
|
||||
def test_agent_capabilities_listing() -> None:
|
||||
pass
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
import pytest
|
||||
import sys
|
||||
import os
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
# Ensure project root is in path
|
||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import pytest
|
||||
from unittest.mock import MagicMock, patch
|
||||
from unittest.mock import patch
|
||||
import ai_client
|
||||
|
||||
def test_ai_client_send_gemini_cli() -> None:
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import pytest
|
||||
from unittest.mock import patch, MagicMock
|
||||
import ai_client
|
||||
|
||||
def test_list_models_gemini_cli() -> None:
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import pytest
|
||||
import textwrap
|
||||
from scripts.ai_style_formatter import format_code
|
||||
|
||||
|
||||
@@ -36,20 +36,7 @@ def test_event_emission() -> None:
|
||||
callback.assert_called_once_with(payload={"data": 123})
|
||||
|
||||
def test_send_emits_events() -> None:
|
||||
with patch("ai_client._send_gemini") as mock_send_gemini, \
|
||||
patch("ai_client._send_anthropic") as mock_send_anthropic:
|
||||
mock_send_gemini.return_value = "gemini response"
|
||||
start_callback = MagicMock()
|
||||
response_callback = MagicMock()
|
||||
ai_client.events.on("request_start", start_callback)
|
||||
ai_client.events.on("response_received", response_callback)
|
||||
ai_client.set_provider("gemini", "gemini-2.5-flash-lite")
|
||||
ai_client.send("context", "message")
|
||||
# We mocked _send_gemini so it doesn't emit events inside.
|
||||
# But wait, ai_client.send itself emits request_start and response_received?
|
||||
# Actually, ai_client.send delegates to _send_gemini.
|
||||
# Let's mock _gemini_client instead to let _send_gemini run and emit events.
|
||||
pass
|
||||
pytest.fail("TODO: This test is mocked incorrectly and asserts nothing. Use _proper version below.")
|
||||
|
||||
def test_send_emits_events_proper() -> None:
|
||||
with patch("ai_client._ensure_gemini_client"), \
|
||||
@@ -72,7 +59,6 @@ def test_send_emits_events_proper() -> None:
|
||||
assert kwargs['payload']['provider'] == 'gemini'
|
||||
|
||||
def test_send_emits_tool_events() -> None:
|
||||
import mcp_client
|
||||
with patch("ai_client._ensure_gemini_client"), \
|
||||
patch("ai_client._gemini_client") as mock_client, \
|
||||
patch("mcp_client.dispatch") as mock_dispatch:
|
||||
|
||||
@@ -1,9 +1,5 @@
import pytest
import requests
from unittest.mock import MagicMock, patch
import threading
import time
import json
from unittest.mock import patch
import sys
import os

@@ -1,4 +1,3 @@
import pytest
import sys
import os
from typing import Any

@@ -1,4 +1,3 @@
import pytest
from file_cache import ASTParser

def test_ast_parser_get_curated_view() -> None:

@@ -1,5 +1,4 @@
import asyncio
import pytest
from events import AsyncEventQueue

def test_async_event_queue_put_get() -> None:

@@ -1,4 +1,3 @@
import os
import pytest
from typing import Any
from datetime import datetime

@@ -1,10 +1,5 @@
import pytest
from unittest.mock import MagicMock, patch
from unittest.mock import patch
import os
import threading
import time
import json
import requests
import sys
from typing import Any

@@ -2,7 +2,6 @@
from unittest.mock import MagicMock, patch
from models import Ticket, Track, WorkerContext
import ai_client
import multi_agent_conductor

# These tests define the expected interface for multi_agent_conductor.py
# which will be implemented in the next phase of TDD.

@@ -1,5 +1,5 @@
import unittest
from unittest.mock import MagicMock, patch
from unittest.mock import patch
import conductor_tech_lead
import pytest

@@ -1,9 +1,6 @@
from typing import Any
import pytest
import os
import tomllib
import tomli_w
from pathlib import Path
import sys

# Ensure project root is in path

@@ -1,4 +1,3 @@
import pytest
from unittest.mock import patch, MagicMock
import ai_client

@@ -1,4 +1,3 @@
import pytest
from models import Ticket
from dag_engine import TrackDAG, ExecutionEngine

@@ -3,7 +3,6 @@ from typing import Any
from unittest.mock import patch, MagicMock
import json
import subprocess
import io
import sys
import os

@@ -1,8 +1,6 @@
import unittest
from unittest.mock import patch, MagicMock, ANY
from unittest.mock import patch, MagicMock
import json
import subprocess
import io
import sys
import os

@@ -1,9 +1,7 @@
import pytest
import time
import time
import os
import sys
import requests
import json
from typing import Any
from api_hook_client import ApiHookClient

@@ -1,5 +1,4 @@
from typing import Any
import pytest
import time
import os
import sys
@@ -23,7 +22,7 @@ def test_gemini_cli_full_integration(live_gui: Any) -> None:
    # For CI/testing we prefer mock
    mock_script = os.path.abspath("tests/mock_gemini_cli.py")
    cli_cmd = f'"{sys.executable}" "{mock_script}"'
    print(f"[TEST] Setting current_provider to gemini_cli")
    print("[TEST] Setting current_provider to gemini_cli")
    client.set_value("current_provider", "gemini_cli")
    print(f"[TEST] Setting gcli_path to {cli_cmd}")
    client.set_value("gcli_path", cli_cmd)
@@ -64,7 +63,7 @@ def test_gemini_cli_full_integration(live_gui: Any) -> None:
    content = entry.get("content", "")
    success_markers = ["processed the tool results", "Here are the files", "Here are the lines", "Script hello.ps1 created successfully"]
    if any(marker in content for marker in success_markers):
        print(f"[TEST] Success! Found final message in history.")
        print("[TEST] Success! Found final message in history.")
        found_final = True
        break
    if found_final:
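The paired print lines in the hunk above are ruff's F541 fix: an f-string with no placeholders gains nothing from the f prefix, so the auto-fix drops it, while f-strings that actually interpolate (like the gcli_path line) are left untouched. The rule in miniature, with an illustrative value:

cli_cmd = "python mock_gemini_cli.py"  # illustrative only
# Before: print(f"[TEST] Setting current_provider to gemini_cli")   (F541)
print("[TEST] Setting current_provider to gemini_cli")  # fixed: plain string
print(f"[TEST] Setting gcli_path to {cli_cmd}")         # real placeholder: f stays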
@@ -1,6 +1,5 @@
from typing import Any
import pytest
from unittest.mock import patch, MagicMock
from unittest.mock import patch
import ai_client

@patch('ai_client.GeminiCliAdapter')

@@ -1,4 +1,3 @@
import pytest
import os
import sys
from unittest.mock import MagicMock, patch

@@ -1,5 +1,5 @@
import pytest
from unittest.mock import MagicMock, patch
from unittest.mock import patch
from typing import Generator
from gui_2 import App
import ai_client

@@ -1,7 +1,6 @@
import pytest
from typing import Any
import time
import json
import os
import uuid
from pathlib import Path

@@ -68,7 +68,7 @@ def test_performance_parity() -> None:
    # Actually I'll use 0.15 for assertion and log the actual.
    fps_diff_pct = abs(gui_m["avg_fps"] - gui2_m["avg_fps"]) / gui_m["avg_fps"] if gui_m["avg_fps"] > 0 else 0
    cpu_diff_pct = abs(gui_m["avg_cpu"] - gui2_m["avg_cpu"]) / gui_m["avg_cpu"] if gui_m["avg_cpu"] > 0 else 0
    print(f"\n--- Performance Parity Results ---")
    print("\n--- Performance Parity Results ---")
    print(f"FPS Diff: {fps_diff_pct*100:.2f}%")
    print(f"CPU Diff: {cpu_diff_pct*100:.2f}%")
    # We follow the 5% requirement for FPS

@@ -1,6 +1,5 @@
import pytest
from unittest.mock import MagicMock, patch, AsyncMock
import asyncio
from unittest.mock import MagicMock, patch
from gui_2 import App
from events import UserRequestEvent

@@ -46,7 +46,7 @@ def test_diagnostics_panel_updates(app_instance: Any) -> None:
    app_instance.perf_monitor.get_metrics = MagicMock(return_value=mock_metrics)
    with patch('dearpygui.dearpygui.is_item_shown', return_value=True), \
         patch('dearpygui.dearpygui.set_value') as mock_set_value, \
         patch('dearpygui.dearpygui.configure_item') as mock_configure_item, \
         patch('dearpygui.dearpygui.configure_item'), \
         patch('dearpygui.dearpygui.does_item_exist', return_value=True):
        # We also need to mock ai_client stats
        with patch('ai_client.get_history_bleed_stats', return_value={}):

@@ -1,9 +1,8 @@

import pytest
from unittest.mock import MagicMock, patch
from unittest.mock import patch
from typing import Generator
import dearpygui.dearpygui as dpg
import gui_legacy
from gui_legacy import App
import ai_client

@@ -1,4 +1,3 @@
import pytest
import time
import sys
import os
@@ -36,7 +35,7 @@ def test_idle_performance_requirements(live_gui) -> None:
    assert frame_time < 33.3, f"Frame time {frame_time}ms exceeds 30fps threshold"

    if valid_ft_count == 0 or total_ft == 0:
        print(f"[Warning] Frame time is 0.0. This is expected in headless CI/CD environments.")
        print("[Warning] Frame time is 0.0. This is expected in headless CI/CD environments.")
    print(f"[Test] Valid frame time samples: {valid_ft_count}/5")
    # In some CI environments without a real display, frame time might remain 0
    # but we've verified the hook is returning the dictionary.

@@ -1,5 +1,4 @@
import os
import shutil
import json
from pathlib import Path
from unittest.mock import MagicMock, patch

@@ -2,8 +2,7 @@
import pytest
from unittest.mock import MagicMock, patch
from gui_2 import App
from models import Track, Ticket
import project_manager
from models import Track

@pytest.fixture
def mock_app() -> App:
@@ -73,7 +72,7 @@ def test_add_ticket_logic(mock_app: App):
    assert t["assigned_to"] == "tier3-worker"

    # Verify form was closed
    assert mock_app._show_add_ticket_form == False
    assert not mock_app._show_add_ticket_form
    # Verify push was called
    mock_push.assert_called_once()

@@ -140,7 +139,7 @@ def test_track_discussion_toggle(mock_app: App):

    mock_app._render_discussion_panel()

    assert mock_app._track_discussion_active == True
    assert mock_app._track_discussion_active
    mock_flush.assert_called()
    mock_load.assert_called_with("track-1", ".")
    assert len(mock_app.disc_entries) == 1
@@ -158,7 +157,7 @@ def test_track_discussion_toggle(mock_app: App):

    mock_app._render_discussion_panel()

    assert mock_app._track_discussion_active == False
    assert not mock_app._track_discussion_active
    mock_switch.assert_called_with(mock_app.active_discussion)

def test_push_mma_state_update(mock_app: App):
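The assert rewrites in the hunks above are ruff's E712 fix (comparison to True/False): equality tests against the boolean literals become plain truthiness checks, which behave the same for real booleans and read better in pytest failure output. In miniature, with illustrative flags:

_show_add_ticket_form = False
_track_discussion_active = True

# Before: assert _show_add_ticket_form == False      (E712)
assert not _show_add_ticket_form
# Before: assert _track_discussion_active == True    (E712)
assert _track_discussion_active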
@@ -1,8 +1,6 @@
import pytest
import asyncio
from unittest.mock import patch, MagicMock
from unittest.mock import patch
from gui_2 import App
import events

@pytest.fixture
def app_instance():

@@ -1,4 +1,3 @@
import pytest
import time
import sys
import os

@@ -1,5 +1,5 @@
import pytest
from unittest.mock import patch, MagicMock
from unittest.mock import patch
import importlib.util
import sys
import os
@@ -61,7 +61,7 @@ def test_telemetry_panel_updates_correctly(app_instance: Any) -> None:
         patch('dearpygui.dearpygui.set_value') as mock_set_value, \
         patch('dearpygui.dearpygui.configure_item') as mock_configure_item, \
         patch('dearpygui.dearpygui.is_item_shown', return_value=False), \
         patch('dearpygui.dearpygui.does_item_exist', return_value=True) as mock_does_item_exist:
         patch('dearpygui.dearpygui.does_item_exist', return_value=True):
        # 4. Call the method under test
        app_instance._refresh_api_metrics()
        # 5. Assert the results
@@ -88,11 +88,11 @@ def test_cache_data_display_updates_correctly(app_instance: Any) -> None:
    expected_text = "Gemini Caches: 5 (12.1 KB)"
    # 3. Patch dependencies
    app_instance._last_bleed_update_time = 0  # Force update
    with patch('ai_client.get_gemini_cache_stats', return_value=mock_cache_stats) as mock_get_cache_stats, \
    with patch('ai_client.get_gemini_cache_stats', return_value=mock_cache_stats), \
         patch('dearpygui.dearpygui.set_value') as mock_set_value, \
         patch('dearpygui.dearpygui.configure_item') as mock_configure_item, \
         patch('dearpygui.dearpygui.is_item_shown', return_value=False), \
         patch('dearpygui.dearpygui.does_item_exist', return_value=True) as mock_does_item_exist:
         patch('dearpygui.dearpygui.does_item_exist', return_value=True):
        # We also need to mock get_history_bleed_stats as it's called in the same function
        with patch('ai_client.get_history_bleed_stats', return_value={}):
            # 4. Call the method under test with payload
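A second cleanup that recurs through the GUI tests: patch(...) as mock_x bindings where mock_x is never inspected, as in the mock_does_item_exist and mock_get_cache_stats lines above. Dropping the as clause changes nothing about the patching itself; the attribute is still replaced for the duration of the with block, there is simply no longer a dead local name. A small runnable sketch, with os.getcwd chosen only for illustration:

import os
from unittest.mock import patch

# The patch is active inside the block whether or not the mock is named;
# an `as mock_getcwd` binding only earns its keep if the test asserts on it.
with patch("os.getcwd", return_value="/fake/cwd"):
    assert os.getcwd() == "/fake/cwd"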
@@ -1,11 +1,9 @@
from typing import Any
import pytest
from unittest.mock import MagicMock, patch, call
from models import Ticket, Track, WorkerContext
from unittest.mock import MagicMock, patch
from models import Ticket, Track
import multi_agent_conductor
from multi_agent_conductor import ConductorEngine
import ai_client
import json

@pytest.mark.asyncio
async def test_headless_verification_full_run(vlogger) -> None:
@@ -121,7 +119,7 @@ async def test_headless_verification_error_and_qa_interceptor(vlogger) -> None:
    vlogger.log_state("T1 Initial Status", "todo", t1.status)

    # Patch engine used in test
    with patch("multi_agent_conductor.run_worker_lifecycle", wraps=multi_agent_conductor.run_worker_lifecycle) as mock_worker_wrap:
    with patch("multi_agent_conductor.run_worker_lifecycle", wraps=multi_agent_conductor.run_worker_lifecycle):
        await engine.run()

    vlogger.log_state("T1 Final Status", "todo", t1.status)

@@ -1,10 +1,8 @@
import pytest
import sys
import os
import tomli_w
import tomllib
from pathlib import Path
from unittest.mock import MagicMock

# Ensure project root is in path for imports
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
@@ -23,7 +21,7 @@ def test_aggregate_includes_segregated_history(tmp_path: Path) -> None:
    when it's segregated into a separate file.
    """
    proj_path = tmp_path / "manual_slop.toml"
    hist_path = tmp_path / "manual_slop_history.toml"
    tmp_path / "manual_slop_history.toml"
    # Setup segregated project configuration
    proj_data = project_manager.default_project("test-aggregate")
    proj_data["discussion"]["discussions"]["main"]["history"] = ["@2026-02-24T14:00:00\nUser:\nShow me history"]
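The hist_path line above shows the shape of ruff's F841 fix (local variable assigned but never used): the binding disappears but the right-hand side survives as a bare expression statement, since the fixer does not assume the expression is side-effect free. The same residue appears further down, where expected_system_prompt = mma_prompts.PROMPTS['tier1_epic_init'] becomes a bare subscript. A sketch, with Path("/tmp") standing in for pytest's tmp_path fixture:

from pathlib import Path

tmp_path = Path("/tmp")  # stand-in for pytest's tmp_path fixture
proj_path = tmp_path / "manual_slop.toml"
# Before: hist_path = tmp_path / "manual_slop_history.toml"   (F841: never read)
tmp_path / "manual_slop_history.toml"  # after the fix: expression kept, binding gone
assert proj_path.name == "manual_slop.toml"  # proj_path is read, so it keeps its name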
@@ -1,8 +1,5 @@
import os
import sys
import pytest
import requests
import json
from unittest.mock import patch

# Ensure project root is in path

@@ -1,11 +1,10 @@
from typing import Generator
import pytest
from unittest.mock import MagicMock, patch, AsyncMock, ANY
from unittest.mock import patch, ANY
import asyncio
import time
from gui_2 import App
from events import UserRequestEvent
import ai_client

@pytest.fixture
def mock_app() -> Generator[App, None, None]:
@@ -94,7 +93,7 @@ def test_user_request_error_handling(mock_app: App) -> None:
    """
    app = mock_app
    with (
        patch('ai_client.send', side_effect=Exception("API Failure")) as mock_send,
        patch('ai_client.send', side_effect=Exception("API Failure")),
        patch('ai_client.set_custom_system_prompt'),
        patch('ai_client.set_model_params'),
        patch('ai_client.set_agent_tools')

@@ -46,12 +46,10 @@ def test_full_live_workflow(live_gui) -> None:
    client.set_value("ai_input", "Hello! This is an automated test. Just say 'Acknowledged'.")
    client.click("btn_gen_send")
    time.sleep(2)  # Verify thinking indicator appears (might be brief)
    thinking_seen = False
    print("\nPolling for thinking indicator...")
    for i in range(40):
        state = client.get_indicator_state("thinking_indicator")
        if state.get('shown'):
            thinking_seen = True
            print(f"Thinking indicator seen at poll {i}")
            break
        time.sleep(0.5)

@@ -1,10 +1,8 @@
import pytest
from unittest.mock import MagicMock, patch
import os
from pathlib import Path

# We can safely import gui_2 if we don't instantiate App without mocking its threads
import gui_2
from gui_2 import App

@pytest.fixture
@@ -69,8 +67,8 @@ def test_render_log_management_logic(app_instance: App) -> None:
         patch("gui_2.imgui.begin") as mock_begin, \
         patch("gui_2.imgui.begin_table") as mock_begin_table, \
         patch("gui_2.imgui.text") as mock_text, \
         patch("gui_2.imgui.end_table") as mock_end_table, \
         patch("gui_2.imgui.end") as mock_end, \
         patch("gui_2.imgui.end_table"), \
         patch("gui_2.imgui.end"), \
         patch("gui_2.imgui.push_style_color"), \
         patch("gui_2.imgui.pop_style_color"), \
         patch("gui_2.imgui.table_setup_column"), \

@@ -1,6 +1,4 @@
from typing import Tuple
import os
import shutil
import pytest
from pathlib import Path
from datetime import datetime, timedelta

@@ -1,12 +1,8 @@
import os
import shutil
import pytest
from typing import Any
from pathlib import Path
from datetime import datetime, timedelta
from unittest.mock import patch
import session_logger
import tomllib
from log_registry import LogRegistry
from log_pruner import LogPruner

@@ -1,7 +1,6 @@
import pytest
import sys
import os
from unittest.mock import MagicMock, patch
from unittest.mock import patch

# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

@@ -1,7 +1,5 @@
from __future__ import annotations
import math
import pytest
from unittest.mock import patch, MagicMock, call
from unittest.mock import patch, MagicMock

from gui_2 import App

@@ -1,5 +1,4 @@
from __future__ import annotations
import pytest
from unittest.mock import patch, MagicMock

from gui_2 import App

@@ -1,4 +1,3 @@
import pytest
from models import Ticket, Track, WorkerContext

def test_ticket_instantiation() -> None:

@@ -1,7 +1,6 @@
import pytest
import json
from unittest.mock import patch, MagicMock
import threading
from unittest.mock import patch
import time
from gui_2 import App

@@ -56,8 +55,7 @@ def test_cb_plan_epic_launches_thread(app_instance: App) -> None:
    with (
        patch('orchestrator_pm.get_track_history_summary', return_value="History summary") as mock_get_history,
        patch('orchestrator_pm.generate_tracks', return_value=mock_tracks) as mock_gen_tracks,
        patch('aggregate.build_file_items', return_value=[]) as mock_build_files
    ):
        patch('aggregate.build_file_items', return_value=[])):
        # We need to mock project_manager.flat_config and project_manager.load_project
        with (
            patch('project_manager.load_project', return_value={}),

@@ -1,4 +1,3 @@
import pytest
from mma_prompts import PROMPTS

def test_tier1_epic_init_constraints() -> None:

@@ -1,7 +1,6 @@
from typing import Generator
import pytest
from unittest.mock import patch, MagicMock
import asyncio
from gui_2 import App

@pytest.fixture

@@ -1,6 +1,5 @@
import subprocess
import json
import pytest


def get_message_content(stdout):

@@ -1,5 +1,5 @@
import pytest
from unittest.mock import MagicMock, patch
from unittest.mock import patch
import json
from typing import Any
import orchestrator_pm

@@ -1,6 +1,6 @@
import unittest
from typing import Any
from unittest.mock import patch, MagicMock
from unittest.mock import patch
import json
import orchestrator_pm
import mma_prompts
@@ -32,7 +32,7 @@ class TestOrchestratorPM(unittest.TestCase):
        # Verify summarize call
        mock_summarize.assert_called_once_with(file_items)
        # Verify ai_client.send call
        expected_system_prompt = mma_prompts.PROMPTS['tier1_epic_init']
        mma_prompts.PROMPTS['tier1_epic_init']
        mock_send.assert_called_once()
        args, kwargs = mock_send.call_args
        self.assertEqual(kwargs['md_content'], "")

@@ -1,6 +1,5 @@
import unittest
from unittest.mock import patch, MagicMock
import os
import shutil
import json
from pathlib import Path

@@ -1,4 +1,3 @@
import pytest
import sys
import os
import time

@@ -1,8 +1,5 @@
import pytest
from unittest.mock import MagicMock, patch, AsyncMock
import asyncio
import json
import multi_agent_conductor
from multi_agent_conductor import ConductorEngine, run_worker_lifecycle
from models import Ticket, Track, WorkerContext

@@ -1,6 +1,6 @@
from typing import Generator
import pytest
from unittest.mock import MagicMock, patch
from unittest.mock import patch
import ai_client
from gui_2 import App

@@ -1,7 +1,6 @@
import pytest
from typing import Any
import json
from pathlib import Path
from project_manager import get_all_tracks, save_track_state
from models import TrackState, Metadata, Ticket
from datetime import datetime

@@ -1,10 +1,6 @@
import pytest
import shutil
import os
import tomllib
from pathlib import Path
from datetime import datetime
from unittest.mock import patch
from typing import Generator
import session_logger

@@ -1,4 +1,3 @@
import pytest
from unittest.mock import MagicMock, patch
import os
import sys

@@ -1,4 +1,3 @@
import pytest
from unittest.mock import MagicMock, patch
import os
import sys

@@ -1,4 +1,3 @@
import pytest
from unittest.mock import MagicMock, patch
import os
import sys
Some files were not shown because too many files have changed in this diff.