fix(simulation): Resolve simulation timeouts and stabilize history checks

This commit is contained in:
2026-03-03 00:56:35 -05:00
parent aed1f9a97e
commit dbd955a45b
4 changed files with 145 additions and 29 deletions

View File

@@ -1697,6 +1697,50 @@ def run_tier4_analysis(stderr: str) -> str:
def get_token_stats(md_content: str) -> dict[str, Any]:
    """
    Return token usage statistics for the given markdown content.

    Uses the current provider's count_tokens API when available; otherwise
    falls back to a character-based estimate.

    Args:
        md_content: The markdown text to measure.

    Returns:
        Dict with keys "total_tokens", "current", "limit", and "percentage",
        passed through _add_bleed_derived() for derived fields.
    """
    total_tokens = 0
    # 1. Attempt provider-specific counting. Both gemini variants use the
    #    same client API, so they share one branch (previously duplicated).
    if _provider in ("gemini", "gemini_cli"):
        try:
            _ensure_gemini_client()
            if _gemini_client:
                resp = _gemini_client.models.count_tokens(model=_model, contents=md_content)
                # Guard: count_tokens can report total_tokens=None; treat
                # that as "unknown" so the estimation fallback engages.
                total_tokens = resp.total_tokens or 0
        except Exception:
            pass  # Fall back to estimation below.
    # 2. Fallback to estimation (also covers empty content and failed counts).
    if total_tokens == 0:
        total_tokens = max(1, int(len(md_content) / _CHARS_PER_TOKEN))
    # Budget limit depends on the active provider.
    limit = _GEMINI_MAX_INPUT_TOKENS if _provider in ("gemini", "gemini_cli") else _ANTHROPIC_MAX_PROMPT_TOKENS
    if _provider == "deepseek":
        limit = 64000  # DeepSeek context window override.
    pct = (total_tokens / limit * 100) if limit > 0 else 0
    stats = {
        "total_tokens": total_tokens,
        "current": total_tokens,
        "limit": limit,
        "percentage": pct,
    }
    return _add_bleed_derived(stats, sys_tok=total_tokens)
def send(
md_content: str,
user_message: str,