feat(logs): Implement file-based offloading for scripts and tool outputs
This commit is contained in:
@@ -1,5 +1,6 @@
|
||||
import threading
|
||||
import time
|
||||
import copy
|
||||
import sys
|
||||
import os
|
||||
import re
|
||||
@@ -1062,12 +1063,31 @@ class AppController:
|
||||
sys.stderr.flush()
|
||||
self.event_queue.put("response", {"text": f"ERROR: {e}", "status": "error", "role": "System"})
|
||||
|
||||
def _offload_entry_payload(self, entry: Dict[str, Any]) -> Dict[str, Any]:
    """Return a deep copy of *entry* with bulky payload fields offloaded to files.

    Large tool outputs and generated scripts are written to the session's
    log directory via ``session_logger``; the copy's payload then carries a
    short ``[REF:<filename>]`` marker instead of the full text.  The input
    entry is never mutated (deep copy), so in-memory consumers still see
    the complete content.

    Args:
        entry: A comms entry dict with optional "kind" and "payload" keys.

    Returns:
        A deep copy of *entry*, possibly with "output"/"script" replaced
        by file-reference markers.
    """
    optimized = copy.deepcopy(entry)
    kind = optimized.get("kind")
    payload = optimized.get("payload", {})

    if kind == "tool_result" and "output" in payload:
        ref_path = session_logger.log_tool_output(payload["output"])
        if ref_path:
            # BUG FIX: previously emitted the literal "[REF:(unknown)]" and
            # left the computed filename unused, making the reference
            # unresolvable.  Embed the actual offloaded filename so the
            # marker matches the file on disk (e.g. "[REF:output_0001.txt]").
            payload["output"] = f"[REF:{Path(ref_path).name}]"

    if kind == "tool_call" and "script" in payload:
        # "LOG_ONLY" marks this record as offload-only, not an execution result.
        ref_path = session_logger.log_tool_call(payload["script"], "LOG_ONLY", None)
        if ref_path:
            payload["script"] = f"[REF:{Path(ref_path).name}]"

    return optimized
|
||||
|
||||
def _on_ai_stream(self, text: str) -> None:
    """Forward a streaming AI text fragment to the UI event queue."""
    event_payload = {"text": text, "status": "streaming...", "role": "AI"}
    self.event_queue.put("response", event_payload)
|
||||
|
||||
def _on_comms_entry(self, entry: Dict[str, Any]) -> None:
|
||||
session_logger.log_comms(entry)
|
||||
optimized_entry = self._offload_entry_payload(entry)
|
||||
session_logger.log_comms(optimized_entry)
|
||||
entry["local_ts"] = time.time()
|
||||
kind = entry.get("kind")
|
||||
payload = entry.get("payload", {})
|
||||
@@ -1128,6 +1148,7 @@ class AppController:
|
||||
|
||||
def _on_tool_log(self, script: str, result: str) -> None:
    """Persist a tool invocation to the session logs and queue it internally.

    The script and its output are written to the session's log files, and
    a record (tagged with the current AI tier) is appended to the pending
    tool-call queue under its lock.
    """
    # Write both halves of the invocation to the session log files.
    session_logger.log_tool_call(script, result, None)
    session_logger.log_tool_output(result)

    # Attribute the call to whichever model tier is currently active.
    tier = ai_client.get_current_tier()
    record = {
        "script": script,
        "result": result,
        "ts": time.time(),
        "source_tier": tier,
    }
    with self._pending_tool_calls_lock:
        self._pending_tool_calls.append(record)
|
||||
|
||||
@@ -29,7 +29,9 @@ _ts: str = "" # session timestamp string e.g. "20260301_142233"
|
||||
_session_id: str = "" # YYYYMMDD_HHMMSS[_Label]
|
||||
_session_dir: Optional[Path] = None # Path to the sub-directory for this session
|
||||
_seq: int = 0 # monotonic counter for script files this session
|
||||
_output_seq: int = 0 # monotonic counter for output files this session
|
||||
_seq_lock: threading.Lock = threading.Lock()
|
||||
_output_seq_lock: threading.Lock = threading.Lock()
|
||||
|
||||
_comms_fh: Optional[TextIO] = None # file handle: logs/sessions/<session_id>/comms.log
|
||||
_tool_fh: Optional[TextIO] = None # file handle: logs/sessions/<session_id>/toolcalls.log
|
||||
@@ -44,7 +46,7 @@ def open_session(label: Optional[str] = None) -> None:
|
||||
Called once at GUI startup. Creates the log directories if needed and
|
||||
opens the log files for this session within a sub-directory.
|
||||
"""
|
||||
global _ts, _session_id, _session_dir, _comms_fh, _tool_fh, _api_fh, _cli_fh, _seq
|
||||
global _ts, _session_id, _session_dir, _comms_fh, _tool_fh, _api_fh, _cli_fh, _seq, _output_seq
|
||||
if _comms_fh is not None:
|
||||
return
|
||||
|
||||
@@ -56,9 +58,13 @@ def open_session(label: Optional[str] = None) -> None:
|
||||
|
||||
_session_dir = paths.get_logs_dir() / _session_id
|
||||
_session_dir.mkdir(parents=True, exist_ok=True)
|
||||
(_session_dir / "scripts").mkdir(exist_ok=True)
|
||||
(_session_dir / "outputs").mkdir(exist_ok=True)
|
||||
|
||||
paths.get_scripts_dir().mkdir(parents=True, exist_ok=True)
|
||||
|
||||
_seq = 0
|
||||
_output_seq = 0
|
||||
_comms_fh = open(_session_dir / "comms.log", "w", encoding="utf-8", buffering=1)
|
||||
_tool_fh = open(_session_dir / "toolcalls.log", "w", encoding="utf-8", buffering=1)
|
||||
_api_fh = open(_session_dir / "apihooks.log", "w", encoding="utf-8", buffering=1)
|
||||
@@ -132,7 +138,7 @@ def log_comms(entry: dict[str, Any]) -> None:
|
||||
def log_tool_call(script: str, result: str, script_path: Optional[str]) -> Optional[str]:
|
||||
"""
|
||||
Append a tool-call record to the toolcalls log and write the PS1 script to
|
||||
scripts/generated/. Returns the path of the written script file.
|
||||
the session's scripts directory. Returns the path of the written script file.
|
||||
"""
|
||||
global _seq
|
||||
if _tool_fh is None:
|
||||
@@ -143,8 +149,12 @@ def log_tool_call(script: str, result: str, script_path: Optional[str]) -> Optio
|
||||
seq = _seq
|
||||
|
||||
ts_entry = datetime.datetime.now().strftime("%H:%M:%S")
|
||||
ps1_name = f"{_ts}_{seq:04d}.ps1"
|
||||
ps1_path: Optional[Path] = paths.get_scripts_dir() / ps1_name
|
||||
ps1_name = f"script_{seq:04d}.ps1"
|
||||
|
||||
if _session_dir:
|
||||
ps1_path: Optional[Path] = _session_dir / "scripts" / ps1_name
|
||||
else:
|
||||
ps1_path = paths.get_scripts_dir() / f"{_ts}_{seq:04d}.ps1"
|
||||
|
||||
try:
|
||||
if ps1_path:
|
||||
@@ -167,6 +177,28 @@ def log_tool_call(script: str, result: str, script_path: Optional[str]) -> Optio
|
||||
|
||||
return str(ps1_path) if ps1_path else None
|
||||
|
||||
def log_tool_output(content: str) -> Optional[str]:
    """Persist *content* as a numbered file under the session's outputs/ dir.

    Returns the written file's path as a string, or ``None`` when no
    session is open or the write fails (best-effort logging never raises).
    """
    global _output_seq
    if _session_dir is None:
        return None

    # Reserve the next sequence number under the lock so concurrent
    # callers never collide on a filename.
    with _output_seq_lock:
        _output_seq += 1
        seq = _output_seq

    target = _session_dir / "outputs" / f"output_{seq:04d}.txt"
    try:
        target.write_text(content, encoding="utf-8")
        return str(target)
    except Exception:
        # Logging must not propagate I/O failures to the caller.
        return None
|
||||
|
||||
def log_cli_call(command: str, stdin_content: Optional[str], stdout_content: Optional[str], stderr_content: Optional[str], latency: float) -> None:
|
||||
"""Log details of a CLI subprocess execution."""
|
||||
if _cli_fh is None:
|
||||
|
||||
110
tests/test_app_controller_offloading.py
Normal file
110
tests/test_app_controller_offloading.py
Normal file
@@ -0,0 +1,110 @@
|
||||
import pytest
|
||||
import json
|
||||
import time
|
||||
import copy
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, patch
|
||||
from src.app_controller import AppController
|
||||
from src import session_logger, paths, ai_client, project_manager
|
||||
|
||||
@pytest.fixture
def tmp_session_dir(tmp_path, monkeypatch):
    """Set up a temporary session directory for session_logger.

    Redirects the logs/scripts roots via environment variables, opens a
    fresh session, and yields the per-session directory path.  The session
    is closed again on teardown.
    """
    logs_dir = tmp_path / "logs"
    scripts_dir = tmp_path / "scripts"
    logs_dir.mkdir()
    scripts_dir.mkdir()

    monkeypatch.setenv("SLOP_LOGS_DIR", str(logs_dir))
    monkeypatch.setenv("SLOP_SCRIPTS_DIR", str(scripts_dir))
    paths.reset_resolved()

    # Force _comms_fh to None so open_session() does not early-return if a
    # previous test left a session open.  NOTE: the patch stays active for
    # the whole test because `yield` sits inside the `with` block.
    with patch("src.session_logger._comms_fh", None):
        session_logger.open_session("test_offloading")
        yield logs_dir / session_logger._session_id
        session_logger.close_session()
|
||||
|
||||
@pytest.fixture
def app_controller(tmp_session_dir):
    """Create an AppController instance for testing."""
    # Patch out the performance monitor so construction does not spawn
    # its background machinery.
    with patch("src.app_controller.performance_monitor.PerformanceMonitor"):
        ctrl = AppController()
        # Minimal setup to avoid complex initialization
        ctrl.ui_auto_add_history = True
        return ctrl
|
||||
|
||||
def test_on_comms_entry_tool_result_offloading(app_controller, tmp_session_dir):
    """
    Test that _on_comms_entry offloads tool_result output to a separate file.
    """
    output_content = "This is a large tool output that should be offloaded."
    entry = {
        "kind": "tool_result",
        "payload": {
            "output": output_content
        },
        "ts": "12:00:00"
    }

    # Track calls to session_logger.log_comms
    with patch("src.session_logger.log_comms") as mock_log_comms:
        app_controller._on_comms_entry(entry)

    # 1. The logged entry must be the optimized copy whose bulky output
    #    has been replaced with a "[REF:output_NNNN.txt]" marker.
    assert mock_log_comms.called
    optimized_entry = mock_log_comms.call_args[0][0]
    assert optimized_entry["kind"] == "tool_result"
    assert "output" in optimized_entry["payload"]
    ref_text = optimized_entry["payload"]["output"]
    assert ref_text.startswith("[REF:output_")
    assert ref_text.endswith(".txt]")

    # 2. The original entry must be untouched — offloading deep-copies it.
    assert entry["payload"]["output"] == output_content

    # 3. The offloaded file exists and holds the full original content.
    ref_filename = ref_text[5:-1]  # Strip the "[REF:" prefix and "]" suffix
    offloaded_path = tmp_session_dir / "outputs" / ref_filename
    assert offloaded_path.exists()
    assert offloaded_path.read_text(encoding="utf-8") == output_content

    # 4. Internal state (pending history adds) still carries the original
    #    output text, not the reference marker.
    with app_controller._pending_history_adds_lock:
        assert len(app_controller._pending_history_adds) > 0
        history_entry = next(e for e in app_controller._pending_history_adds if e["role"] == "Tool")
        assert output_content in history_entry["content"]
        assert "[TOOL RESULT]" in history_entry["content"]
|
||||
|
||||
def test_on_tool_log_offloading(app_controller, tmp_session_dir):
    """
    Test that _on_tool_log calls session_logger.log_tool_call and log_tool_output.
    """
    script = "Get-Process"
    result = "Process list..."

    # Pin the tier so the source_tier attribution is deterministic.
    with patch("src.ai_client.get_current_tier", return_value="Tier 3"):
        app_controller._on_tool_log(script, result)

    # Verify files were created in the session directory.
    scripts_dir = tmp_session_dir / "scripts"
    outputs_dir = tmp_session_dir / "outputs"

    script_files = list(scripts_dir.glob("script_*.ps1"))
    assert len(script_files) == 1
    assert script_files[0].read_text(encoding="utf-8") == script

    output_files = list(outputs_dir.glob("output_*.txt"))
    # We expect at least one output file for the result (earlier fixture
    # activity may have produced others).
    assert len(output_files) >= 1
    assert any(f.read_text(encoding="utf-8") == result for f in output_files)

    # Verify AppController internal state: one pending record carrying the
    # original script/result and the patched tier.
    with app_controller._pending_tool_calls_lock:
        assert len(app_controller._pending_tool_calls) == 1
        assert app_controller._pending_tool_calls[0]["script"] == script
        assert app_controller._pending_tool_calls[0]["result"] == result
        assert app_controller._pending_tool_calls[0]["source_tier"] == "Tier 3"
|
||||
95
tests/test_session_logger_optimization.py
Normal file
95
tests/test_session_logger_optimization.py
Normal file
@@ -0,0 +1,95 @@
|
||||
import pytest
|
||||
from pathlib import Path
|
||||
from typing import Generator
|
||||
from src import session_logger
|
||||
from src import paths
|
||||
|
||||
@pytest.fixture
def temp_session_setup(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> Generator[tuple[Path, Path], None, None]:
    """Reset session_logger state and redirect its directories to tmp_path.

    Yields (log_dir, scripts_dir); any open session is closed on teardown.
    """
    # Ensure session is closed and module-level state is reset so the
    # sequence counters start from zero for every test.
    session_logger.close_session()
    monkeypatch.setattr(session_logger, "_comms_fh", None)
    monkeypatch.setattr(session_logger, "_session_dir", None)
    monkeypatch.setattr(session_logger, "_seq", 0)
    monkeypatch.setattr(session_logger, "_output_seq", 0)

    log_dir = tmp_path / "logs"
    scripts_dir = tmp_path / "scripts" / "generated"
    log_dir.mkdir(parents=True, exist_ok=True)
    scripts_dir.mkdir(parents=True, exist_ok=True)

    # Point the paths module at the temporary directories instead of the
    # real on-disk locations.
    monkeypatch.setattr(paths, "get_logs_dir", lambda: log_dir)
    monkeypatch.setattr(paths, "get_scripts_dir", lambda: scripts_dir)

    yield log_dir, scripts_dir

    # Cleanup
    session_logger.close_session()
|
||||
|
||||
def test_session_directory_and_subdirectories_creation(temp_session_setup: tuple[Path, Path]) -> None:
    """A fresh session creates its directory plus the expected files/subdirs."""
    log_dir, _ = temp_session_setup
    session_logger.open_session(label="opt-test")

    # Exactly one session directory should have been created.
    subdirs = [entry for entry in log_dir.iterdir() if entry.is_dir()]
    assert len(subdirs) == 1
    created = subdirs[0]

    # Both offload subdirectories and both primary log files must exist.
    for expected in ("scripts", "outputs", "comms.log", "toolcalls.log"):
        assert (created / expected).exists()
|
||||
|
||||
def test_log_tool_call_saves_in_session_scripts(temp_session_setup: tuple[Path, Path]) -> None:
    """log_tool_call writes numbered .ps1 files into <session>/scripts."""
    log_dir, _ = temp_session_setup
    session_logger.open_session(label="tool-call-test")

    # Locate the per-session scripts directory.
    session_root = next(d for d in log_dir.iterdir() if d.is_dir())
    expected_parent = session_root / "scripts"

    script_body = "Write-Host 'Hello from test'"

    # First call (script_path=None) should write script_0001.ps1.
    first = session_logger.log_tool_call(script_body, "Success", None)
    assert first is not None
    first_path = Path(first)
    assert first_path.parent == expected_parent
    assert first_path.name == "script_0001.ps1"
    assert first_path.read_text(encoding="utf-8") == script_body

    # A second call must advance the sequence counter.
    second = session_logger.log_tool_call("Get-Date", "2026-03-08", None)
    assert second is not None
    assert Path(second).name == "script_0002.ps1"
|
||||
|
||||
def test_log_tool_output_saves_in_session_outputs(temp_session_setup: tuple[Path, Path]) -> None:
    """log_tool_output writes numbered .txt files into <session>/outputs."""
    log_dir, _ = temp_session_setup
    session_logger.open_session(label="output-test")

    # Locate the per-session outputs directory.
    session_root = next(d for d in log_dir.iterdir() if d.is_dir())
    expected_parent = session_root / "outputs"

    text = "This is some tool output content."

    # First call should write output_0001.txt with the exact content.
    first = session_logger.log_tool_output(text)
    assert first is not None
    first_path = Path(first)
    assert first_path.parent == expected_parent
    assert first_path.name == "output_0001.txt"
    assert first_path.read_text(encoding="utf-8") == text

    # A second call must advance the output sequence counter.
    second = session_logger.log_tool_output("More content")
    assert second is not None
    assert Path(second).name == "output_0002.txt"
|
||||
|
||||
def test_log_tool_output_returns_none_if_no_session(temp_session_setup: tuple[Path, Path]) -> None:
    """Without an open session, log_tool_output must refuse to write."""
    # Deliberately no open_session() call here.
    result = session_logger.log_tool_output("Should not save")
    assert result is None
|
||||
Reference in New Issue
Block a user