"""Tests for qa_callback wiring through shell_runner and ai_client.

Covers: callback invocation on command failure and on stderr-only output,
the success path where the callback must not fire, the callback being
optional, and propagation of the callback down to the Gemini provider's
_run_script call.
"""
import subprocess
from unittest.mock import MagicMock, patch

import pytest

from shell_runner import run_powershell


def test_run_powershell_qa_callback_on_failure() -> None:
    """qa_callback fires on a non-zero exit code and its result lands in the output."""
    failing_script = "Write-Error 'something went wrong'; exit 1"
    workdir = "."

    # Simulated CompletedProcess for a failed run.
    fake_proc = MagicMock(stdout="", stderr="something went wrong", returncode=1)
    analyzer = MagicMock(return_value="QA ANALYSIS: This looks like a syntax error.")

    with patch("subprocess.run", return_value=fake_proc), \
            patch("shutil.which", return_value="powershell.exe"):
        # run_powershell is expected to accept a qa_callback keyword.
        report = run_powershell(failing_script, workdir, qa_callback=analyzer)

        # The callback must receive exactly the captured stderr text...
        analyzer.assert_called_once_with("something went wrong")
        # ...and its analysis, plus the raw streams, must appear in the report.
        assert "QA ANALYSIS: This looks like a syntax error." in report
        assert "STDERR:\nsomething went wrong" in report
        assert "EXIT CODE: 1" in report


def test_run_powershell_qa_callback_on_stderr_only() -> None:
    """qa_callback fires when stderr is non-empty even though the exit code is 0."""
    warning_script = "Write-Error 'non-fatal error'"
    workdir = "."

    fake_proc = MagicMock(stdout="Success", stderr="non-fatal error", returncode=0)
    analyzer = MagicMock(return_value="QA ANALYSIS: Ignorable warning.")

    with patch("subprocess.run", return_value=fake_proc), \
            patch("shutil.which", return_value="powershell.exe"):
        report = run_powershell(warning_script, workdir, qa_callback=analyzer)

        analyzer.assert_called_once_with("non-fatal error")
        assert "QA ANALYSIS: Ignorable warning." in report
        assert "STDOUT:\nSuccess" in report


def test_run_powershell_no_qa_callback_on_success() -> None:
    """A clean run (exit 0, empty stderr) must not trigger the qa_callback."""
    ok_script = "Write-Output 'All good'"
    workdir = "."

    fake_proc = MagicMock(stdout="All good", stderr="", returncode=0)
    analyzer = MagicMock()

    with patch("subprocess.run", return_value=fake_proc), \
            patch("shutil.which", return_value="powershell.exe"):
        report = run_powershell(ok_script, workdir, qa_callback=analyzer)

        analyzer.assert_not_called()
        assert "STDOUT:\nAll good" in report
        assert "EXIT CODE: 0" in report
        assert "QA ANALYSIS" not in report


def test_run_powershell_optional_qa_callback() -> None:
    """run_powershell remains callable when no qa_callback is supplied."""
    failing_script = "Write-Error 'error'"
    workdir = "."

    fake_proc = MagicMock(stdout="", stderr="error", returncode=1)

    with patch("subprocess.run", return_value=fake_proc), \
            patch("shutil.which", return_value="powershell.exe"):
        # Omitting qa_callback must not raise a TypeError.
        report = run_powershell(failing_script, workdir)

        assert "STDERR:\nerror" in report
        assert "EXIT CODE: 1" in report


def test_end_to_end_tier4_integration() -> None:
    """run_powershell feeds stderr into ai_client.run_tier4_analysis when used as the qa_callback."""
    import ai_client

    failing_script = "Invoke-Item non_existent_file"
    workdir = "."
    captured_stderr = "Invoke-Item : Cannot find path 'C:\\non_existent_file' because it does not exist."
    analysis_text = "Path does not exist. Verify the file path and ensure the file is present before invoking."

    fake_proc = MagicMock(stdout="", stderr=captured_stderr, returncode=1)

    with patch("subprocess.run", return_value=fake_proc), \
            patch("shutil.which", return_value="powershell.exe"), \
            patch("ai_client.run_tier4_analysis", return_value=analysis_text) as analysis_mock:
        # Attribute lookup happens while the patch is active, so the mock is passed.
        report = run_powershell(failing_script, workdir, qa_callback=ai_client.run_tier4_analysis)

        analysis_mock.assert_called_once_with(captured_stderr)
        assert f"QA ANALYSIS:\n{analysis_text}" in report


def test_ai_client_passes_qa_callback() -> None:
    """ai_client.send must hand qa_callback through to the provider function."""
    import ai_client

    # Stub the provider so no real API call happens.
    provider_mock = MagicMock(return_value="AI Response")
    analyzer = MagicMock(return_value="QA Analysis")

    # Pin the provider to gemini and swap in the stub.
    with patch("ai_client._provider", "gemini"), \
            patch("ai_client._send_gemini", provider_mock):
        ai_client.send(
            md_content="Context",
            user_message="Hello",
            qa_callback=analyzer,
        )

        provider_mock.assert_called_once()
        # _send_gemini takes qa_callback as its 7th positional argument.
        assert provider_mock.call_args.args[6] == analyzer


def test_gemini_provider_passes_qa_callback_to_run_script() -> None:
    """_send_gemini must forward the qa_callback into _run_script for tool calls."""
    import ai_client

    def build_response(part, prompt_tokens, reply_tokens):
        # Wrap a single content part in a Gemini-response-shaped MagicMock.
        candidate = MagicMock()
        candidate.content.parts = [part]
        candidate.finish_reason.name = "STOP"
        response = MagicMock()
        response.candidates = [candidate]
        response.usage_metadata.prompt_token_count = prompt_tokens
        response.usage_metadata.candidates_token_count = reply_tokens
        return response

    # First turn: the model requests the run_powershell tool.
    tool_part = MagicMock()
    tool_part.text = ""
    tool_part.function_call = MagicMock()
    tool_part.function_call.name = "run_powershell"
    tool_part.function_call.args = {"script": "dir"}

    # Second turn: plain text with no tool call, which ends the loop.
    final_part = MagicMock()
    final_part.text = "Done"
    final_part.function_call = None

    gemini_chat = MagicMock()
    gemini_chat.send_message.side_effect = [
        build_response(tool_part, 10, 5),
        build_response(final_part, 5, 2),
    ]

    # count_tokens is stubbed so chat creation does not fail.
    gemini_client = MagicMock()
    token_count = MagicMock()
    token_count.total_tokens = 100
    gemini_client.models.count_tokens.return_value = token_count

    analyzer = MagicMock()

    # Pin the module-level Gemini state for the duration of the call.
    with patch("ai_client._gemini_client", gemini_client), \
            patch("ai_client._gemini_chat", None), \
            patch("ai_client._ensure_gemini_client"), \
            patch("ai_client._run_script", return_value="output") as run_script_mock, \
            patch("ai_client._get_gemini_history_list", return_value=[]):
        # Ensure chats.create hands back our scripted chat.
        gemini_client.chats.create.return_value = gemini_chat

        ai_client._send_gemini(
            md_content="Context",
            user_message="Run dir",
            base_dir=".",
            qa_callback=analyzer,
        )

        # _run_script must receive the script, base dir, and the callback.
        run_script_mock.assert_called_once_with("dir", ".", analyzer)