checkpoint: massive refactor
This commit is contained in:
@@ -10,43 +10,38 @@ import ai_client
|
||||
|
||||
@pytest.fixture(autouse=True)
def setup_ai_client():
    """Put ``ai_client`` into a clean, fully-stubbed state for every test.

    Runs automatically (autouse): resets the session, pins the provider to
    the Gemini CLI flash model, and replaces each module-level callback with
    an inert stub so no test triggers real script execution or logging.
    """

    def _fake_confirm_and_run(script, base_dir):
        # Pretend the script ran; tests only care that the hook was invoked.
        return "Mocked execution"

    def _discard_comms(entry):
        return None

    def _discard_tool(script, result):
        return None

    ai_client.reset_session()
    ai_client.set_provider("gemini_cli", "gemini-2.5-flash")
    ai_client.confirm_and_run_callback = _fake_confirm_and_run
    ai_client.comms_log_callback = _discard_comms
    ai_client.tool_log_callback = _discard_tool
    yield
|
||||
@patch('ai_client.GeminiCliAdapter')
@patch('ai_client._get_combined_system_prompt')
def test_send_invokes_adapter_send(mock_prompt, mock_adapter_class):
    """``ai_client.send`` must forward a history-prefixed payload and the
    combined system prompt (wrapped in a <context> envelope) to the adapter."""
    # Arrange: a canned adapter whose send() succeeds with no tool calls.
    adapter = mock_adapter_class.return_value
    adapter.send.return_value = {"text": "Done", "tool_calls": []}
    adapter.last_usage = {"input_tokens": 10}
    adapter.last_latency = 0.1
    adapter.session_id = None
    mock_prompt.return_value = "Mocked Prompt"

    # Act.
    ai_client.send("context", "message", discussion_history="hist")

    # Assert: discussion history is prepended to the user message…
    assert adapter.send.called
    call = adapter.send.call_args
    assert call.args[0] == "[DISCUSSION HISTORY]\n\nhist\n\n---\n\nmessage"
    # …and the context is wrapped beneath the combined system prompt.
    assert call.kwargs['system_instruction'] == "Mocked Prompt\n\n<context>\ncontext\n</context>"
||||
@patch('ai_client.GeminiCliAdapter')
def test_get_history_bleed_stats(mock_adapter_class):
    """After one send, the bleed stats must report the active provider and
    the adapter's last observed input-token count."""
    # Arrange: adapter reports 1500 input tokens on its last call.
    adapter = mock_adapter_class.return_value
    adapter.send.return_value = {"text": "txt", "tool_calls": []}
    adapter.last_usage = {"input_tokens": 1500}
    adapter.last_latency = 0.5
    adapter.session_id = "sess"

    # Initialize by sending a message
    ai_client.send("context", "msg")

    stats = ai_client.get_history_bleed_stats()

    assert stats["provider"] == "gemini_cli"
    assert stats["current"] == 1500
|
||||
Reference in New Issue
Block a user