import os
import shutil
import tempfile
from unittest.mock import MagicMock, patch

import pytest

from src.app_controller import AppController
from src import ai_client
from src import events
from src import models


@pytest.fixture
def mock_project():
    """Yield a throwaway project directory holding a minimal manifest."""
    temp_dir = tempfile.mkdtemp()
    try:
        # A bare-bones manual_slop.toml is all the controller needs to load.
        with open(os.path.join(temp_dir, "manual_slop.toml"), "w") as f:
            f.write('discussion_history = []\n')
        yield temp_dir
    finally:
        # Always remove the directory tree once the test finishes.
        shutil.rmtree(temp_dir)


def test_rag_integration(mock_project):
    """
    Integration test verifying the flow from AppController through RAGEngine
    to ai_client.
    """
    # 1. Initialize a mock project and AppController. Several components are
    #    patched so construction has no side effects.
    with patch('src.app_controller.AppController._fetch_models'), \
         patch('src.models.load_config', return_value={}), \
         patch('src.paths.get_full_path_info',
               return_value={'logs_dir': {'path': mock_project},
                             'scripts_dir': {'path': mock_project}}), \
         patch('src.theme.load_from_config'):
        app = AppController()

        # Minimal state required by _handle_request_event.
        app.ui_global_system_prompt = ""
        app.ui_project_system_prompt = ""
        app.ui_base_system_prompt = ""
        app.ui_use_default_base_prompt = True
        app.ui_project_context_marker = ""
        app.temperature = 0.0
        app.max_tokens = 100
        app.history_trunc_limit = 1000
        app.top_p = 1.0
        app.ui_agent_tools = {}
        app.ui_gemini_cli_path = "gemini"
        app.current_model = "gemini-1.5-flash"
        app.active_project_path = os.path.join(mock_project, "manual_slop.toml")

        # Ensure the provider is set to 'gemini' for our test.
        ai_client.set_provider("gemini", "gemini-1.5-flash")

        # 2. Configure a mock RAG setup (enabled=True, provider='mock').
        rag_config = models.RAGConfig(
            enabled=True,
            vector_store=models.VectorStoreConfig(provider='mock'),
        )
        app.rag_config = rag_config

        # 3. Mock rag_engine.search to return a known chunk.
        mock_rag_engine = MagicMock()
        mock_rag_engine.config = rag_config
        mock_rag_engine.search.return_value = [
            {
                "document": "This is a retrieved chunk from RAG.",
                "metadata": {"path": "test_file.py"},
            }
        ]
        app.rag_engine = mock_rag_engine

        # 4. Mock ai_client.send to verify that the retrieved chunk appears in
        #    the message sent to the provider. 'wraps' lets the real logic run
        #    while still giving us an inspectable mock; _send_gemini is what
        #    actually "sends to the provider".
        with patch('src.ai_client.send', wraps=ai_client.send) as mock_send, \
             patch('src.ai_client._send_gemini') as mock_provider:
            mock_provider.return_value = "Mock AI Response"

            # Build a UserRequestEvent as if the user clicked "Gen + Send".
            event = events.UserRequestEvent(
                prompt="Tell me about the code.",
                stable_md="Context",
                file_items=[],
                disc_text="History",
                base_dir=mock_project,
            )

            # Drive the request-event processing logic in AppController.
            app._handle_request_event(event)

            # 5. Verify the wiring from AppController through RAGEngine to
            #    ai_client: send() was invoked with our engine attached.
            assert mock_send.called
            _, kwargs = mock_send.call_args
            assert kwargs['rag_engine'] == mock_rag_engine

            # The internal provider call must have been made.
            assert mock_provider.called

            # Extract the user_message passed to the provider call.
            # _send_gemini(md_content, user_message, ...) -> index 1.
            args, _ = mock_provider.call_args
            sent_user_message = args[1]

            # The RAG chunk is prepended to the original prompt.
            assert "This is a retrieved chunk from RAG." in sent_user_message
            assert "Tell me about the code." in sent_user_message
            assert "## Retrieved Context" in sent_user_message
            assert "Source: test_file.py" in sent_user_message

            # rag_engine.search received the original prompt, exactly once.
            mock_rag_engine.search.assert_called_once_with(
                "Tell me about the code.")