# Conductor task definition for a tier-3 worker agent.
role = "tier3-worker"
prompt = """TASK: Implement streaming support for the DeepSeek provider in ai_client.py and add failing tests.

INSTRUCTIONS:

1. In @tests/test_deepseek_provider.py:

- Add a test function 'test_deepseek_streaming' that mocks a streaming API response using 'requests.post(..., stream=True)'.

- Use 'mock_response.iter_lines()' to simulate chunks of data.

- Assert that 'ai_client.send()' correctly aggregates these chunks into a single string.

2. In @ai_client.py:

- Modify the '_send_deepseek' function to use 'requests.post(..., stream=True)'.

- Implement a loop to iterate over the response lines using 'iter_lines()'.

- Aggregate the content from each chunk.

- Ensure the aggregated content is added to the history and returned by the function.

OUTPUT: Provide the raw Python code for the modified sections or the full files. No pleasantries."""
docs = ["conductor/workflow.md", "ai_client.py", "tests/test_deepseek_provider.py", "mcp_client.py"]