# User simulator agent for exercising an AI coding assistant.
import time
|
|
import random
|
|
import ai_client
|
|
|
|
class UserSimAgent:
    """Simulates a human software engineer interacting with the assistant.

    Uses the project's ``ai_client`` module to generate concise, human-like
    replies, and executes actions with a randomized human-like delay.
    """

    def __init__(self, hook_client, model="gemini-2.0-flash"):
        """
        hook_client: client used to drive the assistant under test.
        model: model identifier used for response generation.
        """
        self.hook_client = hook_client
        self.model = model
        # System prompt that frames the simulator as a human tester.
        self.system_prompt = (
            "You are a software engineer testing an AI coding assistant called 'Manual Slop'. "
            "You want to build a small Python project and verify the assistant's capabilities. "
            "Keep your responses concise and human-like. "
            "Do not use markdown blocks for your main message unless you are providing code."
        )

    def generate_response(self, conversation_history):
        """
        Generate a human-like response based on the conversation history.

        conversation_history: list of dicts with 'role' and 'content'.
        Returns the reply string produced by ai_client.send.
        """
        # ai_client keeps its own internal history, so we only forward the
        # most recent assistant ('AI') message as the prompt for the
        # simulated user. Falls back to "" when no AI message exists yet.
        last_ai_msg = ""
        for entry in reversed(conversation_history):
            if entry.get('role') == 'AI':
                last_ai_msg = entry.get('content', '')
                break

        # Temporarily install the simulator's system prompt; the finally
        # block guarantees the default prompt is restored even if send()
        # raises, so other ai_client users are unaffected.
        try:
            ai_client.set_custom_system_prompt(self.system_prompt)
            # Blank md_content: the simulated user does not need to read its
            # own files via this mechanism (could be provided later).
            response = ai_client.send(md_content="", user_message=last_ai_msg)
        finally:
            ai_client.set_custom_system_prompt("")

        return response

    def perform_action_with_delay(self, action_func, *args,
                                  min_delay=0.5, max_delay=2.0, **kwargs):
        """
        Execute an action after a human-like randomized delay.

        action_func: callable to invoke once the delay has elapsed.
        min_delay / max_delay: bounds in seconds for the uniform random
            pause. Keyword-only; the defaults preserve the original
            hard-coded 0.5-2.0 s behavior. Remaining *args / **kwargs are
            forwarded to action_func.
        Returns whatever action_func returns.
        """
        time.sleep(random.uniform(min_delay, max_delay))
        return action_func(*args, **kwargs)
|