# patch script — Python, 31 lines, 1.2 KiB
"""One-shot patch script for ``src/orchestrator_pm.py``.

Inserts explicit provider/model setup immediately before the Tier-1
``ai_client.send()`` call, so the request uses the provider currently
configured on ``ai_client`` rather than an implicit global default.

Side effects: rewrites ``src/orchestrator_pm.py`` in place.
Raises ``SystemExit`` if the anchor snippet cannot be found (previous
version silently wrote the file back unchanged and still printed success).
"""

TARGET = "src/orchestrator_pm.py"

# Anchor: the snippet currently at the Tier-1 call site. str.replace only
# fires on a byte-for-byte match, whitespace included.
# NOTE(review): leading indentation of this snippet was reconstructed —
# confirm it matches the target file exactly before relying on this patch.
old_code = '''    # Set custom system prompt for this call
    old_system_prompt = ai_client._custom_system_prompt
    ai_client.set_custom_system_prompt(system_prompt or "")

    try:
        # 3. Call Tier 1 Model (Strategic - Pro)
        # Note: We use gemini-1.5-pro or similar high-reasoning model for Tier 1
        response = ai_client.send('''

# Replacement: same snippet with provider setup inserted before the try block.
new_code = '''    # Set custom system prompt for this call
    old_system_prompt = ai_client._custom_system_prompt
    ai_client.set_custom_system_prompt(system_prompt or "")

    # Ensure we use the current provider from ai_client state
    current_provider = ai_client.get_provider()
    current_model = ai_client._model if hasattr(ai_client, '_model') else 'gemini-2.5-flash-lite'
    ai_client.set_provider(current_provider, current_model)

    try:
        # 3. Call Tier 1 Model (Strategic - Pro)
        # Note: We use gemini-1.5-pro or similar high-reasoning model for Tier 1
        response = ai_client.send('''

# newline="" preserves the target file's original line endings on round-trip.
with open(TARGET, "r", encoding="utf-8", newline="") as f:
    content = f.read()

if new_code in content:
    # Re-running the patch is a harmless no-op, not an error.
    print(f"Provider setup already present in {TARGET}; nothing to do")
elif old_code not in content:
    # Fail loudly instead of silently writing the file back unchanged
    # and claiming success (the original script's behavior).
    raise SystemExit(f"Patch anchor not found in {TARGET}; call site may have changed")
else:
    # count=1: patch exactly the Tier-1 call site, never multiple matches.
    with open(TARGET, "w", encoding="utf-8", newline="") as f:
        f.write(content.replace(old_code, new_code, 1))
    print("Fixed provider in orchestrator_pm.py")