diff --git a/config.toml b/config.toml
index 5ffadcd..fc86da2 100644
--- a/config.toml
+++ b/config.toml
@@ -1,6 +1,6 @@
[ai]
-provider = "deepseek"
-model = "deepseek-v3"
+provider = "gemini"
+model = "gemini-2.5-flash-lite"
temperature = 0.0
max_tokens = 8192
history_trunc_limit = 8000
diff --git a/mock_debug_prompt.txt b/mock_debug_prompt.txt
index 426d495..2ec238e 100644
--- a/mock_debug_prompt.txt
+++ b/mock_debug_prompt.txt
@@ -700,3 +700,164 @@ System:
testing gemini cli
------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+PATH: Epic Initialization — please produce tracks
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+Please generate the implementation tickets for this track.
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+Please read test.txt
+You are assigned to Ticket T1.
+Task Description: do something
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+role: tool
+Here are the results: {"content": "done"}
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+PATH: Epic Initialization — please produce tracks
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+Please generate the implementation tickets for this track.
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+Please read test.txt
+You are assigned to Ticket T1.
+Task Description: do something
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+role: tool
+Here are the results: {"content": "done"}
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+PATH: Epic Initialization — please produce tracks
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+Please generate the implementation tickets for this track.
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+Please read test.txt
+You are assigned to Ticket T1.
+Task Description: do something
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+role: tool
+Here are the results: {"content": "done"}
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+PATH: Epic Initialization — please produce tracks
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+Please generate the implementation tickets for this track.
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+Please read test.txt
+You are assigned to Ticket T1.
+Task Description: do something
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+role: tool
+Here are the results: {"content": "done"}
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+PATH: Epic Initialization — please produce tracks
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+Please generate the implementation tickets for this track.
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+Please read test.txt
+You are assigned to Ticket T1.
+Task Description: do something
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+role: tool
+Here are the results: {"content": "done"}
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+PATH: Epic Initialization — please produce tracks
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+Please generate the implementation tickets for this track.
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+Please read test.txt
+You are assigned to Ticket T1.
+Task Description: do something
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+role: tool
+Here are the results: {"content": "done"}
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+PATH: Epic Initialization — please produce tracks
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+Please generate the implementation tickets for this track.
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+Please read test.txt
+You are assigned to Ticket T1.
+Task Description: do something
+------------------
+--- MOCK INVOKED ---
+ARGS: ['tests/mock_gemini_cli.py']
+PROMPT:
+role: tool
+Here are the results: {"content": "done"}
+------------------
diff --git a/project_history.toml b/project_history.toml
index ce8afbe..e2ae7ca 100644
--- a/project_history.toml
+++ b/project_history.toml
@@ -8,5 +8,5 @@ active = "main"
[discussions.main]
git_commit = ""
-last_updated = "2026-03-06T16:40:04"
+last_updated = "2026-03-06T23:22:48"
history = []
diff --git a/src/ai_client.py b/src/ai_client.py
index 47d5d6c..fbcc34e 100644
--- a/src/ai_client.py
+++ b/src/ai_client.py
@@ -75,6 +75,10 @@ _deepseek_client: Any = None
_deepseek_history: list[dict[str, Any]] = []
_deepseek_history_lock: threading.Lock = threading.Lock()
+_minimax_client: Any = None
+_minimax_history: list[dict[str, Any]] = []
+_minimax_history_lock: threading.Lock = threading.Lock()
+
_send_lock: threading.Lock = threading.Lock()
_gemini_cli_adapter: Optional[GeminiCliAdapter] = None
@@ -176,6 +180,7 @@ def _load_credentials() -> dict[str, Any]:
f" [gemini]\n api_key = \"your-key\"\n"
f" [anthropic]\n api_key = \"your-key\"\n"
f" [deepseek]\n api_key = \"your-key\"\n"
+ f" [minimax]\n api_key = \"your-key\"\n"
f"Or set SLOP_CREDENTIALS env var to a custom path."
)
@@ -284,6 +289,37 @@ def _classify_deepseek_error(exc: Exception) -> ProviderError:
return ProviderError("unknown", "deepseek", Exception(body))
+def _classify_minimax_error(exc: Exception) -> ProviderError:
+ body = ""
+ if isinstance(exc, requests.exceptions.HTTPError) and exc.response is not None:
+ try:
+ err_data = exc.response.json()
+ if "error" in err_data:
+ body = str(err_data["error"].get("message", exc.response.text))
+ else:
+ body = exc.response.text
+        except Exception:
+ body = exc.response.text
+ else:
+ body = str(exc)
+
+ body_l = body.lower()
+    if "429" in body_l or "rate limit" in body_l or "too many requests" in body_l:
+ return ProviderError("rate_limit", "minimax", Exception(body))
+ if "401" in body_l or "403" in body_l or "auth" in body_l or "api key" in body_l:
+ return ProviderError("auth", "minimax", Exception(body))
+ if "402" in body_l or "balance" in body_l or "billing" in body_l:
+ return ProviderError("balance", "minimax", Exception(body))
+ if "quota" in body_l or "limit exceeded" in body_l:
+ return ProviderError("quota", "minimax", Exception(body))
+ if "connection" in body_l or "timeout" in body_l or "network" in body_l:
+ return ProviderError("network", "minimax", Exception(body))
+
+ if "400" in body_l or "bad request" in body_l:
+ return ProviderError("unknown", "minimax", Exception(f"MiniMax Bad Request: {body}"))
+
+ return ProviderError("unknown", "minimax", Exception(body))
+
def set_provider(provider: str, model: str) -> None:
global _provider, _model
_provider = provider
@@ -293,6 +329,12 @@ def set_provider(provider: str, model: str) -> None:
_model = "gemini-3-flash-preview"
else:
_model = model
+ elif provider == "minimax":
+ valid_models = _list_minimax_models("")
+ if model not in valid_models:
+ _model = "MiniMax-M2.5"
+ else:
+ _model = model
else:
_model = model
@@ -312,6 +354,7 @@ def reset_session() -> None:
global _gemini_cache_md_hash, _gemini_cache_created_at
global _anthropic_client, _anthropic_history
global _deepseek_client, _deepseek_history
+ global _minimax_client, _minimax_history
global _CACHED_ANTHROPIC_TOOLS
global _gemini_cli_adapter
if _gemini_client and _gemini_cache:
@@ -336,6 +379,9 @@ def reset_session() -> None:
_deepseek_client = None
with _deepseek_history_lock:
_deepseek_history = []
+ _minimax_client = None
+ with _minimax_history_lock:
+ _minimax_history = []
_CACHED_ANTHROPIC_TOOLS = None
file_cache.reset_client()
@@ -362,6 +408,8 @@ def list_models(provider: str) -> list[str]:
return _list_deepseek_models(creds["deepseek"]["api_key"])
elif provider == "gemini_cli":
return _list_gemini_cli_models()
+ elif provider == "minimax":
+ return _list_minimax_models(creds["minimax"]["api_key"])
return []
def _list_gemini_cli_models() -> list[str]:
@@ -402,6 +450,9 @@ def _list_anthropic_models() -> list[str]:
def _list_deepseek_models(api_key: str) -> list[str]:
return ["deepseek-chat", "deepseek-reasoner"]
+def _list_minimax_models(api_key: str) -> list[str]:
+ return ["MiniMax-M2.5", "MiniMax-M2.5-highspeed", "MiniMax-M2.1", "MiniMax-M2.1-highspeed", "MiniMax-M2"]
+
TOOL_NAME: str = "run_powershell"
_agent_tools: dict[str, bool] = {}
@@ -1405,6 +1456,16 @@ def _ensure_deepseek_client() -> None:
_load_credentials()
pass
+def _ensure_minimax_client() -> None:
+ global _minimax_client
+ if _minimax_client is None:
+ from openai import OpenAI
+ creds = _load_credentials()
+ api_key = creds.get("minimax", {}).get("api_key")
+ if not api_key:
+ raise ValueError("MiniMax API key not found in credentials.toml")
+ _minimax_client = OpenAI(api_key=api_key, base_url="https://api.minimax.chat/v1")
+
def _send_deepseek(md_content: str, user_message: str, base_dir: str,
file_items: list[dict[str, Any]] | None = None,
discussion_history: str = "",
@@ -1648,6 +1709,222 @@ def _send_deepseek(md_content: str, user_message: str, base_dir: str,
except Exception as e:
raise _classify_deepseek_error(e) from e
+def _send_minimax(md_content: str, user_message: str, base_dir: str,
+ file_items: list[dict[str, Any]] | None = None,
+ discussion_history: str = "",
+ stream: bool = False,
+ pre_tool_callback: Optional[Callable[[str, str, Optional[Callable[[str], str]]], Optional[str]]] = None,
+ qa_callback: Optional[Callable[[str], str]] = None,
+ stream_callback: Optional[Callable[[str], None]] = None) -> str:
+ try:
+ mcp_client.configure(file_items or [], [base_dir])
+ creds = _load_credentials()
+ api_key = creds.get("minimax", {}).get("api_key")
+ if not api_key:
+ raise ValueError("MiniMax API key not found in credentials.toml")
+
+        _ensure_minimax_client()
+        client = _minimax_client
+
+ with _minimax_history_lock:
+ if discussion_history and not _minimax_history:
+ user_content = f"[DISCUSSION HISTORY]\n\n{discussion_history}\n\n---\n\n{user_message}"
+ else:
+ user_content = user_message
+ _minimax_history.append({"role": "user", "content": user_content})
+
+ all_text_parts: list[str] = []
+ _cumulative_tool_bytes = 0
+
+ for round_idx in range(MAX_TOOL_ROUNDS + 2):
+ current_api_messages: list[dict[str, Any]] = []
+
+ sys_msg = {"role": "system", "content": f"{_get_combined_system_prompt()}\n\n\n{md_content}\n"}
+ current_api_messages.append(sys_msg)
+
+ with _minimax_history_lock:
+ for i, msg in enumerate(_minimax_history):
+ role = msg.get("role")
+ api_msg = {"role": role}
+
+ content = msg.get("content")
+ if role == "assistant":
+ if msg.get("tool_calls"):
+ api_msg["content"] = content or None
+ api_msg["tool_calls"] = msg["tool_calls"]
+ else:
+ api_msg["content"] = content or ""
+ elif role == "tool":
+ api_msg["content"] = content or ""
+ api_msg["tool_call_id"] = msg.get("tool_call_id")
+ else:
+ api_msg["content"] = content or ""
+
+ current_api_messages.append(api_msg)
+
+ request_payload: dict[str, Any] = {
+ "model": _model,
+ "messages": current_api_messages,
+ "stream": stream,
+ "extra_body": {"reasoning_split": True},
+ }
+
+ if stream:
+ request_payload["stream_options"] = {"include_usage": True}
+
+ request_payload["temperature"] = _temperature
+ request_payload["max_tokens"] = min(_max_tokens, 8192)
+
+ tools = _get_deepseek_tools()
+ if tools:
+ request_payload["tools"] = tools
+
+ events.emit("request_start", payload={"provider": "minimax", "model": _model, "round": round_idx, "streaming": stream})
+
+ try:
+ response = client.chat.completions.create(**request_payload, timeout=120)
+ except Exception as e:
+ raise _classify_minimax_error(e) from e
+
+ assistant_text = ""
+ tool_calls_raw = []
+ reasoning_content = ""
+ finish_reason = "stop"
+ usage = {}
+
+ if stream:
+ aggregated_content = ""
+ aggregated_tool_calls: list[dict[str, Any]] = []
+ aggregated_reasoning = ""
+ current_usage: dict[str, Any] = {}
+ final_finish_reason = "stop"
+
+ for chunk in response:
+ if not chunk.choices:
+ if chunk.usage:
+ current_usage = chunk.usage.model_dump()
+ continue
+
+ delta = chunk.choices[0].delta
+ if delta.content:
+ content_chunk = delta.content
+ aggregated_content += content_chunk
+ if stream_callback:
+ stream_callback(content_chunk)
+
+ if hasattr(delta, "reasoning_details") and delta.reasoning_details:
+ for detail in delta.reasoning_details:
+ if "text" in detail:
+ aggregated_reasoning += detail["text"]
+
+ if delta.tool_calls:
+ for tc_delta in delta.tool_calls:
+ idx = tc_delta.index
+ while len(aggregated_tool_calls) <= idx:
+ aggregated_tool_calls.append({"id": "", "type": "function", "function": {"name": "", "arguments": ""}})
+ target = aggregated_tool_calls[idx]
+ if tc_delta.id:
+ target["id"] = tc_delta.id
+ if tc_delta.function and tc_delta.function.name:
+ target["function"]["name"] += tc_delta.function.name
+ if tc_delta.function and tc_delta.function.arguments:
+ target["function"]["arguments"] += tc_delta.function.arguments
+
+ if chunk.choices[0].finish_reason:
+ final_finish_reason = chunk.choices[0].finish_reason
+ if chunk.usage:
+ current_usage = chunk.usage.model_dump()
+
+ assistant_text = aggregated_content
+ tool_calls_raw = aggregated_tool_calls
+ reasoning_content = aggregated_reasoning
+ finish_reason = final_finish_reason
+ usage = current_usage
+ else:
+ choice = response.choices[0]
+ message = choice.message
+ assistant_text = message.content or ""
+ tool_calls_raw = message.tool_calls or []
+ if hasattr(message, "reasoning_details") and message.reasoning_details:
+                reasoning_content = message.reasoning_details[0].get("text", "")
+ finish_reason = choice.finish_reason or "stop"
+ usage = response.usage.model_dump() if response.usage else {}
+
+ thinking_tags = ""
+ if reasoning_content:
+ thinking_tags = f"\n{reasoning_content}\n\n"
+ full_assistant_text = thinking_tags + assistant_text
+
+ with _minimax_history_lock:
+ msg_to_store: dict[str, Any] = {"role": "assistant", "content": assistant_text or None}
+ if reasoning_content:
+ msg_to_store["reasoning_content"] = reasoning_content
+ if tool_calls_raw:
+ msg_to_store["tool_calls"] = tool_calls_raw
+ _minimax_history.append(msg_to_store)
+
+ if full_assistant_text:
+ all_text_parts.append(full_assistant_text)
+
+ _append_comms("IN", "response", {
+ "round": round_idx,
+ "stop_reason": finish_reason,
+ "text": full_assistant_text,
+ "tool_calls": tool_calls_raw,
+ "usage": usage,
+ "streaming": stream
+ })
+
+            if not tool_calls_raw:
+ break
+ if round_idx > MAX_TOOL_ROUNDS:
+ break
+
+ try:
+ loop = asyncio.get_running_loop()
+ results = asyncio.run_coroutine_threadsafe(
+ _execute_tool_calls_concurrently(tool_calls_raw, base_dir, pre_tool_callback, qa_callback, round_idx, "minimax"),
+ loop
+ ).result()
+ except RuntimeError:
+ results = asyncio.run(_execute_tool_calls_concurrently(tool_calls_raw, base_dir, pre_tool_callback, qa_callback, round_idx, "minimax"))
+
+ tool_results_for_history: list[dict[str, Any]] = []
+ for i, (name, call_id, out, _) in enumerate(results):
+ if i == len(results) - 1:
+ if file_items:
+ file_items, changed = _reread_file_items(file_items)
+ ctx = _build_file_diff_text(changed)
+ if ctx:
+ out += f"\n\n[SYSTEM: FILES UPDATED]\n\n{ctx}"
+ if round_idx == MAX_TOOL_ROUNDS:
+ out += "\n\n[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]"
+
+ truncated = _truncate_tool_output(out)
+ _cumulative_tool_bytes += len(truncated)
+ tool_results_for_history.append({
+ "role": "tool",
+ "tool_call_id": call_id,
+ "content": truncated,
+ })
+ _append_comms("IN", "tool_result", {"name": name, "id": call_id, "output": out})
+ events.emit("tool_execution", payload={"status": "completed", "tool": name, "result": out, "round": round_idx})
+
+ if _cumulative_tool_bytes > _MAX_TOOL_OUTPUT_BYTES:
+ tool_results_for_history.append({
+ "role": "user",
+ "content": f"SYSTEM WARNING: Cumulative tool output exceeded {_MAX_TOOL_OUTPUT_BYTES // 1000}KB budget. Provide your final answer now."
+ })
+ _append_comms("OUT", "request", {"message": f"[TOOL OUTPUT BUDGET EXCEEDED: {_cumulative_tool_bytes} bytes]"})
+
+ with _minimax_history_lock:
+ for tr in tool_results_for_history:
+ _minimax_history.append(tr)
+
+ return "\n\n".join(all_text_parts) if all_text_parts else "(No text returned)"
+ except Exception as e:
+ raise _classify_minimax_error(e) from e
+
def run_tier4_analysis(stderr: str) -> str:
if not stderr or not stderr.strip():
@@ -1742,6 +2019,11 @@ def send(
md_content, user_message, base_dir, file_items, discussion_history,
stream, pre_tool_callback, qa_callback, stream_callback
)
+ elif _provider == "minimax":
+ return _send_minimax(
+ md_content, user_message, base_dir, file_items, discussion_history,
+ stream, pre_tool_callback, qa_callback, stream_callback
+ )
else:
raise ValueError(f"Unknown provider: {_provider}")
@@ -1888,6 +2170,33 @@ def get_history_bleed_stats(md_content: Optional[str] = None) -> dict[str, Any]:
"current": current_tokens,
"percentage": percentage,
})
+ elif _provider == "minimax":
+ limit_tokens = 204800
+ current_tokens = 0
+ with _minimax_history_lock:
+ for msg in _minimax_history:
+ content = msg.get("content", "")
+ if isinstance(content, str):
+ current_tokens += len(content)
+ elif isinstance(content, list):
+ for block in content:
+ if isinstance(block, dict):
+ text = block.get("text", "")
+ if isinstance(text, str):
+ current_tokens += len(text)
+ inp = block.get("input")
+ if isinstance(inp, dict):
+ import json as _json
+ current_tokens += len(_json.dumps(inp, ensure_ascii=False))
+ if md_content: current_tokens += len(md_content)
+ current_tokens = max(1, int(current_tokens / _CHARS_PER_TOKEN))
+ percentage = (current_tokens / limit_tokens) * 100 if limit_tokens > 0 else 0
+ return _add_bleed_derived({
+ "provider": "minimax",
+ "limit": limit_tokens,
+ "current": current_tokens,
+ "percentage": percentage,
+ })
return _add_bleed_derived({
"provider": _provider,
"limit": 0,
diff --git a/src/app_controller.py b/src/app_controller.py
index bd4e03f..9919706 100644
--- a/src/app_controller.py
+++ b/src/app_controller.py
@@ -110,7 +110,7 @@ class AppController:
The headless controller for the Manual Slop application.
Owns the application state and manages background services.
"""
- PROVIDERS: list[str] = ["gemini", "anthropic", "gemini_cli", "deepseek"]
+ PROVIDERS: list[str] = ["gemini", "anthropic", "gemini_cli", "deepseek", "minimax"]
def __init__(self):
# Initialize locks first to avoid initialization order issues
diff --git a/src/gui_2.py b/src/gui_2.py
index b3baf87..72d1593 100644
--- a/src/gui_2.py
+++ b/src/gui_2.py
@@ -25,7 +25,7 @@ from src import app_controller
from pydantic import BaseModel
from imgui_bundle import imgui, hello_imgui, immapp, imgui_node_editor as ed
-PROVIDERS: list[str] = ["gemini", "anthropic", "gemini_cli", "deepseek"]
+PROVIDERS: list[str] = ["gemini", "anthropic", "gemini_cli", "deepseek", "minimax"]
COMMS_CLAMP_CHARS: int = 300
def hide_tk_root() -> Tk:
diff --git a/tests/test_minimax_provider.py b/tests/test_minimax_provider.py
new file mode 100644
index 0000000..e470568
--- /dev/null
+++ b/tests/test_minimax_provider.py
@@ -0,0 +1,41 @@
+import unittest.mock
+from unittest.mock import patch, MagicMock
+from src import ai_client
+
+def test_minimax_model_selection() -> None:
+ ai_client.set_provider("minimax", "MiniMax-M2.5")
+ assert ai_client._provider == "minimax"
+ assert ai_client._model == "MiniMax-M2.5"
+
+def test_minimax_default_model() -> None:
+ ai_client.set_provider("minimax", "invalid-model")
+ assert ai_client._model == "MiniMax-M2.5"
+
+def test_minimax_list_models() -> None:
+ models = ai_client.list_models("minimax")
+ assert "MiniMax-M2.5" in models
+ assert "MiniMax-M2.5-highspeed" in models
+ assert "MiniMax-M2.1" in models
+ assert "MiniMax-M2" in models
+
+def test_minimax_history_bleed_stats() -> None:
+ ai_client.set_provider("minimax", "MiniMax-M2.5")
+ ai_client.reset_session()
+ stats = ai_client.get_history_bleed_stats(md_content="Test context")
+ assert stats["provider"] == "minimax"
+ assert stats["limit"] == 204800
+
+def test_minimax_in_providers_list() -> None:
+ from src.gui_2 import PROVIDERS
+ assert "minimax" in PROVIDERS
+
+def test_minimax_in_app_controller_providers() -> None:
+ from src.app_controller import AppController
+ assert "minimax" in AppController.PROVIDERS
+
+def test_minimax_credentials_template() -> None:
+    # When credentials.toml is absent, the error template must mention minimax.
+    try:
+        ai_client._load_credentials()
+    except FileNotFoundError as e:
+        assert "minimax" in str(e)