From fcb0293c0d5c83b269509dfce368ff5790fc8543 Mon Sep 17 00:00:00 2001
From: Jeff Nash <9919536+jeffnash@users.noreply.github.com>
Date: Fri, 7 Nov 2025 16:54:28 -0800
Subject: [PATCH] feat(registry): add GPT-5 Codex Mini model variants

Adds three new Codex Mini model variants (mini, mini-medium, mini-high)
that map to codex-mini-latest. Codex Mini supports medium and high
reasoning effort levels only (no low/minimal). Base model defaults to
medium reasoning effort, applied consistently in Execute, ExecuteStream,
and CountTokens.
---
 internal/registry/model_definitions.go      | 40 +++++++++++++++++----
 internal/runtime/executor/codex_executor.go | 31 +++++++++++++++++
 2 files changed, 64 insertions(+), 7 deletions(-)

diff --git a/internal/registry/model_definitions.go b/internal/registry/model_definitions.go
index ce662131..a5120d2d 100644
--- a/internal/registry/model_definitions.go
+++ b/internal/registry/model_definitions.go
@@ -352,17 +352,43 @@ func GetOpenAIModels() []*ModelInfo {
 			SupportedParameters: []string{"tools"},
 		},
 		{
-			ID:                  "codex-mini-latest",
+			ID:                  "gpt-5-codex-mini",
 			Object:              "model",
 			Created:             time.Now().Unix(),
 			OwnedBy:             "openai",
 			Type:                "openai",
-			Version:             "1.0",
-			DisplayName:         "Codex Mini",
-			Description:         "Lightweight code generation model",
-			ContextLength:       4096,
-			MaxCompletionTokens: 2048,
-			SupportedParameters: []string{"temperature", "max_tokens", "stream", "stop"},
+			Version:             "gpt-5-2025-11-07",
+			DisplayName:         "GPT 5 Codex Mini",
+			Description:         "Stable version of GPT 5 Codex Mini: cheaper, faster, but less capable version of GPT 5 Codex.",
+			ContextLength:       400000,
+			MaxCompletionTokens: 128000,
+			SupportedParameters: []string{"tools"},
+		},
+		{
+			ID:                  "gpt-5-codex-mini-medium",
+			Object:              "model",
+			Created:             time.Now().Unix(),
+			OwnedBy:             "openai",
+			Type:                "openai",
+			Version:             "gpt-5-2025-11-07",
+			DisplayName:         "GPT 5 Codex Mini Medium",
+			Description:         "Stable version of GPT 5 Codex Mini: cheaper, faster, but less capable version of GPT 5 Codex.",
+			ContextLength:       400000,
+			MaxCompletionTokens: 128000,
+			SupportedParameters: []string{"tools"},
+		},
+		{
+			ID:                  "gpt-5-codex-mini-high",
+			Object:              "model",
+			Created:             time.Now().Unix(),
+			OwnedBy:             "openai",
+			Type:                "openai",
+			Version:             "gpt-5-2025-11-07",
+			DisplayName:         "GPT 5 Codex Mini High",
+			Description:         "Stable version of GPT 5 Codex Mini: cheaper, faster, but less capable version of GPT 5 Codex.",
+			ContextLength:       400000,
+			MaxCompletionTokens: 128000,
+			SupportedParameters: []string{"tools"},
 		},
 	}
 }
diff --git a/internal/runtime/executor/codex_executor.go b/internal/runtime/executor/codex_executor.go
index 443e41e7..4297795c 100644
--- a/internal/runtime/executor/codex_executor.go
+++ b/internal/runtime/executor/codex_executor.go
@@ -75,6 +75,16 @@ func (e *CodexExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, re
 		case "gpt-5-codex-high":
 			body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
 		}
+	} else if util.InArray([]string{"gpt-5-codex-mini", "gpt-5-codex-mini-medium", "gpt-5-codex-mini-high"}, req.Model) {
+		body, _ = sjson.SetBytes(body, "model", "codex-mini-latest")
+		switch req.Model {
+		case "gpt-5-codex-mini-medium":
+			body, _ = sjson.SetBytes(body, "reasoning.effort", "medium")
+		case "gpt-5-codex-mini-high":
+			body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
+		default:
+			body, _ = sjson.SetBytes(body, "reasoning.effort", "medium")
+		}
 	}
 
 	body, _ = sjson.SetBytes(body, "stream", true)
@@ -188,6 +198,16 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
 		case "gpt-5-codex-high":
 			body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
 		}
+	} else if util.InArray([]string{"gpt-5-codex-mini", "gpt-5-codex-mini-medium", "gpt-5-codex-mini-high"}, req.Model) {
+		body, _ = sjson.SetBytes(body, "model", "codex-mini-latest")
+		switch req.Model {
+		case "gpt-5-codex-mini-medium":
+			body, _ = sjson.SetBytes(body, "reasoning.effort", "medium")
+		case "gpt-5-codex-mini-high":
+			body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
+		default:
+			body, _ = sjson.SetBytes(body, "reasoning.effort", "medium")
+		}
 	}
 
 	body, _ = sjson.DeleteBytes(body, "previous_response_id")
@@ -312,6 +332,17 @@ func (e *CodexExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.Auth
 		default:
 			body, _ = sjson.SetBytes(body, "reasoning.effort", "low")
 		}
+	} else if util.InArray([]string{"gpt-5-codex-mini", "gpt-5-codex-mini-medium", "gpt-5-codex-mini-high"}, req.Model) {
+		modelForCounting = "gpt-5"
+		body, _ = sjson.SetBytes(body, "model", "codex-mini-latest")
+		switch req.Model {
+		case "gpt-5-codex-mini-medium":
+			body, _ = sjson.SetBytes(body, "reasoning.effort", "medium")
+		case "gpt-5-codex-mini-high":
+			body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
+		default:
+			body, _ = sjson.SetBytes(body, "reasoning.effort", "medium")
+		}
 	}
 
 	body, _ = sjson.DeleteBytes(body, "previous_response_id")