Mirror of https://github.com/router-for-me/CLIProxyAPI.git, synced 2026-02-18 04:10:51 +08:00
feat(registry): add GPT-5 Codex Mini model variants
Adds three new Codex Mini model variants (gpt-5-codex-mini, gpt-5-codex-mini-medium, gpt-5-codex-mini-high) that all map to the upstream codex-mini-latest model. Codex Mini supports only the medium and high reasoning-effort levels (no low/minimal), and the base gpt-5-codex-mini variant defaults to medium reasoning effort.
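To make that mapping concrete, here is a minimal standalone Go sketch (not code from this commit; resolveCodexMini is a made-up helper name) of how each variant resolves to an upstream model and reasoning effort:

package main

import "fmt"

// resolveCodexMini is a hypothetical helper mirroring the mapping added in this
// commit: every gpt-5-codex-mini* variant is forwarded upstream as
// codex-mini-latest, and the name suffix selects the reasoning effort.
func resolveCodexMini(requested string) (upstreamModel, reasoningEffort string, ok bool) {
    switch requested {
    case "gpt-5-codex-mini", "gpt-5-codex-mini-medium":
        // Per the commit message the base variant defaults to medium
        // (the Execute path leaves reasoning.effort untouched for the base
        // variant; CountTokens sets medium explicitly).
        return "codex-mini-latest", "medium", true
    case "gpt-5-codex-mini-high":
        return "codex-mini-latest", "high", true
    default:
        return "", "", false // not a Codex Mini variant; handled elsewhere
    }
}

func main() {
    for _, m := range []string{"gpt-5-codex-mini", "gpt-5-codex-mini-medium", "gpt-5-codex-mini-high"} {
        model, effort, _ := resolveCodexMini(m)
        fmt.Printf("%-26s -> model=%s reasoning.effort=%s\n", m, model, effort)
    }
}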
@@ -352,17 +352,43 @@ func GetOpenAIModels() []*ModelInfo {
             SupportedParameters: []string{"tools"},
         },
         {
-            ID:                  "codex-mini-latest",
+            ID:                  "gpt-5-codex-mini",
             Object:              "model",
             Created:             time.Now().Unix(),
             OwnedBy:             "openai",
             Type:                "openai",
-            Version:             "1.0",
-            DisplayName:         "Codex Mini",
-            Description:         "Lightweight code generation model",
-            ContextLength:       4096,
-            MaxCompletionTokens: 2048,
-            SupportedParameters: []string{"temperature", "max_tokens", "stream", "stop"},
+            Version:             "gpt-5-2025-11-07",
+            DisplayName:         "GPT 5 Codex Mini",
+            Description:         "Stable version of GPT 5 Codex Mini: cheaper, faster, but less capable version of GPT 5 Codex.",
+            ContextLength:       400000,
+            MaxCompletionTokens: 128000,
+            SupportedParameters: []string{"tools"},
+        },
+        {
+            ID:                  "gpt-5-codex-mini-medium",
+            Object:              "model",
+            Created:             time.Now().Unix(),
+            OwnedBy:             "openai",
+            Type:                "openai",
+            Version:             "gpt-5-2025-11-07",
+            DisplayName:         "GPT 5 Codex Mini Medium",
+            Description:         "Stable version of GPT 5 Codex Mini: cheaper, faster, but less capable version of GPT 5 Codex.",
+            ContextLength:       400000,
+            MaxCompletionTokens: 128000,
+            SupportedParameters: []string{"tools"},
+        },
+        {
+            ID:                  "gpt-5-codex-mini-high",
+            Object:              "model",
+            Created:             time.Now().Unix(),
+            OwnedBy:             "openai",
+            Type:                "openai",
+            Version:             "gpt-5-2025-11-07",
+            DisplayName:         "GPT 5 Codex Mini High",
+            Description:         "Stable version of GPT 5 Codex Mini: cheaper, faster, but less capable version of GPT 5 Codex.",
+            ContextLength:       400000,
+            MaxCompletionTokens: 128000,
+            SupportedParameters: []string{"tools"},
         },
     }
 }
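The three registry entries above differ only in ID and DisplayName; the version, description, context length, completion limit, and supported parameters are shared. A standalone sketch of that shared shape, using a simplified local struct rather than the repository's ModelInfo type:

package main

import "fmt"

// entry is a simplified stand-in for the registry's ModelInfo type, carrying
// only the fields needed for this illustration.
type entry struct {
    ID                  string
    DisplayName         string
    Version             string
    ContextLength       int
    MaxCompletionTokens int
}

func main() {
    // The empty suffix is the base variant; -medium and -high pick the effort.
    variants := []struct{ idSuffix, nameSuffix string }{
        {"", ""},
        {"-medium", " Medium"},
        {"-high", " High"},
    }

    for _, v := range variants {
        e := entry{
            ID:                  "gpt-5-codex-mini" + v.idSuffix,
            DisplayName:         "GPT 5 Codex Mini" + v.nameSuffix,
            Version:             "gpt-5-2025-11-07",
            ContextLength:       400000,
            MaxCompletionTokens: 128000,
        }
        fmt.Printf("%-26s %-25s ctx=%d max=%d\n", e.ID, e.DisplayName, e.ContextLength, e.MaxCompletionTokens)
    }
}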
@@ -75,6 +75,14 @@ func (e *CodexExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, re
         case "gpt-5-codex-high":
             body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
         }
+    } else if util.InArray([]string{"gpt-5-codex-mini", "gpt-5-codex-mini-medium", "gpt-5-codex-mini-high"}, req.Model) {
+        body, _ = sjson.SetBytes(body, "model", "codex-mini-latest")
+        switch req.Model {
+        case "gpt-5-codex-mini-medium":
+            body, _ = sjson.SetBytes(body, "reasoning.effort", "medium")
+        case "gpt-5-codex-mini-high":
+            body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
+        }
     }

     body, _ = sjson.SetBytes(body, "stream", true)
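Concretely, the rewrite added here (and mirrored in the ExecuteStream and CountTokens hunks below) mutates the JSON request body in place via sjson. A minimal standalone sketch of what that produces for the -high variant; the "input" field is illustrative, not taken from the repository's request schema:

package main

import (
    "fmt"

    "github.com/tidwall/sjson"
)

func main() {
    // Simplified request body; only "model" is taken from the diff, the
    // "input" field is an assumption for illustration.
    body := []byte(`{"model":"gpt-5-codex-mini-high","input":"write a quicksort"}`)

    // The same calls the executor makes for the -high variant.
    body, _ = sjson.SetBytes(body, "model", "codex-mini-latest")
    body, _ = sjson.SetBytes(body, "reasoning.effort", "high")

    fmt.Println(string(body))
    // Prints roughly:
    // {"model":"codex-mini-latest","input":"write a quicksort","reasoning":{"effort":"high"}}
}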
@@ -188,6 +196,14 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
         case "gpt-5-codex-high":
             body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
         }
+    } else if util.InArray([]string{"gpt-5-codex-mini", "gpt-5-codex-mini-medium", "gpt-5-codex-mini-high"}, req.Model) {
+        body, _ = sjson.SetBytes(body, "model", "codex-mini-latest")
+        switch req.Model {
+        case "gpt-5-codex-mini-medium":
+            body, _ = sjson.SetBytes(body, "reasoning.effort", "medium")
+        case "gpt-5-codex-mini-high":
+            body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
+        }
     }

     body, _ = sjson.DeleteBytes(body, "previous_response_id")
@@ -312,6 +328,17 @@ func (e *CodexExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.Auth
         default:
             body, _ = sjson.SetBytes(body, "reasoning.effort", "low")
         }
+    } else if util.InArray([]string{"gpt-5-codex-mini", "gpt-5-codex-mini-medium", "gpt-5-codex-mini-high"}, req.Model) {
+        modelForCounting = "gpt-5"
+        body, _ = sjson.SetBytes(body, "model", "codex-mini-latest")
+        switch req.Model {
+        case "gpt-5-codex-mini-medium":
+            body, _ = sjson.SetBytes(body, "reasoning.effort", "medium")
+        case "gpt-5-codex-mini-high":
+            body, _ = sjson.SetBytes(body, "reasoning.effort", "high")
+        default:
+            body, _ = sjson.SetBytes(body, "reasoning.effort", "medium")
+        }
     }

     body, _ = sjson.DeleteBytes(body, "previous_response_id")