diff --git a/internal/runtime/executor/codex_executor.go b/internal/runtime/executor/codex_executor.go index 3fe5ed6e..7003373f 100644 --- a/internal/runtime/executor/codex_executor.go +++ b/internal/runtime/executor/codex_executor.go @@ -56,6 +56,9 @@ func (e *CodexExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, re body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false) body = applyReasoningEffortMetadata(body, req.Metadata, req.Model) body = normalizeThinkingConfig(body, upstreamModel) + if errValidate := validateThinkingConfig(body, upstreamModel); errValidate != nil { + return resp, errValidate + } body = applyPayloadConfig(e.cfg, req.Model, body) body, _ = sjson.SetBytes(body, "model", upstreamModel) body, _ = sjson.SetBytes(body, "stream", true) @@ -151,6 +154,9 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au body = applyReasoningEffortMetadata(body, req.Metadata, req.Model) body = normalizeThinkingConfig(body, upstreamModel) + if errValidate := validateThinkingConfig(body, upstreamModel); errValidate != nil { + return nil, errValidate + } body = applyPayloadConfig(e.cfg, req.Model, body) body, _ = sjson.DeleteBytes(body, "previous_response_id") body, _ = sjson.SetBytes(body, "model", upstreamModel) diff --git a/internal/runtime/executor/openai_compat_executor.go b/internal/runtime/executor/openai_compat_executor.go index ba47750e..507b0fd9 100644 --- a/internal/runtime/executor/openai_compat_executor.go +++ b/internal/runtime/executor/openai_compat_executor.go @@ -64,6 +64,9 @@ func (e *OpenAICompatExecutor) Execute(ctx context.Context, auth *cliproxyauth.A translated, _ = sjson.SetBytes(translated, "model", upstreamModel) } translated = normalizeThinkingConfig(translated, upstreamModel) + if errValidate := validateThinkingConfig(translated, upstreamModel); errValidate != nil { + return resp, errValidate + } url := strings.TrimSuffix(baseURL, "/") + "/chat/completions" 
httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(translated)) @@ -155,6 +158,9 @@ func (e *OpenAICompatExecutor) ExecuteStream(ctx context.Context, auth *cliproxy translated, _ = sjson.SetBytes(translated, "model", upstreamModel) } translated = normalizeThinkingConfig(translated, upstreamModel) + if errValidate := validateThinkingConfig(translated, upstreamModel); errValidate != nil { + return nil, errValidate + } url := strings.TrimSuffix(baseURL, "/") + "/chat/completions" httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(translated)) diff --git a/internal/runtime/executor/payload_helpers.go b/internal/runtime/executor/payload_helpers.go index 9d431f11..5711bbbd 100644 --- a/internal/runtime/executor/payload_helpers.go +++ b/internal/runtime/executor/payload_helpers.go @@ -1,6 +1,8 @@ package executor import ( + "fmt" + "net/http" "strings" "github.com/router-for-me/CLIProxyAPI/v6/internal/config" @@ -289,3 +291,36 @@ func normalizeReasoningEffortLevel(payload []byte, model string) []byte { return out } + +// validateThinkingConfig checks for unsupported reasoning levels on level-based models. +// Returns a statusErr with 400 when an unsupported level is supplied to avoid silently +// downgrading requests. 
+func validateThinkingConfig(payload []byte, model string) error {
+	if len(payload) == 0 || model == "" {
+		return nil
+	}
+	if !util.ModelSupportsThinking(model) || !util.ModelUsesThinkingLevels(model) {
+		return nil
+	}
+
+	levels := util.GetModelThinkingLevels(model)
+	checkField := func(path string) error {
+		// Explicit JSON null counts as an absent field (no preference), not an error.
+		if effort := gjson.GetBytes(payload, path); effort.Exists() && effort.Type != gjson.Null {
+			if _, ok := util.NormalizeReasoningEffortLevel(model, effort.String()); !ok {
+				return statusErr{
+					code: http.StatusBadRequest,
+					msg:  fmt.Sprintf("unsupported reasoning effort level %q for model %s (supported: %s)", effort.String(), model, strings.Join(levels, ", ")),
+				}
+			}
+		}
+		return nil
+	}
+	if err := checkField("reasoning_effort"); err != nil {
+		return err
+	}
+	if err := checkField("reasoning.effort"); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/internal/util/thinking.go b/internal/util/thinking.go
index 37200980..9671f20b 100644
--- a/internal/util/thinking.go
+++ b/internal/util/thinking.go
@@ -91,8 +91,7 @@ func ModelUsesThinkingLevels(model string) bool {
 }
 
 // NormalizeReasoningEffortLevel validates and normalizes a reasoning effort
-// level for the given model. If the level is not supported, it returns the
-// first (lowest) level from the model's supported levels.
+// level for the given model. Returns false when the level is not supported.
 func NormalizeReasoningEffortLevel(model, effort string) (string, bool) {
 	levels := GetModelThinkingLevels(model)
 	if len(levels) == 0 {
@@ -104,12 +103,5 @@ func NormalizeReasoningEffortLevel(model, effort string) (string, bool) {
 			return lvl, true
 		}
 	}
-	return defaultReasoningLevel(levels), true
-}
-
-func defaultReasoningLevel(levels []string) string {
-	if len(levels) > 0 {
-		return levels[0]
-	}
-	return ""
+	return "", false
 }