From 6e4a602c6070466aaf464571a08c8365d31ab729 Mon Sep 17 00:00:00 2001 From: hkfires <10558748+hkfires@users.noreply.github.com> Date: Wed, 14 Jan 2026 22:45:07 +0800 Subject: [PATCH] fix(thinking): map reasoning_effort to thinkingConfig --- .../runtime/executor/antigravity_executor.go | 6 +- internal/thinking/convert.go | 93 +--- internal/thinking/suffix.go | 24 - internal/thinking/text.go | 41 ++ .../claude/antigravity_claude_request.go | 3 +- .../antigravity_openai_request.go | 32 +- .../gemini-cli_openai_request.go | 23 +- .../chat-completions/gemini_openai_request.go | 35 +- .../gemini_openai-responses_request.go | 26 +- .../openai/claude/openai_claude_request.go | 3 +- internal/util/gemini_thinking.go | 513 ------------------ internal/util/thinking.go | 122 ----- internal/util/thinking_text.go | 95 ---- 13 files changed, 107 insertions(+), 909 deletions(-) create mode 100644 internal/thinking/text.go delete mode 100644 internal/util/gemini_thinking.go delete mode 100644 internal/util/thinking.go delete mode 100644 internal/util/thinking_text.go diff --git a/internal/runtime/executor/antigravity_executor.go b/internal/runtime/executor/antigravity_executor.go index 90ebb53f..234b06cb 100644 --- a/internal/runtime/executor/antigravity_executor.go +++ b/internal/runtime/executor/antigravity_executor.go @@ -1452,15 +1452,15 @@ func generateProjectID() string { // This function is called AFTER thinking.ApplyThinking() to apply Claude-specific constraints. // // It handles: -// - Stripping thinking config for unsupported models (via util.StripThinkingConfigIfUnsupported) +// - Stripping thinking config for unsupported models // - Normalizing budget to model range (via thinking.ClampBudget) // - For Claude models: ensuring thinking budget < max_tokens // - For Claude models: removing thinkingConfig if budget < minimum allowed func normalizeAntigravityThinking(model string, payload []byte, isClaude bool) []byte { - payload = util.StripThinkingConfigIfUnsupported(model, payload) modelInfo := registry.LookupModelInfo(model) if modelInfo == nil || modelInfo.Thinking == nil { - return payload + // Model doesn't support thinking - strip any thinking config + return thinking.StripThinkingConfig(payload, "antigravity") } budget := gjson.GetBytes(payload, "request.generationConfig.thinkingConfig.thinkingBudget") if !budget.Exists() { diff --git a/internal/thinking/convert.go b/internal/thinking/convert.go index 92e54120..776ccef6 100644 --- a/internal/thinking/convert.go +++ b/internal/thinking/convert.go @@ -1,7 +1,6 @@ package thinking import ( - "fmt" "strings" "github.com/router-for-me/CLIProxyAPI/v6/internal/registry" @@ -111,7 +110,7 @@ const ( // detectModelCapability determines the thinking format capability of a model. // -// This is an internal function used by NormalizeForModel to decide conversion strategy. +// This is an internal function used by validation and conversion helpers. // It analyzes the model's ThinkingSupport configuration to classify the model: // - CapabilityNone: modelInfo.Thinking is nil (model doesn't support thinking) // - CapabilityBudgetOnly: Has Min/Max but no Levels (Claude, Gemini 2.5) @@ -141,93 +140,3 @@ func detectModelCapability(modelInfo *registry.ModelInfo) ModelCapability { return CapabilityNone } } - -// normalizeMixedConfig resolves a thinking configuration when both budget and level -// might be present, applying priority rules. 
-// -// Priority rules (Level takes precedence over Budget): -// - If level is non-empty: use level (special handling for "auto" and "none") -// - If level is empty and budget is set: use budget -// - If neither is set (budget=0, level=""): return ModeNone -// -// This function is used internally to handle ambiguous input configurations. -func normalizeMixedConfig(budget int, level string) ThinkingConfig { - normalizedLevel := strings.ToLower(strings.TrimSpace(level)) - if normalizedLevel != "" { - switch normalizedLevel { - case string(LevelAuto): - return ThinkingConfig{Mode: ModeAuto, Budget: -1, Level: ThinkingLevel(normalizedLevel)} - case string(LevelNone): - return ThinkingConfig{Mode: ModeNone, Budget: 0, Level: ThinkingLevel(normalizedLevel)} - default: - return ThinkingConfig{Mode: ModeLevel, Level: ThinkingLevel(normalizedLevel)} - } - } - switch budget { - case -1: - return ThinkingConfig{Mode: ModeAuto, Budget: -1} - case 0: - return ThinkingConfig{Mode: ModeNone, Budget: 0} - default: - return ThinkingConfig{Mode: ModeBudget, Budget: budget} - } -} - -// NormalizeForModel normalizes a thinking configuration for a specific model. -// -// This function converts the configuration format based on model capabilities: -// - Budget-only models (Claude, Gemini 2.5): Level → Budget conversion -// - Level-only models (OpenAI, iFlow): Budget → Level conversion -// - Hybrid models (Gemini 3): preserve the original format -// - No thinking support (Thinking is nil): degrade to ModeNone -// - Unknown model (modelInfo is nil): passthrough (preserve original format) -// -// Parameters: -// - config: The thinking configuration to normalize (must not be nil) -// - modelInfo: Model registry information containing ThinkingSupport properties -// -// Returns: -// - Normalized ThinkingConfig suitable for the model -// - Error if conversion fails (e.g., unsupported level or invalid budget) -func NormalizeForModel(config *ThinkingConfig, modelInfo *registry.ModelInfo) (*ThinkingConfig, error) { - if config == nil { - return nil, fmt.Errorf("thinking config is nil") - } - - normalized := *config - capability := detectModelCapability(modelInfo) - - // If model doesn't support thinking, degrade to ModeNone - if capability == CapabilityNone && config.Mode != ModeNone && config.Mode != ModeAuto { - return &ThinkingConfig{Mode: ModeNone, Budget: 0}, nil - } - - switch config.Mode { - case ModeAuto, ModeNone: - return &normalized, nil - case ModeBudget: - if capability == CapabilityLevelOnly { - level, ok := ConvertBudgetToLevel(config.Budget) - if !ok { - return nil, fmt.Errorf("invalid budget: %d", config.Budget) - } - normalized.Mode = ModeLevel - normalized.Level = ThinkingLevel(level) - normalized.Budget = 0 - } - return &normalized, nil - case ModeLevel: - if capability == CapabilityBudgetOnly { - budget, ok := ConvertLevelToBudget(string(config.Level)) - if !ok { - return nil, fmt.Errorf("unknown level: %s", config.Level) - } - normalized.Mode = ModeBudget - normalized.Budget = budget - normalized.Level = "" - } - return &normalized, nil - default: - return &normalized, nil - } -} diff --git a/internal/thinking/suffix.go b/internal/thinking/suffix.go index e3b4087e..275c0856 100644 --- a/internal/thinking/suffix.go +++ b/internal/thinking/suffix.go @@ -5,7 +5,6 @@ package thinking import ( - "fmt" "strconv" "strings" ) @@ -44,29 +43,6 @@ func ParseSuffix(model string) SuffixResult { } } -// ParseSuffixWithError extracts thinking suffix and returns an error on invalid format. 
-// -// Invalid format cases: -// - Contains "(" but does not end with ")" -// - Contains ")" without any "(" -// -// The error message includes the original input for debugging context. -func ParseSuffixWithError(model string) (SuffixResult, error) { - lastOpen := strings.LastIndex(model, "(") - if lastOpen == -1 { - if strings.Contains(model, ")") { - return SuffixResult{ModelName: model, HasSuffix: false}, NewThinkingError(ErrInvalidSuffix, fmt.Sprintf("invalid suffix format: %s", model)) - } - return SuffixResult{ModelName: model, HasSuffix: false}, nil - } - - if !strings.HasSuffix(model, ")") { - return SuffixResult{ModelName: model, HasSuffix: false}, NewThinkingError(ErrInvalidSuffix, fmt.Sprintf("invalid suffix format: %s", model)) - } - - return ParseSuffix(model), nil -} - // ParseNumericSuffix attempts to parse a raw suffix as a numeric budget value. // // This function parses the raw suffix content (from ParseSuffix.RawSuffix) as an integer. diff --git a/internal/thinking/text.go b/internal/thinking/text.go new file mode 100644 index 00000000..eed1ba28 --- /dev/null +++ b/internal/thinking/text.go @@ -0,0 +1,41 @@ +package thinking + +import ( + "github.com/tidwall/gjson" +) + +// GetThinkingText extracts the thinking text from a content part. +// Handles various formats: +// - Simple string: { "thinking": "text" } or { "text": "text" } +// - Wrapped object: { "thinking": { "text": "text", "cache_control": {...} } } +// - Gemini-style: { "thought": true, "text": "text" } +// Returns the extracted text string. +func GetThinkingText(part gjson.Result) string { + // Try direct text field first (Gemini-style) + if text := part.Get("text"); text.Exists() && text.Type == gjson.String { + return text.String() + } + + // Try thinking field + thinkingField := part.Get("thinking") + if !thinkingField.Exists() { + return "" + } + + // thinking is a string + if thinkingField.Type == gjson.String { + return thinkingField.String() + } + + // thinking is an object with inner text/thinking + if thinkingField.IsObject() { + if inner := thinkingField.Get("text"); inner.Exists() && inner.Type == gjson.String { + return inner.String() + } + if inner := thinkingField.Get("thinking"); inner.Exists() && inner.Type == gjson.String { + return inner.String() + } + } + + return "" +} diff --git a/internal/translator/antigravity/claude/antigravity_claude_request.go b/internal/translator/antigravity/claude/antigravity_claude_request.go index c3e4c63f..593ae8f6 100644 --- a/internal/translator/antigravity/claude/antigravity_claude_request.go +++ b/internal/translator/antigravity/claude/antigravity_claude_request.go @@ -13,6 +13,7 @@ import ( "github.com/router-for-me/CLIProxyAPI/v6/internal/cache" "github.com/router-for-me/CLIProxyAPI/v6/internal/registry" + "github.com/router-for-me/CLIProxyAPI/v6/internal/thinking" "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/common" "github.com/router-for-me/CLIProxyAPI/v6/internal/util" "github.com/tidwall/gjson" @@ -123,7 +124,7 @@ func ConvertClaudeRequestToAntigravity(modelName string, inputRawJSON []byte, _ contentTypeResult := contentResult.Get("type") if contentTypeResult.Type == gjson.String && contentTypeResult.String() == "thinking" { // Use GetThinkingText to handle wrapped thinking objects - thinkingText := util.GetThinkingText(contentResult) + thinkingText := thinking.GetThinkingText(contentResult) signatureResult := contentResult.Get("signature") clientSignature := "" if signatureResult.Exists() && signatureResult.String() != "" { 
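The hunk above swaps the deleted util.GetThinkingText call for thinking.GetThinkingText, whose implementation is added in internal/thinking/text.go earlier in this patch. A minimal test-style sketch of the three part shapes that helper documents, written as a Go example that could sit beside text.go (the JSON literals are illustrative, not taken from the patch):

package thinking

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func ExampleGetThinkingText() {
	// Anthropic-style part where "thinking" is a plain string.
	fmt.Println(GetThinkingText(gjson.Parse(`{"type":"thinking","thinking":"step 1"}`)))
	// Wrapped object carrying cache_control metadata; the inner "text" is returned.
	fmt.Println(GetThinkingText(gjson.Parse(`{"thinking":{"text":"step 2","cache_control":{"type":"ephemeral"}}}`)))
	// Gemini-style thought part; the top-level "text" field wins.
	fmt.Println(GetThinkingText(gjson.Parse(`{"thought":true,"text":"step 3"}`)))
	// Output:
	// step 1
	// step 2
	// step 3
}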
diff --git a/internal/translator/antigravity/openai/chat-completions/antigravity_openai_request.go b/internal/translator/antigravity/openai/chat-completions/antigravity_openai_request.go index 87782a5a..7cfaa6e9 100644 --- a/internal/translator/antigravity/openai/chat-completions/antigravity_openai_request.go +++ b/internal/translator/antigravity/openai/chat-completions/antigravity_openai_request.go @@ -36,33 +36,27 @@ func ConvertOpenAIRequestToAntigravity(modelName string, inputRawJSON []byte, _ // Model out, _ = sjson.SetBytes(out, "model", modelName) - // Reasoning effort -> thinkingBudget/include_thoughts - // Note: OpenAI official fields take precedence over extra_body.google.thinking_config + // Apply thinking configuration: convert OpenAI reasoning_effort to Gemini CLI thinkingConfig. + // Inline translation-only mapping; capability checks happen later in ApplyThinking. + modelInfo := registry.LookupModelInfo(modelName) re := gjson.GetBytes(rawJSON, "reasoning_effort") - hasOfficialThinking := re.Exists() - modelInfo := registry.GetGlobalRegistry().GetModelInfo(modelName) - if hasOfficialThinking && modelInfo != nil && modelInfo.Thinking != nil { + if re.Exists() { effort := strings.ToLower(strings.TrimSpace(re.String())) - if util.IsGemini3Model(modelName) { - switch effort { - case "none": - out, _ = sjson.DeleteBytes(out, "request.generationConfig.thinkingConfig") - case "auto": - includeThoughts := true - out = util.ApplyGeminiCLIThinkingLevel(out, "", &includeThoughts) - default: - if level, ok := util.ValidateGemini3ThinkingLevel(modelName, effort); ok { - out = util.ApplyGeminiCLIThinkingLevel(out, level, nil) - } + if effort != "" { + thinkingPath := "request.generationConfig.thinkingConfig" + if effort == "auto" { + out, _ = sjson.SetBytes(out, thinkingPath+".thinkingBudget", -1) + out, _ = sjson.SetBytes(out, thinkingPath+".includeThoughts", true) + } else { + out, _ = sjson.SetBytes(out, thinkingPath+".thinkingLevel", effort) + out, _ = sjson.SetBytes(out, thinkingPath+".includeThoughts", effort != "none") } - } else if len(modelInfo.Thinking.Levels) == 0 { - out = util.ApplyReasoningEffortToGeminiCLI(out, effort) } } // Cherry Studio extension extra_body.google.thinking_config (effective only when official fields are absent) // Only apply for models that use numeric budgets, not discrete levels. - if !hasOfficialThinking && modelInfo != nil && modelInfo.Thinking != nil && len(modelInfo.Thinking.Levels) == 0 { + if !re.Exists() && modelInfo != nil && modelInfo.Thinking != nil && len(modelInfo.Thinking.Levels) == 0 { if tc := gjson.GetBytes(rawJSON, "extra_body.google.thinking_config"); tc.Exists() && tc.IsObject() { var setBudget bool var budget int diff --git a/internal/translator/gemini-cli/openai/chat-completions/gemini-cli_openai_request.go b/internal/translator/gemini-cli/openai/chat-completions/gemini-cli_openai_request.go index 1a6505d0..09d1dea7 100644 --- a/internal/translator/gemini-cli/openai/chat-completions/gemini-cli_openai_request.go +++ b/internal/translator/gemini-cli/openai/chat-completions/gemini-cli_openai_request.go @@ -36,18 +36,27 @@ func ConvertOpenAIRequestToGeminiCLI(modelName string, inputRawJSON []byte, _ bo // Model out, _ = sjson.SetBytes(out, "model", modelName) - // Reasoning effort -> thinkingBudget/include_thoughts - // Note: OpenAI official fields take precedence over extra_body.google.thinking_config + // Apply thinking configuration: convert OpenAI reasoning_effort to Gemini CLI thinkingConfig. 
+ // Inline translation-only mapping; capability checks happen later in ApplyThinking. re := gjson.GetBytes(rawJSON, "reasoning_effort") - hasOfficialThinking := re.Exists() - modelInfo := registry.GetGlobalRegistry().GetModelInfo(modelName) - if hasOfficialThinking && modelInfo != nil && modelInfo.Thinking != nil && len(modelInfo.Thinking.Levels) == 0 { - out = util.ApplyReasoningEffortToGeminiCLI(out, re.String()) + modelInfo := registry.LookupModelInfo(modelName) + if re.Exists() { + effort := strings.ToLower(strings.TrimSpace(re.String())) + if effort != "" { + thinkingPath := "request.generationConfig.thinkingConfig" + if effort == "auto" { + out, _ = sjson.SetBytes(out, thinkingPath+".thinkingBudget", -1) + out, _ = sjson.SetBytes(out, thinkingPath+".includeThoughts", true) + } else { + out, _ = sjson.SetBytes(out, thinkingPath+".thinkingLevel", effort) + out, _ = sjson.SetBytes(out, thinkingPath+".includeThoughts", effort != "none") + } + } } // Cherry Studio extension extra_body.google.thinking_config (effective only when official fields are absent) // Only apply for models that use numeric budgets, not discrete levels. - if !hasOfficialThinking && modelInfo != nil && modelInfo.Thinking != nil && len(modelInfo.Thinking.Levels) == 0 { + if !re.Exists() && modelInfo != nil && modelInfo.Thinking != nil && len(modelInfo.Thinking.Levels) == 0 { if tc := gjson.GetBytes(rawJSON, "extra_body.google.thinking_config"); tc.Exists() && tc.IsObject() { var setBudget bool var budget int diff --git a/internal/translator/gemini/openai/chat-completions/gemini_openai_request.go b/internal/translator/gemini/openai/chat-completions/gemini_openai_request.go index 2328ad36..6e1a5014 100644 --- a/internal/translator/gemini/openai/chat-completions/gemini_openai_request.go +++ b/internal/translator/gemini/openai/chat-completions/gemini_openai_request.go @@ -36,36 +36,27 @@ func ConvertOpenAIRequestToGemini(modelName string, inputRawJSON []byte, _ bool) // Model out, _ = sjson.SetBytes(out, "model", modelName) - // Reasoning effort -> thinkingBudget/include_thoughts - // Note: OpenAI official fields take precedence over extra_body.google.thinking_config - // Only apply numeric budgets for models that use budgets (not discrete levels) to avoid - // incorrectly applying thinkingBudget for level-based models like gpt-5. Gemini 3 models - // use thinkingLevel/includeThoughts instead. + // Apply thinking configuration: convert OpenAI reasoning_effort to Gemini thinkingConfig. + // Inline translation-only mapping; capability checks happen later in ApplyThinking. 
+ modelInfo := registry.LookupModelInfo(modelName) re := gjson.GetBytes(rawJSON, "reasoning_effort") - hasOfficialThinking := re.Exists() - modelInfo := registry.GetGlobalRegistry().GetModelInfo(modelName) - if hasOfficialThinking && modelInfo != nil && modelInfo.Thinking != nil { + if re.Exists() { effort := strings.ToLower(strings.TrimSpace(re.String())) - if util.IsGemini3Model(modelName) { - switch effort { - case "none": - out, _ = sjson.DeleteBytes(out, "generationConfig.thinkingConfig") - case "auto": - includeThoughts := true - out = util.ApplyGeminiThinkingLevel(out, "", &includeThoughts) - default: - if level, ok := util.ValidateGemini3ThinkingLevel(modelName, effort); ok { - out = util.ApplyGeminiThinkingLevel(out, level, nil) - } + if effort != "" { + thinkingPath := "generationConfig.thinkingConfig" + if effort == "auto" { + out, _ = sjson.SetBytes(out, thinkingPath+".thinkingBudget", -1) + out, _ = sjson.SetBytes(out, thinkingPath+".includeThoughts", true) + } else { + out, _ = sjson.SetBytes(out, thinkingPath+".thinkingLevel", effort) + out, _ = sjson.SetBytes(out, thinkingPath+".includeThoughts", effort != "none") } - } else if len(modelInfo.Thinking.Levels) == 0 { - out = util.ApplyReasoningEffortToGemini(out, effort) } } // Cherry Studio extension extra_body.google.thinking_config (effective only when official fields are absent) // Only apply for models that use numeric budgets, not discrete levels. - if !hasOfficialThinking && modelInfo != nil && modelInfo.Thinking != nil && len(modelInfo.Thinking.Levels) == 0 { + if !re.Exists() && modelInfo != nil && modelInfo.Thinking != nil && len(modelInfo.Thinking.Levels) == 0 { if tc := gjson.GetBytes(rawJSON, "extra_body.google.thinking_config"); tc.Exists() && tc.IsObject() { var setBudget bool var budget int diff --git a/internal/translator/gemini/openai/responses/gemini_openai-responses_request.go b/internal/translator/gemini/openai/responses/gemini_openai-responses_request.go index 62e85eef..81bb7d40 100644 --- a/internal/translator/gemini/openai/responses/gemini_openai-responses_request.go +++ b/internal/translator/gemini/openai/responses/gemini_openai-responses_request.go @@ -6,7 +6,6 @@ import ( "github.com/router-for-me/CLIProxyAPI/v6/internal/registry" "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/common" - "github.com/router-for-me/CLIProxyAPI/v6/internal/util" "github.com/tidwall/gjson" "github.com/tidwall/sjson" ) @@ -389,18 +388,27 @@ func ConvertOpenAIResponsesRequestToGemini(modelName string, inputRawJSON []byte out, _ = sjson.Set(out, "generationConfig.stopSequences", sequences) } - // OpenAI official reasoning fields take precedence - // Only convert for models that use numeric budgets (not discrete levels). - hasOfficialThinking := root.Get("reasoning.effort").Exists() - modelInfo := registry.GetGlobalRegistry().GetModelInfo(modelName) - if hasOfficialThinking && modelInfo != nil && modelInfo.Thinking != nil && len(modelInfo.Thinking.Levels) == 0 { - reasoningEffort := root.Get("reasoning.effort") - out = string(util.ApplyReasoningEffortToGemini([]byte(out), reasoningEffort.String())) + // Apply thinking configuration: convert OpenAI Responses API reasoning.effort to Gemini thinkingConfig. + // Inline translation-only mapping; capability checks happen later in ApplyThinking. 
+ modelInfo := registry.LookupModelInfo(modelName) + re := root.Get("reasoning.effort") + if re.Exists() { + effort := strings.ToLower(strings.TrimSpace(re.String())) + if effort != "" { + thinkingPath := "generationConfig.thinkingConfig" + if effort == "auto" { + out, _ = sjson.Set(out, thinkingPath+".thinkingBudget", -1) + out, _ = sjson.Set(out, thinkingPath+".includeThoughts", true) + } else { + out, _ = sjson.Set(out, thinkingPath+".thinkingLevel", effort) + out, _ = sjson.Set(out, thinkingPath+".includeThoughts", effort != "none") + } + } } // Cherry Studio extension (applies only when official fields are missing) // Only apply for models that use numeric budgets, not discrete levels. - if !hasOfficialThinking && modelInfo != nil && modelInfo.Thinking != nil && len(modelInfo.Thinking.Levels) == 0 { + if !re.Exists() && modelInfo != nil && modelInfo.Thinking != nil && len(modelInfo.Thinking.Levels) == 0 { if tc := root.Get("extra_body.google.thinking_config"); tc.Exists() && tc.IsObject() { var setBudget bool var budget int diff --git a/internal/translator/openai/claude/openai_claude_request.go b/internal/translator/openai/claude/openai_claude_request.go index 44cb237e..3817b77b 100644 --- a/internal/translator/openai/claude/openai_claude_request.go +++ b/internal/translator/openai/claude/openai_claude_request.go @@ -10,7 +10,6 @@ import ( "strings" "github.com/router-for-me/CLIProxyAPI/v6/internal/thinking" - "github.com/router-for-me/CLIProxyAPI/v6/internal/util" "github.com/tidwall/gjson" "github.com/tidwall/sjson" ) @@ -130,7 +129,7 @@ func ConvertClaudeRequestToOpenAI(modelName string, inputRawJSON []byte, stream case "thinking": // Only map thinking to reasoning_content for assistant messages (security: prevent injection) if role == "assistant" { - thinkingText := util.GetThinkingText(part) + thinkingText := thinking.GetThinkingText(part) // Skip empty or whitespace-only thinking if strings.TrimSpace(thinkingText) != "" { reasoningParts = append(reasoningParts, thinkingText) diff --git a/internal/util/gemini_thinking.go b/internal/util/gemini_thinking.go deleted file mode 100644 index 838def6e..00000000 --- a/internal/util/gemini_thinking.go +++ /dev/null @@ -1,513 +0,0 @@ -package util - -import ( - "regexp" - "strings" - - "github.com/tidwall/gjson" - "github.com/tidwall/sjson" -) - -// Gemini model family detection patterns -var ( - gemini3Pattern = regexp.MustCompile(`(?i)^gemini[_-]?3[_-]`) - gemini3ProPattern = regexp.MustCompile(`(?i)^gemini[_-]?3[_-]pro`) - gemini3FlashPattern = regexp.MustCompile(`(?i)^gemini[_-]?3[_-]flash`) - gemini25Pattern = regexp.MustCompile(`(?i)^gemini[_-]?2\.5[_-]`) -) - -// IsGemini3Model returns true if the model is a Gemini 3 family model. -// Gemini 3 models should use thinkingLevel (string) instead of thinkingBudget (number). -func IsGemini3Model(model string) bool { - return gemini3Pattern.MatchString(model) -} - -// IsGemini3ProModel returns true if the model is a Gemini 3 Pro variant. -// Gemini 3 Pro supports thinkingLevel: "low", "high" (default: "high") -func IsGemini3ProModel(model string) bool { - return gemini3ProPattern.MatchString(model) -} - -// IsGemini3FlashModel returns true if the model is a Gemini 3 Flash variant. -// Gemini 3 Flash supports thinkingLevel: "minimal", "low", "medium", "high" (default: "high") -func IsGemini3FlashModel(model string) bool { - return gemini3FlashPattern.MatchString(model) -} - -// IsGemini25Model returns true if the model is a Gemini 2.5 family model. 
-// Gemini 2.5 models should use thinkingBudget (number). -func IsGemini25Model(model string) bool { - return gemini25Pattern.MatchString(model) -} - -// Gemini3ProThinkingLevels are the valid thinkingLevel values for Gemini 3 Pro models. -var Gemini3ProThinkingLevels = []string{"low", "high"} - -// Gemini3FlashThinkingLevels are the valid thinkingLevel values for Gemini 3 Flash models. -var Gemini3FlashThinkingLevels = []string{"minimal", "low", "medium", "high"} - -func ApplyGeminiThinkingConfig(body []byte, budget *int, includeThoughts *bool) []byte { - if budget == nil && includeThoughts == nil { - return body - } - updated := body - if budget != nil { - valuePath := "generationConfig.thinkingConfig.thinkingBudget" - rewritten, err := sjson.SetBytes(updated, valuePath, *budget) - if err == nil { - updated = rewritten - } - } - // Default to including thoughts when a budget override is present but no explicit include flag is provided. - incl := includeThoughts - if incl == nil && budget != nil && *budget != 0 { - defaultInclude := true - incl = &defaultInclude - } - if incl != nil { - if !gjson.GetBytes(updated, "generationConfig.thinkingConfig.includeThoughts").Exists() && - !gjson.GetBytes(updated, "generationConfig.thinkingConfig.include_thoughts").Exists() { - valuePath := "generationConfig.thinkingConfig.include_thoughts" - rewritten, err := sjson.SetBytes(updated, valuePath, *incl) - if err == nil { - updated = rewritten - } - } - } - return updated -} - -func ApplyGeminiCLIThinkingConfig(body []byte, budget *int, includeThoughts *bool) []byte { - if budget == nil && includeThoughts == nil { - return body - } - updated := body - if budget != nil { - valuePath := "request.generationConfig.thinkingConfig.thinkingBudget" - rewritten, err := sjson.SetBytes(updated, valuePath, *budget) - if err == nil { - updated = rewritten - } - } - // Default to including thoughts when a budget override is present but no explicit include flag is provided. - incl := includeThoughts - if incl == nil && budget != nil && *budget != 0 { - defaultInclude := true - incl = &defaultInclude - } - if incl != nil { - if !gjson.GetBytes(updated, "request.generationConfig.thinkingConfig.includeThoughts").Exists() && - !gjson.GetBytes(updated, "request.generationConfig.thinkingConfig.include_thoughts").Exists() { - valuePath := "request.generationConfig.thinkingConfig.include_thoughts" - rewritten, err := sjson.SetBytes(updated, valuePath, *incl) - if err == nil { - updated = rewritten - } - } - } - return updated -} - -// ApplyGeminiThinkingLevel applies thinkingLevel config for Gemini 3 models. -// For standard Gemini API format (generationConfig.thinkingConfig path). -// Per Google's documentation, Gemini 3 models should use thinkingLevel instead of thinkingBudget. -func ApplyGeminiThinkingLevel(body []byte, level string, includeThoughts *bool) []byte { - if level == "" && includeThoughts == nil { - return body - } - updated := body - if level != "" { - valuePath := "generationConfig.thinkingConfig.thinkingLevel" - rewritten, err := sjson.SetBytes(updated, valuePath, level) - if err == nil { - updated = rewritten - } - } - // Default to including thoughts when a level is set but no explicit include flag is provided. 
- incl := includeThoughts - if incl == nil && level != "" { - defaultInclude := true - incl = &defaultInclude - } - if incl != nil { - if !gjson.GetBytes(updated, "generationConfig.thinkingConfig.includeThoughts").Exists() && - !gjson.GetBytes(updated, "generationConfig.thinkingConfig.include_thoughts").Exists() { - valuePath := "generationConfig.thinkingConfig.includeThoughts" - rewritten, err := sjson.SetBytes(updated, valuePath, *incl) - if err == nil { - updated = rewritten - } - } - } - if tb := gjson.GetBytes(body, "generationConfig.thinkingConfig.thinkingBudget"); tb.Exists() { - updated, _ = sjson.DeleteBytes(updated, "generationConfig.thinkingConfig.thinkingBudget") - } - return updated -} - -// ApplyGeminiCLIThinkingLevel applies thinkingLevel config for Gemini 3 models. -// For Gemini CLI API format (request.generationConfig.thinkingConfig path). -// Per Google's documentation, Gemini 3 models should use thinkingLevel instead of thinkingBudget. -func ApplyGeminiCLIThinkingLevel(body []byte, level string, includeThoughts *bool) []byte { - if level == "" && includeThoughts == nil { - return body - } - updated := body - if level != "" { - valuePath := "request.generationConfig.thinkingConfig.thinkingLevel" - rewritten, err := sjson.SetBytes(updated, valuePath, level) - if err == nil { - updated = rewritten - } - } - // Default to including thoughts when a level is set but no explicit include flag is provided. - incl := includeThoughts - if incl == nil && level != "" { - defaultInclude := true - incl = &defaultInclude - } - if incl != nil { - if !gjson.GetBytes(updated, "request.generationConfig.thinkingConfig.includeThoughts").Exists() && - !gjson.GetBytes(updated, "request.generationConfig.thinkingConfig.include_thoughts").Exists() { - valuePath := "request.generationConfig.thinkingConfig.includeThoughts" - rewritten, err := sjson.SetBytes(updated, valuePath, *incl) - if err == nil { - updated = rewritten - } - } - } - if tb := gjson.GetBytes(body, "request.generationConfig.thinkingConfig.thinkingBudget"); tb.Exists() { - updated, _ = sjson.DeleteBytes(updated, "request.generationConfig.thinkingConfig.thinkingBudget") - } - return updated -} - -// ValidateGemini3ThinkingLevel validates that the thinkingLevel is valid for the Gemini 3 model variant. -// Returns the validated level (normalized to lowercase) and true if valid, or empty string and false if invalid. -func ValidateGemini3ThinkingLevel(model, level string) (string, bool) { - if level == "" { - return "", false - } - normalized := strings.ToLower(strings.TrimSpace(level)) - - var validLevels []string - if IsGemini3ProModel(model) { - validLevels = Gemini3ProThinkingLevels - } else if IsGemini3FlashModel(model) { - validLevels = Gemini3FlashThinkingLevels - } else if IsGemini3Model(model) { - // Unknown Gemini 3 variant - allow all levels as fallback - validLevels = Gemini3FlashThinkingLevels - } else { - return "", false - } - - for _, valid := range validLevels { - if normalized == valid { - return normalized, true - } - } - return "", false -} - -// ThinkingBudgetToGemini3Level converts a thinkingBudget to a thinkingLevel for Gemini 3 models. -// This provides backward compatibility when thinkingBudget is provided for Gemini 3 models. -// Returns the appropriate thinkingLevel and true if conversion is possible. 
-func ThinkingBudgetToGemini3Level(model string, budget int) (string, bool) { - if !IsGemini3Model(model) { - return "", false - } - - // Map budget to level based on Google's documentation - // Gemini 3 Pro: "low", "high" (default: "high") - // Gemini 3 Flash: "minimal", "low", "medium", "high" (default: "high") - switch { - case budget == -1: - // Dynamic budget maps to "high" (API default) - return "high", true - case budget == 0: - // Zero budget - Gemini 3 doesn't support disabling thinking - // Map to lowest available level - if IsGemini3FlashModel(model) { - return "minimal", true - } - return "low", true - case budget > 0 && budget <= 512: - if IsGemini3FlashModel(model) { - return "minimal", true - } - return "low", true - case budget <= 1024: - return "low", true - case budget <= 8192: - if IsGemini3FlashModel(model) { - return "medium", true - } - return "low", true // Pro doesn't have medium, use low - default: - return "high", true - } -} - -// modelsWithDefaultThinking lists models that should have thinking enabled by default -// when no explicit thinkingConfig is provided. -// Note: Gemini 3 models are NOT included here because per Google's official documentation: -// - thinkingLevel defaults to "high" (dynamic thinking) -// - includeThoughts defaults to false -// -// We should not override these API defaults; let users explicitly configure if needed. -var modelsWithDefaultThinking = map[string]bool{ - // "gemini-3-pro-preview": true, - // "gemini-3-pro-image-preview": true, - // "gemini-3-flash-preview": true, -} - -// ModelHasDefaultThinking returns true if the model should have thinking enabled by default. -func ModelHasDefaultThinking(model string) bool { - return modelsWithDefaultThinking[model] -} - -// ApplyDefaultThinkingIfNeeded injects default thinkingConfig for models that require it. -// For standard Gemini API format (generationConfig.thinkingConfig path). -// Returns the modified body if thinkingConfig was added, otherwise returns the original. -// For Gemini 3 models, uses thinkingLevel instead of thinkingBudget per Google's documentation. -func ApplyDefaultThinkingIfNeeded(model string, body []byte) []byte { - if !ModelHasDefaultThinking(model) { - return body - } - if gjson.GetBytes(body, "generationConfig.thinkingConfig").Exists() { - return body - } - // Gemini 3 models use thinkingLevel instead of thinkingBudget - if IsGemini3Model(model) { - // Don't set a default - let the API use its dynamic default ("high") - // Only set includeThoughts - updated, _ := sjson.SetBytes(body, "generationConfig.thinkingConfig.includeThoughts", true) - return updated - } - // Gemini 2.5 and other models use thinkingBudget - updated, _ := sjson.SetBytes(body, "generationConfig.thinkingConfig.thinkingBudget", -1) - updated, _ = sjson.SetBytes(updated, "generationConfig.thinkingConfig.include_thoughts", true) - return updated -} - -// StripThinkingConfigIfUnsupported removes thinkingConfig from the request body -// when the target model does not advertise Thinking capability. It cleans both -// standard Gemini and Gemini CLI JSON envelopes. This acts as a final safety net -// in case upstream injected thinking for an unsupported model. 
-func StripThinkingConfigIfUnsupported(model string, body []byte) []byte { - if ModelSupportsThinking(model) || len(body) == 0 { - return body - } - updated := body - // Gemini CLI path - updated, _ = sjson.DeleteBytes(updated, "request.generationConfig.thinkingConfig") - // Standard Gemini path - updated, _ = sjson.DeleteBytes(updated, "generationConfig.thinkingConfig") - return updated -} - -// NormalizeGeminiThinkingBudget normalizes the thinkingBudget value in a standard Gemini -// request body (generationConfig.thinkingConfig.thinkingBudget path). -// For Gemini 3 models, converts thinkingBudget to thinkingLevel per Google's documentation, -// unless skipGemini3Check is provided and true. -func NormalizeGeminiThinkingBudget(model string, body []byte, skipGemini3Check ...bool) []byte { - const budgetPath = "generationConfig.thinkingConfig.thinkingBudget" - const levelPath = "generationConfig.thinkingConfig.thinkingLevel" - - budget := gjson.GetBytes(body, budgetPath) - if !budget.Exists() { - return body - } - - // For Gemini 3 models, convert thinkingBudget to thinkingLevel - skipGemini3 := len(skipGemini3Check) > 0 && skipGemini3Check[0] - if IsGemini3Model(model) && !skipGemini3 { - if level, ok := ThinkingBudgetToGemini3Level(model, int(budget.Int())); ok { - updated, _ := sjson.SetBytes(body, levelPath, level) - updated, _ = sjson.DeleteBytes(updated, budgetPath) - return updated - } - // If conversion fails, just remove the budget (let API use default) - updated, _ := sjson.DeleteBytes(body, budgetPath) - return updated - } - - // For Gemini 2.5 and other models, normalize the budget value - normalized := NormalizeThinkingBudget(model, int(budget.Int())) - updated, _ := sjson.SetBytes(body, budgetPath, normalized) - return updated -} - -// NormalizeGeminiCLIThinkingBudget normalizes the thinkingBudget value in a Gemini CLI -// request body (request.generationConfig.thinkingConfig.thinkingBudget path). -// For Gemini 3 models, converts thinkingBudget to thinkingLevel per Google's documentation, -// unless skipGemini3Check is provided and true. -func NormalizeGeminiCLIThinkingBudget(model string, body []byte, skipGemini3Check ...bool) []byte { - const budgetPath = "request.generationConfig.thinkingConfig.thinkingBudget" - const levelPath = "request.generationConfig.thinkingConfig.thinkingLevel" - - budget := gjson.GetBytes(body, budgetPath) - if !budget.Exists() { - return body - } - - // For Gemini 3 models, convert thinkingBudget to thinkingLevel - skipGemini3 := len(skipGemini3Check) > 0 && skipGemini3Check[0] - if IsGemini3Model(model) && !skipGemini3 { - if level, ok := ThinkingBudgetToGemini3Level(model, int(budget.Int())); ok { - updated, _ := sjson.SetBytes(body, levelPath, level) - updated, _ = sjson.DeleteBytes(updated, budgetPath) - return updated - } - // If conversion fails, just remove the budget (let API use default) - updated, _ := sjson.DeleteBytes(body, budgetPath) - return updated - } - - // For Gemini 2.5 and other models, normalize the budget value - normalized := NormalizeThinkingBudget(model, int(budget.Int())) - updated, _ := sjson.SetBytes(body, budgetPath, normalized) - return updated -} - -// ReasoningEffortBudgetMapping defines the thinkingBudget values for each reasoning effort level. 
-var ReasoningEffortBudgetMapping = map[string]int{ - "none": 0, - "auto": -1, - "minimal": 512, - "low": 1024, - "medium": 8192, - "high": 24576, - "xhigh": 32768, -} - -// ApplyReasoningEffortToGemini applies OpenAI reasoning_effort to Gemini thinkingConfig -// for standard Gemini API format (generationConfig.thinkingConfig path). -// Returns the modified body with thinkingBudget and include_thoughts set. -func ApplyReasoningEffortToGemini(body []byte, effort string) []byte { - normalized := strings.ToLower(strings.TrimSpace(effort)) - if normalized == "" { - return body - } - - budgetPath := "generationConfig.thinkingConfig.thinkingBudget" - includePath := "generationConfig.thinkingConfig.include_thoughts" - - if normalized == "none" { - body, _ = sjson.DeleteBytes(body, "generationConfig.thinkingConfig") - return body - } - - budget, ok := ReasoningEffortBudgetMapping[normalized] - if !ok { - return body - } - - body, _ = sjson.SetBytes(body, budgetPath, budget) - body, _ = sjson.SetBytes(body, includePath, true) - return body -} - -// ApplyReasoningEffortToGeminiCLI applies OpenAI reasoning_effort to Gemini CLI thinkingConfig -// for Gemini CLI API format (request.generationConfig.thinkingConfig path). -// Returns the modified body with thinkingBudget and include_thoughts set. -func ApplyReasoningEffortToGeminiCLI(body []byte, effort string) []byte { - normalized := strings.ToLower(strings.TrimSpace(effort)) - if normalized == "" { - return body - } - - budgetPath := "request.generationConfig.thinkingConfig.thinkingBudget" - includePath := "request.generationConfig.thinkingConfig.include_thoughts" - - if normalized == "none" { - body, _ = sjson.DeleteBytes(body, "request.generationConfig.thinkingConfig") - return body - } - - budget, ok := ReasoningEffortBudgetMapping[normalized] - if !ok { - return body - } - - body, _ = sjson.SetBytes(body, budgetPath, budget) - body, _ = sjson.SetBytes(body, includePath, true) - return body -} - -// ConvertThinkingLevelToBudget checks for "generationConfig.thinkingConfig.thinkingLevel" -// and converts it to "thinkingBudget" for Gemini 2.5 models. -// For Gemini 3 models, preserves thinkingLevel unless skipGemini3Check is provided and true. -// Mappings for Gemini 2.5: -// - "high" -> 32768 -// - "medium" -> 8192 -// - "low" -> 1024 -// - "minimal" -> 512 -// -// It removes "thinkingLevel" after conversion (for Gemini 2.5 only). -func ConvertThinkingLevelToBudget(body []byte, model string, skipGemini3Check ...bool) []byte { - levelPath := "generationConfig.thinkingConfig.thinkingLevel" - res := gjson.GetBytes(body, levelPath) - if !res.Exists() { - return body - } - - // For Gemini 3 models, preserve thinkingLevel unless explicitly skipped - skipGemini3 := len(skipGemini3Check) > 0 && skipGemini3Check[0] - if IsGemini3Model(model) && !skipGemini3 { - return body - } - - budget, ok := ThinkingLevelToBudget(res.String()) - if !ok { - updated, _ := sjson.DeleteBytes(body, levelPath) - return updated - } - - budgetPath := "generationConfig.thinkingConfig.thinkingBudget" - updated, err := sjson.SetBytes(body, budgetPath, budget) - if err != nil { - return body - } - - updated, err = sjson.DeleteBytes(updated, levelPath) - if err != nil { - return body - } - return updated -} - -// ConvertThinkingLevelToBudgetCLI checks for "request.generationConfig.thinkingConfig.thinkingLevel" -// and converts it to "thinkingBudget" for Gemini 2.5 models. -// For Gemini 3 models, preserves thinkingLevel as-is (does not convert). 
-func ConvertThinkingLevelToBudgetCLI(body []byte, model string) []byte { - levelPath := "request.generationConfig.thinkingConfig.thinkingLevel" - res := gjson.GetBytes(body, levelPath) - if !res.Exists() { - return body - } - - // For Gemini 3 models, preserve thinkingLevel - don't convert to budget - if IsGemini3Model(model) { - return body - } - - budget, ok := ThinkingLevelToBudget(res.String()) - if !ok { - updated, _ := sjson.DeleteBytes(body, levelPath) - return updated - } - - budgetPath := "request.generationConfig.thinkingConfig.thinkingBudget" - updated, err := sjson.SetBytes(body, budgetPath, budget) - if err != nil { - return body - } - - updated, err = sjson.DeleteBytes(updated, levelPath) - if err != nil { - return body - } - return updated -} diff --git a/internal/util/thinking.go b/internal/util/thinking.go deleted file mode 100644 index 657a1ff1..00000000 --- a/internal/util/thinking.go +++ /dev/null @@ -1,122 +0,0 @@ -package util - -import ( - "strings" - - "github.com/router-for-me/CLIProxyAPI/v6/internal/registry" -) - -// ModelSupportsThinking reports whether the given model has Thinking capability -// according to the model registry metadata (provider-agnostic). -// -// Deprecated: Use thinking.ApplyThinking with modelInfo.Thinking check. -func ModelSupportsThinking(model string) bool { - if model == "" { - return false - } - // First check the global dynamic registry - if info := registry.GetGlobalRegistry().GetModelInfo(model); info != nil { - return info.Thinking != nil - } - // Fallback: check static model definitions - if info := registry.LookupStaticModelInfo(model); info != nil { - return info.Thinking != nil - } - // Fallback: check Antigravity static config - if cfg := registry.GetAntigravityModelConfig()[model]; cfg != nil { - return cfg.Thinking != nil - } - return false -} - -// NormalizeThinkingBudget clamps the requested thinking budget to the -// supported range for the specified model using registry metadata only. -// If the model is unknown or has no Thinking metadata, returns the original budget. -// For dynamic (-1), returns -1 if DynamicAllowed; otherwise approximates mid-range -// or min (0 if zero is allowed and mid <= 0). -// -// Deprecated: Use thinking.ValidateConfig for budget normalization. -func NormalizeThinkingBudget(model string, budget int) int { - if budget == -1 { // dynamic - if found, minBudget, maxBudget, zeroAllowed, dynamicAllowed := thinkingRangeFromRegistry(model); found { - if dynamicAllowed { - return -1 - } - mid := (minBudget + maxBudget) / 2 - if mid <= 0 && zeroAllowed { - return 0 - } - if mid <= 0 { - return minBudget - } - return mid - } - return -1 - } - if found, minBudget, maxBudget, zeroAllowed, _ := thinkingRangeFromRegistry(model); found { - if budget == 0 { - if zeroAllowed { - return 0 - } - return minBudget - } - if budget < minBudget { - return minBudget - } - if budget > maxBudget { - return maxBudget - } - return budget - } - return budget -} - -// thinkingRangeFromRegistry attempts to read thinking ranges from the model registry. 
-func thinkingRangeFromRegistry(model string) (found bool, min int, max int, zeroAllowed bool, dynamicAllowed bool) { - if model == "" { - return false, 0, 0, false, false - } - // First check global dynamic registry - if info := registry.GetGlobalRegistry().GetModelInfo(model); info != nil && info.Thinking != nil { - return true, info.Thinking.Min, info.Thinking.Max, info.Thinking.ZeroAllowed, info.Thinking.DynamicAllowed - } - // Fallback: check static model definitions - if info := registry.LookupStaticModelInfo(model); info != nil && info.Thinking != nil { - return true, info.Thinking.Min, info.Thinking.Max, info.Thinking.ZeroAllowed, info.Thinking.DynamicAllowed - } - // Fallback: check Antigravity static config - if cfg := registry.GetAntigravityModelConfig()[model]; cfg != nil && cfg.Thinking != nil { - return true, cfg.Thinking.Min, cfg.Thinking.Max, cfg.Thinking.ZeroAllowed, cfg.Thinking.DynamicAllowed - } - return false, 0, 0, false, false -} - -// ThinkingLevelToBudget maps a Gemini thinkingLevel to a numeric thinking budget (tokens). -// -// Mappings: -// - "minimal" -> 512 -// - "low" -> 1024 -// - "medium" -> 8192 -// - "high" -> 32768 -// -// Returns false when the level is empty or unsupported. -// -// Deprecated: Use thinking.ConvertLevelToBudget instead. -func ThinkingLevelToBudget(level string) (int, bool) { - if level == "" { - return 0, false - } - normalized := strings.ToLower(strings.TrimSpace(level)) - switch normalized { - case "minimal": - return 512, true - case "low": - return 1024, true - case "medium": - return 8192, true - case "high": - return 32768, true - default: - return 0, false - } -} diff --git a/internal/util/thinking_text.go b/internal/util/thinking_text.go deleted file mode 100644 index 7ebb76fc..00000000 --- a/internal/util/thinking_text.go +++ /dev/null @@ -1,95 +0,0 @@ -package util - -import ( - "github.com/tidwall/gjson" - "github.com/tidwall/sjson" -) - -// GetThinkingText extracts the thinking text from a content part. -// Handles various formats: -// - Simple string: { "thinking": "text" } or { "text": "text" } -// - Wrapped object: { "thinking": { "text": "text", "cache_control": {...} } } -// - Gemini-style: { "thought": true, "text": "text" } -// Returns the extracted text string. -// -// Deprecated: Use thinking package for thinking text extraction. -func GetThinkingText(part gjson.Result) string { - // Try direct text field first (Gemini-style) - if text := part.Get("text"); text.Exists() && text.Type == gjson.String { - return text.String() - } - - // Try thinking field - thinkingField := part.Get("thinking") - if !thinkingField.Exists() { - return "" - } - - // thinking is a string - if thinkingField.Type == gjson.String { - return thinkingField.String() - } - - // thinking is an object with inner text/thinking - if thinkingField.IsObject() { - if inner := thinkingField.Get("text"); inner.Exists() && inner.Type == gjson.String { - return inner.String() - } - if inner := thinkingField.Get("thinking"); inner.Exists() && inner.Type == gjson.String { - return inner.String() - } - } - - return "" -} - -// GetThinkingTextFromJSON extracts thinking text from a raw JSON string. -// -// Deprecated: Use thinking package for thinking text extraction. -func GetThinkingTextFromJSON(jsonStr string) string { - return GetThinkingText(gjson.Parse(jsonStr)) -} - -// SanitizeThinkingPart normalizes a thinking part to a canonical form. -// Strips cache_control and other non-essential fields. -// Returns the sanitized part as JSON string. 
-// -// Deprecated: Use thinking package for thinking part sanitization. -func SanitizeThinkingPart(part gjson.Result) string { - // Gemini-style: { thought: true, text, thoughtSignature } - if part.Get("thought").Bool() { - result := `{"thought":true}` - if text := GetThinkingText(part); text != "" { - result, _ = sjson.Set(result, "text", text) - } - if sig := part.Get("thoughtSignature"); sig.Exists() && sig.Type == gjson.String { - result, _ = sjson.Set(result, "thoughtSignature", sig.String()) - } - return result - } - - // Anthropic-style: { type: "thinking", thinking, signature } - if part.Get("type").String() == "thinking" || part.Get("thinking").Exists() { - result := `{"type":"thinking"}` - if text := GetThinkingText(part); text != "" { - result, _ = sjson.Set(result, "thinking", text) - } - if sig := part.Get("signature"); sig.Exists() && sig.Type == gjson.String { - result, _ = sjson.Set(result, "signature", sig.String()) - } - return result - } - - // Not a thinking part, return as-is but strip cache_control - return StripCacheControl(part.Raw) -} - -// StripCacheControl removes cache_control and providerOptions from a JSON object. -// -// Deprecated: Use thinking package for cache control stripping. -func StripCacheControl(jsonStr string) string { - result := jsonStr - result, _ = sjson.Delete(result, "cache_control") - result, _ = sjson.Delete(result, "providerOptions") - return result -}