Mirror of https://github.com/router-for-me/CLIProxyAPI.git (synced 2026-02-02 20:40:52 +08:00)
fix(thinking): align budget effort mapping across translators
Unify thinking budget-to-effort conversion in a shared helper, handle disabled/default thinking cases in translators, adjust zero-budget mapping, and drop the old OpenAI-specific helper with updated tests.
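
For orientation before reading the diff: a minimal, self-contained sketch of the unified budget-to-effort mapping, including the new zero-budget behavior. The real helper is util.ThinkingBudgetToEffort (added in the diff below); the modelLevels table here is a hypothetical stand-in for the model registry consulted via GetModelThinkingLevels, and the gpt-5 levels are illustrative only.

// Illustrative sketch only; not part of the repository.
package main

import "fmt"

// modelLevels stands in for the model registry (util.GetModelThinkingLevels).
var modelLevels = map[string][]string{
	"gpt-5": {"minimal", "low", "medium", "high"},
}

// thinkingBudgetToEffort mirrors the shared helper introduced by this commit.
func thinkingBudgetToEffort(model string, budget int) (string, bool) {
	switch {
	case budget == -1:
		return "auto", true
	case budget < -1:
		return "", false
	case budget == 0:
		// Changed behavior: a zero budget now maps to the model's lowest
		// supported level instead of always "none".
		if levels := modelLevels[model]; len(levels) > 0 {
			return levels[0], true
		}
		return "none", true
	case budget <= 1024:
		return "low", true
	case budget <= 8192:
		return "medium", true
	case budget <= 24576:
		return "high", true
	default:
		// Above the top range, clamp to the model's highest supported level.
		if levels := modelLevels[model]; len(levels) > 0 {
			return levels[len(levels)-1], true
		}
		return "xhigh", true
	}
}

func main() {
	for _, budget := range []int{-1, 0, 512, 4096, 20000, 64000} {
		effort, ok := thinkingBudgetToEffort("gpt-5", budget)
		fmt.Printf("budget=%6d -> effort=%q ok=%v\n", budget, effort, ok)
	}
}
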
@@ -72,13 +72,7 @@ func ApplyReasoningEffortMetadata(payload []byte, metadata map[string]any, model
 	// Fallback: numeric thinking_budget suffix for level-based (OpenAI-style) models.
 	if util.ModelUsesThinkingLevels(baseModel) || allowCompat {
 		if budget, _, _, matched := util.ThinkingFromMetadata(metadata); matched && budget != nil {
-			if effort, ok := util.OpenAIThinkingBudgetToEffort(baseModel, *budget); ok && effort != "" {
-				if *budget == 0 && effort == "none" && util.ModelUsesThinkingLevels(baseModel) {
-					if _, supported := util.NormalizeReasoningEffortLevel(baseModel, effort); !supported {
-						return StripThinkingFields(payload, false)
-					}
-				}
-
+			if effort, ok := util.ThinkingBudgetToEffort(baseModel, *budget); ok && effort != "" {
 				if updated, err := sjson.SetBytes(payload, field, effort); err == nil {
					return updated
 				}

@@ -219,15 +219,20 @@ func ConvertClaudeRequestToCodex(modelName string, inputRawJSON []byte, _ bool)
 	// Convert thinking.budget_tokens to reasoning.effort for level-based models
 	reasoningEffort := "medium" // default
 	if thinking := rootResult.Get("thinking"); thinking.Exists() && thinking.IsObject() {
-		if thinking.Get("type").String() == "enabled" {
+		switch thinking.Get("type").String() {
+		case "enabled":
 			if util.ModelUsesThinkingLevels(modelName) {
 				if budgetTokens := thinking.Get("budget_tokens"); budgetTokens.Exists() {
 					budget := int(budgetTokens.Int())
-					if effort, ok := util.OpenAIThinkingBudgetToEffort(modelName, budget); ok && effort != "" {
+					if effort, ok := util.ThinkingBudgetToEffort(modelName, budget); ok && effort != "" {
 						reasoningEffort = effort
 					}
 				}
 			}
+		case "disabled":
+			if effort, ok := util.ThinkingBudgetToEffort(modelName, 0); ok && effort != "" {
+				reasoningEffort = effort
+			}
 		}
 	}
 	template, _ = sjson.Set(template, "reasoning.effort", reasoningEffort)

@@ -253,7 +253,7 @@ func ConvertGeminiRequestToCodex(modelName string, inputRawJSON []byte, _ bool)
 		if util.ModelUsesThinkingLevels(modelName) {
 			if thinkingBudget := thinkingConfig.Get("thinkingBudget"); thinkingBudget.Exists() {
 				budget := int(thinkingBudget.Int())
-				if effort, ok := util.OpenAIThinkingBudgetToEffort(modelName, budget); ok && effort != "" {
+				if effort, ok := util.ThinkingBudgetToEffort(modelName, budget); ok && effort != "" {
 					reasoningEffort = effort
 				}
 			}

@@ -63,10 +63,22 @@ func ConvertClaudeRequestToOpenAI(modelName string, inputRawJSON []byte, stream
 
 	// Thinking: Convert Claude thinking.budget_tokens to OpenAI reasoning_effort
 	if thinking := root.Get("thinking"); thinking.Exists() && thinking.IsObject() {
-		if thinkingType := thinking.Get("type"); thinkingType.Exists() && thinkingType.String() == "enabled" {
-			if budgetTokens := thinking.Get("budget_tokens"); budgetTokens.Exists() {
-				budget := int(budgetTokens.Int())
-				if effort, ok := util.OpenAIThinkingBudgetToEffort(modelName, budget); ok && effort != "" {
+		if thinkingType := thinking.Get("type"); thinkingType.Exists() {
+			switch thinkingType.String() {
+			case "enabled":
+				if budgetTokens := thinking.Get("budget_tokens"); budgetTokens.Exists() {
+					budget := int(budgetTokens.Int())
+					if effort, ok := util.ThinkingBudgetToEffort(modelName, budget); ok && effort != "" {
+						out, _ = sjson.Set(out, "reasoning_effort", effort)
+					}
+				} else {
+					// No budget_tokens specified, default to "auto" for enabled thinking
+					if effort, ok := util.ThinkingBudgetToEffort(modelName, -1); ok && effort != "" {
+						out, _ = sjson.Set(out, "reasoning_effort", effort)
+					}
+				}
+			case "disabled":
+				if effort, ok := util.ThinkingBudgetToEffort(modelName, 0); ok && effort != "" {
 					out, _ = sjson.Set(out, "reasoning_effort", effort)
 				}
 			}

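A compact sketch of the translator pattern in the hunk above, assuming the tidwall gjson/sjson libraries these translators already use. The budget-to-effort thresholds are inlined here instead of calling util.ThinkingBudgetToEffort, and the zero-budget case is simplified to "none"; translateThinking and its behavior on negative budgets are illustrative assumptions, not repository code.

package main

import (
	"fmt"

	"github.com/tidwall/gjson"
	"github.com/tidwall/sjson"
)

// translateThinking mimics the enabled/disabled switch above: enabled with
// budget_tokens maps through the thresholds, enabled without budget_tokens
// defaults to "auto", and disabled maps to the zero-budget effort.
func translateThinking(claudeJSON string) string {
	out := `{}`
	thinking := gjson.Get(claudeJSON, "thinking")
	if !thinking.Exists() || !thinking.IsObject() {
		return out
	}
	switch thinking.Get("type").String() {
	case "enabled":
		effort := "auto"
		if budgetTokens := thinking.Get("budget_tokens"); budgetTokens.Exists() {
			budget := int(budgetTokens.Int())
			switch {
			case budget < 0:
				// negative budgets: keep "auto" in this simplified sketch
			case budget == 0:
				effort = "none"
			case budget <= 1024:
				effort = "low"
			case budget <= 8192:
				effort = "medium"
			case budget <= 24576:
				effort = "high"
			default:
				effort = "xhigh"
			}
		}
		out, _ = sjson.Set(out, "reasoning_effort", effort)
	case "disabled":
		out, _ = sjson.Set(out, "reasoning_effort", "none")
	}
	return out
}

func main() {
	fmt.Println(translateThinking(`{"thinking":{"type":"enabled","budget_tokens":4096}}`))
	// {"reasoning_effort":"medium"}
	fmt.Println(translateThinking(`{"thinking":{"type":"disabled"}}`))
	// {"reasoning_effort":"none"}
}
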
@@ -83,7 +83,7 @@ func ConvertGeminiRequestToOpenAI(modelName string, inputRawJSON []byte, stream
 	if thinkingConfig := genConfig.Get("thinkingConfig"); thinkingConfig.Exists() && thinkingConfig.IsObject() {
 		if thinkingBudget := thinkingConfig.Get("thinkingBudget"); thinkingBudget.Exists() {
 			budget := int(thinkingBudget.Int())
-			if effort, ok := util.OpenAIThinkingBudgetToEffort(modelName, budget); ok && effort != "" {
+			if effort, ok := util.ThinkingBudgetToEffort(modelName, budget); ok && effort != "" {
 				out, _ = sjson.Set(out, "reasoning_effort", effort)
 			}
 		}

@@ -1,37 +0,0 @@
-package util
-
-// OpenAIThinkingBudgetToEffort maps a numeric thinking budget (tokens)
-// into an OpenAI-style reasoning effort level for level-based models.
-//
-// Ranges:
-// - 0 -> "none"
-// - -1 -> "auto"
-// - 1..1024 -> "low"
-// - 1025..8192 -> "medium"
-// - 8193..24576 -> "high"
-// - 24577.. -> highest supported level for the model (defaults to "xhigh")
-//
-// Negative values other than -1 are treated as unsupported.
-func OpenAIThinkingBudgetToEffort(model string, budget int) (string, bool) {
-	switch {
-	case budget == -1:
-		return "auto", true
-	case budget < -1:
-		return "", false
-	case budget == 0:
-		return "none", true
-	case budget > 0 && budget <= 1024:
-		return "low", true
-	case budget <= 8192:
-		return "medium", true
-	case budget <= 24576:
-		return "high", true
-	case budget > 24576:
-		if levels := GetModelThinkingLevels(model); len(levels) > 0 {
-			return levels[len(levels)-1], true
-		}
-		return "xhigh", true
-	default:
-		return "", false
-	}
-}

@@ -118,3 +118,83 @@ func IsOpenAICompatibilityModel(model string) bool {
 	}
 	return strings.EqualFold(strings.TrimSpace(info.Type), "openai-compatibility")
 }
+
+// ThinkingEffortToBudget maps a reasoning effort level to a numeric thinking budget (tokens),
+// clamping the result to the model's supported range.
+//
+// Mappings (values are normalized to model's supported range):
+// - "none" -> 0
+// - "auto" -> -1
+// - "minimal" -> 512
+// - "low" -> 1024
+// - "medium" -> 8192
+// - "high" -> 24576
+// - "xhigh" -> 32768
+//
+// Returns false when the effort level is empty or unsupported.
+func ThinkingEffortToBudget(model, effort string) (int, bool) {
+	if effort == "" {
+		return 0, false
+	}
+	normalized, ok := NormalizeReasoningEffortLevel(model, effort)
+	if !ok {
+		normalized = strings.ToLower(strings.TrimSpace(effort))
+	}
+	switch normalized {
+	case "none":
+		return 0, true
+	case "auto":
+		return NormalizeThinkingBudget(model, -1), true
+	case "minimal":
+		return NormalizeThinkingBudget(model, 512), true
+	case "low":
+		return NormalizeThinkingBudget(model, 1024), true
+	case "medium":
+		return NormalizeThinkingBudget(model, 8192), true
+	case "high":
+		return NormalizeThinkingBudget(model, 24576), true
+	case "xhigh":
+		return NormalizeThinkingBudget(model, 32768), true
+	default:
+		return 0, false
+	}
+}
+
+// ThinkingBudgetToEffort maps a numeric thinking budget (tokens)
+// to a reasoning effort level for level-based models.
+//
+// Mappings:
+// - 0 -> "none" (or lowest supported level if model doesn't support "none")
+// - -1 -> "auto"
+// - 1..1024 -> "low"
+// - 1025..8192 -> "medium"
+// - 8193..24576 -> "high"
+// - 24577.. -> highest supported level for the model (defaults to "xhigh")
+//
+// Returns false when the budget is unsupported (negative values other than -1).
+func ThinkingBudgetToEffort(model string, budget int) (string, bool) {
+	switch {
+	case budget == -1:
+		return "auto", true
+	case budget < -1:
+		return "", false
+	case budget == 0:
+		if levels := GetModelThinkingLevels(model); len(levels) > 0 {
+			return levels[0], true
+		}
+		return "none", true
+	case budget > 0 && budget <= 1024:
+		return "low", true
+	case budget <= 8192:
+		return "medium", true
+	case budget <= 24576:
+		return "high", true
+	case budget > 24576:
+		if levels := GetModelThinkingLevels(model); len(levels) > 0 {
+			return levels[len(levels)-1], true
+		}
+		return "xhigh", true
+	default:
+		return "", false
+	}
+}

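The inverse helper above goes from effort levels back to token budgets and clamps the result to the model's range. A small standalone sketch of that idea follows; clampBudget is a hypothetical stand-in for util.NormalizeThinkingBudget and the 128..24576 range is purely illustrative.

package main

import "fmt"

// clampBudget stands in for util.NormalizeThinkingBudget: it forces a budget
// into an assumed supported range of 128..24576 tokens.
func clampBudget(budget int) int {
	const minBudget, maxBudget = 128, 24576
	if budget < minBudget {
		return minBudget
	}
	if budget > maxBudget {
		return maxBudget
	}
	return budget
}

// effortToBudget mirrors the mapping table documented on ThinkingEffortToBudget.
func effortToBudget(effort string) (int, bool) {
	base := map[string]int{
		"none": 0, "minimal": 512, "low": 1024,
		"medium": 8192, "high": 24576, "xhigh": 32768,
	}
	b, ok := base[effort]
	if !ok {
		return 0, false
	}
	if effort == "none" {
		return 0, true // "none" passes through without clamping
	}
	return clampBudget(b), true
}

func main() {
	for _, effort := range []string{"minimal", "medium", "xhigh", "turbo"} {
		budget, ok := effortToBudget(effort)
		fmt.Printf("effort=%q -> budget=%d ok=%v\n", effort, budget, ok)
	}
}
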
@@ -201,36 +201,6 @@ func ReasoningEffortFromMetadata(metadata map[string]any) (string, bool) {
 	return "", true
 }
 
-// ThinkingEffortToBudget maps reasoning effort levels to approximate budgets,
-// clamping the result to the model's supported range.
-func ThinkingEffortToBudget(model, effort string) (int, bool) {
-	if effort == "" {
-		return 0, false
-	}
-	normalized, ok := NormalizeReasoningEffortLevel(model, effort)
-	if !ok {
-		normalized = strings.ToLower(strings.TrimSpace(effort))
-	}
-	switch normalized {
-	case "none":
-		return 0, true
-	case "auto":
-		return NormalizeThinkingBudget(model, -1), true
-	case "minimal":
-		return NormalizeThinkingBudget(model, 512), true
-	case "low":
-		return NormalizeThinkingBudget(model, 1024), true
-	case "medium":
-		return NormalizeThinkingBudget(model, 8192), true
-	case "high":
-		return NormalizeThinkingBudget(model, 24576), true
-	case "xhigh":
-		return NormalizeThinkingBudget(model, 32768), true
-	default:
-		return 0, false
-	}
-}
-
 // ResolveOriginalModel returns the original model name stored in metadata (if present),
 // otherwise falls back to the provided model.
 func ResolveOriginalModel(model string, metadata map[string]any) string {

@@ -295,7 +295,7 @@ func TestThinkingConversionsAcrossProtocolsAndModels(t *testing.T) {
 			}
 			// Check numeric budget fallback for allowCompat
 			if budget, _, _, matched := util.ThinkingFromMetadata(metadata); matched && budget != nil {
-				if mapped, okMap := util.OpenAIThinkingBudgetToEffort(normalizedModel, *budget); okMap && mapped != "" {
+				if mapped, okMap := util.ThinkingBudgetToEffort(normalizedModel, *budget); okMap && mapped != "" {
 					return true, mapped, false
 				}
 			}

@@ -308,7 +308,7 @@ func TestThinkingConversionsAcrossProtocolsAndModels(t *testing.T) {
 			effort, ok := util.ReasoningEffortFromMetadata(metadata)
 			if !ok || strings.TrimSpace(effort) == "" {
 				if budget, _, _, matched := util.ThinkingFromMetadata(metadata); matched && budget != nil {
-					if mapped, okMap := util.OpenAIThinkingBudgetToEffort(normalizedModel, *budget); okMap {
+					if mapped, okMap := util.ThinkingBudgetToEffort(normalizedModel, *budget); okMap {
 						effort = mapped
 						ok = true
 					}

@@ -336,7 +336,7 @@ func TestThinkingConversionsAcrossProtocolsAndModels(t *testing.T) {
 				return false, "", true
 			}
 			if budget, _, _, matched := util.ThinkingFromMetadata(metadata); matched && budget != nil {
-				if mapped, okMap := util.OpenAIThinkingBudgetToEffort(normalizedModel, *budget); okMap && mapped != "" {
+				if mapped, okMap := util.ThinkingBudgetToEffort(normalizedModel, *budget); okMap && mapped != "" {
 					mapped = strings.ToLower(strings.TrimSpace(mapped))
 					if normalized, okLevel := util.NormalizeReasoningEffortLevel(normalizedModel, mapped); okLevel {
 						return true, normalized, false

@@ -609,7 +609,7 @@ func TestRawPayloadThinkingConversions(t *testing.T) {
 				return true, normalized, false
 			}
 			if budget, ok := cs.thinkingParam.(int); ok {
-				if mapped, okM := util.OpenAIThinkingBudgetToEffort(model, budget); okM && mapped != "" {
+				if mapped, okM := util.ThinkingBudgetToEffort(model, budget); okM && mapped != "" {
 					return true, mapped, false
 				}
 			}

@@ -625,7 +625,7 @@ func TestRawPayloadThinkingConversions(t *testing.T) {
 				return false, "", true // invalid level
 			}
 			if budget, ok := cs.thinkingParam.(int); ok {
-				if mapped, okM := util.OpenAIThinkingBudgetToEffort(model, budget); okM && mapped != "" {
+				if mapped, okM := util.ThinkingBudgetToEffort(model, budget); okM && mapped != "" {
 					// Check if the mapped effort is valid for this model
 					if _, validLevel := util.NormalizeReasoningEffortLevel(model, mapped); !validLevel {
 						return true, mapped, true // expect validation error

@@ -646,7 +646,7 @@ func TestRawPayloadThinkingConversions(t *testing.T) {
 				return false, "", true
 			}
 			if budget, ok := cs.thinkingParam.(int); ok {
-				if mapped, okM := util.OpenAIThinkingBudgetToEffort(model, budget); okM && mapped != "" {
+				if mapped, okM := util.ThinkingBudgetToEffort(model, budget); okM && mapped != "" {
 					// Check if the mapped effort is valid for this model
 					if _, validLevel := util.NormalizeReasoningEffortLevel(model, mapped); !validLevel {
 						return true, mapped, true // expect validation error

@@ -721,7 +721,7 @@ func TestRawPayloadThinkingConversions(t *testing.T) {
 	}
 }
 
-func TestOpenAIThinkingBudgetToEffortRanges(t *testing.T) {
+func TestThinkingBudgetToEffortRanges(t *testing.T) {
 	cleanup := registerCoreModels(t)
 	defer cleanup()
 

@@ -733,7 +733,7 @@ func TestOpenAIThinkingBudgetToEffortRanges(t *testing.T) {
 		ok     bool
 	}{
 		{name: "dynamic-auto", model: "gpt-5", budget: -1, want: "auto", ok: true},
-		{name: "zero-none", model: "gpt-5", budget: 0, want: "none", ok: true},
+		{name: "zero-none", model: "gpt-5", budget: 0, want: "minimal", ok: true},
 		{name: "low-min", model: "gpt-5", budget: 1, want: "low", ok: true},
 		{name: "low-max", model: "gpt-5", budget: 1024, want: "low", ok: true},
 		{name: "medium-min", model: "gpt-5", budget: 1025, want: "medium", ok: true},

@@ -741,14 +741,14 @@ func TestOpenAIThinkingBudgetToEffortRanges(t *testing.T) {
 		{name: "high-min", model: "gpt-5", budget: 8193, want: "high", ok: true},
 		{name: "high-max", model: "gpt-5", budget: 24576, want: "high", ok: true},
 		{name: "over-max-clamps-to-highest", model: "gpt-5", budget: 64000, want: "high", ok: true},
-		{name: "over-max-xhigh-model", model: "gpt-5.2", budget: 50000, want: "xhigh", ok: true},
+		{name: "over-max-xhigh-model", model: "gpt-5.2", budget: 64000, want: "xhigh", ok: true},
 		{name: "negative-unsupported", model: "gpt-5", budget: -5, want: "", ok: false},
 	}
 
 	for _, cs := range cases {
 		cs := cs
 		t.Run(cs.name, func(t *testing.T) {
-			got, ok := util.OpenAIThinkingBudgetToEffort(cs.model, cs.budget)
+			got, ok := util.ThinkingBudgetToEffort(cs.model, cs.budget)
 			if ok != cs.ok {
 				t.Fatalf("ok mismatch for model=%s budget=%d: expect %v got %v", cs.model, cs.budget, cs.ok, ok)
 			}