Mirror of https://github.com/router-for-me/CLIProxyAPI.git, synced 2026-02-02 20:40:52 +08:00
test(thinking): remove legacy unit and integration tests
@@ -1,144 +0,0 @@
// Package thinking provides unified thinking configuration processing logic.
package thinking

import (
    "testing"

    "github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
    "github.com/tidwall/gjson"
)

// setupTestModels registers test models in the global registry for testing.
// This is required because ApplyThinking now looks up models by name.
func setupTestModels(t *testing.T) func() {
    t.Helper()
    reg := registry.GetGlobalRegistry()

    // Register test models via RegisterClient (the correct API)
    clientID := "test-thinking-client"
    testModels := []*registry.ModelInfo{
        {ID: "test-thinking-model", Thinking: &registry.ThinkingSupport{Min: 1, Max: 10}},
        {ID: "test-no-thinking", Type: "gemini"},
        {ID: "gpt-5.2-test", Thinking: &registry.ThinkingSupport{Min: 128, Max: 32768, Levels: []string{"low", "medium", "high"}}},
    }

    reg.RegisterClient(clientID, "test", testModels)

    // Return cleanup function
    return func() {
        reg.UnregisterClient(clientID)
    }
}

func TestApplyThinkingPassthrough(t *testing.T) {
    cleanup := setupTestModels(t)
    defer cleanup()

    tests := []struct {
        name     string
        body     string
        model    string
        provider string
    }{
        {"unknown provider", `{"a":1}`, "test-thinking-model", "unknown"},
        {"unknown model", `{"a":1}`, "nonexistent-model", "gemini"},
        {"nil thinking support", `{"a":1}`, "test-no-thinking", "gemini"},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := ApplyThinking([]byte(tt.body), tt.model, tt.provider)
            if err != nil {
                t.Fatalf("ApplyThinking() error = %v", err)
            }
            if string(got) != tt.body {
                t.Fatalf("ApplyThinking() = %s, want %s", string(got), tt.body)
            }
        })
    }
}

func TestApplyThinkingValidationError(t *testing.T) {
    cleanup := setupTestModels(t)
    defer cleanup()

    tests := []struct {
        name     string
        body     string
        model    string
        provider string
    }{
        {"unsupported level", `{"reasoning_effort":"ultra"}`, "gpt-5.2-test", "openai"},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := ApplyThinking([]byte(tt.body), tt.model, tt.provider)
            if err == nil {
                t.Fatalf("ApplyThinking() error = nil, want error")
            }
            // On validation error, ApplyThinking returns original body (defensive programming)
            if string(got) != tt.body {
                t.Fatalf("ApplyThinking() body = %s, want original body %s", string(got), tt.body)
            }
        })
    }
}

func TestApplyThinkingSuffixPriority(t *testing.T) {
    cleanup := setupTestModels(t)
    defer cleanup()

    // Register a model that supports thinking with budget
    reg := registry.GetGlobalRegistry()
    suffixClientID := "test-suffix-client"
    testModels := []*registry.ModelInfo{
        {
            ID:       "gemini-2.5-pro-suffix-test",
            Thinking: &registry.ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: true},
        },
    }
    reg.RegisterClient(suffixClientID, "gemini", testModels)
    defer reg.UnregisterClient(suffixClientID)

    tests := []struct {
        name          string
        body          string
        model         string
        provider      string
        checkPath     string
        expectedValue int
    }{
        {
            "suffix overrides body config",
            `{"generationConfig":{"thinkingConfig":{"thinkingBudget":1000}}}`,
            "gemini-2.5-pro-suffix-test(8192)",
            "gemini",
            "generationConfig.thinkingConfig.thinkingBudget",
            8192,
        },
        {
            "suffix none disables thinking",
            `{"generationConfig":{"thinkingConfig":{"thinkingBudget":1000}}}`,
            "gemini-2.5-pro-suffix-test(none)",
            "gemini",
            "generationConfig.thinkingConfig.thinkingBudget",
            0,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := ApplyThinking([]byte(tt.body), tt.model, tt.provider)
            if err != nil {
                t.Fatalf("ApplyThinking() error = %v", err)
            }

            // Use gjson to check the value
            result := int(gjson.GetBytes(got, tt.checkPath).Int())
            if result != tt.expectedValue {
                t.Fatalf("ApplyThinking() %s = %v, want %v", tt.checkPath, result, tt.expectedValue)
            }
        })
    }
}
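The suffix-priority cases above rely on ApplyThinking parsing a trailing "(…)" on the model name before it ever looks at the request body. A minimal usage sketch written against this same package; the model ID and budget values are illustrative copies of the test data, not real registrations:

// Sketch: the suffix on the model name wins over any budget already in the body.
// "(none)" would force the budget to 0 instead of 8192.
func exampleSuffixOverride() (int64, error) {
    body := []byte(`{"generationConfig":{"thinkingConfig":{"thinkingBudget":1000}}}`)
    out, err := ApplyThinking(body, "gemini-2.5-pro-suffix-test(8192)", "gemini")
    if err != nil {
        return 0, err
    }
    // Reads back 8192: the suffix overrode the body's 1000.
    return gjson.GetBytes(out, "generationConfig.thinkingConfig.thinkingBudget").Int(), nil
}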
@@ -1,501 +0,0 @@
// Package thinking_test provides external tests for the thinking package.
package thinking_test

import (
    "testing"

    "github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
    "github.com/router-for-me/CLIProxyAPI/v6/internal/thinking"
    "github.com/tidwall/gjson"

    _ "github.com/router-for-me/CLIProxyAPI/v6/internal/thinking/provider/claude"
    _ "github.com/router-for-me/CLIProxyAPI/v6/internal/thinking/provider/gemini"
    _ "github.com/router-for-me/CLIProxyAPI/v6/internal/thinking/provider/geminicli"
    _ "github.com/router-for-me/CLIProxyAPI/v6/internal/thinking/provider/iflow"
    _ "github.com/router-for-me/CLIProxyAPI/v6/internal/thinking/provider/openai"
)

// registerTestModels sets up test models in the registry and returns a cleanup function.
func registerTestModels(t *testing.T) func() {
    t.Helper()
    reg := registry.GetGlobalRegistry()

    testModels := []*registry.ModelInfo{
        geminiBudgetModel(),
        geminiLevelModel(),
        claudeBudgetModel(),
        openAILevelModel(),
        iFlowModel(),
        {ID: "claude-3"},
        {ID: "gemini-2.5-pro-strip"},
        {ID: "glm-4.6-strip"},
    }

    clientID := "test-thinking-models"
    reg.RegisterClient(clientID, "test", testModels)

    return func() {
        reg.UnregisterClient(clientID)
    }
}

// TestApplyThinking tests the main ApplyThinking entry point.
//
// ApplyThinking is the unified entry point for applying thinking configuration.
// It routes to the appropriate provider-specific applier based on model.
//
// Depends on: Epic 10 Story 10-2 (apply-thinking main entry)
func TestApplyThinking(t *testing.T) {
    cleanup := registerTestModels(t)
    defer cleanup()

    tests := []struct {
        name     string
        body     string
        model    string
        provider string
        check    string
    }{
        {"gemini budget", `{"generationConfig":{"thinkingConfig":{"thinkingBudget":8192}}}`, "gemini-2.5-pro-test", "gemini", "geminiBudget"},
        {"gemini level", `{"generationConfig":{"thinkingConfig":{"thinkingLevel":"high"}}}`, "gemini-3-pro-preview-test", "gemini", "geminiLevel"},
        {"gemini-cli budget", `{"request":{"generationConfig":{"thinkingConfig":{"thinkingBudget":8192}}}}`, "gemini-2.5-pro-test", "gemini-cli", "geminiCliBudget"},
        {"antigravity budget", `{"request":{"generationConfig":{"thinkingConfig":{"thinkingBudget":8192}}}}`, "gemini-2.5-pro-test", "antigravity", "geminiCliBudget"},
        {"claude budget", `{"thinking":{"budget_tokens":16384}}`, "claude-sonnet-4-5-test", "claude", "claudeBudget"},
        {"claude enabled type auto", `{"thinking":{"type":"enabled"}}`, "claude-sonnet-4-5-test", "claude", "claudeAuto"},
        {"openai level", `{"reasoning_effort":"high"}`, "gpt-5.2-test", "openai", "openaiLevel"},
        {"iflow enable", `{"chat_template_kwargs":{"enable_thinking":true}}`, "glm-4.6-test", "iflow", "iflowEnable"},
        {"unknown provider passthrough", `{"a":1}`, "gemini-2.5-pro-test", "unknown", "passthrough"},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := thinking.ApplyThinking([]byte(tt.body), tt.model, tt.provider)
            if err != nil {
                t.Fatalf("ApplyThinking() error = %v", err)
            }
            assertApplyThinkingCheck(t, tt.check, tt.body, got)
        })
    }
}

func TestApplyThinkingErrors(t *testing.T) {
    cleanup := registerTestModels(t)
    defer cleanup()

    tests := []struct {
        name     string
        body     string
        model    string
        provider string
    }{
        {"unsupported level openai", `{"reasoning_effort":"ultra"}`, "gpt-5.2-test", "openai"},
        {"unsupported level gemini", `{"generationConfig":{"thinkingConfig":{"thinkingLevel":"ultra"}}}`, "gemini-3-pro-preview-test", "gemini"},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := thinking.ApplyThinking([]byte(tt.body), tt.model, tt.provider)
            if err == nil {
                t.Fatalf("ApplyThinking() error = nil, want error")
            }
            // On validation error, ApplyThinking returns original body (defensive programming)
            if string(got) != tt.body {
                t.Fatalf("ApplyThinking() body = %s, want original body %s", string(got), tt.body)
            }
        })
    }
}

func TestApplyThinkingStripOnUnsupportedModel(t *testing.T) {
    cleanup := registerTestModels(t)
    defer cleanup()

    tests := []struct {
        name      string
        body      string
        model     string
        provider  string
        stripped  []string
        preserved []string
    }{
        {"claude strip", `{"thinking":{"budget_tokens":8192},"model":"claude-3"}`, "claude-3", "claude", []string{"thinking"}, []string{"model"}},
        {"gemini strip", `{"generationConfig":{"thinkingConfig":{"thinkingBudget":8192},"temperature":0.7}}`, "gemini-2.5-pro-strip", "gemini", []string{"generationConfig.thinkingConfig"}, []string{"generationConfig.temperature"}},
        {"iflow strip", `{"chat_template_kwargs":{"enable_thinking":true,"clear_thinking":false,"other":"value"}}`, "glm-4.6-strip", "iflow", []string{"chat_template_kwargs.enable_thinking", "chat_template_kwargs.clear_thinking"}, []string{"chat_template_kwargs.other"}},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := thinking.ApplyThinking([]byte(tt.body), tt.model, tt.provider)
            if err != nil {
                t.Fatalf("ApplyThinking() error = %v", err)
            }

            for _, path := range tt.stripped {
                if gjson.GetBytes(got, path).Exists() {
                    t.Fatalf("expected %s to be stripped, got %s", path, string(got))
                }
            }
            for _, path := range tt.preserved {
                if !gjson.GetBytes(got, path).Exists() {
                    t.Fatalf("expected %s to be preserved, got %s", path, string(got))
                }
            }
        })
    }
}

func TestIsUserDefinedModel(t *testing.T) {
    tests := []struct {
        name      string
        modelInfo *registry.ModelInfo
        want      bool
    }{
        {"nil modelInfo", nil, true},
        {"not user-defined no flag", &registry.ModelInfo{ID: "test"}, false},
        {"not user-defined with type", &registry.ModelInfo{ID: "test", Type: "openai"}, false},
        {"user-defined with flag", &registry.ModelInfo{ID: "test", Type: "openai", UserDefined: true}, true},
        {"user-defined flag only", &registry.ModelInfo{ID: "test", UserDefined: true}, true},
        {"has thinking not user-defined", &registry.ModelInfo{ID: "test", Type: "openai", Thinking: &registry.ThinkingSupport{Min: 1024}}, false},
        {"has thinking with user-defined flag", &registry.ModelInfo{ID: "test", Type: "openai", Thinking: &registry.ThinkingSupport{Min: 1024}, UserDefined: true}, true},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if got := thinking.IsUserDefinedModel(tt.modelInfo); got != tt.want {
                t.Fatalf("IsUserDefinedModel() = %v, want %v", got, tt.want)
            }
        })
    }
}

func TestApplyThinking_UserDefinedModel(t *testing.T) {
    // Register user-defined test models
    reg := registry.GetGlobalRegistry()
    userDefinedModels := []*registry.ModelInfo{
        {ID: "custom-gpt", Type: "openai", UserDefined: true},
        {ID: "or-claude", Type: "openai", UserDefined: true},
        {ID: "custom-gemini", Type: "gemini", UserDefined: true},
        {ID: "vertex-flash", Type: "gemini", UserDefined: true},
        {ID: "cli-gemini", Type: "gemini", UserDefined: true},
        {ID: "ag-gemini", Type: "gemini", UserDefined: true},
        {ID: "custom-claude", Type: "claude", UserDefined: true},
        {ID: "unknown"},
    }
    clientID := "test-user-defined-models"
    reg.RegisterClient(clientID, "test", userDefinedModels)
    defer reg.UnregisterClient(clientID)

    tests := []struct {
        name     string
        body     string
        model    string
        provider string
        check    string
    }{
        {
            "openai user-defined with reasoning_effort",
            `{"model":"custom-gpt","reasoning_effort":"high"}`,
            "custom-gpt",
            "openai",
            "openaiCompatible",
        },
        {
            "openai-compatibility model with reasoning_effort",
            `{"model":"or-claude","reasoning_effort":"high"}`,
            "or-claude",
            "openai",
            "openaiCompatible",
        },
        {
            "gemini user-defined with thinkingBudget",
            `{"model":"custom-gemini","generationConfig":{"thinkingConfig":{"thinkingBudget":8192}}}`,
            "custom-gemini",
            "gemini",
            "geminiCompatibleBudget",
        },
        {
            "vertex user-defined with thinkingBudget",
            `{"model":"vertex-flash","generationConfig":{"thinkingConfig":{"thinkingBudget":16384}}}`,
            "vertex-flash",
            "gemini",
            "geminiCompatibleBudget16384",
        },
        {
            "gemini-cli user-defined with thinkingBudget",
            `{"model":"cli-gemini","request":{"generationConfig":{"thinkingConfig":{"thinkingBudget":8192}}}}`,
            "cli-gemini",
            "gemini-cli",
            "geminiCliCompatibleBudget",
        },
        {
            "antigravity user-defined with thinkingBudget",
            `{"model":"ag-gemini","request":{"generationConfig":{"thinkingConfig":{"thinkingBudget":8192}}}}`,
            "ag-gemini",
            "antigravity",
            "geminiCliCompatibleBudget",
        },
        {
            "claude user-defined with thinking",
            `{"model":"custom-claude","thinking":{"type":"enabled","budget_tokens":8192}}`,
            "custom-claude",
            "claude",
            "claudeCompatibleBudget",
        },
        {
            "user-defined model no config",
            `{"model":"custom-gpt","messages":[]}`,
            "custom-gpt",
            "openai",
            "passthrough",
        },
        {
            "non-user-defined model strips config",
            `{"model":"unknown","reasoning_effort":"high"}`,
            "unknown",
            "openai",
            "stripReasoning",
        },
        {
            "user-defined model unknown provider",
            `{"model":"custom-gpt","reasoning_effort":"high"}`,
            "custom-gpt",
            "unknown",
            "passthrough",
        },
        {
            "unknown model passthrough",
            `{"model":"nonexistent","reasoning_effort":"high"}`,
            "nonexistent",
            "openai",
            "passthrough",
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := thinking.ApplyThinking([]byte(tt.body), tt.model, tt.provider)
            if err != nil {
                t.Fatalf("ApplyThinking() error = %v", err)
            }
            assertCompatibleModelCheck(t, tt.check, tt.body, got)
        })
    }
}

// TestApplyThinkingSuffixPriority tests suffix priority over body config.
func TestApplyThinkingSuffixPriority(t *testing.T) {
    // Register test model
    reg := registry.GetGlobalRegistry()
    testModels := []*registry.ModelInfo{
        {
            ID:       "gemini-suffix-test",
            Thinking: &registry.ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: true},
        },
    }
    clientID := "test-suffix-priority"
    reg.RegisterClient(clientID, "gemini", testModels)
    defer reg.UnregisterClient(clientID)

    tests := []struct {
        name          string
        body          string
        model         string
        provider      string
        checkPath     string
        expectedValue int
    }{
        {
            "suffix overrides body budget",
            `{"generationConfig":{"thinkingConfig":{"thinkingBudget":1000}}}`,
            "gemini-suffix-test(8192)",
            "gemini",
            "generationConfig.thinkingConfig.thinkingBudget",
            8192,
        },
        {
            "suffix none sets budget to 0",
            `{"generationConfig":{"thinkingConfig":{"thinkingBudget":1000}}}`,
            "gemini-suffix-test(none)",
            "gemini",
            "generationConfig.thinkingConfig.thinkingBudget",
            0,
        },
        {
            "no suffix uses body config",
            `{"generationConfig":{"thinkingConfig":{"thinkingBudget":5000}}}`,
            "gemini-suffix-test",
            "gemini",
            "generationConfig.thinkingConfig.thinkingBudget",
            5000,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := thinking.ApplyThinking([]byte(tt.body), tt.model, tt.provider)
            if err != nil {
                t.Fatalf("ApplyThinking() error = %v", err)
            }

            result := int(gjson.GetBytes(got, tt.checkPath).Int())
            if result != tt.expectedValue {
                t.Fatalf("ApplyThinking() %s = %v, want %v\nbody: %s", tt.checkPath, result, tt.expectedValue, string(got))
            }
        })
    }
}

func assertApplyThinkingCheck(t *testing.T, checkName, input string, body []byte) {
    t.Helper()

    switch checkName {
    case "geminiBudget":
        assertJSONInt(t, body, "generationConfig.thinkingConfig.thinkingBudget", 8192)
        assertJSONBool(t, body, "generationConfig.thinkingConfig.includeThoughts", true)
    case "geminiLevel":
        assertJSONString(t, body, "generationConfig.thinkingConfig.thinkingLevel", "high")
        assertJSONBool(t, body, "generationConfig.thinkingConfig.includeThoughts", true)
    case "geminiCliBudget":
        assertJSONInt(t, body, "request.generationConfig.thinkingConfig.thinkingBudget", 8192)
        assertJSONBool(t, body, "request.generationConfig.thinkingConfig.includeThoughts", true)
    case "claudeBudget":
        assertJSONString(t, body, "thinking.type", "enabled")
        assertJSONInt(t, body, "thinking.budget_tokens", 16384)
    case "claudeAuto":
        // When type=enabled without budget, auto mode is applied using mid-range budget
        assertJSONString(t, body, "thinking.type", "enabled")
        // Budget should be mid-range: (1024 + 128000) / 2 = 64512
        assertJSONInt(t, body, "thinking.budget_tokens", 64512)
    case "openaiLevel":
        assertJSONString(t, body, "reasoning_effort", "high")
    case "iflowEnable":
        assertJSONBool(t, body, "chat_template_kwargs.enable_thinking", true)
        assertJSONBool(t, body, "chat_template_kwargs.clear_thinking", false)
    case "passthrough":
        if string(body) != input {
            t.Fatalf("ApplyThinking() = %s, want %s", string(body), input)
        }
    default:
        t.Fatalf("unknown check: %s", checkName)
    }
}

func assertCompatibleModelCheck(t *testing.T, checkName, input string, body []byte) {
    t.Helper()

    switch checkName {
    case "openaiCompatible":
        assertJSONString(t, body, "reasoning_effort", "high")
    case "geminiCompatibleBudget":
        assertJSONInt(t, body, "generationConfig.thinkingConfig.thinkingBudget", 8192)
        assertJSONBool(t, body, "generationConfig.thinkingConfig.includeThoughts", true)
    case "geminiCompatibleBudget16384":
        assertJSONInt(t, body, "generationConfig.thinkingConfig.thinkingBudget", 16384)
        assertJSONBool(t, body, "generationConfig.thinkingConfig.includeThoughts", true)
    case "geminiCliCompatibleBudget":
        assertJSONInt(t, body, "request.generationConfig.thinkingConfig.thinkingBudget", 8192)
        assertJSONBool(t, body, "request.generationConfig.thinkingConfig.includeThoughts", true)
    case "claudeCompatibleBudget":
        assertJSONString(t, body, "thinking.type", "enabled")
        assertJSONInt(t, body, "thinking.budget_tokens", 8192)
    case "stripReasoning":
        if gjson.GetBytes(body, "reasoning_effort").Exists() {
            t.Fatalf("expected reasoning_effort to be stripped, got %s", string(body))
        }
    case "passthrough":
        if string(body) != input {
            t.Fatalf("ApplyThinking() = %s, want %s", string(body), input)
        }
    default:
        t.Fatalf("unknown check: %s", checkName)
    }
}

func assertJSONString(t *testing.T, body []byte, path, want string) {
    t.Helper()
    value := gjson.GetBytes(body, path)
    if !value.Exists() {
        t.Fatalf("expected %s to exist", path)
    }
    if value.String() != want {
        t.Fatalf("value at %s = %s, want %s", path, value.String(), want)
    }
}

func assertJSONInt(t *testing.T, body []byte, path string, want int) {
    t.Helper()
    value := gjson.GetBytes(body, path)
    if !value.Exists() {
        t.Fatalf("expected %s to exist", path)
    }
    if int(value.Int()) != want {
        t.Fatalf("value at %s = %d, want %d", path, value.Int(), want)
    }
}

func assertJSONBool(t *testing.T, body []byte, path string, want bool) {
    t.Helper()
    value := gjson.GetBytes(body, path)
    if !value.Exists() {
        t.Fatalf("expected %s to exist", path)
    }
    if value.Bool() != want {
        t.Fatalf("value at %s = %t, want %t", path, value.Bool(), want)
    }
}

func geminiBudgetModel() *registry.ModelInfo {
    return &registry.ModelInfo{
        ID: "gemini-2.5-pro-test",
        Thinking: &registry.ThinkingSupport{
            Min:         128,
            Max:         32768,
            ZeroAllowed: true,
        },
    }
}

func geminiLevelModel() *registry.ModelInfo {
    return &registry.ModelInfo{
        ID: "gemini-3-pro-preview-test",
        Thinking: &registry.ThinkingSupport{
            Min:    128,
            Max:    32768,
            Levels: []string{"minimal", "low", "medium", "high"},
        },
    }
}

func claudeBudgetModel() *registry.ModelInfo {
    return &registry.ModelInfo{
        ID: "claude-sonnet-4-5-test",
        Thinking: &registry.ThinkingSupport{
            Min:         1024,
            Max:         128000,
            ZeroAllowed: true,
        },
    }
}

func openAILevelModel() *registry.ModelInfo {
    return &registry.ModelInfo{
        ID: "gpt-5.2-test",
        Thinking: &registry.ThinkingSupport{
            Min:         128,
            Max:         32768,
            ZeroAllowed: true,
            Levels:      []string{"low", "medium", "high"},
        },
    }
}

func iFlowModel() *registry.ModelInfo {
    return &registry.ModelInfo{
        ID: "glm-4.6-test",
        Thinking: &registry.ThinkingSupport{
            Min:         1,
            Max:         10,
            ZeroAllowed: true,
        },
    }
}
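One expectation in the claudeAuto branch above is easy to miss: the asserted budget of 64512 is simply the midpoint of the Claude test model's allowed range. A one-line sketch of that arithmetic (the helper name is hypothetical; the real computation lives inside the claude applier):

// Midpoint used when Claude thinking is "enabled" with no explicit budget:
// (1024 + 128000) / 2 = 64512, matching the claudeAuto assertion above.
func midRangeBudget(min, max int) int {
    return (min + max) / 2
}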
@@ -1,277 +0,0 @@
// Package thinking provides unified thinking configuration processing logic.
package thinking

import (
    "testing"

    "github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
)

// TestConvertLevelToBudget tests the ConvertLevelToBudget function.
//
// ConvertLevelToBudget converts a thinking level to a budget value.
// This is a semantic conversion - it does NOT apply clamping.
//
// Level → Budget mapping:
//   - none → 0
//   - auto → -1
//   - minimal → 512
//   - low → 1024
//   - medium → 8192
//   - high → 24576
//   - xhigh → 32768
func TestConvertLevelToBudget(t *testing.T) {
    tests := []struct {
        name   string
        level  string
        want   int
        wantOK bool
    }{
        // Standard levels
        {"none", "none", 0, true},
        {"auto", "auto", -1, true},
        {"minimal", "minimal", 512, true},
        {"low", "low", 1024, true},
        {"medium", "medium", 8192, true},
        {"high", "high", 24576, true},
        {"xhigh", "xhigh", 32768, true},

        // Case insensitive
        {"case insensitive HIGH", "HIGH", 24576, true},
        {"case insensitive High", "High", 24576, true},
        {"case insensitive NONE", "NONE", 0, true},
        {"case insensitive Auto", "Auto", -1, true},

        // Invalid levels
        {"invalid ultra", "ultra", 0, false},
        {"invalid maximum", "maximum", 0, false},
        {"empty string", "", 0, false},
        {"whitespace", " ", 0, false},
        {"numeric string", "1000", 0, false},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            budget, ok := ConvertLevelToBudget(tt.level)
            if ok != tt.wantOK {
                t.Errorf("ConvertLevelToBudget(%q) ok = %v, want %v", tt.level, ok, tt.wantOK)
            }
            if budget != tt.want {
                t.Errorf("ConvertLevelToBudget(%q) = %d, want %d", tt.level, budget, tt.want)
            }
        })
    }
}

// TestConvertBudgetToLevel tests the ConvertBudgetToLevel function.
//
// ConvertBudgetToLevel converts a budget value to the nearest level.
// Uses threshold-based mapping for range conversion.
//
// Budget → Level thresholds:
//   - -1 → auto
//   - 0 → none
//   - 1-512 → minimal
//   - 513-1024 → low
//   - 1025-8192 → medium
//   - 8193-24576 → high
//   - 24577+ → xhigh
//
// Depends on: Epic 4 Story 4-2 (budget to level conversion)
func TestConvertBudgetToLevel(t *testing.T) {
    tests := []struct {
        name   string
        budget int
        want   string
        wantOK bool
    }{
        // Special values
        {"auto", -1, "auto", true},
        {"none", 0, "none", true},

        // Invalid negative values
        {"invalid negative -2", -2, "", false},
        {"invalid negative -100", -100, "", false},
        {"invalid negative extreme", -999999, "", false},

        // Minimal range (1-512)
        {"minimal min", 1, "minimal", true},
        {"minimal mid", 256, "minimal", true},
        {"minimal max", 512, "minimal", true},

        // Low range (513-1024)
        {"low start", 513, "low", true},
        {"low boundary", 1024, "low", true},

        // Medium range (1025-8192)
        {"medium start", 1025, "medium", true},
        {"medium mid", 4096, "medium", true},
        {"medium boundary", 8192, "medium", true},

        // High range (8193-24576)
        {"high start", 8193, "high", true},
        {"high mid", 16384, "high", true},
        {"high boundary", 24576, "high", true},

        // XHigh range (24577+)
        {"xhigh start", 24577, "xhigh", true},
        {"xhigh mid", 32768, "xhigh", true},
        {"xhigh large", 100000, "xhigh", true},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            level, ok := ConvertBudgetToLevel(tt.budget)
            if ok != tt.wantOK {
                t.Errorf("ConvertBudgetToLevel(%d) ok = %v, want %v", tt.budget, ok, tt.wantOK)
            }
            if level != tt.want {
                t.Errorf("ConvertBudgetToLevel(%d) = %q, want %q", tt.budget, level, tt.want)
            }
        })
    }
}

// TestConvertMixedFormat tests mixed format handling.
//
// Tests scenarios where both level and budget might be present,
// or where format conversion requires special handling.
//
// Depends on: Epic 4 Story 4-3 (mixed format handling)
func TestConvertMixedFormat(t *testing.T) {
    tests := []struct {
        name        string
        inputBudget int
        inputLevel  string
        wantMode    ThinkingMode
        wantBudget  int
        wantLevel   ThinkingLevel
    }{
        // Level takes precedence when both present
        {"level and budget - level wins", 8192, "high", ModeLevel, 0, LevelHigh},
        {"level and zero budget", 0, "high", ModeLevel, 0, LevelHigh},

        // Budget only
        {"budget only", 16384, "", ModeBudget, 16384, ""},

        // Level only
        {"level only", 0, "medium", ModeLevel, 0, LevelMedium},

        // Neither (default)
        {"neither", 0, "", ModeNone, 0, ""},

        // Special values
        {"auto level", 0, "auto", ModeAuto, -1, LevelAuto},
        {"none level", 0, "none", ModeNone, 0, LevelNone},
        {"auto budget", -1, "", ModeAuto, -1, ""},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := normalizeMixedConfig(tt.inputBudget, tt.inputLevel)
            if got.Mode != tt.wantMode {
                t.Errorf("normalizeMixedConfig(%d, %q) Mode = %v, want %v", tt.inputBudget, tt.inputLevel, got.Mode, tt.wantMode)
            }
            if got.Budget != tt.wantBudget {
                t.Errorf("normalizeMixedConfig(%d, %q) Budget = %d, want %d", tt.inputBudget, tt.inputLevel, got.Budget, tt.wantBudget)
            }
            if got.Level != tt.wantLevel {
                t.Errorf("normalizeMixedConfig(%d, %q) Level = %q, want %q", tt.inputBudget, tt.inputLevel, got.Level, tt.wantLevel)
            }
        })
    }
}

// TestNormalizeForModel tests model-aware format normalization.
func TestNormalizeForModel(t *testing.T) {
    budgetOnlyModel := &registry.ModelInfo{
        Thinking: &registry.ThinkingSupport{
            Min: 1024,
            Max: 128000,
        },
    }
    levelOnlyModel := &registry.ModelInfo{
        Thinking: &registry.ThinkingSupport{
            Levels: []string{"low", "medium", "high"},
        },
    }
    hybridModel := &registry.ModelInfo{
        Thinking: &registry.ThinkingSupport{
            Min:    128,
            Max:    32768,
            Levels: []string{"minimal", "low", "medium", "high"},
        },
    }

    tests := []struct {
        name    string
        config  ThinkingConfig
        model   *registry.ModelInfo
        want    ThinkingConfig
        wantErr bool
    }{
        {"budget-only keeps budget", ThinkingConfig{Mode: ModeBudget, Budget: 8192}, budgetOnlyModel, ThinkingConfig{Mode: ModeBudget, Budget: 8192}, false},
        {"budget-only converts level", ThinkingConfig{Mode: ModeLevel, Level: LevelHigh}, budgetOnlyModel, ThinkingConfig{Mode: ModeBudget, Budget: 24576}, false},
        {"level-only converts budget", ThinkingConfig{Mode: ModeBudget, Budget: 8192}, levelOnlyModel, ThinkingConfig{Mode: ModeLevel, Level: LevelMedium}, false},
        {"level-only keeps level", ThinkingConfig{Mode: ModeLevel, Level: LevelLow}, levelOnlyModel, ThinkingConfig{Mode: ModeLevel, Level: LevelLow}, false},
        {"hybrid keeps budget", ThinkingConfig{Mode: ModeBudget, Budget: 16384}, hybridModel, ThinkingConfig{Mode: ModeBudget, Budget: 16384}, false},
        {"hybrid keeps level", ThinkingConfig{Mode: ModeLevel, Level: LevelMinimal}, hybridModel, ThinkingConfig{Mode: ModeLevel, Level: LevelMinimal}, false},
        {"auto passthrough", ThinkingConfig{Mode: ModeAuto, Budget: -1}, levelOnlyModel, ThinkingConfig{Mode: ModeAuto, Budget: -1}, false},
        {"none passthrough", ThinkingConfig{Mode: ModeNone, Budget: 0}, budgetOnlyModel, ThinkingConfig{Mode: ModeNone, Budget: 0}, false},
        {"invalid level", ThinkingConfig{Mode: ModeLevel, Level: "ultra"}, budgetOnlyModel, ThinkingConfig{}, true},
        {"invalid budget", ThinkingConfig{Mode: ModeBudget, Budget: -2}, levelOnlyModel, ThinkingConfig{}, true},
        {"nil modelInfo passthrough budget", ThinkingConfig{Mode: ModeBudget, Budget: 8192}, nil, ThinkingConfig{Mode: ModeBudget, Budget: 8192}, false},
        {"nil modelInfo passthrough level", ThinkingConfig{Mode: ModeLevel, Level: LevelHigh}, nil, ThinkingConfig{Mode: ModeLevel, Level: LevelHigh}, false},
        {"nil thinking degrades to none", ThinkingConfig{Mode: ModeBudget, Budget: 4096}, &registry.ModelInfo{}, ThinkingConfig{Mode: ModeNone, Budget: 0}, false},
        {"nil thinking level degrades to none", ThinkingConfig{Mode: ModeLevel, Level: LevelHigh}, &registry.ModelInfo{}, ThinkingConfig{Mode: ModeNone, Budget: 0}, false},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := NormalizeForModel(&tt.config, tt.model)
            if (err != nil) != tt.wantErr {
                t.Fatalf("NormalizeForModel(%+v) error = %v, wantErr %v", tt.config, err, tt.wantErr)
            }
            if tt.wantErr {
                return
            }
            if got == nil {
                t.Fatalf("NormalizeForModel(%+v) returned nil config", tt.config)
            }
            if got.Mode != tt.want.Mode {
                t.Errorf("NormalizeForModel(%+v) Mode = %v, want %v", tt.config, got.Mode, tt.want.Mode)
            }
            if got.Budget != tt.want.Budget {
                t.Errorf("NormalizeForModel(%+v) Budget = %d, want %d", tt.config, got.Budget, tt.want.Budget)
            }
            if got.Level != tt.want.Level {
                t.Errorf("NormalizeForModel(%+v) Level = %q, want %q", tt.config, got.Level, tt.want.Level)
            }
        })
    }
}

// TestLevelToBudgetRoundTrip tests level → budget → level round trip.
//
// Verifies that converting level to budget and back produces consistent results.
//
// Depends on: Epic 4 Story 4-1, 4-2
func TestLevelToBudgetRoundTrip(t *testing.T) {
    levels := []string{"none", "auto", "minimal", "low", "medium", "high", "xhigh"}

    for _, level := range levels {
        t.Run(level, func(t *testing.T) {
            budget, ok := ConvertLevelToBudget(level)
            if !ok {
                t.Fatalf("ConvertLevelToBudget(%q) returned ok=false", level)
            }
            resultLevel, ok := ConvertBudgetToLevel(budget)
            if !ok {
                t.Fatalf("ConvertBudgetToLevel(%d) returned ok=false", budget)
            }
            if resultLevel != level {
                t.Errorf("round trip: %q → %d → %q, want %q", level, budget, resultLevel, level)
            }
        })
    }
}
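The two mappings documented above are easier to read side by side. A compact sketch of the tables these tests imply (values copied from the doc comments; the names levelBudgets/budgetLevel are hypothetical, and the package's own ConvertLevelToBudget/ConvertBudgetToLevel remain the authoritative implementations):

// Level → budget, as exercised by TestConvertLevelToBudget.
var levelBudgets = map[string]int{
    "none": 0, "auto": -1, "minimal": 512, "low": 1024,
    "medium": 8192, "high": 24576, "xhigh": 32768,
}

// Budget → level uses upper-bound thresholds, as exercised by TestConvertBudgetToLevel.
func budgetLevel(budget int) (string, bool) {
    switch {
    case budget < -1:
        return "", false // anything below -1 is invalid
    case budget == -1:
        return "auto", true
    case budget == 0:
        return "none", true
    case budget <= 512:
        return "minimal", true
    case budget <= 1024:
        return "low", true
    case budget <= 8192:
        return "medium", true
    case budget <= 24576:
        return "high", true
    default:
        return "xhigh", true
    }
}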
@@ -1,34 +0,0 @@
// Package thinking provides unified thinking configuration processing logic.
package thinking

import "testing"

// TestThinkingErrorError tests the Error() method of ThinkingError.
//
// Error() returns the message directly without code prefix.
// Use Code field for programmatic error handling.
func TestThinkingErrorError(t *testing.T) {
    tests := []struct {
        name     string
        err      *ThinkingError
        wantMsg  string
        wantCode ErrorCode
    }{
        {"invalid suffix format", NewThinkingError(ErrInvalidSuffix, "invalid suffix format: model(abc"), "invalid suffix format: model(abc", ErrInvalidSuffix},
        {"unknown level", NewThinkingError(ErrUnknownLevel, "unknown level: ultra"), "unknown level: ultra", ErrUnknownLevel},
        {"level not supported", NewThinkingError(ErrLevelNotSupported, "level \"xhigh\" not supported, valid levels: low, medium, high"), "level \"xhigh\" not supported, valid levels: low, medium, high", ErrLevelNotSupported},
        {"thinking not supported", NewThinkingErrorWithModel(ErrThinkingNotSupported, "thinking not supported for this model", "claude-haiku"), "thinking not supported for this model", ErrThinkingNotSupported},
        {"provider mismatch", NewThinkingError(ErrProviderMismatch, "provider mismatch: expected claude, got gemini"), "provider mismatch: expected claude, got gemini", ErrProviderMismatch},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if got := tt.err.Error(); got != tt.wantMsg {
                t.Errorf("Error() = %q, want %q", got, tt.wantMsg)
            }
            if tt.err.Code != tt.wantCode {
                t.Errorf("Code = %q, want %q", tt.err.Code, tt.wantCode)
            }
        })
    }
}
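The Code/message split these cases pin down is what callers are meant to branch on. A hedged sketch of that pattern; whether ThinkingError is surfaced wrapped (so that errors.As applies) is an assumption here, and it needs the standard errors package imported — only the error codes themselves come from the cases above:

// Hypothetical caller-side handling keyed on ThinkingError.Code rather than
// on the human-readable message. Needs: import "errors".
func classifyThinkingErr(err error) string {
    var terr *ThinkingError
    if !errors.As(err, &terr) {
        return "unknown"
    }
    switch terr.Code {
    case ErrInvalidSuffix, ErrUnknownLevel, ErrLevelNotSupported:
        return "bad-request" // the client asked for something the model cannot do
    case ErrThinkingNotSupported, ErrProviderMismatch:
        return "strip-config" // drop the thinking config rather than failing the request
    default:
        return "unknown"
    }
}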
@@ -1,42 +0,0 @@
// Package thinking provides unified thinking configuration processing logic.
package thinking

import "testing"

func TestExtractThinkingConfig(t *testing.T) {
    tests := []struct {
        name     string
        body     string
        provider string
        want     ThinkingConfig
    }{
        {"claude budget", `{"thinking":{"budget_tokens":16384}}`, "claude", ThinkingConfig{Mode: ModeBudget, Budget: 16384}},
        {"claude disabled type", `{"thinking":{"type":"disabled"}}`, "claude", ThinkingConfig{Mode: ModeNone, Budget: 0}},
        {"claude auto budget", `{"thinking":{"budget_tokens":-1}}`, "claude", ThinkingConfig{Mode: ModeAuto, Budget: -1}},
        {"claude enabled type without budget", `{"thinking":{"type":"enabled"}}`, "claude", ThinkingConfig{Mode: ModeAuto, Budget: -1}},
        {"claude enabled type with budget", `{"thinking":{"type":"enabled","budget_tokens":8192}}`, "claude", ThinkingConfig{Mode: ModeBudget, Budget: 8192}},
        {"claude disabled type overrides budget", `{"thinking":{"type":"disabled","budget_tokens":8192}}`, "claude", ThinkingConfig{Mode: ModeNone, Budget: 0}},
        {"gemini budget", `{"generationConfig":{"thinkingConfig":{"thinkingBudget":8192}}}`, "gemini", ThinkingConfig{Mode: ModeBudget, Budget: 8192}},
        {"gemini level", `{"generationConfig":{"thinkingConfig":{"thinkingLevel":"high"}}}`, "gemini", ThinkingConfig{Mode: ModeLevel, Level: LevelHigh}},
        {"gemini cli auto", `{"request":{"generationConfig":{"thinkingConfig":{"thinkingLevel":"auto"}}}}`, "gemini-cli", ThinkingConfig{Mode: ModeAuto, Budget: -1}},
        {"openai level", `{"reasoning_effort":"medium"}`, "openai", ThinkingConfig{Mode: ModeLevel, Level: LevelMedium}},
        {"openai none", `{"reasoning_effort":"none"}`, "openai", ThinkingConfig{Mode: ModeNone, Budget: 0}},
        {"codex effort high", `{"reasoning":{"effort":"high"}}`, "codex", ThinkingConfig{Mode: ModeLevel, Level: LevelHigh}},
        {"codex effort none", `{"reasoning":{"effort":"none"}}`, "codex", ThinkingConfig{Mode: ModeNone, Budget: 0}},
        {"iflow enable", `{"chat_template_kwargs":{"enable_thinking":true}}`, "iflow", ThinkingConfig{Mode: ModeBudget, Budget: 1}},
        {"iflow disable", `{"reasoning_split":false}`, "iflow", ThinkingConfig{Mode: ModeNone, Budget: 0}},
        {"unknown provider", `{"thinking":{"budget_tokens":123}}`, "unknown", ThinkingConfig{}},
        {"invalid json", `{"thinking":`, "claude", ThinkingConfig{}},
        {"empty body", "", "claude", ThinkingConfig{}},
        {"no config", `{}`, "gemini", ThinkingConfig{}},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got := extractThinkingConfig([]byte(tt.body), tt.provider)
            if got != tt.want {
                t.Fatalf("extractThinkingConfig() = %+v, want %+v", got, tt.want)
            }
        })
    }
}
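Collected from the table above, the primary request field each provider's extraction reads (paths copied from the test bodies; claude additionally honours thinking.type, codex reads the nested reasoning.effort, and iflow also looks at reasoning_split). The map name is illustrative only; extractThinkingConfig is the real reader:

// Illustrative summary of per-provider thinking fields exercised above.
var providerThinkingPath = map[string]string{
    "claude":     "thinking.budget_tokens",
    "gemini":     "generationConfig.thinkingConfig",
    "gemini-cli": "request.generationConfig.thinkingConfig",
    "openai":     "reasoning_effort",
    "codex":      "reasoning.effort",
    "iflow":      "chat_template_kwargs.enable_thinking",
}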
@@ -1,288 +0,0 @@
// Package claude implements thinking configuration for Claude models.
package claude

import (
    "testing"

    "github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
    "github.com/router-for-me/CLIProxyAPI/v6/internal/thinking"
    "github.com/tidwall/gjson"
)

// =============================================================================
// Unit Tests: Applier Creation and Interface
// =============================================================================

func TestNewApplier(t *testing.T) {
    applier := NewApplier()
    if applier == nil {
        t.Fatal("NewApplier() returned nil")
    }
}

func TestApplierImplementsInterface(t *testing.T) {
    var _ thinking.ProviderApplier = (*Applier)(nil)
}

// =============================================================================
// Unit Tests: Budget and Disable Logic (Pre-validated Config)
// =============================================================================

// TestClaudeApplyBudgetAndNone tests budget values and disable modes.
// NOTE: These tests assume config has been pre-validated by ValidateConfig.
// Apply trusts the input and does not perform clamping.
func TestClaudeApplyBudgetAndNone(t *testing.T) {
    applier := NewApplier()
    modelInfo := buildClaudeModelInfo()

    tests := []struct {
        name         string
        config       thinking.ThinkingConfig
        wantType     string
        wantBudget   int
        wantBudgetOK bool
    }{
        // Valid pre-validated budget values
        {"budget 16k", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 16384}, "enabled", 16384, true},
        {"budget min", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 1024}, "enabled", 1024, true},
        {"budget max", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 128000}, "enabled", 128000, true},
        {"budget mid", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 50000}, "enabled", 50000, true},
        // Disable cases
        {"budget zero disables", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 0}, "disabled", 0, false},
        {"mode none disables", thinking.ThinkingConfig{Mode: thinking.ModeNone, Budget: 0}, "disabled", 0, false},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            result, err := applier.Apply([]byte(`{}`), tt.config, modelInfo)
            if err != nil {
                t.Fatalf("Apply() error = %v", err)
            }

            thinkingType := gjson.GetBytes(result, "thinking.type").String()
            if thinkingType != tt.wantType {
                t.Fatalf("thinking.type = %q, want %q", thinkingType, tt.wantType)
            }

            budgetValue := gjson.GetBytes(result, "thinking.budget_tokens")
            if budgetValue.Exists() != tt.wantBudgetOK {
                t.Fatalf("thinking.budget_tokens exists = %v, want %v", budgetValue.Exists(), tt.wantBudgetOK)
            }
            if tt.wantBudgetOK {
                if got := int(budgetValue.Int()); got != tt.wantBudget {
                    t.Fatalf("thinking.budget_tokens = %d, want %d", got, tt.wantBudget)
                }
            }
        })
    }
}

// TestClaudeApplyPassthroughBudget tests that Apply trusts pre-validated budget values.
// It does NOT perform clamping - that's ValidateConfig's responsibility.
func TestClaudeApplyPassthroughBudget(t *testing.T) {
    applier := NewApplier()
    modelInfo := buildClaudeModelInfo()

    tests := []struct {
        name       string
        config     thinking.ThinkingConfig
        wantBudget int
    }{
        // Apply should pass through the budget value as-is
        // (ValidateConfig would have clamped these, but Apply trusts the input)
        {"passes through any budget", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 500}, 500},
        {"passes through large budget", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 200000}, 200000},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            result, err := applier.Apply([]byte(`{}`), tt.config, modelInfo)
            if err != nil {
                t.Fatalf("Apply() error = %v", err)
            }

            if got := int(gjson.GetBytes(result, "thinking.budget_tokens").Int()); got != tt.wantBudget {
                t.Fatalf("thinking.budget_tokens = %d, want %d (passthrough)", got, tt.wantBudget)
            }
        })
    }
}

// =============================================================================
// Unit Tests: Mode Passthrough (Strict Layering)
// =============================================================================

// TestClaudeApplyModePassthrough tests that non-Budget/None modes pass through unchanged.
// Apply expects ValidateConfig to have already converted Level/Auto to Budget.
func TestClaudeApplyModePassthrough(t *testing.T) {
    applier := NewApplier()
    modelInfo := buildClaudeModelInfo()

    tests := []struct {
        name   string
        config thinking.ThinkingConfig
        body   string
    }{
        {"ModeLevel passes through", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: "high"}, `{"model":"test"}`},
        {"ModeAuto passes through", thinking.ThinkingConfig{Mode: thinking.ModeAuto, Budget: -1}, `{"model":"test"}`},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            result, err := applier.Apply([]byte(tt.body), tt.config, modelInfo)
            if err != nil {
                t.Fatalf("Apply() error = %v", err)
            }

            // Should return body unchanged
            if string(result) != tt.body {
                t.Fatalf("Apply() = %s, want %s (passthrough)", string(result), tt.body)
            }
        })
    }
}

// =============================================================================
// Unit Tests: Output Format
// =============================================================================

// TestClaudeApplyOutputFormat tests the exact JSON output format.
//
// Claude expects:
//
//	{
//	  "thinking": {
//	    "type": "enabled",
//	    "budget_tokens": 16384
//	  }
//	}
func TestClaudeApplyOutputFormat(t *testing.T) {
    tests := []struct {
        name     string
        config   thinking.ThinkingConfig
        wantJSON string
    }{
        {
            "enabled with budget",
            thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 16384},
            `{"thinking":{"type":"enabled","budget_tokens":16384}}`,
        },
        {
            "disabled",
            thinking.ThinkingConfig{Mode: thinking.ModeNone, Budget: 0},
            `{"thinking":{"type":"disabled"}}`,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            applier := NewApplier()
            modelInfo := buildClaudeModelInfo()

            result, err := applier.Apply([]byte(`{}`), tt.config, modelInfo)
            if err != nil {
                t.Fatalf("Apply() error = %v", err)
            }
            if string(result) != tt.wantJSON {
                t.Fatalf("Apply() = %s, want %s", result, tt.wantJSON)
            }
        })
    }
}

// =============================================================================
// Unit Tests: Body Merging
// =============================================================================

// TestClaudeApplyWithExistingBody tests applying config to existing request body.
func TestClaudeApplyWithExistingBody(t *testing.T) {
    tests := []struct {
        name     string
        body     string
        config   thinking.ThinkingConfig
        wantBody string
    }{
        {
            "add to empty body",
            `{}`,
            thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 16384},
            `{"thinking":{"type":"enabled","budget_tokens":16384}}`,
        },
        {
            "preserve existing fields",
            `{"model":"claude-sonnet-4-5","messages":[]}`,
            thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192},
            `{"model":"claude-sonnet-4-5","messages":[],"thinking":{"type":"enabled","budget_tokens":8192}}`,
        },
        {
            "override existing thinking",
            `{"thinking":{"type":"enabled","budget_tokens":1000}}`,
            thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 16384},
            `{"thinking":{"type":"enabled","budget_tokens":16384}}`,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            applier := NewApplier()
            modelInfo := buildClaudeModelInfo()

            result, err := applier.Apply([]byte(tt.body), tt.config, modelInfo)
            if err != nil {
                t.Fatalf("Apply() error = %v", err)
            }
            if string(result) != tt.wantBody {
                t.Fatalf("Apply() = %s, want %s", result, tt.wantBody)
            }
        })
    }
}

// TestClaudeApplyWithNilBody tests handling of nil/empty body.
func TestClaudeApplyWithNilBody(t *testing.T) {
    applier := NewApplier()
    modelInfo := buildClaudeModelInfo()

    tests := []struct {
        name       string
        body       []byte
        wantBudget int
    }{
        {"nil body", nil, 16384},
        {"empty body", []byte{}, 16384},
        {"empty object", []byte(`{}`), 16384},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            config := thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 16384}
            result, err := applier.Apply(tt.body, config, modelInfo)
            if err != nil {
                t.Fatalf("Apply() error = %v", err)
            }

            if got := gjson.GetBytes(result, "thinking.type").String(); got != "enabled" {
                t.Fatalf("thinking.type = %q, want %q", got, "enabled")
            }
            if got := int(gjson.GetBytes(result, "thinking.budget_tokens").Int()); got != tt.wantBudget {
                t.Fatalf("thinking.budget_tokens = %d, want %d", got, tt.wantBudget)
            }
        })
    }
}

// =============================================================================
// Helper Functions
// =============================================================================

func buildClaudeModelInfo() *registry.ModelInfo {
    return &registry.ModelInfo{
        ID: "claude-sonnet-4-5",
        Thinking: &registry.ThinkingSupport{
            Min:            1024,
            Max:            128000,
            ZeroAllowed:    true,
            DynamicAllowed: false,
        },
    }
}
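For quick reference, the exact wire shapes the output-format tests above lock in, together with the kind of gjson read-back check used throughout this file. The constant and function names are illustrative; building the body itself is left to the applier:

// Bodies asserted by TestClaudeApplyOutputFormat: enabled carries budget_tokens,
// disabled omits it entirely.
const (
    claudeEnabledBody  = `{"thinking":{"type":"enabled","budget_tokens":16384}}`
    claudeDisabledBody = `{"thinking":{"type":"disabled"}}`
)

// Read-back check mirroring the assertions in this file.
func thinkingEnabled(body []byte) bool {
    return gjson.GetBytes(body, "thinking.type").String() == "enabled" &&
        gjson.GetBytes(body, "thinking.budget_tokens").Exists()
}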
@@ -1,519 +0,0 @@
// Package gemini implements thinking configuration for Gemini models.
package gemini

import (
	"testing"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/thinking"
	"github.com/tidwall/gjson"
)

func TestNewApplier(t *testing.T) {
	applier := NewApplier()
	if applier == nil {
		t.Fatal("NewApplier() returned nil")
	}
}

// parseConfigFromSuffix parses a raw suffix into a ThinkingConfig.
// This helper reduces code duplication in end-to-end tests (L1 fix).
func parseConfigFromSuffix(rawSuffix string) (thinking.ThinkingConfig, bool) {
	if budget, ok := thinking.ParseNumericSuffix(rawSuffix); ok {
		return thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: budget}, true
	}
	if level, ok := thinking.ParseLevelSuffix(rawSuffix); ok {
		return thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: level}, true
	}
	if mode, ok := thinking.ParseSpecialSuffix(rawSuffix); ok {
		config := thinking.ThinkingConfig{Mode: mode}
		if mode == thinking.ModeAuto {
			config.Budget = -1
		}
		return config, true
	}
	return thinking.ThinkingConfig{}, false
}

func TestApplierImplementsInterface(t *testing.T) {
	// Compile-time check: if Applier doesn't implement the interface, this won't compile
	var _ thinking.ProviderApplier = (*Applier)(nil)
}

// TestGeminiApply tests the Gemini thinking applier.
//
// Gemini-specific behavior:
// - Gemini 2.5: thinkingBudget format (numeric)
// - Gemini 3.x: thinkingLevel format (string)
// - Flash series: ZeroAllowed=true
// - Pro series: ZeroAllowed=false, Min=128
// - CRITICAL: When budget=0/none, set includeThoughts=false
//
// Depends on: Epic 7 Story 7-2, 7-3
func TestGeminiApply(t *testing.T) {
	applier := NewApplier()
	tests := []struct {
		name                string
		model               string
		config              thinking.ThinkingConfig
		wantField           string
		wantValue           interface{}
		wantIncludeThoughts bool // CRITICAL: includeThoughts field
	}{
		// Gemini 2.5 Flash (ZeroAllowed=true)
		{"flash budget 8k", "gemini-2.5-flash", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}, "thinkingBudget", 8192, true},
		{"flash zero", "gemini-2.5-flash", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 0}, "thinkingBudget", 0, false},
		{"flash none", "gemini-2.5-flash", thinking.ThinkingConfig{Mode: thinking.ModeNone, Budget: 0}, "thinkingBudget", 0, false},

		// Gemini 2.5 Pro (ZeroAllowed=false, Min=128)
		{"pro budget 8k", "gemini-2.5-pro", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}, "thinkingBudget", 8192, true},
		{"pro zero - clamp", "gemini-2.5-pro", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 0}, "thinkingBudget", 128, false},
		{"pro none - clamp", "gemini-2.5-pro", thinking.ThinkingConfig{Mode: thinking.ModeNone, Budget: 0}, "thinkingBudget", 128, false},
		{"pro below min", "gemini-2.5-pro", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 50}, "thinkingBudget", 128, true},
		{"pro above max", "gemini-2.5-pro", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 50000}, "thinkingBudget", 32768, true},
		{"pro auto", "gemini-2.5-pro", thinking.ThinkingConfig{Mode: thinking.ModeAuto, Budget: -1}, "thinkingBudget", -1, true},

		// Gemini 3 Pro (Level mode, ZeroAllowed=false)
		{"g3-pro high", "gemini-3-pro-preview", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelHigh}, "thinkingLevel", "high", true},
		{"g3-pro low", "gemini-3-pro-preview", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelLow}, "thinkingLevel", "low", true},
		{"g3-pro auto", "gemini-3-pro-preview", thinking.ThinkingConfig{Mode: thinking.ModeAuto, Budget: -1}, "thinkingBudget", -1, true},

		// Gemini 3 Flash (Level mode, minimal is lowest)
		{"g3-flash high", "gemini-3-flash-preview", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelHigh}, "thinkingLevel", "high", true},
		{"g3-flash medium", "gemini-3-flash-preview", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelMedium}, "thinkingLevel", "medium", true},
		{"g3-flash minimal", "gemini-3-flash-preview", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelMinimal}, "thinkingLevel", "minimal", true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			modelInfo := buildGeminiModelInfo(tt.model)
			normalized, err := thinking.ValidateConfig(tt.config, modelInfo.Thinking)
			if err != nil {
				t.Fatalf("ValidateConfig() error = %v", err)
			}

			result, err := applier.Apply([]byte(`{}`), *normalized, modelInfo)
			if err != nil {
				t.Fatalf("Apply() error = %v", err)
			}

			gotField := gjson.GetBytes(result, "generationConfig.thinkingConfig."+tt.wantField)
			switch want := tt.wantValue.(type) {
			case int:
				if int(gotField.Int()) != want {
					t.Fatalf("%s = %d, want %d", tt.wantField, gotField.Int(), want)
				}
			case string:
				if gotField.String() != want {
					t.Fatalf("%s = %q, want %q", tt.wantField, gotField.String(), want)
				}
			case bool:
				if gotField.Bool() != want {
					t.Fatalf("%s = %v, want %v", tt.wantField, gotField.Bool(), want)
				}
			default:
				t.Fatalf("unsupported wantValue type %T", tt.wantValue)
			}

			gotIncludeThoughts := gjson.GetBytes(result, "generationConfig.thinkingConfig.includeThoughts").Bool()
			if gotIncludeThoughts != tt.wantIncludeThoughts {
				t.Fatalf("includeThoughts = %v, want %v", gotIncludeThoughts, tt.wantIncludeThoughts)
			}
		})
	}
}

// TestGeminiApplyEndToEndBudgetZero tests suffix parsing + validation + apply for budget=0.
//
// This test covers the complete flow from suffix parsing to Apply output:
// - AC#1: ModeBudget+Budget=0 → ModeNone conversion
// - AC#3: Gemini 3 ModeNone+Budget>0 → includeThoughts=false + thinkingLevel=low
// - AC#4: Gemini 2.5 Pro (0) → clamped to 128 + includeThoughts=false
func TestGeminiApplyEndToEndBudgetZero(t *testing.T) {
	tests := []struct {
		name                string
		model               string
		wantModel           string
		wantField           string // "thinkingBudget" or "thinkingLevel"
		wantValue           interface{}
		wantIncludeThoughts bool
	}{
		// AC#4: Gemini 2.5 Pro - Budget format
		{"gemini-25-pro zero", "gemini-2.5-pro(0)", "gemini-2.5-pro", "thinkingBudget", 128, false},
		// AC#3: Gemini 3 Pro - Level format, ModeNone clamped to Budget=128, uses lowest level
		{"gemini-3-pro zero", "gemini-3-pro-preview(0)", "gemini-3-pro-preview", "thinkingLevel", "low", false},
		{"gemini-3-pro none", "gemini-3-pro-preview(none)", "gemini-3-pro-preview", "thinkingLevel", "low", false},
		// Gemini 3 Flash - Level format, lowest level is "minimal"
		{"gemini-3-flash zero", "gemini-3-flash-preview(0)", "gemini-3-flash-preview", "thinkingLevel", "minimal", false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			suffix := thinking.ParseSuffix(tt.model)
			if !suffix.HasSuffix {
				t.Fatalf("ParseSuffix(%q) HasSuffix = false, want true", tt.model)
			}
			if suffix.ModelName != tt.wantModel {
				t.Fatalf("ParseSuffix(%q) ModelName = %q, want %q", tt.model, suffix.ModelName, tt.wantModel)
			}

			// Parse suffix value using helper function (L1 fix)
			config, ok := parseConfigFromSuffix(suffix.RawSuffix)
			if !ok {
				t.Fatalf("ParseSuffix(%q) RawSuffix = %q is not a valid suffix", tt.model, suffix.RawSuffix)
			}

			modelInfo := buildGeminiModelInfo(suffix.ModelName)
			normalized, err := thinking.ValidateConfig(config, modelInfo.Thinking)
			if err != nil {
				t.Fatalf("ValidateConfig() error = %v", err)
			}

			applier := NewApplier()
			result, err := applier.Apply([]byte(`{}`), *normalized, modelInfo)
			if err != nil {
				t.Fatalf("Apply() error = %v", err)
			}

			// Verify the output field value
			gotField := gjson.GetBytes(result, "generationConfig.thinkingConfig."+tt.wantField)
			switch want := tt.wantValue.(type) {
			case int:
				if int(gotField.Int()) != want {
					t.Fatalf("%s = %d, want %d", tt.wantField, gotField.Int(), want)
				}
			case string:
				if gotField.String() != want {
					t.Fatalf("%s = %q, want %q", tt.wantField, gotField.String(), want)
				}
			}

			gotIncludeThoughts := gjson.GetBytes(result, "generationConfig.thinkingConfig.includeThoughts").Bool()
			if gotIncludeThoughts != tt.wantIncludeThoughts {
				t.Fatalf("includeThoughts = %v, want %v", gotIncludeThoughts, tt.wantIncludeThoughts)
			}
		})
	}
}

// TestGeminiApplyEndToEndAuto tests auto mode through both suffix parsing and direct config.
//
// This test covers:
// - AC#2: Gemini 2.5 auto uses thinkingBudget=-1
// - AC#3: Gemini 3 auto uses thinkingBudget=-1 (not thinkingLevel)
// - Suffix parsing path: (auto) and (-1) suffixes
// - Direct config path: ModeLevel + Level=auto → ModeAuto conversion
func TestGeminiApplyEndToEndAuto(t *testing.T) {
	tests := []struct {
		name                string
		model               string                   // model name (with suffix for parsing, or plain for direct config)
		directConfig        *thinking.ThinkingConfig // if not nil, use direct config instead of suffix parsing
		wantField           string
		wantValue           int
		wantIncludeThoughts bool
	}{
		// Suffix parsing path - Budget-only model (Gemini 2.5)
		{"suffix auto g25", "gemini-2.5-pro(auto)", nil, "thinkingBudget", -1, true},
		{"suffix -1 g25", "gemini-2.5-pro(-1)", nil, "thinkingBudget", -1, true},
		// Suffix parsing path - Hybrid model (Gemini 3)
		{"suffix auto g3", "gemini-3-pro-preview(auto)", nil, "thinkingBudget", -1, true},
		{"suffix -1 g3", "gemini-3-pro-preview(-1)", nil, "thinkingBudget", -1, true},
		// Direct config path - Level=auto → ModeAuto conversion
		{"direct level=auto g25", "gemini-2.5-pro", &thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelAuto}, "thinkingBudget", -1, true},
		{"direct level=auto g3", "gemini-3-pro-preview", &thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelAuto}, "thinkingBudget", -1, true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var config thinking.ThinkingConfig
			var modelName string

			if tt.directConfig != nil {
				// Direct config path
				config = *tt.directConfig
				modelName = tt.model
			} else {
				// Suffix parsing path
				suffix := thinking.ParseSuffix(tt.model)
				if !suffix.HasSuffix {
					t.Fatalf("ParseSuffix(%q) HasSuffix = false", tt.model)
				}
				modelName = suffix.ModelName
				var ok bool
				config, ok = parseConfigFromSuffix(suffix.RawSuffix)
				if !ok {
					t.Fatalf("parseConfigFromSuffix(%q) failed", suffix.RawSuffix)
				}
			}

			modelInfo := buildGeminiModelInfo(modelName)
			normalized, err := thinking.ValidateConfig(config, modelInfo.Thinking)
			if err != nil {
				t.Fatalf("ValidateConfig() error = %v", err)
			}

			// Verify ModeAuto after validation
			if normalized.Mode != thinking.ModeAuto {
				t.Fatalf("ValidateConfig() Mode = %v, want ModeAuto", normalized.Mode)
			}

			applier := NewApplier()
			result, err := applier.Apply([]byte(`{}`), *normalized, modelInfo)
			if err != nil {
				t.Fatalf("Apply() error = %v", err)
			}

			gotField := gjson.GetBytes(result, "generationConfig.thinkingConfig."+tt.wantField)
			if int(gotField.Int()) != tt.wantValue {
				t.Fatalf("%s = %d, want %d", tt.wantField, gotField.Int(), tt.wantValue)
			}

			gotIncludeThoughts := gjson.GetBytes(result, "generationConfig.thinkingConfig.includeThoughts").Bool()
			if gotIncludeThoughts != tt.wantIncludeThoughts {
				t.Fatalf("includeThoughts = %v, want %v", gotIncludeThoughts, tt.wantIncludeThoughts)
			}
		})
	}
}

func TestGeminiApplyInvalidBody(t *testing.T) {
	applier := NewApplier()
	modelInfo := buildGeminiModelInfo("gemini-2.5-flash")
	config := thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}
	normalized, err := thinking.ValidateConfig(config, modelInfo.Thinking)
	if err != nil {
		t.Fatalf("ValidateConfig() error = %v", err)
	}

	tests := []struct {
		name string
		body []byte
	}{
		{"nil body", nil},
		{"empty body", []byte{}},
		{"invalid json", []byte("{\"not json\"")},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result, err := applier.Apply(tt.body, *normalized, modelInfo)
			if err != nil {
				t.Fatalf("Apply() error = %v", err)
			}

			gotBudget := int(gjson.GetBytes(result, "generationConfig.thinkingConfig.thinkingBudget").Int())
			if gotBudget != 8192 {
				t.Fatalf("thinkingBudget = %d, want %d", gotBudget, 8192)
			}

			gotIncludeThoughts := gjson.GetBytes(result, "generationConfig.thinkingConfig.includeThoughts").Bool()
			if !gotIncludeThoughts {
				t.Fatalf("includeThoughts = %v, want %v", gotIncludeThoughts, true)
			}
		})
	}
}

// TestGeminiApplyConflictingFields tests that conflicting fields are removed.
//
// When applying Budget format, any existing thinkingLevel should be removed.
// When applying Level format, any existing thinkingBudget should be removed.
func TestGeminiApplyConflictingFields(t *testing.T) {
	applier := NewApplier()

	tests := []struct {
		name         string
		model        string
		config       thinking.ThinkingConfig
		existingBody string
		wantField    string // expected field to exist
		wantNoField  string // expected field to NOT exist
	}{
		// Budget format should remove existing thinkingLevel
		{
			"budget removes level",
			"gemini-2.5-pro",
			thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192},
			`{"generationConfig":{"thinkingConfig":{"thinkingLevel":"high"}}}`,
			"thinkingBudget",
			"thinkingLevel",
		},
		// Level format should remove existing thinkingBudget
		{
			"level removes budget",
			"gemini-3-pro-preview",
			thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelHigh},
			`{"generationConfig":{"thinkingConfig":{"thinkingBudget":8192}}}`,
			"thinkingLevel",
			"thinkingBudget",
		},
		// ModeAuto uses budget format, should remove thinkingLevel
		{
			"auto removes level",
			"gemini-3-pro-preview",
			thinking.ThinkingConfig{Mode: thinking.ModeAuto, Budget: -1},
			`{"generationConfig":{"thinkingConfig":{"thinkingLevel":"high"}}}`,
			"thinkingBudget",
			"thinkingLevel",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			modelInfo := buildGeminiModelInfo(tt.model)
			result, err := applier.Apply([]byte(tt.existingBody), tt.config, modelInfo)
			if err != nil {
				t.Fatalf("Apply() error = %v", err)
			}

			// Verify expected field exists
			wantPath := "generationConfig.thinkingConfig." + tt.wantField
			if !gjson.GetBytes(result, wantPath).Exists() {
				t.Fatalf("%s should exist in result: %s", tt.wantField, string(result))
			}

			// Verify conflicting field was removed
			noPath := "generationConfig.thinkingConfig." + tt.wantNoField
			if gjson.GetBytes(result, noPath).Exists() {
				t.Fatalf("%s should NOT exist in result: %s", tt.wantNoField, string(result))
			}
		})
	}
}

// TestGeminiApplyThinkingNotSupported tests passthrough handling when modelInfo.Thinking is nil.
func TestGeminiApplyThinkingNotSupported(t *testing.T) {
	applier := NewApplier()
	config := thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}
	body := []byte(`{"generationConfig":{"thinkingConfig":{"thinkingBudget":8192}}}`)

	// Model with nil Thinking support
	modelInfo := &registry.ModelInfo{ID: "gemini-unknown", Thinking: nil}

	got, err := applier.Apply(body, config, modelInfo)
	if err != nil {
		t.Fatalf("Apply() expected nil error for nil Thinking, got %v", err)
	}
	if string(got) != string(body) {
		t.Fatalf("expected body unchanged, got %s", string(got))
	}
}

func buildGeminiModelInfo(modelID string) *registry.ModelInfo {
	support := &registry.ThinkingSupport{}
	switch modelID {
	case "gemini-2.5-pro":
		support.Min = 128
		support.Max = 32768
		support.ZeroAllowed = false
		support.DynamicAllowed = true
	case "gemini-2.5-flash", "gemini-2.5-flash-lite":
		support.Min = 0
		support.Max = 24576
		support.ZeroAllowed = true
		support.DynamicAllowed = true
	case "gemini-3-pro-preview":
		support.Min = 128
		support.Max = 32768
		support.ZeroAllowed = false
		support.DynamicAllowed = true
		support.Levels = []string{"low", "high"}
	case "gemini-3-flash-preview":
		support.Min = 128
		support.Max = 32768
		support.ZeroAllowed = false
		support.DynamicAllowed = true
		support.Levels = []string{"minimal", "low", "medium", "high"}
	default:
		// Unknown model - return nil Thinking to trigger error path
		return &registry.ModelInfo{ID: modelID, Thinking: nil}
	}
	return &registry.ModelInfo{
		ID:       modelID,
		Thinking: support,
	}
}

// TestGeminiApplyNilModelInfo tests Apply behavior when modelInfo is nil.
// Coverage: apply.go:56-58 (H1)
func TestGeminiApplyNilModelInfo(t *testing.T) {
	applier := NewApplier()
	config := thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}
	body := []byte(`{"existing": "data"}`)

	result, err := applier.Apply(body, config, nil)
	if err != nil {
		t.Fatalf("Apply() with nil modelInfo should not error, got: %v", err)
	}
	// nil modelInfo now applies compatible config
	if !gjson.GetBytes(result, "generationConfig.thinkingConfig.thinkingBudget").Exists() {
		t.Fatalf("Apply() with nil modelInfo should apply thinking config, got: %s", result)
	}
}

// TestGeminiApplyEmptyModelID tests Apply when modelID is empty.
// Coverage: apply.go:61-63 (H2)
func TestGeminiApplyEmptyModelID(t *testing.T) {
	applier := NewApplier()
	config := thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}
	modelInfo := &registry.ModelInfo{ID: "", Thinking: nil}
	body := []byte(`{"generationConfig":{"thinkingConfig":{"thinkingBudget":8192}}}`)

	got, err := applier.Apply(body, config, modelInfo)
	if err != nil {
		t.Fatalf("Apply() expected nil error, got %v", err)
	}
	if string(got) != string(body) {
		t.Fatalf("expected body unchanged, got %s", string(got))
	}
}

// TestGeminiApplyModeBudgetWithLevels tests that ModeBudget is applied with budget format
// even for models with Levels. The Apply layer handles ModeBudget by applying thinkingBudget.
// Coverage: apply.go:88-90
func TestGeminiApplyModeBudgetWithLevels(t *testing.T) {
	applier := NewApplier()
	modelInfo := buildGeminiModelInfo("gemini-3-flash-preview")
	config := thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}
	body := []byte(`{"existing": "data"}`)

	result, err := applier.Apply(body, config, modelInfo)
	if err != nil {
		t.Fatalf("Apply() error = %v", err)
	}
	// ModeBudget applies budget format
	budget := gjson.GetBytes(result, "generationConfig.thinkingConfig.thinkingBudget").Int()
	if budget != 8192 {
		t.Fatalf("Apply() expected thinkingBudget=8192, got: %d", budget)
	}
}

// TestGeminiApplyUnsupportedMode tests behavior with unsupported Mode types.
// Coverage: apply.go:67-69 and 97-98 (H5, L2)
func TestGeminiApplyUnsupportedMode(t *testing.T) {
	applier := NewApplier()
	body := []byte(`{"existing": "data"}`)

	tests := []struct {
		name   string
		model  string
		config thinking.ThinkingConfig
	}{
		{"unknown mode with budget model", "gemini-2.5-pro", thinking.ThinkingConfig{Mode: thinking.ThinkingMode(99), Budget: 8192}},
		{"unknown mode with level model", "gemini-3-pro-preview", thinking.ThinkingConfig{Mode: thinking.ThinkingMode(99), Level: thinking.LevelHigh}},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			modelInfo := buildGeminiModelInfo(tt.model)
			result, err := applier.Apply(body, tt.config, modelInfo)
			if err != nil {
				t.Fatalf("Apply() error = %v", err)
			}
			// Unsupported modes return original body unchanged
			if string(result) != string(body) {
				t.Fatalf("Apply() with unsupported mode should return original body, got: %s", result)
			}
		})
	}
}
@@ -1,375 +0,0 @@
// Package geminicli implements thinking configuration for Gemini CLI API format.
package geminicli

import (
	"testing"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/thinking"
	"github.com/tidwall/gjson"
)

func TestNewApplier(t *testing.T) {
	applier := NewApplier()
	if applier == nil {
		t.Fatal("NewApplier() returned nil")
	}
}

func TestApplierImplementsInterface(t *testing.T) {
	// Compile-time check: if Applier doesn't implement the interface, this won't compile
	var _ thinking.ProviderApplier = (*Applier)(nil)
}

// TestGeminiCLIApply tests the Gemini CLI thinking applier.
//
// Gemini CLI uses request.generationConfig.thinkingConfig.* path.
// Behavior mirrors Gemini applier but with different JSON path prefix.
func TestGeminiCLIApply(t *testing.T) {
	applier := NewApplier()
	tests := []struct {
		name                string
		model               string
		config              thinking.ThinkingConfig
		wantField           string
		wantValue           interface{}
		wantIncludeThoughts bool
	}{
		// Budget mode (no Levels)
		{"budget 8k", "gemini-cli-budget", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}, "thinkingBudget", 8192, true},
		{"budget zero", "gemini-cli-budget", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 0}, "thinkingBudget", 0, false},
		{"none mode", "gemini-cli-budget", thinking.ThinkingConfig{Mode: thinking.ModeNone, Budget: 0}, "thinkingBudget", 0, false},
		{"auto mode", "gemini-cli-budget", thinking.ThinkingConfig{Mode: thinking.ModeAuto, Budget: -1}, "thinkingBudget", -1, true},

		// Level mode (has Levels)
		{"level high", "gemini-cli-level", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelHigh}, "thinkingLevel", "high", true},
		{"level low", "gemini-cli-level", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelLow}, "thinkingLevel", "low", true},
		{"level minimal", "gemini-cli-level", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelMinimal}, "thinkingLevel", "minimal", true},
		// ModeAuto with Levels model still uses thinkingBudget=-1
		{"auto with levels", "gemini-cli-level", thinking.ThinkingConfig{Mode: thinking.ModeAuto, Budget: -1}, "thinkingBudget", -1, true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			modelInfo := buildGeminiCLIModelInfo(tt.model)
			result, err := applier.Apply([]byte(`{}`), tt.config, modelInfo)
			if err != nil {
				t.Fatalf("Apply() error = %v", err)
			}

			gotField := gjson.GetBytes(result, "request.generationConfig.thinkingConfig."+tt.wantField)
			switch want := tt.wantValue.(type) {
			case int:
				if int(gotField.Int()) != want {
					t.Fatalf("%s = %d, want %d", tt.wantField, gotField.Int(), want)
				}
			case string:
				if gotField.String() != want {
					t.Fatalf("%s = %q, want %q", tt.wantField, gotField.String(), want)
				}
			case bool:
				if gotField.Bool() != want {
					t.Fatalf("%s = %v, want %v", tt.wantField, gotField.Bool(), want)
				}
			default:
				t.Fatalf("unsupported wantValue type %T", tt.wantValue)
			}

			gotIncludeThoughts := gjson.GetBytes(result, "request.generationConfig.thinkingConfig.includeThoughts").Bool()
			if gotIncludeThoughts != tt.wantIncludeThoughts {
				t.Fatalf("includeThoughts = %v, want %v", gotIncludeThoughts, tt.wantIncludeThoughts)
			}
		})
	}
}

// TestGeminiCLIApplyModeNoneWithLevel tests ModeNone with Level model.
// When ModeNone is used with a model that has Levels, includeThoughts should be false.
func TestGeminiCLIApplyModeNoneWithLevel(t *testing.T) {
	applier := NewApplier()
	modelInfo := buildGeminiCLIModelInfo("gemini-cli-level")
	config := thinking.ThinkingConfig{Mode: thinking.ModeNone, Level: thinking.LevelLow}

	result, err := applier.Apply([]byte(`{}`), config, modelInfo)
	if err != nil {
		t.Fatalf("Apply() error = %v", err)
	}

	gotIncludeThoughts := gjson.GetBytes(result, "request.generationConfig.thinkingConfig.includeThoughts").Bool()
	if gotIncludeThoughts != false {
		t.Fatalf("includeThoughts = %v, want %v", gotIncludeThoughts, false)
	}

	gotLevel := gjson.GetBytes(result, "request.generationConfig.thinkingConfig.thinkingLevel").String()
	if gotLevel != "low" {
		t.Fatalf("thinkingLevel = %q, want %q", gotLevel, "low")
	}
}

// TestGeminiCLIApplyInvalidBody tests Apply behavior with invalid body inputs.
func TestGeminiCLIApplyInvalidBody(t *testing.T) {
	applier := NewApplier()
	modelInfo := buildGeminiCLIModelInfo("gemini-cli-budget")
	config := thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}

	tests := []struct {
		name string
		body []byte
	}{
		{"nil body", nil},
		{"empty body", []byte{}},
		{"invalid json", []byte("{\"not json\"")},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result, err := applier.Apply(tt.body, config, modelInfo)
			if err != nil {
				t.Fatalf("Apply() error = %v", err)
			}

			gotBudget := int(gjson.GetBytes(result, "request.generationConfig.thinkingConfig.thinkingBudget").Int())
			if gotBudget != 8192 {
				t.Fatalf("thinkingBudget = %d, want %d", gotBudget, 8192)
			}

			gotIncludeThoughts := gjson.GetBytes(result, "request.generationConfig.thinkingConfig.includeThoughts").Bool()
			if !gotIncludeThoughts {
				t.Fatalf("includeThoughts = %v, want %v", gotIncludeThoughts, true)
			}
		})
	}
}

// TestGeminiCLIApplyConflictingFields tests that conflicting fields are removed.
//
// When applying Budget format, any existing thinkingLevel should be removed.
// When applying Level format, any existing thinkingBudget should be removed.
func TestGeminiCLIApplyConflictingFields(t *testing.T) {
	applier := NewApplier()

	tests := []struct {
		name         string
		model        string
		config       thinking.ThinkingConfig
		existingBody string
		wantField    string // expected field to exist
		wantNoField  string // expected field to NOT exist
	}{
		// Budget format should remove existing thinkingLevel
		{
			"budget removes level",
			"gemini-cli-budget",
			thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192},
			`{"request":{"generationConfig":{"thinkingConfig":{"thinkingLevel":"high"}}}}`,
			"thinkingBudget",
			"thinkingLevel",
		},
		// Level format should remove existing thinkingBudget
		{
			"level removes budget",
			"gemini-cli-level",
			thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelHigh},
			`{"request":{"generationConfig":{"thinkingConfig":{"thinkingBudget":8192}}}}`,
			"thinkingLevel",
			"thinkingBudget",
		},
		// ModeAuto uses budget format, should remove thinkingLevel
		{
			"auto removes level",
			"gemini-cli-level",
			thinking.ThinkingConfig{Mode: thinking.ModeAuto, Budget: -1},
			`{"request":{"generationConfig":{"thinkingConfig":{"thinkingLevel":"high"}}}}`,
			"thinkingBudget",
			"thinkingLevel",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			modelInfo := buildGeminiCLIModelInfo(tt.model)
			result, err := applier.Apply([]byte(tt.existingBody), tt.config, modelInfo)
			if err != nil {
				t.Fatalf("Apply() error = %v", err)
			}

			// Verify expected field exists
			wantPath := "request.generationConfig.thinkingConfig." + tt.wantField
			if !gjson.GetBytes(result, wantPath).Exists() {
				t.Fatalf("%s should exist in result: %s", tt.wantField, string(result))
			}

			// Verify conflicting field was removed
			noPath := "request.generationConfig.thinkingConfig." + tt.wantNoField
			if gjson.GetBytes(result, noPath).Exists() {
				t.Fatalf("%s should NOT exist in result: %s", tt.wantNoField, string(result))
			}
		})
	}
}

// TestGeminiCLIApplyThinkingNotSupported tests passthrough handling when modelInfo.Thinking is nil.
func TestGeminiCLIApplyThinkingNotSupported(t *testing.T) {
	applier := NewApplier()
	config := thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}
	body := []byte(`{"request":{"generationConfig":{"thinkingConfig":{"thinkingBudget":8192}}}}`)

	// Model with nil Thinking support
	modelInfo := &registry.ModelInfo{ID: "gemini-cli-unknown", Thinking: nil}

	got, err := applier.Apply(body, config, modelInfo)
	if err != nil {
		t.Fatalf("Apply() expected nil error for nil Thinking, got %v", err)
	}
	if string(got) != string(body) {
		t.Fatalf("expected body unchanged, got %s", string(got))
	}
}

// TestGeminiCLIApplyNilModelInfo tests Apply behavior when modelInfo is nil.
func TestGeminiCLIApplyNilModelInfo(t *testing.T) {
	applier := NewApplier()
	config := thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}
	body := []byte(`{"existing": "data"}`)

	result, err := applier.Apply(body, config, nil)
	if err != nil {
		t.Fatalf("Apply() with nil modelInfo should not error, got: %v", err)
	}
	// nil modelInfo now applies compatible config
	if !gjson.GetBytes(result, "request.generationConfig.thinkingConfig.thinkingBudget").Exists() {
		t.Fatalf("Apply() with nil modelInfo should apply thinking config, got: %s", result)
	}
}

// TestGeminiCLIApplyEmptyModelID tests Apply when modelID is empty.
func TestGeminiCLIApplyEmptyModelID(t *testing.T) {
	applier := NewApplier()
	config := thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}
	modelInfo := &registry.ModelInfo{ID: "", Thinking: nil}
	body := []byte(`{"request":{"generationConfig":{"thinkingConfig":{"thinkingBudget":8192}}}}`)

	got, err := applier.Apply(body, config, modelInfo)
	if err != nil {
		t.Fatalf("Apply() expected nil error, got %v", err)
	}
	if string(got) != string(body) {
		t.Fatalf("expected body unchanged, got %s", string(got))
	}
}

// TestGeminiCLIApplyModeBudgetWithLevels tests that ModeBudget with Levels model passes through.
// Apply layer doesn't convert - upper layer should handle Budget→Level conversion.
func TestGeminiCLIApplyModeBudgetWithLevels(t *testing.T) {
	applier := NewApplier()
	modelInfo := buildGeminiCLIModelInfo("gemini-cli-level")
	config := thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}
	body := []byte(`{"existing": "data"}`)

	result, err := applier.Apply(body, config, modelInfo)
	if err != nil {
		t.Fatalf("Apply() error = %v", err)
	}
	// ModeBudget applies budget format directly without conversion to levels
	if !gjson.GetBytes(result, "request.generationConfig.thinkingConfig.thinkingBudget").Exists() {
		t.Fatalf("Apply() ModeBudget should apply budget format, got: %s", result)
	}
}

// TestGeminiCLIApplyUnsupportedMode tests behavior with unsupported Mode types.
func TestGeminiCLIApplyUnsupportedMode(t *testing.T) {
	applier := NewApplier()
	body := []byte(`{"existing": "data"}`)

	tests := []struct {
		name   string
		model  string
		config thinking.ThinkingConfig
	}{
		{"unknown mode with budget model", "gemini-cli-budget", thinking.ThinkingConfig{Mode: thinking.ThinkingMode(99), Budget: 8192}},
		{"unknown mode with level model", "gemini-cli-level", thinking.ThinkingConfig{Mode: thinking.ThinkingMode(99), Level: thinking.LevelHigh}},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			modelInfo := buildGeminiCLIModelInfo(tt.model)
			result, err := applier.Apply(body, tt.config, modelInfo)
			if err != nil {
				t.Fatalf("Apply() error = %v", err)
			}
			// Unsupported modes return original body unchanged
			if string(result) != string(body) {
				t.Fatalf("Apply() with unsupported mode should return original body, got: %s", result)
			}
		})
	}
}

// TestAntigravityUsesGeminiCLIFormat tests that antigravity provider uses gemini-cli format.
// Antigravity is registered with the same applier as gemini-cli.
func TestAntigravityUsesGeminiCLIFormat(t *testing.T) {
	applier := NewApplier()

	tests := []struct {
		name      string
		config    thinking.ThinkingConfig
		modelInfo *registry.ModelInfo
		wantField string
	}{
		{
			"claude model budget",
			thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 16384},
			&registry.ModelInfo{ID: "gemini-claude-sonnet-4-5-thinking", Thinking: &registry.ThinkingSupport{Min: 1024, Max: 200000}},
			"request.generationConfig.thinkingConfig.thinkingBudget",
		},
		{
			"opus model budget",
			thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 32768},
			&registry.ModelInfo{ID: "gemini-claude-opus-4-5-thinking", Thinking: &registry.ThinkingSupport{Min: 1024, Max: 200000}},
			"request.generationConfig.thinkingConfig.thinkingBudget",
		},
		{
			"model with levels",
			thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelHigh},
			&registry.ModelInfo{ID: "some-model-with-levels", Thinking: &registry.ThinkingSupport{Min: 1024, Max: 200000, Levels: []string{"low", "high"}}},
			"request.generationConfig.thinkingConfig.thinkingLevel",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := applier.Apply([]byte(`{}`), tt.config, tt.modelInfo)
			if err != nil {
				t.Fatalf("Apply() error = %v", err)
			}

			if !gjson.GetBytes(got, tt.wantField).Exists() {
				t.Fatalf("expected field %s in output: %s", tt.wantField, string(got))
			}
		})
	}
}

func buildGeminiCLIModelInfo(modelID string) *registry.ModelInfo {
	support := &registry.ThinkingSupport{}
	switch modelID {
	case "gemini-cli-budget":
		support.Min = 0
		support.Max = 32768
		support.ZeroAllowed = true
		support.DynamicAllowed = true
	case "gemini-cli-level":
		support.Min = 128
		support.Max = 32768
		support.ZeroAllowed = false
		support.DynamicAllowed = true
		support.Levels = []string{"minimal", "low", "medium", "high"}
	default:
		// Unknown model - return nil Thinking to trigger error path
		return &registry.ModelInfo{ID: modelID, Thinking: nil}
	}
	return &registry.ModelInfo{
		ID:       modelID,
		Thinking: support,
	}
}
@@ -1,318 +0,0 @@
// Package iflow implements thinking configuration for iFlow models (GLM, MiniMax).
package iflow

import (
	"bytes"
	"testing"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/thinking"
	"github.com/tidwall/gjson"
)

func TestNewApplier(t *testing.T) {
	tests := []struct {
		name string
	}{
		{"default"},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			applier := NewApplier()
			if applier == nil {
				t.Fatalf("expected non-nil applier")
			}
		})
	}
}

func TestApplierImplementsInterface(t *testing.T) {
	tests := []struct {
		name    string
		applier thinking.ProviderApplier
	}{
		{"default", NewApplier()},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.applier == nil {
				t.Fatalf("expected thinking.ProviderApplier implementation")
			}
		})
	}
}

func TestApplyNilModelInfo(t *testing.T) {
	applier := NewApplier()

	tests := []struct {
		name string
		body []byte
	}{
		{"nil body", nil},
		{"empty body", []byte{}},
		{"json body", []byte(`{"model":"glm-4.6"}`)},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := applier.Apply(tt.body, thinking.ThinkingConfig{}, nil)
			if err != nil {
				t.Fatalf("expected nil error, got %v", err)
			}
			if !bytes.Equal(got, tt.body) {
				t.Fatalf("expected body unchanged, got %s", string(got))
			}
		})
	}
}

func TestApplyMissingThinkingSupport(t *testing.T) {
	applier := NewApplier()

	tests := []struct {
		name    string
		modelID string
	}{
		{"model id", "glm-4.6"},
		{"empty model id", ""},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			modelInfo := &registry.ModelInfo{ID: tt.modelID}
			body := []byte(`{"model":"` + tt.modelID + `"}`)
			got, err := applier.Apply(body, thinking.ThinkingConfig{}, modelInfo)
			if err != nil {
				t.Fatalf("expected nil error, got %v", err)
			}
			if string(got) != string(body) {
				t.Fatalf("expected body unchanged, got %s", string(got))
			}
		})
	}
}

func TestConfigToBoolean(t *testing.T) {
	tests := []struct {
		name   string
		config thinking.ThinkingConfig
		want   bool
	}{
		{"mode none", thinking.ThinkingConfig{Mode: thinking.ModeNone}, false},
		{"mode auto", thinking.ThinkingConfig{Mode: thinking.ModeAuto}, true},
		{"budget zero", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 0}, false},
		{"budget positive", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 1000}, true},
		{"level none", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelNone}, false},
		{"level minimal", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelMinimal}, true},
		{"level low", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelLow}, true},
		{"level medium", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelMedium}, true},
		{"level high", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelHigh}, true},
		{"level xhigh", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelXHigh}, true},
		{"zero value config", thinking.ThinkingConfig{}, false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := configToBoolean(tt.config); got != tt.want {
				t.Fatalf("configToBoolean(%+v) = %v, want %v", tt.config, got, tt.want)
			}
		})
	}
}

func TestApplyGLM(t *testing.T) {
	applier := NewApplier()

	tests := []struct {
		name         string
		modelID      string
		body         []byte
		config       thinking.ThinkingConfig
		wantEnable   bool
		wantPreserve string
	}{
		{"mode none", "glm-4.6", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeNone}, false, ""},
		{"level none", "glm-4.7", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelNone}, false, ""},
		{"mode auto", "glm-4.6", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeAuto}, true, ""},
		{"level minimal", "glm-4.6", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelMinimal}, true, ""},
		{"level low", "glm-4.7", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelLow}, true, ""},
		{"level medium", "glm-4.6", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelMedium}, true, ""},
		{"level high", "GLM-4.6", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelHigh}, true, ""},
		{"level xhigh", "glm-z1-preview", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelXHigh}, true, ""},
		{"budget zero", "glm-4.6", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 0}, false, ""},
		{"budget 1000", "glm-4.6", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 1000}, true, ""},
		{"preserve fields", "glm-4.6", []byte(`{"model":"glm-4.6","extra":{"keep":true}}`), thinking.ThinkingConfig{Mode: thinking.ModeAuto}, true, "glm-4.6"},
		{"empty body", "glm-4.6", nil, thinking.ThinkingConfig{Mode: thinking.ModeAuto}, true, ""},
		{"malformed json", "glm-4.6", []byte(`{invalid`), thinking.ThinkingConfig{Mode: thinking.ModeAuto}, true, ""},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			modelInfo := &registry.ModelInfo{
				ID:       tt.modelID,
				Thinking: &registry.ThinkingSupport{},
			}
			got, err := applier.Apply(tt.body, tt.config, modelInfo)
			if err != nil {
				t.Fatalf("Apply() error = %v", err)
			}
			if !gjson.ValidBytes(got) {
				t.Fatalf("expected valid JSON, got %s", string(got))
			}

			enableResult := gjson.GetBytes(got, "chat_template_kwargs.enable_thinking")
			if !enableResult.Exists() {
				t.Fatalf("enable_thinking missing")
			}
			gotEnable := enableResult.Bool()
			if gotEnable != tt.wantEnable {
				t.Fatalf("enable_thinking = %v, want %v", gotEnable, tt.wantEnable)
			}

			// clear_thinking only set when enable_thinking=true
			clearResult := gjson.GetBytes(got, "chat_template_kwargs.clear_thinking")
			if tt.wantEnable {
				if !clearResult.Exists() {
					t.Fatalf("clear_thinking missing when enable_thinking=true")
				}
				if clearResult.Bool() {
					t.Fatalf("clear_thinking = %v, want false", clearResult.Bool())
				}
			} else {
				if clearResult.Exists() {
					t.Fatalf("clear_thinking should not exist when enable_thinking=false")
				}
			}

			if tt.wantPreserve != "" {
				gotModel := gjson.GetBytes(got, "model").String()
				if gotModel != tt.wantPreserve {
					t.Fatalf("model = %q, want %q", gotModel, tt.wantPreserve)
				}
				if !gjson.GetBytes(got, "extra.keep").Bool() {
					t.Fatalf("expected extra.keep preserved")
				}
			}
		})
	}
}

func TestApplyMiniMax(t *testing.T) {
	applier := NewApplier()

	tests := []struct {
		name      string
		modelID   string
		body      []byte
		config    thinking.ThinkingConfig
		wantSplit bool
		wantModel string
		wantKeep  bool
	}{
		{"mode none", "minimax-m2", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeNone}, false, "", false},
		{"level none", "minimax-m2.1", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelNone}, false, "", false},
		{"mode auto", "minimax-m2", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeAuto}, true, "", false},
		{"level high", "MINIMAX-M2", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelHigh}, true, "", false},
		{"level low", "minimax-m2.1", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelLow}, true, "", false},
		{"level minimal", "minimax-m2", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelMinimal}, true, "", false},
		{"level medium", "minimax-m2", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelMedium}, true, "", false},
		{"level xhigh", "minimax-m2", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelXHigh}, true, "", false},
		{"budget zero", "minimax-m2", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 0}, false, "", false},
		{"budget 1000", "minimax-m2.1", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 1000}, true, "", false},
		{"unknown level", "minimax-m2", []byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: "unknown"}, true, "", false},
		{"preserve fields", "minimax-m2", []byte(`{"model":"minimax-m2","extra":{"keep":true}}`), thinking.ThinkingConfig{Mode: thinking.ModeAuto}, true, "minimax-m2", true},
		{"empty body", "minimax-m2", nil, thinking.ThinkingConfig{Mode: thinking.ModeAuto}, true, "", false},
		{"malformed json", "minimax-m2", []byte(`{invalid`), thinking.ThinkingConfig{Mode: thinking.ModeAuto}, true, "", false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			modelInfo := &registry.ModelInfo{
				ID:       tt.modelID,
				Thinking: &registry.ThinkingSupport{},
			}
			got, err := applier.Apply(tt.body, tt.config, modelInfo)
			if err != nil {
				t.Fatalf("Apply() error = %v", err)
			}
			if !gjson.ValidBytes(got) {
				t.Fatalf("expected valid JSON, got %s", string(got))
			}

			splitResult := gjson.GetBytes(got, "reasoning_split")
			if !splitResult.Exists() {
				t.Fatalf("reasoning_split missing")
			}
			// Verify JSON type is boolean, not string
			if splitResult.Type != gjson.True && splitResult.Type != gjson.False {
				t.Fatalf("reasoning_split should be boolean, got type %v", splitResult.Type)
			}
			gotSplit := splitResult.Bool()
			if gotSplit != tt.wantSplit {
				t.Fatalf("reasoning_split = %v, want %v", gotSplit, tt.wantSplit)
			}

			if tt.wantModel != "" {
				gotModel := gjson.GetBytes(got, "model").String()
				if gotModel != tt.wantModel {
					t.Fatalf("model = %q, want %q", gotModel, tt.wantModel)
				}
				if tt.wantKeep && !gjson.GetBytes(got, "extra.keep").Bool() {
					t.Fatalf("expected extra.keep preserved")
				}
			}
		})
	}
}

// TestIsGLMModel tests the GLM model detection.
//
// Depends on: Epic 9 Story 9-1
func TestIsGLMModel(t *testing.T) {
	tests := []struct {
		name    string
		model   string
		wantGLM bool
	}{
		{"glm-4.6", "glm-4.6", true},
		{"glm-z1-preview", "glm-z1-preview", true},
		{"glm uppercase", "GLM-4.7", true},
		{"minimax-01", "minimax-01", false},
		{"gpt-5.2", "gpt-5.2", false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := isGLMModel(tt.model); got != tt.wantGLM {
				t.Fatalf("isGLMModel(%q) = %v, want %v", tt.model, got, tt.wantGLM)
			}
		})
	}
}

// TestIsMiniMaxModel tests the MiniMax model detection.
//
// Depends on: Epic 9 Story 9-1
func TestIsMiniMaxModel(t *testing.T) {
	tests := []struct {
		name        string
		model       string
		wantMiniMax bool
	}{
		{"minimax-01", "minimax-01", true},
		{"minimax uppercase", "MINIMAX-M2", true},
		{"glm-4.6", "glm-4.6", false},
		{"gpt-5.2", "gpt-5.2", false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := isMiniMaxModel(tt.model); got != tt.wantMiniMax {
				t.Fatalf("isMiniMaxModel(%q) = %v, want %v", tt.model, got, tt.wantMiniMax)
			}
		})
	}
}
@@ -1,336 +0,0 @@
|
|||||||
// Package openai implements thinking configuration for OpenAI/Codex models.
|
|
||||||
package openai
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
|
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/thinking"
|
|
||||||
"github.com/tidwall/gjson"
|
|
||||||
)
|
|
||||||
|
|
||||||
func buildOpenAIModelInfo(modelID string) *registry.ModelInfo {
|
|
||||||
info := registry.LookupStaticModelInfo(modelID)
|
|
||||||
if info != nil {
|
|
||||||
return info
|
|
||||||
}
|
|
||||||
// Fallback with complete ThinkingSupport matching real OpenAI model capabilities
|
|
||||||
return ®istry.ModelInfo{
|
|
||||||
ID: modelID,
|
|
||||||
Thinking: ®istry.ThinkingSupport{
|
|
||||||
Min: 1024,
|
|
||||||
Max: 32768,
|
|
||||||
ZeroAllowed: true,
|
|
||||||
Levels: []string{"none", "low", "medium", "high", "xhigh"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewApplier(t *testing.T) {
|
|
||||||
applier := NewApplier()
|
|
||||||
if applier == nil {
|
|
||||||
t.Fatalf("expected non-nil applier")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestApplierImplementsInterface(t *testing.T) {
|
|
||||||
_, ok := interface{}(NewApplier()).(thinking.ProviderApplier)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected Applier to implement thinking.ProviderApplier")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestApplyNilModelInfo(t *testing.T) {
|
|
||||||
applier := NewApplier()
|
|
||||||
body := []byte(`{"model":"gpt-5.2"}`)
|
|
||||||
config := thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelHigh}
|
|
||||||
got, err := applier.Apply(body, config, nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected nil error, got %v", err)
|
|
||||||
}
|
|
||||||
// nil modelInfo now applies compatible config
|
|
||||||
if !gjson.GetBytes(got, "reasoning_effort").Exists() {
|
|
||||||
t.Fatalf("expected reasoning_effort applied, got %s", string(got))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestApplyMissingThinkingSupport(t *testing.T) {
|
|
||||||
applier := NewApplier()
|
|
||||||
modelInfo := ®istry.ModelInfo{ID: "gpt-5.2"}
|
|
||||||
body := []byte(`{"model":"gpt-5.2"}`)
|
|
||||||
got, err := applier.Apply(body, thinking.ThinkingConfig{}, modelInfo)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected nil error, got %v", err)
|
|
||||||
}
|
|
||||||
if string(got) != string(body) {
|
|
||||||
t.Fatalf("expected body unchanged, got %s", string(got))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestApplyLevel tests Apply with ModeLevel (unit test, no ValidateConfig).
|
|
||||||
func TestApplyLevel(t *testing.T) {
|
|
||||||
applier := NewApplier()
|
|
||||||
modelInfo := buildOpenAIModelInfo("gpt-5.2")
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
level thinking.ThinkingLevel
|
|
||||||
want string
|
|
||||||
}{
|
|
||||||
{"high", thinking.LevelHigh, "high"},
|
|
||||||
{"medium", thinking.LevelMedium, "medium"},
|
|
||||||
{"low", thinking.LevelLow, "low"},
|
|
||||||
{"xhigh", thinking.LevelXHigh, "xhigh"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
result, err := applier.Apply([]byte(`{}`), thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: tt.level}, modelInfo)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Apply() error = %v", err)
|
|
||||||
}
|
|
||||||
if got := gjson.GetBytes(result, "reasoning_effort").String(); got != tt.want {
|
|
||||||
t.Fatalf("reasoning_effort = %q, want %q", got, tt.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestApplyModeNone tests Apply with ModeNone (unit test).
|
|
||||||
func TestApplyModeNone(t *testing.T) {
|
|
||||||
applier := NewApplier()
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
config thinking.ThinkingConfig
|
|
||||||
modelInfo *registry.ModelInfo
|
|
||||||
want string
|
|
||||||
}{
		{"zero allowed", thinking.ThinkingConfig{Mode: thinking.ModeNone, Budget: 0}, &registry.ModelInfo{ID: "gpt-5.2", Thinking: &registry.ThinkingSupport{ZeroAllowed: true, Levels: []string{"none", "low"}}}, "none"},
		{"clamped to level", thinking.ThinkingConfig{Mode: thinking.ModeNone, Budget: 128, Level: thinking.LevelLow}, &registry.ModelInfo{ID: "gpt-5", Thinking: &registry.ThinkingSupport{Levels: []string{"minimal", "low"}}}, "low"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
result, err := applier.Apply([]byte(`{}`), tt.config, tt.modelInfo)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Apply() error = %v", err)
|
|
||||||
}
|
|
||||||
if got := gjson.GetBytes(result, "reasoning_effort").String(); got != tt.want {
|
|
||||||
t.Fatalf("reasoning_effort = %q, want %q", got, tt.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestApplyPassthrough tests that unsupported modes pass through unchanged.
|
|
||||||
func TestApplyPassthrough(t *testing.T) {
|
|
||||||
applier := NewApplier()
|
|
||||||
modelInfo := buildOpenAIModelInfo("gpt-5.2")
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
config thinking.ThinkingConfig
|
|
||||||
}{
|
|
||||||
{"mode auto", thinking.ThinkingConfig{Mode: thinking.ModeAuto}},
|
|
||||||
{"mode budget", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
body := []byte(`{"model":"gpt-5.2"}`)
|
|
||||||
result, err := applier.Apply(body, tt.config, modelInfo)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Apply() error = %v", err)
|
|
||||||
}
|
|
||||||
if string(result) != string(body) {
|
|
||||||
t.Fatalf("Apply() result = %s, want %s", string(result), string(body))
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestApplyInvalidBody tests Apply with invalid body input.
|
|
||||||
func TestApplyInvalidBody(t *testing.T) {
|
|
||||||
applier := NewApplier()
|
|
||||||
modelInfo := buildOpenAIModelInfo("gpt-5.2")
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
body []byte
|
|
||||||
}{
|
|
||||||
{"nil body", nil},
|
|
||||||
{"empty body", []byte{}},
|
|
||||||
{"invalid json", []byte(`{"not json"`)},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
result, err := applier.Apply(tt.body, thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelHigh}, modelInfo)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Apply() error = %v", err)
|
|
||||||
}
|
|
||||||
if !gjson.ValidBytes(result) {
|
|
||||||
t.Fatalf("Apply() result is not valid JSON: %s", string(result))
|
|
||||||
}
|
|
||||||
if got := gjson.GetBytes(result, "reasoning_effort").String(); got != "high" {
|
|
||||||
t.Fatalf("reasoning_effort = %q, want %q", got, "high")
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestApplyPreservesFields tests that existing body fields are preserved.
|
|
||||||
func TestApplyPreservesFields(t *testing.T) {
|
|
||||||
applier := NewApplier()
|
|
||||||
modelInfo := buildOpenAIModelInfo("gpt-5.2")
|
|
||||||
|
|
||||||
body := []byte(`{"model":"gpt-5.2","messages":[]}`)
|
|
||||||
result, err := applier.Apply(body, thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelLow}, modelInfo)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Apply() error = %v", err)
|
|
||||||
}
|
|
||||||
if got := gjson.GetBytes(result, "model").String(); got != "gpt-5.2" {
|
|
||||||
t.Fatalf("model = %q, want %q", got, "gpt-5.2")
|
|
||||||
}
|
|
||||||
if !gjson.GetBytes(result, "messages").Exists() {
|
|
||||||
t.Fatalf("messages missing from result: %s", string(result))
|
|
||||||
}
|
|
||||||
if got := gjson.GetBytes(result, "reasoning_effort").String(); got != "low" {
|
|
||||||
t.Fatalf("reasoning_effort = %q, want %q", got, "low")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestHasLevel tests the hasLevel helper function.
|
|
||||||
func TestHasLevel(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
levels []string
|
|
||||||
target string
|
|
||||||
want bool
|
|
||||||
}{
|
|
||||||
{"exact match", []string{"low", "medium", "high"}, "medium", true},
|
|
||||||
{"case insensitive", []string{"low", "medium", "high"}, "MEDIUM", true},
|
|
||||||
{"with spaces", []string{"low", " medium ", "high"}, "medium", true},
|
|
||||||
{"not found", []string{"low", "medium", "high"}, "xhigh", false},
|
|
||||||
{"empty levels", []string{}, "medium", false},
|
|
||||||
{"none level", []string{"none", "low", "medium"}, "none", true},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
if got := hasLevel(tt.levels, tt.target); got != tt.want {
|
|
||||||
t.Fatalf("hasLevel(%v, %q) = %v, want %v", tt.levels, tt.target, got, tt.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- End-to-End Tests (ValidateConfig → Apply) ---
|
|
||||||
|
|
||||||
// TestE2EApply tests the full flow: ValidateConfig → Apply.
|
|
||||||
func TestE2EApply(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
model string
|
|
||||||
config thinking.ThinkingConfig
|
|
||||||
want string
|
|
||||||
}{
|
|
||||||
{"level high", "gpt-5.2", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelHigh}, "high"},
|
|
||||||
{"level medium", "gpt-5.2", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelMedium}, "medium"},
|
|
||||||
{"level low", "gpt-5.2", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelLow}, "low"},
|
|
||||||
{"level xhigh", "gpt-5.2", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelXHigh}, "xhigh"},
|
|
||||||
{"mode none", "gpt-5.2", thinking.ThinkingConfig{Mode: thinking.ModeNone, Budget: 0}, "none"},
|
|
||||||
{"budget to level", "gpt-5.2", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}, "medium"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
modelInfo := buildOpenAIModelInfo(tt.model)
|
|
||||||
normalized, err := thinking.ValidateConfig(tt.config, modelInfo.Thinking)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("ValidateConfig() error = %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
applier := NewApplier()
|
|
||||||
result, err := applier.Apply([]byte(`{}`), *normalized, modelInfo)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Apply() error = %v", err)
|
|
||||||
}
|
|
||||||
if got := gjson.GetBytes(result, "reasoning_effort").String(); got != tt.want {
|
|
||||||
t.Fatalf("reasoning_effort = %q, want %q", got, tt.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestE2EApplyOutputFormat tests the full flow with exact JSON output verification.
|
|
||||||
func TestE2EApplyOutputFormat(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
model string
|
|
||||||
config thinking.ThinkingConfig
|
|
||||||
wantJSON string
|
|
||||||
}{
|
|
||||||
{"level high", "gpt-5.2", thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelHigh}, `{"reasoning_effort":"high"}`},
|
|
||||||
{"level none", "gpt-5.2", thinking.ThinkingConfig{Mode: thinking.ModeNone, Budget: 0}, `{"reasoning_effort":"none"}`},
|
|
||||||
{"budget converted", "gpt-5.2", thinking.ThinkingConfig{Mode: thinking.ModeBudget, Budget: 8192}, `{"reasoning_effort":"medium"}`},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
modelInfo := buildOpenAIModelInfo(tt.model)
|
|
||||||
normalized, err := thinking.ValidateConfig(tt.config, modelInfo.Thinking)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("ValidateConfig() error = %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
applier := NewApplier()
|
|
||||||
result, err := applier.Apply([]byte(`{}`), *normalized, modelInfo)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Apply() error = %v", err)
|
|
||||||
}
|
|
||||||
if string(result) != tt.wantJSON {
|
|
||||||
t.Fatalf("Apply() result = %s, want %s", string(result), tt.wantJSON)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestE2EApplyWithExistingBody tests the full flow with existing body fields.
|
|
||||||
func TestE2EApplyWithExistingBody(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
body string
|
|
||||||
config thinking.ThinkingConfig
|
|
||||||
wantEffort string
|
|
||||||
wantModel string
|
|
||||||
}{
|
|
||||||
{"empty body", `{}`, thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelHigh}, "high", ""},
|
|
||||||
{"preserve fields", `{"model":"gpt-5.2","messages":[]}`, thinking.ThinkingConfig{Mode: thinking.ModeLevel, Level: thinking.LevelMedium}, "medium", "gpt-5.2"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
modelInfo := buildOpenAIModelInfo("gpt-5.2")
|
|
||||||
normalized, err := thinking.ValidateConfig(tt.config, modelInfo.Thinking)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("ValidateConfig() error = %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
applier := NewApplier()
|
|
||||||
result, err := applier.Apply([]byte(tt.body), *normalized, modelInfo)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Apply() error = %v", err)
|
|
||||||
}
|
|
||||||
if got := gjson.GetBytes(result, "reasoning_effort").String(); got != tt.wantEffort {
|
|
||||||
t.Fatalf("reasoning_effort = %q, want %q", got, tt.wantEffort)
|
|
||||||
}
|
|
||||||
if tt.wantModel != "" {
|
|
||||||
if got := gjson.GetBytes(result, "model").String(); got != tt.wantModel {
|
|
||||||
t.Fatalf("model = %q, want %q", got, tt.wantModel)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,51 +0,0 @@
// Package thinking_test provides external tests for the thinking package.
//
// This file uses package thinking_test (external) to allow importing provider
// subpackages, which triggers their init() functions to register appliers.
// This avoids import cycles that would occur if thinking package imported providers directly.
package thinking_test

import (
	"testing"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/thinking"

	// Blank imports to trigger provider init() registration
	_ "github.com/router-for-me/CLIProxyAPI/v6/internal/thinking/provider/claude"
	_ "github.com/router-for-me/CLIProxyAPI/v6/internal/thinking/provider/gemini"
	_ "github.com/router-for-me/CLIProxyAPI/v6/internal/thinking/provider/geminicli"
	_ "github.com/router-for-me/CLIProxyAPI/v6/internal/thinking/provider/iflow"
	_ "github.com/router-for-me/CLIProxyAPI/v6/internal/thinking/provider/openai"
)

func TestProviderAppliersBasic(t *testing.T) {
	tests := []struct {
		name     string
		provider string
		wantNil  bool
	}{
		{"gemini provider", "gemini", false},
		{"gemini-cli provider", "gemini-cli", false},
		{"claude provider", "claude", false},
		{"openai provider", "openai", false},
		{"iflow provider", "iflow", false},
		{"antigravity provider", "antigravity", false},
		{"unknown provider", "unknown", true},
		{"empty provider", "", true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := thinking.GetProviderApplier(tt.provider)
			if tt.wantNil {
				if got != nil {
					t.Fatalf("GetProviderApplier(%q) = %T, want nil", tt.provider, got)
				}
				return
			}
			if got == nil {
				t.Fatalf("GetProviderApplier(%q) = nil, want non-nil", tt.provider)
			}
		})
	}
}
@@ -1,66 +0,0 @@
// Package thinking_test provides tests for thinking config stripping.
package thinking_test

import (
	"testing"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/thinking"
	"github.com/tidwall/gjson"
)

func TestStripThinkingConfig(t *testing.T) {
	tests := []struct {
		name      string
		body      string
		provider  string
		stripped  []string
		preserved []string
	}{
		{"claude thinking", `{"thinking":{"budget_tokens":8192},"model":"claude-3"}`, "claude", []string{"thinking"}, []string{"model"}},
		{"gemini thinkingConfig", `{"generationConfig":{"thinkingConfig":{"thinkingBudget":8192},"temperature":0.7}}`, "gemini", []string{"generationConfig.thinkingConfig"}, []string{"generationConfig.temperature"}},
		{"gemini-cli thinkingConfig", `{"request":{"generationConfig":{"thinkingConfig":{"thinkingBudget":8192},"temperature":0.7}}}`, "gemini-cli", []string{"request.generationConfig.thinkingConfig"}, []string{"request.generationConfig.temperature"}},
		{"antigravity thinkingConfig", `{"request":{"generationConfig":{"thinkingConfig":{"thinkingBudget":4096},"maxTokens":1024}}}`, "antigravity", []string{"request.generationConfig.thinkingConfig"}, []string{"request.generationConfig.maxTokens"}},
		{"openai reasoning_effort", `{"reasoning_effort":"high","model":"gpt-5"}`, "openai", []string{"reasoning_effort"}, []string{"model"}},
		{"iflow glm", `{"chat_template_kwargs":{"enable_thinking":true,"clear_thinking":false,"other":"value"}}`, "iflow", []string{"chat_template_kwargs.enable_thinking", "chat_template_kwargs.clear_thinking"}, []string{"chat_template_kwargs.other"}},
		{"iflow minimax", `{"reasoning_split":true,"model":"minimax"}`, "iflow", []string{"reasoning_split"}, []string{"model"}},
		{"iflow both formats", `{"chat_template_kwargs":{"enable_thinking":true,"clear_thinking":false},"reasoning_split":true,"model":"mixed"}`, "iflow", []string{"chat_template_kwargs.enable_thinking", "chat_template_kwargs.clear_thinking", "reasoning_split"}, []string{"model"}},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := thinking.StripThinkingConfig([]byte(tt.body), tt.provider)

			for _, path := range tt.stripped {
				if gjson.GetBytes(got, path).Exists() {
					t.Fatalf("expected %s to be stripped, got %s", path, string(got))
				}
			}
			for _, path := range tt.preserved {
				if !gjson.GetBytes(got, path).Exists() {
					t.Fatalf("expected %s to be preserved, got %s", path, string(got))
				}
			}
		})
	}
}

func TestStripThinkingConfigPassthrough(t *testing.T) {
	tests := []struct {
		name     string
		body     string
		provider string
	}{
		{"empty body", ``, "claude"},
		{"invalid json", `{not valid`, "claude"},
		{"unknown provider", `{"thinking":{"budget_tokens":8192}}`, "unknown"},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := thinking.StripThinkingConfig([]byte(tt.body), tt.provider)
			if string(got) != tt.body {
				t.Fatalf("StripThinkingConfig() = %s, want passthrough %s", string(got), tt.body)
			}
		})
	}
}
@@ -1,313 +0,0 @@
|
|||||||
// Package thinking provides unified thinking configuration processing.
|
|
||||||
package thinking
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestParseSuffix tests the ParseSuffix function.
|
|
||||||
//
|
|
||||||
// ParseSuffix extracts thinking suffix from model name.
|
|
||||||
// Format: model-name(value) where value is the raw suffix content.
|
|
||||||
// This function only extracts; interpretation is done by other functions.
|
|
||||||
func TestParseSuffix(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
model string
|
|
||||||
wantModel string
|
|
||||||
wantSuffix bool
|
|
||||||
wantRaw string
|
|
||||||
}{
|
|
||||||
{"no suffix", "claude-sonnet-4-5", "claude-sonnet-4-5", false, ""},
|
|
||||||
{"numeric suffix", "model(1000)", "model", true, "1000"},
|
|
||||||
{"level suffix", "gpt-5(high)", "gpt-5", true, "high"},
|
|
||||||
{"auto suffix", "gemini-2.5-pro(auto)", "gemini-2.5-pro", true, "auto"},
|
|
||||||
{"none suffix", "model(none)", "model", true, "none"},
|
|
||||||
{"complex model name", "gemini-2.5-flash-lite(8192)", "gemini-2.5-flash-lite", true, "8192"},
|
|
||||||
{"alias with suffix", "g25p(1000)", "g25p", true, "1000"},
|
|
||||||
{"empty suffix", "model()", "model", true, ""},
|
|
||||||
{"nested parens", "model(a(b))", "model(a", true, "b)"},
|
|
||||||
{"no model name", "(1000)", "", true, "1000"},
|
|
||||||
{"unmatched open", "model(", "model(", false, ""},
|
|
||||||
{"unmatched close", "model)", "model)", false, ""},
|
|
||||||
{"paren not at end", "model(1000)extra", "model(1000)extra", false, ""},
|
|
||||||
{"empty string", "", "", false, ""},
|
|
||||||
{"large budget", "claude-opus(128000)", "claude-opus", true, "128000"},
|
|
||||||
{"xhigh level", "gpt-5.2(xhigh)", "gpt-5.2", true, "xhigh"},
|
|
||||||
{"minimal level", "model(minimal)", "model", true, "minimal"},
|
|
||||||
{"medium level", "model(medium)", "model", true, "medium"},
|
|
||||||
{"low level", "model(low)", "model", true, "low"},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
got := ParseSuffix(tt.model)
|
|
||||||
if got.ModelName != tt.wantModel {
|
|
||||||
t.Errorf("ModelName = %q, want %q", got.ModelName, tt.wantModel)
|
|
||||||
}
|
|
||||||
if got.HasSuffix != tt.wantSuffix {
|
|
||||||
t.Errorf("HasSuffix = %v, want %v", got.HasSuffix, tt.wantSuffix)
|
|
||||||
}
|
|
||||||
if got.RawSuffix != tt.wantRaw {
|
|
||||||
t.Errorf("RawSuffix = %q, want %q", got.RawSuffix, tt.wantRaw)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestParseSuffixWithError tests invalid suffix error reporting.
|
|
||||||
func TestParseSuffixWithError(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
model string
|
|
||||||
wantHasSuffix bool
|
|
||||||
}{
|
|
||||||
{"missing close paren", "model(abc", false},
|
|
||||||
{"unmatched close paren", "model)", false},
|
|
||||||
{"paren not at end", "model(1000)extra", false},
|
|
||||||
{"no suffix", "gpt-5", false},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
got, err := ParseSuffixWithError(tt.model)
|
|
||||||
if tt.name == "no suffix" {
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("ParseSuffixWithError(%q) error = %v, want nil", tt.model, err)
|
|
||||||
}
|
|
||||||
if got.HasSuffix != tt.wantHasSuffix {
|
|
||||||
t.Errorf("HasSuffix = %v, want %v", got.HasSuffix, tt.wantHasSuffix)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err == nil {
|
|
||||||
t.Fatalf("ParseSuffixWithError(%q) error = nil, want error", tt.model)
|
|
||||||
}
|
|
||||||
thinkingErr, ok := err.(*ThinkingError)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("ParseSuffixWithError(%q) error type = %T, want *ThinkingError", tt.model, err)
|
|
||||||
}
|
|
||||||
if thinkingErr.Code != ErrInvalidSuffix {
|
|
||||||
t.Errorf("error code = %v, want %v", thinkingErr.Code, ErrInvalidSuffix)
|
|
||||||
}
|
|
||||||
if !strings.Contains(thinkingErr.Message, tt.model) {
|
|
||||||
t.Errorf("message %q does not include input %q", thinkingErr.Message, tt.model)
|
|
||||||
}
|
|
||||||
if got.HasSuffix != tt.wantHasSuffix {
|
|
||||||
t.Errorf("HasSuffix = %v, want %v", got.HasSuffix, tt.wantHasSuffix)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestParseSuffixNumeric tests numeric suffix parsing.
|
|
||||||
//
|
|
||||||
// ParseNumericSuffix parses raw suffix content as integer budget.
|
|
||||||
// Only non-negative integers are valid. Negative numbers return ok=false.
|
|
||||||
func TestParseSuffixNumeric(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
rawSuffix string
|
|
||||||
wantBudget int
|
|
||||||
wantOK bool
|
|
||||||
}{
|
|
||||||
{"small budget", "512", 512, true},
|
|
||||||
{"standard budget", "8192", 8192, true},
|
|
||||||
{"large budget", "100000", 100000, true},
|
|
||||||
{"max int32", "2147483647", 2147483647, true},
|
|
||||||
{"max int64", "9223372036854775807", 9223372036854775807, true},
|
|
||||||
{"zero", "0", 0, true},
|
|
||||||
{"negative one", "-1", 0, false},
|
|
||||||
{"negative", "-100", 0, false},
|
|
||||||
{"int64 overflow", "9223372036854775808", 0, false},
|
|
||||||
{"large overflow", "99999999999999999999", 0, false},
|
|
||||||
{"not a number", "abc", 0, false},
|
|
||||||
{"level string", "high", 0, false},
|
|
||||||
{"float", "1.5", 0, false},
|
|
||||||
{"empty", "", 0, false},
|
|
||||||
{"leading zero", "08192", 8192, true},
|
|
||||||
{"whitespace", " 8192 ", 0, false},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
budget, ok := ParseNumericSuffix(tt.rawSuffix)
|
|
||||||
if budget != tt.wantBudget {
|
|
||||||
t.Errorf("budget = %d, want %d", budget, tt.wantBudget)
|
|
||||||
}
|
|
||||||
if ok != tt.wantOK {
|
|
||||||
t.Errorf("ok = %v, want %v", ok, tt.wantOK)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestParseSuffixLevel tests level suffix parsing.
|
|
||||||
//
|
|
||||||
// ParseLevelSuffix parses raw suffix content as discrete thinking level.
|
|
||||||
// Only effort levels (minimal, low, medium, high, xhigh) are valid.
|
|
||||||
// Special values (none, auto) return ok=false - use ParseSpecialSuffix instead.
|
|
||||||
func TestParseSuffixLevel(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
rawSuffix string
|
|
||||||
wantLevel ThinkingLevel
|
|
||||||
wantOK bool
|
|
||||||
}{
|
|
||||||
{"minimal", "minimal", LevelMinimal, true},
|
|
||||||
{"low", "low", LevelLow, true},
|
|
||||||
{"medium", "medium", LevelMedium, true},
|
|
||||||
{"high", "high", LevelHigh, true},
|
|
||||||
{"xhigh", "xhigh", LevelXHigh, true},
|
|
||||||
{"case HIGH", "HIGH", LevelHigh, true},
|
|
||||||
{"case High", "High", LevelHigh, true},
|
|
||||||
{"case hIgH", "hIgH", LevelHigh, true},
|
|
||||||
{"case MINIMAL", "MINIMAL", LevelMinimal, true},
|
|
||||||
{"case XHigh", "XHigh", LevelXHigh, true},
|
|
||||||
{"none special", "none", "", false},
|
|
||||||
{"auto special", "auto", "", false},
|
|
||||||
{"unknown ultra", "ultra", "", false},
|
|
||||||
{"unknown maximum", "maximum", "", false},
|
|
||||||
{"unknown invalid", "invalid", "", false},
|
|
||||||
{"numeric", "8192", "", false},
|
|
||||||
{"numeric zero", "0", "", false},
|
|
||||||
{"empty", "", "", false},
|
|
||||||
{"whitespace", " high ", "", false},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
level, ok := ParseLevelSuffix(tt.rawSuffix)
|
|
||||||
if level != tt.wantLevel {
|
|
||||||
t.Errorf("level = %q, want %q", level, tt.wantLevel)
|
|
||||||
}
|
|
||||||
if ok != tt.wantOK {
|
|
||||||
t.Errorf("ok = %v, want %v", ok, tt.wantOK)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestParseSuffixSpecialValues tests special value suffix parsing.
|
|
||||||
//
|
|
||||||
// Depends on: Epic 3 Story 3-4 (special value suffix parsing)
|
|
||||||
func TestParseSuffixSpecialValues(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
rawSuffix string
|
|
||||||
wantMode ThinkingMode
|
|
||||||
wantOK bool
|
|
||||||
}{
|
|
||||||
{"none", "none", ModeNone, true},
|
|
||||||
{"auto", "auto", ModeAuto, true},
|
|
||||||
{"negative one", "-1", ModeAuto, true},
|
|
||||||
{"case NONE", "NONE", ModeNone, true},
|
|
||||||
{"case Auto", "Auto", ModeAuto, true},
|
|
||||||
{"case aUtO", "aUtO", ModeAuto, true},
|
|
||||||
{"case NoNe", "NoNe", ModeNone, true},
|
|
||||||
{"empty", "", ModeBudget, false},
|
|
||||||
{"level high", "high", ModeBudget, false},
|
|
||||||
{"numeric", "8192", ModeBudget, false},
|
|
||||||
{"negative other", "-2", ModeBudget, false},
|
|
||||||
{"whitespace", " none ", ModeBudget, false},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
mode, ok := ParseSpecialSuffix(tt.rawSuffix)
|
|
||||||
if mode != tt.wantMode {
|
|
||||||
t.Errorf("mode = %q, want %q", mode, tt.wantMode)
|
|
||||||
}
|
|
||||||
if ok != tt.wantOK {
|
|
||||||
t.Errorf("ok = %v, want %v", ok, tt.wantOK)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestParseSuffixAliasFormats tests alias model suffix parsing.
|
|
||||||
//
|
|
||||||
// This test validates that short model aliases (e.g., g25p, cs45) work correctly
|
|
||||||
// with all suffix types. Alias-to-canonical-model mapping is caller's responsibility.
|
|
||||||
func TestParseSuffixAliasFormats(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string // test case description
|
|
||||||
model string // input model string with optional suffix
|
|
||||||
wantName string // expected ModelName after parsing
|
|
||||||
wantSuffix bool // expected HasSuffix value
|
|
||||||
wantRaw string // expected RawSuffix value
|
|
||||||
checkBudget bool // if true, verify ParseNumericSuffix result
|
|
||||||
wantBudget int // expected budget (only when checkBudget=true)
|
|
||||||
checkLevel bool // if true, verify ParseLevelSuffix result
|
|
||||||
wantLevel ThinkingLevel // expected level (only when checkLevel=true)
|
|
||||||
checkMode bool // if true, verify ParseSpecialSuffix result
|
|
||||||
wantMode ThinkingMode // expected mode (only when checkMode=true)
|
|
||||||
}{
|
|
||||||
// Alias + numeric suffix
|
|
||||||
{"alias numeric g25p", "g25p(1000)", "g25p", true, "1000", true, 1000, false, "", false, 0},
|
|
||||||
{"alias numeric cs45", "cs45(16384)", "cs45", true, "16384", true, 16384, false, "", false, 0},
|
|
||||||
{"alias numeric g3f", "g3f(8192)", "g3f", true, "8192", true, 8192, false, "", false, 0},
|
|
||||||
// Alias + level suffix
|
|
||||||
{"alias level gpt52", "gpt52(high)", "gpt52", true, "high", false, 0, true, LevelHigh, false, 0},
|
|
||||||
{"alias level g25f", "g25f(medium)", "g25f", true, "medium", false, 0, true, LevelMedium, false, 0},
|
|
||||||
{"alias level cs4", "cs4(low)", "cs4", true, "low", false, 0, true, LevelLow, false, 0},
|
|
||||||
// Alias + special suffix
|
|
||||||
{"alias auto g3f", "g3f(auto)", "g3f", true, "auto", false, 0, false, "", true, ModeAuto},
|
|
||||||
{"alias none claude", "claude(none)", "claude", true, "none", false, 0, false, "", true, ModeNone},
|
|
||||||
{"alias -1 g25p", "g25p(-1)", "g25p", true, "-1", false, 0, false, "", true, ModeAuto},
|
|
||||||
// Single char alias
|
|
||||||
{"single char c", "c(1024)", "c", true, "1024", true, 1024, false, "", false, 0},
|
|
||||||
{"single char g", "g(high)", "g", true, "high", false, 0, true, LevelHigh, false, 0},
|
|
||||||
// Alias containing numbers
|
|
||||||
{"alias with num gpt5", "gpt5(medium)", "gpt5", true, "medium", false, 0, true, LevelMedium, false, 0},
|
|
||||||
{"alias with num g25", "g25(1000)", "g25", true, "1000", true, 1000, false, "", false, 0},
|
|
||||||
// Edge cases
|
|
||||||
{"no suffix", "g25p", "g25p", false, "", false, 0, false, "", false, 0},
|
|
||||||
{"empty alias", "(1000)", "", true, "1000", true, 1000, false, "", false, 0},
|
|
||||||
{"hyphen alias", "g-25-p(1000)", "g-25-p", true, "1000", true, 1000, false, "", false, 0},
|
|
||||||
{"underscore alias", "g_25_p(high)", "g_25_p", true, "high", false, 0, true, LevelHigh, false, 0},
|
|
||||||
{"nested parens", "g25p(test)(1000)", "g25p(test)", true, "1000", true, 1000, false, "", false, 0},
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseSuffix only extracts alias and suffix; mapping to canonical model is caller responsibility.
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
result := ParseSuffix(tt.model)
|
|
||||||
|
|
||||||
if result.ModelName != tt.wantName {
|
|
||||||
t.Errorf("ParseSuffix(%q).ModelName = %q, want %q", tt.model, result.ModelName, tt.wantName)
|
|
||||||
}
|
|
||||||
if result.HasSuffix != tt.wantSuffix {
|
|
||||||
t.Errorf("ParseSuffix(%q).HasSuffix = %v, want %v", tt.model, result.HasSuffix, tt.wantSuffix)
|
|
||||||
}
|
|
||||||
if result.RawSuffix != tt.wantRaw {
|
|
||||||
t.Errorf("ParseSuffix(%q).RawSuffix = %q, want %q", tt.model, result.RawSuffix, tt.wantRaw)
|
|
||||||
}
|
|
||||||
|
|
||||||
if result.HasSuffix {
|
|
||||||
if tt.checkBudget {
|
|
||||||
budget, ok := ParseNumericSuffix(result.RawSuffix)
|
|
||||||
if !ok || budget != tt.wantBudget {
|
|
||||||
t.Errorf("ParseNumericSuffix(%q) = (%d, %v), want (%d, true)",
|
|
||||||
result.RawSuffix, budget, ok, tt.wantBudget)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if tt.checkLevel {
|
|
||||||
level, ok := ParseLevelSuffix(result.RawSuffix)
|
|
||||||
if !ok || level != tt.wantLevel {
|
|
||||||
t.Errorf("ParseLevelSuffix(%q) = (%q, %v), want (%q, true)",
|
|
||||||
result.RawSuffix, level, ok, tt.wantLevel)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if tt.checkMode {
|
|
||||||
mode, ok := ParseSpecialSuffix(result.RawSuffix)
|
|
||||||
if !ok || mode != tt.wantMode {
|
|
||||||
t.Errorf("ParseSpecialSuffix(%q) = (%v, %v), want (%v, true)",
|
|
||||||
result.RawSuffix, mode, ok, tt.wantMode)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,349 +0,0 @@
|
|||||||
// Package thinking provides unified thinking configuration processing logic.
|
|
||||||
package thinking
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
logtest "github.com/sirupsen/logrus/hooks/test"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestClampBudget tests the ClampBudget function.
|
|
||||||
//
|
|
||||||
// ClampBudget applies range constraints to a budget value:
|
|
||||||
// - budget < Min → clamp to Min (with Debug log)
|
|
||||||
// - budget > Max → clamp to Max (with Debug log)
|
|
||||||
// - Auto value (-1) passes through unchanged
|
|
||||||
func TestClampBudget(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
value int
|
|
||||||
min int
|
|
||||||
max int
|
|
||||||
want int
|
|
||||||
}{
|
|
||||||
// Within range - no clamping
|
|
||||||
{"within range", 8192, 128, 32768, 8192},
|
|
||||||
{"at min", 128, 128, 32768, 128},
|
|
||||||
{"at max", 32768, 128, 32768, 32768},
|
|
||||||
|
|
||||||
// Below min - clamp to min
|
|
||||||
{"below min", 100, 128, 32768, 128},
|
|
||||||
|
|
||||||
// Above max - clamp to max
|
|
||||||
{"above max", 50000, 128, 32768, 32768},
|
|
||||||
|
|
||||||
// Edge cases
|
|
||||||
{"min equals max", 5000, 5000, 5000, 5000},
|
|
||||||
{"zero min zero value", 0, 0, 100, 0},
|
|
||||||
|
|
||||||
// Auto value (-1) - passes through
|
|
||||||
{"auto value", -1, 128, 32768, -1},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
got := ClampBudget(tt.value, tt.min, tt.max)
|
|
||||||
if got != tt.want {
|
|
||||||
t.Errorf("ClampBudget(%d, %d, %d) = %d, want %d",
|
|
||||||
tt.value, tt.min, tt.max, got, tt.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestZeroAllowedBoundaryHandling tests ZeroAllowed=false edge cases.
|
|
||||||
//
|
|
||||||
// When ZeroAllowed=false and user requests 0, clamp to Min + log Warn.
|
|
||||||
func TestZeroAllowedBoundaryHandling(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
value int
|
|
||||||
min int
|
|
||||||
max int
|
|
||||||
zeroAllowed bool
|
|
||||||
want int
|
|
||||||
}{
|
|
||||||
// ZeroAllowed=true: 0 stays 0
|
|
||||||
{"zero allowed - keep zero", 0, 128, 32768, true, 0},
|
|
||||||
|
|
||||||
// ZeroAllowed=false: 0 clamps to min
|
|
||||||
{"zero not allowed - clamp to min", 0, 128, 32768, false, 128},
|
|
||||||
|
|
||||||
// ZeroAllowed=false but non-zero value: normal clamping
|
|
||||||
{"zero not allowed - positive value", 8192, 1024, 100000, false, 8192},
|
|
||||||
|
|
||||||
// Auto value (-1) always passes through
|
|
||||||
{"auto value", -1, 128, 32768, false, -1},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
got := ClampBudgetWithZeroCheck(tt.value, tt.min, tt.max, tt.zeroAllowed)
|
|
||||||
if got != tt.want {
|
|
||||||
t.Errorf("ClampBudgetWithZeroCheck(%d, %d, %d, %v) = %d, want %d",
|
|
||||||
tt.value, tt.min, tt.max, tt.zeroAllowed, got, tt.want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestValidateConfigFramework verifies the ValidateConfig function framework.
|
|
||||||
// This test is merged into TestValidateConfig for consolidation.
|
|
||||||
|
|
||||||
// TestValidateConfigNotSupported verifies nil support handling.
|
|
||||||
// This test is merged into TestValidateConfig for consolidation.
|
|
||||||
|
|
||||||
// TestValidateConfigConversion verifies mode conversion based on capability.
|
|
||||||
// This test is merged into TestValidateConfig for consolidation.
|
|
||||||
|
|
||||||
// TestValidateConfigLevelSupport verifies level list validation.
|
|
||||||
// This test is merged into TestValidateConfig for consolidation.
|
|
||||||
|
|
||||||
// TestValidateConfigClamping verifies budget clamping behavior.
|
|
||||||
// This test is merged into TestValidateConfig for consolidation.
|
|
||||||
|
|
||||||
// TestValidateConfig is the comprehensive test for ValidateConfig function.
|
|
||||||
//
|
|
||||||
// ValidateConfig checks if a ThinkingConfig is valid for a given model.
|
|
||||||
// This test covers all validation scenarios including:
|
|
||||||
// - Framework basics (nil support with ModeNone)
|
|
||||||
// - Error cases (thinking not supported, level not supported, dynamic not allowed)
|
|
||||||
// - Mode conversion (budget-only, level-only, hybrid)
|
|
||||||
// - Budget clamping (to max, to min)
|
|
||||||
// - ZeroAllowed boundary handling (ModeNone with ZeroAllowed=false)
|
|
||||||
// - DynamicAllowed validation
|
|
||||||
//
|
|
||||||
// Depends on: Epic 5 Story 5-3 (config validity validation)
|
|
||||||
func TestValidateConfig(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
config ThinkingConfig
|
|
||||||
support *registry.ThinkingSupport
|
|
||||||
wantMode ThinkingMode
|
|
||||||
wantBudget int
|
|
||||||
wantLevel ThinkingLevel
|
|
||||||
wantErr bool
|
|
||||||
wantCode ErrorCode
|
|
||||||
}{
		// Framework basics
		{"nil support mode none", ThinkingConfig{Mode: ModeNone, Budget: 0}, nil, ModeNone, 0, "", false, ""},

		// Valid configs - no conversion needed
		{"budget-only keeps budget", ThinkingConfig{Mode: ModeBudget, Budget: 8192}, &registry.ThinkingSupport{Min: 1024, Max: 100000}, ModeBudget, 8192, "", false, ""},

		// Auto-conversion: Level → Budget
		{"budget-only converts level", ThinkingConfig{Mode: ModeLevel, Level: LevelHigh}, &registry.ThinkingSupport{Min: 1024, Max: 100000}, ModeBudget, 24576, "", false, ""},

		// Auto-conversion: Budget → Level
		{"level-only converts budget", ThinkingConfig{Mode: ModeBudget, Budget: 5000}, &registry.ThinkingSupport{Levels: []string{"low", "medium", "high"}}, ModeLevel, 0, LevelMedium, false, ""},

		// Hybrid preserves original format
		{"hybrid preserves level", ThinkingConfig{Mode: ModeLevel, Level: LevelLow}, &registry.ThinkingSupport{Min: 128, Max: 32768, Levels: []string{"low", "high"}}, ModeLevel, 0, LevelLow, false, ""},

		// Budget clamping
		{"budget clamped to max", ThinkingConfig{Mode: ModeBudget, Budget: 200000}, &registry.ThinkingSupport{Min: 1024, Max: 100000}, ModeBudget, 100000, "", false, ""},
		{"budget clamped to min", ThinkingConfig{Mode: ModeBudget, Budget: 100}, &registry.ThinkingSupport{Min: 1024, Max: 100000}, ModeBudget, 1024, "", false, ""},

		// Error: thinking not supported
		{"thinking not supported", ThinkingConfig{Mode: ModeBudget, Budget: 8192}, nil, 0, 0, "", true, ErrThinkingNotSupported},

		// Error: level not in list
		{"level not supported", ThinkingConfig{Mode: ModeLevel, Level: LevelXHigh}, &registry.ThinkingSupport{Levels: []string{"low", "medium", "high"}}, 0, 0, "", true, ErrLevelNotSupported},

		// Level case-insensitive
		{"level supported case-insensitive", ThinkingConfig{Mode: ModeLevel, Level: ThinkingLevel("HIGH")}, &registry.ThinkingSupport{Levels: []string{"low", "medium", "high"}}, ModeLevel, 0, ThinkingLevel("HIGH"), false, ""},

		// ModeAuto with DynamicAllowed
		{"auto with dynamic allowed", ThinkingConfig{Mode: ModeAuto, Budget: -1}, &registry.ThinkingSupport{Min: 128, Max: 32768, DynamicAllowed: true}, ModeAuto, -1, "", false, ""},

		// ModeAuto with DynamicAllowed=false - converts to mid-range (M3)
		{"auto with dynamic not allowed", ThinkingConfig{Mode: ModeAuto, Budget: -1}, &registry.ThinkingSupport{Min: 128, Max: 32768, DynamicAllowed: false}, ModeBudget, 16448, "", false, ""},

		// ModeNone with ZeroAllowed=true - stays as ModeNone
		{"mode none with zero allowed", ThinkingConfig{Mode: ModeNone, Budget: 0}, &registry.ThinkingSupport{Min: 1024, Max: 100000, ZeroAllowed: true}, ModeNone, 0, "", false, ""},

		// Budget=0 converts to ModeNone before clamping (M1)
		{"budget zero converts to none", ThinkingConfig{Mode: ModeBudget, Budget: 0}, &registry.ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false}, ModeNone, 128, "", false, ""},

		// Level=none converts to ModeNone before clamping, then Level set to lowest
		{"level none converts to none", ThinkingConfig{Mode: ModeLevel, Level: LevelNone}, &registry.ThinkingSupport{Min: 128, Max: 32768, Levels: []string{"low", "high"}, ZeroAllowed: false}, ModeNone, 128, ThinkingLevel("low"), false, ""},
		{"level auto converts to auto", ThinkingConfig{Mode: ModeLevel, Level: LevelAuto}, &registry.ThinkingSupport{Min: 128, Max: 32768, Levels: []string{"low", "high"}, DynamicAllowed: true}, ModeAuto, -1, "", false, ""},
		// M1: Level=auto with DynamicAllowed=false - converts to mid-range budget
		{"level auto with dynamic not allowed", ThinkingConfig{Mode: ModeLevel, Level: LevelAuto}, &registry.ThinkingSupport{Min: 128, Max: 32768, Levels: []string{"low", "high"}, DynamicAllowed: false}, ModeBudget, 16448, "", false, ""},
		// M2: Level=auto on Budget-only model (no Levels)
		{"level auto on budget-only model", ThinkingConfig{Mode: ModeLevel, Level: LevelAuto}, &registry.ThinkingSupport{Min: 128, Max: 32768, DynamicAllowed: true}, ModeAuto, -1, "", false, ""},

		// ModeNone with ZeroAllowed=false - clamps to min but preserves ModeNone (M1)
		{"mode none with zero not allowed - preserve mode", ThinkingConfig{Mode: ModeNone, Budget: 0}, &registry.ThinkingSupport{Min: 1024, Max: 100000, ZeroAllowed: false}, ModeNone, 1024, "", false, ""},

		// ModeNone with clamped Budget > 0 and Levels: sets Level to lowest
		{"mode none clamped with levels", ThinkingConfig{Mode: ModeNone, Budget: 0}, &registry.ThinkingSupport{Min: 128, Max: 32768, Levels: []string{"low", "high"}, ZeroAllowed: false}, ModeNone, 128, ThinkingLevel("low"), false, ""},
	}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
got, err := ValidateConfig(tt.config, tt.support)
|
|
||||||
if tt.wantErr {
|
|
||||||
if err == nil {
|
|
||||||
t.Fatalf("ValidateConfig(%+v, support) error = nil, want %v", tt.config, tt.wantCode)
|
|
||||||
}
|
|
||||||
thinkingErr, ok := err.(*ThinkingError)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("ValidateConfig(%+v, support) error type = %T, want *ThinkingError", tt.config, err)
|
|
||||||
}
|
|
||||||
if thinkingErr.Code != tt.wantCode {
|
|
||||||
t.Errorf("ValidateConfig(%+v, support) code = %v, want %v", tt.config, thinkingErr.Code, tt.wantCode)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("ValidateConfig(%+v, support) returned error: %v", tt.config, err)
|
|
||||||
}
|
|
||||||
if got == nil {
|
|
||||||
t.Fatalf("ValidateConfig(%+v, support) returned nil config", tt.config)
|
|
||||||
}
|
|
||||||
if got.Mode != tt.wantMode {
|
|
||||||
t.Errorf("ValidateConfig(%+v, support) Mode = %v, want %v", tt.config, got.Mode, tt.wantMode)
|
|
||||||
}
|
|
||||||
if got.Budget != tt.wantBudget {
|
|
||||||
t.Errorf("ValidateConfig(%+v, support) Budget = %d, want %d", tt.config, got.Budget, tt.wantBudget)
|
|
||||||
}
|
|
||||||
if got.Level != tt.wantLevel {
|
|
||||||
t.Errorf("ValidateConfig(%+v, support) Level = %q, want %q", tt.config, got.Level, tt.wantLevel)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestValidationErrorMessages tests error message formatting.
|
|
||||||
//
|
|
||||||
// Error messages should:
|
|
||||||
// - Be lowercase
|
|
||||||
// - Have no trailing period
|
|
||||||
// - Include context with %s/%d
|
|
||||||
//
|
|
||||||
// Depends on: Epic 5 Story 5-4 (validation error messages)
|
|
||||||
func TestValidationErrorMessages(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
getErr func() error
|
|
||||||
wantCode ErrorCode
|
|
||||||
wantContains string
|
|
||||||
}{
|
|
||||||
{"invalid suffix", func() error {
|
|
||||||
_, err := ParseSuffixWithError("model(abc")
|
|
||||||
return err
|
|
||||||
}, ErrInvalidSuffix, "model(abc"},
|
|
||||||
{"level not supported", func() error {
|
|
||||||
_, err := ValidateConfig(ThinkingConfig{Mode: ModeLevel, Level: LevelXHigh}, &registry.ThinkingSupport{Levels: []string{"low", "medium", "high"}})
|
|
||||||
return err
|
|
||||||
}, ErrLevelNotSupported, "valid levels: low, medium, high"},
|
|
||||||
{"thinking not supported", func() error {
|
|
||||||
_, err := ValidateConfig(ThinkingConfig{Mode: ModeBudget, Budget: 1024}, nil)
|
|
||||||
return err
|
|
||||||
}, ErrThinkingNotSupported, "thinking not supported for this model"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
err := tt.getErr()
|
|
||||||
if err == nil {
|
|
||||||
t.Fatalf("error = nil, want ThinkingError")
|
|
||||||
}
|
|
||||||
thinkingErr, ok := err.(*ThinkingError)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("error type = %T, want *ThinkingError", err)
|
|
||||||
}
|
|
||||||
if thinkingErr.Code != tt.wantCode {
|
|
||||||
t.Errorf("code = %v, want %v", thinkingErr.Code, tt.wantCode)
|
|
||||||
}
|
|
||||||
if thinkingErr.Message == "" {
|
|
||||||
t.Fatalf("message is empty")
|
|
||||||
}
|
|
||||||
first, _ := utf8.DecodeRuneInString(thinkingErr.Message)
|
|
||||||
if unicode.IsLetter(first) && !unicode.IsLower(first) {
|
|
||||||
t.Errorf("message does not start with lowercase: %q", thinkingErr.Message)
|
|
||||||
}
|
|
||||||
if strings.HasSuffix(thinkingErr.Message, ".") {
|
|
||||||
t.Errorf("message has trailing period: %q", thinkingErr.Message)
|
|
||||||
}
|
|
||||||
if !strings.Contains(thinkingErr.Message, tt.wantContains) {
|
|
||||||
t.Errorf("message %q does not contain %q", thinkingErr.Message, tt.wantContains)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestClampingLogging tests that clamping produces correct log entries.
|
|
||||||
//
|
|
||||||
// Clamping behavior:
|
|
||||||
// - Normal clamp (budget outside range) → Debug log
|
|
||||||
// - ZeroAllowed=false + zero request → Warn log
|
|
||||||
//
|
|
||||||
// Depends on: Epic 5 Story 5-1, 5-2
|
|
||||||
func TestClampingLogging(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
useZeroCheck bool
|
|
||||||
budget int
|
|
||||||
min int
|
|
||||||
max int
|
|
||||||
zeroAllowed bool
|
|
||||||
wantLevel log.Level
|
|
||||||
wantReason string
|
|
||||||
wantClamped int
|
|
||||||
}{
|
|
||||||
{"above max - debug", false, 50000, 128, 32768, false, log.DebugLevel, "", 32768},
|
|
||||||
{"below min - debug", false, 50, 128, 32768, false, log.DebugLevel, "", 128},
|
|
||||||
{"zero not allowed - warn", true, 0, 128, 32768, false, log.WarnLevel, "zero_not_allowed", 128},
|
|
||||||
}
|
|
||||||
|
|
||||||
logger := log.StandardLogger()
|
|
||||||
originalLevel := logger.GetLevel()
|
|
||||||
logger.SetLevel(log.DebugLevel)
|
|
||||||
hook := logtest.NewLocal(logger)
|
|
||||||
t.Cleanup(func() {
|
|
||||||
logger.SetLevel(originalLevel)
|
|
||||||
hook.Reset()
|
|
||||||
})
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
hook.Reset()
|
|
||||||
var got int
|
|
||||||
if tt.useZeroCheck {
|
|
||||||
got = ClampBudgetWithZeroCheck(tt.budget, tt.min, tt.max, tt.zeroAllowed)
|
|
||||||
} else {
|
|
||||||
got = ClampBudget(tt.budget, tt.min, tt.max)
|
|
||||||
}
|
|
||||||
if got != tt.wantClamped {
|
|
||||||
t.Fatalf("clamped budget = %d, want %d", got, tt.wantClamped)
|
|
||||||
}
|
|
||||||
|
|
||||||
entry := hook.LastEntry()
|
|
||||||
if entry == nil {
|
|
||||||
t.Fatalf("no log entry captured")
|
|
||||||
}
|
|
||||||
if entry.Level != tt.wantLevel {
|
|
||||||
t.Errorf("log level = %v, want %v", entry.Level, tt.wantLevel)
|
|
||||||
}
|
|
||||||
|
|
||||||
fields := []string{"original_value", "clamped_to", "min", "max"}
|
|
||||||
for _, key := range fields {
|
|
||||||
if _, ok := entry.Data[key]; !ok {
|
|
||||||
t.Errorf("missing field %q", key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if tt.wantReason != "" {
|
|
||||||
if value, ok := entry.Data["reason"]; !ok || value != tt.wantReason {
|
|
||||||
t.Errorf("reason = %v, want %v", value, tt.wantReason)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,423 +0,0 @@
|
|||||||
package test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
|
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
|
||||||
"github.com/tidwall/gjson"
|
|
||||||
)
|
|
||||||
|
|
||||||
// registerGemini3Models loads Gemini 3 models into the registry for testing.
|
|
||||||
func registerGemini3Models(t *testing.T) func() {
|
|
||||||
t.Helper()
|
|
||||||
reg := registry.GetGlobalRegistry()
|
|
||||||
uid := fmt.Sprintf("gemini3-test-%d", time.Now().UnixNano())
|
|
||||||
reg.RegisterClient(uid+"-gemini", "gemini", registry.GetGeminiModels())
|
|
||||||
reg.RegisterClient(uid+"-aistudio", "aistudio", registry.GetAIStudioModels())
|
|
||||||
return func() {
|
|
||||||
reg.UnregisterClient(uid + "-gemini")
|
|
||||||
reg.UnregisterClient(uid + "-aistudio")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestIsGemini3Model(t *testing.T) {
|
|
||||||
cases := []struct {
|
|
||||||
model string
|
|
||||||
expected bool
|
|
||||||
}{
|
|
||||||
{"gemini-3-pro-preview", true},
|
|
||||||
{"gemini-3-flash-preview", true},
|
|
||||||
{"gemini_3_pro_preview", true},
|
|
||||||
{"gemini-3-pro", true},
|
|
||||||
{"gemini-3-flash", true},
|
|
||||||
{"GEMINI-3-PRO-PREVIEW", true},
|
|
||||||
{"gemini-2.5-pro", false},
|
|
||||||
{"gemini-2.5-flash", false},
|
|
||||||
{"gpt-5", false},
|
|
||||||
{"claude-sonnet-4-5", false},
|
|
||||||
{"", false},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, cs := range cases {
|
|
||||||
t.Run(cs.model, func(t *testing.T) {
|
|
||||||
got := util.IsGemini3Model(cs.model)
|
|
||||||
if got != cs.expected {
|
|
||||||
t.Fatalf("IsGemini3Model(%q) = %v, want %v", cs.model, got, cs.expected)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestIsGemini3ProModel(t *testing.T) {
|
|
||||||
cases := []struct {
|
|
||||||
model string
|
|
||||||
expected bool
|
|
||||||
}{
|
|
||||||
{"gemini-3-pro-preview", true},
|
|
||||||
{"gemini_3_pro_preview", true},
|
|
||||||
{"gemini-3-pro", true},
|
|
||||||
{"GEMINI-3-PRO-PREVIEW", true},
|
|
||||||
{"gemini-3-flash-preview", false},
|
|
||||||
{"gemini-3-flash", false},
|
|
||||||
{"gemini-2.5-pro", false},
|
|
||||||
{"", false},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, cs := range cases {
|
|
||||||
t.Run(cs.model, func(t *testing.T) {
|
|
||||||
got := util.IsGemini3ProModel(cs.model)
|
|
||||||
if got != cs.expected {
|
|
||||||
t.Fatalf("IsGemini3ProModel(%q) = %v, want %v", cs.model, got, cs.expected)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestIsGemini3FlashModel(t *testing.T) {
|
|
||||||
cases := []struct {
|
|
||||||
model string
|
|
||||||
expected bool
|
|
||||||
}{
|
|
||||||
{"gemini-3-flash-preview", true},
|
|
||||||
{"gemini_3_flash_preview", true},
|
|
||||||
{"gemini-3-flash", true},
|
|
||||||
{"GEMINI-3-FLASH-PREVIEW", true},
|
|
||||||
{"gemini-3-pro-preview", false},
|
|
||||||
{"gemini-3-pro", false},
|
|
||||||
{"gemini-2.5-flash", false},
|
|
||||||
{"", false},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, cs := range cases {
|
|
||||||
t.Run(cs.model, func(t *testing.T) {
|
|
||||||
got := util.IsGemini3FlashModel(cs.model)
|
|
||||||
if got != cs.expected {
|
|
||||||
t.Fatalf("IsGemini3FlashModel(%q) = %v, want %v", cs.model, got, cs.expected)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestValidateGemini3ThinkingLevel(t *testing.T) {
|
|
||||||
cases := []struct {
|
|
||||||
name string
|
|
||||||
model string
|
|
||||||
level string
|
|
||||||
wantOK bool
|
|
||||||
wantVal string
|
|
||||||
}{
|
|
||||||
// Gemini 3 Pro: supports "low", "high"
|
|
||||||
{"pro-low", "gemini-3-pro-preview", "low", true, "low"},
|
|
||||||
{"pro-high", "gemini-3-pro-preview", "high", true, "high"},
|
|
||||||
{"pro-minimal-invalid", "gemini-3-pro-preview", "minimal", false, ""},
|
|
||||||
{"pro-medium-invalid", "gemini-3-pro-preview", "medium", false, ""},
|
|
||||||
|
|
||||||
// Gemini 3 Flash: supports "minimal", "low", "medium", "high"
|
|
||||||
{"flash-minimal", "gemini-3-flash-preview", "minimal", true, "minimal"},
|
|
||||||
{"flash-low", "gemini-3-flash-preview", "low", true, "low"},
|
|
||||||
{"flash-medium", "gemini-3-flash-preview", "medium", true, "medium"},
|
|
||||||
{"flash-high", "gemini-3-flash-preview", "high", true, "high"},
|
|
||||||
|
|
||||||
// Case insensitivity
|
|
||||||
{"flash-LOW-case", "gemini-3-flash-preview", "LOW", true, "low"},
|
|
||||||
{"flash-High-case", "gemini-3-flash-preview", "High", true, "high"},
|
|
||||||
{"pro-HIGH-case", "gemini-3-pro-preview", "HIGH", true, "high"},
|
|
||||||
|
|
||||||
// Invalid levels
|
|
||||||
{"flash-invalid", "gemini-3-flash-preview", "xhigh", false, ""},
|
|
||||||
{"flash-invalid-auto", "gemini-3-flash-preview", "auto", false, ""},
|
|
||||||
{"flash-empty", "gemini-3-flash-preview", "", false, ""},
|
|
||||||
|
|
||||||
// Non-Gemini 3 models
|
|
||||||
{"non-gemini3", "gemini-2.5-pro", "high", false, ""},
|
|
||||||
{"gpt5", "gpt-5", "high", false, ""},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, cs := range cases {
|
|
||||||
t.Run(cs.name, func(t *testing.T) {
|
|
||||||
got, ok := util.ValidateGemini3ThinkingLevel(cs.model, cs.level)
|
|
||||||
if ok != cs.wantOK {
|
|
||||||
t.Fatalf("ValidateGemini3ThinkingLevel(%q, %q) ok = %v, want %v", cs.model, cs.level, ok, cs.wantOK)
|
|
||||||
}
|
|
||||||
if got != cs.wantVal {
|
|
||||||
t.Fatalf("ValidateGemini3ThinkingLevel(%q, %q) = %q, want %q", cs.model, cs.level, got, cs.wantVal)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestThinkingBudgetToGemini3Level(t *testing.T) {
|
|
||||||
cases := []struct {
|
|
||||||
name string
|
|
||||||
model string
|
|
||||||
budget int
|
|
||||||
wantOK bool
|
|
||||||
wantVal string
|
|
||||||
}{
|
|
||||||
// Gemini 3 Pro: maps to "low" or "high"
|
|
||||||
{"pro-dynamic", "gemini-3-pro-preview", -1, true, "high"},
|
|
||||||
{"pro-zero", "gemini-3-pro-preview", 0, true, "low"},
|
|
||||||
{"pro-small", "gemini-3-pro-preview", 1000, true, "low"},
|
|
||||||
{"pro-medium", "gemini-3-pro-preview", 8000, true, "low"},
|
|
||||||
{"pro-large", "gemini-3-pro-preview", 20000, true, "high"},
|
|
||||||
{"pro-huge", "gemini-3-pro-preview", 50000, true, "high"},
|
|
||||||
|
|
||||||
// Gemini 3 Flash: maps to "minimal", "low", "medium", "high"
|
|
||||||
{"flash-dynamic", "gemini-3-flash-preview", -1, true, "high"},
|
|
||||||
{"flash-zero", "gemini-3-flash-preview", 0, true, "minimal"},
|
|
||||||
{"flash-tiny", "gemini-3-flash-preview", 500, true, "minimal"},
|
|
||||||
{"flash-small", "gemini-3-flash-preview", 1000, true, "low"},
|
|
||||||
{"flash-medium-val", "gemini-3-flash-preview", 8000, true, "medium"},
|
|
||||||
{"flash-large", "gemini-3-flash-preview", 20000, true, "high"},
|
|
||||||
{"flash-huge", "gemini-3-flash-preview", 50000, true, "high"},
|
|
||||||
|
|
||||||
// Non-Gemini 3 models should return false
|
|
||||||
{"gemini25-budget", "gemini-2.5-pro", 8000, false, ""},
|
|
||||||
{"gpt5-budget", "gpt-5", 8000, false, ""},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, cs := range cases {
|
|
||||||
t.Run(cs.name, func(t *testing.T) {
|
|
||||||
got, ok := util.ThinkingBudgetToGemini3Level(cs.model, cs.budget)
|
|
||||||
if ok != cs.wantOK {
|
|
||||||
t.Fatalf("ThinkingBudgetToGemini3Level(%q, %d) ok = %v, want %v", cs.model, cs.budget, ok, cs.wantOK)
|
|
||||||
}
|
|
||||||
if got != cs.wantVal {
|
|
||||||
t.Fatalf("ThinkingBudgetToGemini3Level(%q, %d) = %q, want %q", cs.model, cs.budget, got, cs.wantVal)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestApplyGemini3ThinkingLevelFromMetadata(t *testing.T) {
	cleanup := registerGemini3Models(t)
	defer cleanup()

	cases := []struct {
		name         string
		model        string
		metadata     map[string]any
		inputBody    string
		wantLevel    string
		wantInclude  bool
		wantNoChange bool
	}{
		{
			name:        "flash-minimal-from-suffix",
			model:       "gemini-3-flash-preview",
			metadata:    map[string]any{"reasoning_effort": "minimal"},
			inputBody:   `{"generationConfig":{"thinkingConfig":{"includeThoughts":true}}}`,
			wantLevel:   "minimal",
			wantInclude: true,
		},
		{
			name:        "flash-medium-from-suffix",
			model:       "gemini-3-flash-preview",
			metadata:    map[string]any{"reasoning_effort": "medium"},
			inputBody:   `{"generationConfig":{"thinkingConfig":{"includeThoughts":true}}}`,
			wantLevel:   "medium",
			wantInclude: true,
		},
		{
			name:        "pro-high-from-suffix",
			model:       "gemini-3-pro-preview",
			metadata:    map[string]any{"reasoning_effort": "high"},
			inputBody:   `{"generationConfig":{"thinkingConfig":{"includeThoughts":true}}}`,
			wantLevel:   "high",
			wantInclude: true,
		},
		{
			name:         "no-metadata-no-change",
			model:        "gemini-3-flash-preview",
			metadata:     nil,
			inputBody:    `{"generationConfig":{"thinkingConfig":{"includeThoughts":true}}}`,
			wantNoChange: true,
		},
		{
			name:         "non-gemini3-no-change",
			model:        "gemini-2.5-pro",
			metadata:     map[string]any{"reasoning_effort": "high"},
			inputBody:    `{"generationConfig":{"thinkingConfig":{"thinkingBudget":-1}}}`,
			wantNoChange: true,
		},
		{
			name:         "invalid-level-no-change",
			model:        "gemini-3-flash-preview",
			metadata:     map[string]any{"reasoning_effort": "xhigh"},
			inputBody:    `{"generationConfig":{"thinkingConfig":{"includeThoughts":true}}}`,
			wantNoChange: true,
		},
	}

	for _, cs := range cases {
		t.Run(cs.name, func(t *testing.T) {
			input := []byte(cs.inputBody)
			result := util.ApplyGemini3ThinkingLevelFromMetadata(cs.model, cs.metadata, input)

			if cs.wantNoChange {
				if string(result) != cs.inputBody {
					t.Fatalf("expected no change, but got: %s", string(result))
				}
				return
			}

			level := gjson.GetBytes(result, "generationConfig.thinkingConfig.thinkingLevel")
			if !level.Exists() {
				t.Fatalf("thinkingLevel not set in result: %s", string(result))
			}
			if level.String() != cs.wantLevel {
				t.Fatalf("thinkingLevel = %q, want %q", level.String(), cs.wantLevel)
			}

			include := gjson.GetBytes(result, "generationConfig.thinkingConfig.includeThoughts")
			if cs.wantInclude && (!include.Exists() || !include.Bool()) {
				t.Fatalf("includeThoughts should be true, got: %s", string(result))
			}
		})
	}
}

func TestApplyGemini3ThinkingLevelFromMetadataCLI(t *testing.T) {
	cleanup := registerGemini3Models(t)
	defer cleanup()

	cases := []struct {
		name         string
		model        string
		metadata     map[string]any
		inputBody    string
		wantLevel    string
		wantInclude  bool
		wantNoChange bool
	}{
		{
			name:        "flash-minimal-from-suffix-cli",
			model:       "gemini-3-flash-preview",
			metadata:    map[string]any{"reasoning_effort": "minimal"},
			inputBody:   `{"request":{"generationConfig":{"thinkingConfig":{"includeThoughts":true}}}}`,
			wantLevel:   "minimal",
			wantInclude: true,
		},
		{
			name:        "flash-low-from-suffix-cli",
			model:       "gemini-3-flash-preview",
			metadata:    map[string]any{"reasoning_effort": "low"},
			inputBody:   `{"request":{"generationConfig":{"thinkingConfig":{"includeThoughts":true}}}}`,
			wantLevel:   "low",
			wantInclude: true,
		},
		{
			name:        "pro-low-from-suffix-cli",
			model:       "gemini-3-pro-preview",
			metadata:    map[string]any{"reasoning_effort": "low"},
			inputBody:   `{"request":{"generationConfig":{"thinkingConfig":{"includeThoughts":true}}}}`,
			wantLevel:   "low",
			wantInclude: true,
		},
		{
			name:         "no-metadata-no-change-cli",
			model:        "gemini-3-flash-preview",
			metadata:     nil,
			inputBody:    `{"request":{"generationConfig":{"thinkingConfig":{"includeThoughts":true}}}}`,
			wantNoChange: true,
		},
		{
			name:         "non-gemini3-no-change-cli",
			model:        "gemini-2.5-pro",
			metadata:     map[string]any{"reasoning_effort": "high"},
			inputBody:    `{"request":{"generationConfig":{"thinkingConfig":{"thinkingBudget":-1}}}}`,
			wantNoChange: true,
		},
	}

	for _, cs := range cases {
		t.Run(cs.name, func(t *testing.T) {
			input := []byte(cs.inputBody)
			result := util.ApplyGemini3ThinkingLevelFromMetadataCLI(cs.model, cs.metadata, input)

			if cs.wantNoChange {
				if string(result) != cs.inputBody {
					t.Fatalf("expected no change, but got: %s", string(result))
				}
				return
			}

			level := gjson.GetBytes(result, "request.generationConfig.thinkingConfig.thinkingLevel")
			if !level.Exists() {
				t.Fatalf("thinkingLevel not set in result: %s", string(result))
			}
			if level.String() != cs.wantLevel {
				t.Fatalf("thinkingLevel = %q, want %q", level.String(), cs.wantLevel)
			}

			include := gjson.GetBytes(result, "request.generationConfig.thinkingConfig.includeThoughts")
			if cs.wantInclude && (!include.Exists() || !include.Bool()) {
				t.Fatalf("includeThoughts should be true, got: %s", string(result))
			}
		})
	}
}

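// The two tests above exercise the same behaviour at two JSON roots: the plain
// payload ("generationConfig...") and the CLI envelope ("request.generationConfig...").
// A minimal sketch of that behaviour follows; applyLevelFromMetadata and its
// parameters are illustrative assumptions, not the real
// util.ApplyGemini3ThinkingLevelFromMetadata implementation.
//
// package sketch
//
// import "github.com/tidwall/sjson"
//
// // applyLevelFromMetadata copies a validated reasoning_effort value from the
// // request metadata into thinkingLevel under the given JSON prefix, returning
// // the body unchanged when nothing applies. The non-CLI path would presumably
// // pass prefix "generationConfig", the CLI path "request.generationConfig".
// func applyLevelFromMetadata(prefix string, metadata map[string]any, body []byte, isGemini3 bool, validLevels map[string]bool) []byte {
// 	if !isGemini3 || metadata == nil {
// 		return body
// 	}
// 	effort, _ := metadata["reasoning_effort"].(string)
// 	if effort == "" || !validLevels[effort] {
// 		return body // unknown or unsupported level: leave the payload untouched
// 	}
// 	out, err := sjson.SetBytes(body, prefix+".thinkingConfig.thinkingLevel", effort)
// 	if err != nil {
// 		return body
// 	}
// 	return out
// }
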
func TestNormalizeGeminiThinkingBudget_Gemini3Conversion(t *testing.T) {
	cleanup := registerGemini3Models(t)
	defer cleanup()

	cases := []struct {
		name       string
		model      string
		inputBody  string
		wantLevel  string
		wantBudget bool // if true, expect thinkingBudget instead of thinkingLevel
	}{
		{
			name:      "gemini3-flash-budget-to-level",
			model:     "gemini-3-flash-preview",
			inputBody: `{"generationConfig":{"thinkingConfig":{"thinkingBudget":8000}}}`,
			wantLevel: "medium",
		},
		{
			name:      "gemini3-pro-budget-to-level",
			model:     "gemini-3-pro-preview",
			inputBody: `{"generationConfig":{"thinkingConfig":{"thinkingBudget":20000}}}`,
			wantLevel: "high",
		},
		{
			name:       "gemini25-keeps-budget",
			model:      "gemini-2.5-pro",
			inputBody:  `{"generationConfig":{"thinkingConfig":{"thinkingBudget":8000}}}`,
			wantBudget: true,
		},
	}

	for _, cs := range cases {
		t.Run(cs.name, func(t *testing.T) {
			result := util.NormalizeGeminiThinkingBudget(cs.model, []byte(cs.inputBody))

			if cs.wantBudget {
				budget := gjson.GetBytes(result, "generationConfig.thinkingConfig.thinkingBudget")
				if !budget.Exists() {
					t.Fatalf("thinkingBudget should exist for non-Gemini3 model: %s", string(result))
				}
				level := gjson.GetBytes(result, "generationConfig.thinkingConfig.thinkingLevel")
				if level.Exists() {
					t.Fatalf("thinkingLevel should not exist for non-Gemini3 model: %s", string(result))
				}
			} else {
				level := gjson.GetBytes(result, "generationConfig.thinkingConfig.thinkingLevel")
				if !level.Exists() {
					t.Fatalf("thinkingLevel should exist for Gemini3 model: %s", string(result))
				}
				if level.String() != cs.wantLevel {
					t.Fatalf("thinkingLevel = %q, want %q", level.String(), cs.wantLevel)
				}
				budget := gjson.GetBytes(result, "generationConfig.thinkingConfig.thinkingBudget")
				if budget.Exists() {
					t.Fatalf("thinkingBudget should be removed for Gemini3 model: %s", string(result))
				}
			}
		})
	}
}

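// The expectations above amount to: for Gemini 3 models, a numeric thinkingBudget
// in the payload is rewritten as a thinkingLevel and the budget key removed, while
// other models keep the numeric budget untouched. A minimal sketch of such a
// normalisation pass is shown below; normalizeBudget is a hypothetical stand-in
// for util.NormalizeGeminiThinkingBudget, reusing the ThinkingBudgetToGemini3Level
// helper exercised earlier in this file.
//
// package sketch
//
// import (
// 	"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
// 	"github.com/tidwall/gjson"
// 	"github.com/tidwall/sjson"
// )
//
// func normalizeBudget(model string, body []byte) []byte {
// 	budget := gjson.GetBytes(body, "generationConfig.thinkingConfig.thinkingBudget")
// 	if !budget.Exists() {
// 		return body
// 	}
// 	level, ok := util.ThinkingBudgetToGemini3Level(model, int(budget.Int()))
// 	if !ok {
// 		return body // not a Gemini 3 model: keep the numeric budget as-is
// 	}
// 	body, _ = sjson.DeleteBytes(body, "generationConfig.thinkingConfig.thinkingBudget")
// 	body, _ = sjson.SetBytes(body, "generationConfig.thinkingConfig.thinkingLevel", level)
// 	return body
// }
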
@@ -1,262 +0,0 @@
package test

import (
	"testing"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/thinking"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
	"github.com/tidwall/gjson"
	"github.com/tidwall/sjson"
)

// TestModelAliasThinkingSuffix tests the 32 test cases defined in docs/thinking_suffix_test_cases.md
// These tests verify the thinking suffix parsing and application logic across different providers.
func TestModelAliasThinkingSuffix(t *testing.T) {
	tests := []struct {
		id            int
		name          string
		provider      string
		requestModel  string
		suffixType    string
		expectedField string // "thinkingBudget", "thinkingLevel", "budget_tokens", "reasoning_effort", "enable_thinking"
		expectedValue any
		upstreamModel string // The upstream model after alias resolution
		isAlias       bool
	}{
		// === 1. Antigravity Provider ===
		// 1.1 Budget-only models (Gemini 2.5)
		{1, "antigravity_original_numeric", "antigravity", "gemini-2.5-computer-use-preview-10-2025(1000)", "numeric", "thinkingBudget", 1000, "gemini-2.5-computer-use-preview-10-2025", false},
		{2, "antigravity_alias_numeric", "antigravity", "gp(1000)", "numeric", "thinkingBudget", 1000, "gemini-2.5-computer-use-preview-10-2025", true},
		// 1.2 Budget+Levels models (Gemini 3)
		{3, "antigravity_original_numeric_to_level", "antigravity", "gemini-3-flash-preview(1000)", "numeric", "thinkingLevel", "low", "gemini-3-flash-preview", false},
		{4, "antigravity_original_level", "antigravity", "gemini-3-flash-preview(low)", "level", "thinkingLevel", "low", "gemini-3-flash-preview", false},
		{5, "antigravity_alias_numeric_to_level", "antigravity", "gf(1000)", "numeric", "thinkingLevel", "low", "gemini-3-flash-preview", true},
		{6, "antigravity_alias_level", "antigravity", "gf(low)", "level", "thinkingLevel", "low", "gemini-3-flash-preview", true},

		// === 2. Gemini CLI Provider ===
		// 2.1 Budget-only models
		{7, "gemini_cli_original_numeric", "gemini-cli", "gemini-2.5-pro(8192)", "numeric", "thinkingBudget", 8192, "gemini-2.5-pro", false},
		{8, "gemini_cli_alias_numeric", "gemini-cli", "g25p(8192)", "numeric", "thinkingBudget", 8192, "gemini-2.5-pro", true},
		// 2.2 Budget+Levels models
		{9, "gemini_cli_original_numeric_to_level", "gemini-cli", "gemini-3-flash-preview(1000)", "numeric", "thinkingLevel", "low", "gemini-3-flash-preview", false},
		{10, "gemini_cli_original_level", "gemini-cli", "gemini-3-flash-preview(low)", "level", "thinkingLevel", "low", "gemini-3-flash-preview", false},
		{11, "gemini_cli_alias_numeric_to_level", "gemini-cli", "gf(1000)", "numeric", "thinkingLevel", "low", "gemini-3-flash-preview", true},
		{12, "gemini_cli_alias_level", "gemini-cli", "gf(low)", "level", "thinkingLevel", "low", "gemini-3-flash-preview", true},

		// === 3. Vertex Provider ===
		// 3.1 Budget-only models
		{13, "vertex_original_numeric", "vertex", "gemini-2.5-pro(16384)", "numeric", "thinkingBudget", 16384, "gemini-2.5-pro", false},
		{14, "vertex_alias_numeric", "vertex", "vg25p(16384)", "numeric", "thinkingBudget", 16384, "gemini-2.5-pro", true},
		// 3.2 Budget+Levels models
		{15, "vertex_original_numeric_to_level", "vertex", "gemini-3-flash-preview(1000)", "numeric", "thinkingLevel", "low", "gemini-3-flash-preview", false},
		{16, "vertex_original_level", "vertex", "gemini-3-flash-preview(low)", "level", "thinkingLevel", "low", "gemini-3-flash-preview", false},
		{17, "vertex_alias_numeric_to_level", "vertex", "vgf(1000)", "numeric", "thinkingLevel", "low", "gemini-3-flash-preview", true},
		{18, "vertex_alias_level", "vertex", "vgf(low)", "level", "thinkingLevel", "low", "gemini-3-flash-preview", true},

		// === 4. AI Studio Provider ===
		// 4.1 Budget-only models
		{19, "aistudio_original_numeric", "aistudio", "gemini-2.5-pro(12000)", "numeric", "thinkingBudget", 12000, "gemini-2.5-pro", false},
		{20, "aistudio_alias_numeric", "aistudio", "ag25p(12000)", "numeric", "thinkingBudget", 12000, "gemini-2.5-pro", true},
		// 4.2 Budget+Levels models
		{21, "aistudio_original_numeric_to_level", "aistudio", "gemini-3-flash-preview(1000)", "numeric", "thinkingLevel", "low", "gemini-3-flash-preview", false},
		{22, "aistudio_original_level", "aistudio", "gemini-3-flash-preview(low)", "level", "thinkingLevel", "low", "gemini-3-flash-preview", false},
		{23, "aistudio_alias_numeric_to_level", "aistudio", "agf(1000)", "numeric", "thinkingLevel", "low", "gemini-3-flash-preview", true},
		{24, "aistudio_alias_level", "aistudio", "agf(low)", "level", "thinkingLevel", "low", "gemini-3-flash-preview", true},

		// === 5. Claude Provider ===
		{25, "claude_original_numeric", "claude", "claude-sonnet-4-5-20250929(16384)", "numeric", "budget_tokens", 16384, "claude-sonnet-4-5-20250929", false},
		{26, "claude_alias_numeric", "claude", "cs45(16384)", "numeric", "budget_tokens", 16384, "claude-sonnet-4-5-20250929", true},

		// === 6. Codex Provider ===
		{27, "codex_original_level", "codex", "gpt-5(high)", "level", "reasoning_effort", "high", "gpt-5", false},
		{28, "codex_alias_level", "codex", "g5(high)", "level", "reasoning_effort", "high", "gpt-5", true},

		// === 7. Qwen Provider ===
		{29, "qwen_original_level", "qwen", "qwen3-coder-plus(high)", "level", "enable_thinking", true, "qwen3-coder-plus", false},
		{30, "qwen_alias_level", "qwen", "qcp(high)", "level", "enable_thinking", true, "qwen3-coder-plus", true},

		// === 8. iFlow Provider ===
		{31, "iflow_original_level", "iflow", "glm-4.7(high)", "level", "reasoning_effort", "high", "glm-4.7", false},
		{32, "iflow_alias_level", "iflow", "glm(high)", "level", "reasoning_effort", "high", "glm-4.7", true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Step 1: Parse model suffix (simulates SDK layer normalization)
			// For "gp(1000)" -> requestedModel="gp", metadata={thinking_budget: 1000}
			requestedModel, metadata := util.NormalizeThinkingModel(tt.requestModel)

			// Verify suffix was parsed
			if metadata == nil && (tt.suffixType == "numeric" || tt.suffixType == "level") {
				t.Errorf("Case #%d: NormalizeThinkingModel(%q) metadata is nil", tt.id, tt.requestModel)
				return
			}

			// Step 2: Simulate OAuth model mapping
			// Real flow: applyOAuthModelMapping stores requestedModel (the alias) in metadata
			if tt.isAlias {
				if metadata == nil {
					metadata = make(map[string]any)
				}
				metadata[util.ModelMappingOriginalModelMetadataKey] = requestedModel
			}

			// Step 3: Verify metadata extraction
			switch tt.suffixType {
			case "numeric":
				budget, _, _, matched := util.ThinkingFromMetadata(metadata)
				if !matched {
					t.Errorf("Case #%d: ThinkingFromMetadata did not match", tt.id)
					return
				}
				if budget == nil {
					t.Errorf("Case #%d: expected budget in metadata", tt.id)
					return
				}
				// For thinkingBudget/budget_tokens, verify the parsed budget value
				if tt.expectedField == "thinkingBudget" || tt.expectedField == "budget_tokens" {
					expectedBudget := tt.expectedValue.(int)
					if *budget != expectedBudget {
						t.Errorf("Case #%d: budget = %d, want %d", tt.id, *budget, expectedBudget)
					}
				}
				// For thinkingLevel (Gemini 3), verify conversion from budget to level
				if tt.expectedField == "thinkingLevel" {
					level, ok := util.ThinkingBudgetToGemini3Level(tt.upstreamModel, *budget)
					if !ok {
						t.Errorf("Case #%d: ThinkingBudgetToGemini3Level failed", tt.id)
						return
					}
					expectedLevel := tt.expectedValue.(string)
					if level != expectedLevel {
						t.Errorf("Case #%d: converted level = %q, want %q", tt.id, level, expectedLevel)
					}
				}

			case "level":
				_, _, effort, matched := util.ThinkingFromMetadata(metadata)
				if !matched {
					t.Errorf("Case #%d: ThinkingFromMetadata did not match", tt.id)
					return
				}
				if effort == nil {
					t.Errorf("Case #%d: expected effort in metadata", tt.id)
					return
				}
				if tt.expectedField == "thinkingLevel" || tt.expectedField == "reasoning_effort" {
					expectedEffort := tt.expectedValue.(string)
					if *effort != expectedEffort {
						t.Errorf("Case #%d: effort = %q, want %q", tt.id, *effort, expectedEffort)
					}
				}
			}

			// Step 4: Test Gemini-specific thinkingLevel conversion for Gemini 3 models
			if tt.expectedField == "thinkingLevel" && util.IsGemini3Model(tt.upstreamModel) {
				body := []byte(`{"request":{"contents":[]}}`)

				// Build metadata simulating real OAuth flow:
				// - requestedModel (alias like "gf") is stored in model_mapping_original_model
				// - upstreamModel is passed as the model parameter
				testMetadata := make(map[string]any)
				if tt.isAlias {
					// Real flow: applyOAuthModelMapping stores requestedModel (the alias)
					testMetadata[util.ModelMappingOriginalModelMetadataKey] = requestedModel
				}
				// Copy parsed metadata (thinking_budget, reasoning_effort, etc.)
				for k, v := range metadata {
					testMetadata[k] = v
				}

				result := util.ApplyGemini3ThinkingLevelFromMetadataCLI(tt.upstreamModel, testMetadata, body)
				levelVal := gjson.GetBytes(result, "request.generationConfig.thinkingConfig.thinkingLevel")

				expectedLevel := tt.expectedValue.(string)
				if !levelVal.Exists() {
					t.Errorf("Case #%d: expected thinkingLevel in result", tt.id)
				} else if levelVal.String() != expectedLevel {
					t.Errorf("Case #%d: thinkingLevel = %q, want %q", tt.id, levelVal.String(), expectedLevel)
				}
			}

			// Step 5: Test Gemini 2.5 thinkingBudget application using thinking.ApplyThinking
			if tt.expectedField == "thinkingBudget" && util.IsGemini25Model(tt.upstreamModel) {
				body := []byte(`{"request":{"contents":[]}}`)

				// Build metadata simulating real OAuth flow:
				// - requestedModel (alias like "gp") is stored in model_mapping_original_model
				// - upstreamModel is passed as the model parameter
				testMetadata := make(map[string]any)
				if tt.isAlias {
					// Real flow: applyOAuthModelMapping stores requestedModel (the alias)
					testMetadata[util.ModelMappingOriginalModelMetadataKey] = requestedModel
				}
				// Copy parsed metadata (thinking_budget, reasoning_effort, etc.)
				for k, v := range metadata {
					testMetadata[k] = v
				}

				// Merge thinking config from metadata into body
				body = applyThinkingFromMetadata(body, testMetadata)

				// Use thinking.ApplyThinking for unified thinking config handling
				// Note: ApplyThinking now takes model string, not *ModelInfo
				result, _ := thinking.ApplyThinking(body, tt.upstreamModel, "gemini-cli")

				budgetVal := gjson.GetBytes(result, "request.generationConfig.thinkingConfig.thinkingBudget")

				expectedBudget := tt.expectedValue.(int)
				if !budgetVal.Exists() {
					t.Errorf("Case #%d: expected thinkingBudget in result", tt.id)
				} else if int(budgetVal.Int()) != expectedBudget {
					t.Errorf("Case #%d: thinkingBudget = %d, want %d", tt.id, int(budgetVal.Int()), expectedBudget)
				}
			}
		})
	}
}

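// Step 1 above relies on the suffix contract "model(1000)" / "model(low)": a
// trailing parenthesised token becomes thinking metadata and is stripped from
// the model name. The simplified parser below only illustrates that contract;
// parseThinkingSuffix is hypothetical, while util.NormalizeThinkingModel is the
// function the removed tests actually exercised. The metadata keys
// "thinking_budget" and "reasoning_effort" mirror the ones used elsewhere in
// this file.
//
// package sketch
//
// import (
// 	"strconv"
// 	"strings"
// )
//
// // parseThinkingSuffix splits "gp(1000)" into ("gp", {"thinking_budget": 1000})
// // and "gf(low)" into ("gf", {"reasoning_effort": "low"}). Models without a
// // suffix are returned unchanged with nil metadata.
// func parseThinkingSuffix(model string) (string, map[string]any) {
// 	open := strings.LastIndex(model, "(")
// 	if open <= 0 || !strings.HasSuffix(model, ")") {
// 		return model, nil
// 	}
// 	base := model[:open]
// 	arg := model[open+1 : len(model)-1]
// 	if n, err := strconv.Atoi(arg); err == nil {
// 		return base, map[string]any{"thinking_budget": n}
// 	}
// 	return base, map[string]any{"reasoning_effort": arg}
// }
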
// applyThinkingFromMetadata merges thinking configuration from metadata into the payload.
func applyThinkingFromMetadata(payload []byte, metadata map[string]any) []byte {
	if len(metadata) == 0 {
		return payload
	}

	// Merge thinking_budget from metadata if present
	if budget, ok := metadata["thinking_budget"]; ok {
		if budgetVal, okNum := parseNumberToInt(budget); okNum {
			payload, _ = sjson.SetBytes(payload, "request.generationConfig.thinkingConfig.thinkingBudget", budgetVal)
		}
	}

	// Merge reasoning_effort from metadata if present
	if effort, ok := metadata["reasoning_effort"]; ok {
		if effortStr, okStr := effort.(string); okStr && effortStr != "" {
			payload, _ = sjson.SetBytes(payload, "request.generationConfig.thinkingConfig.thinkingLevel", effortStr)
		}
	}

	// Merge thinking_include_thoughts from metadata if present
	if include, ok := metadata["thinking_include_thoughts"]; ok {
		if includeBool, okBool := include.(bool); okBool {
			payload, _ = sjson.SetBytes(payload, "request.generationConfig.thinkingConfig.includeThoughts", includeBool)
		}
	}

	return payload
}

// parseNumberToInt safely converts various numeric types to int
func parseNumberToInt(raw any) (int, bool) {
	switch v := raw.(type) {
	case int:
		return v, true
	case int32:
		return int(v), true
	case int64:
		return int(v), true
	case float64:
		return int(v), true
	}
	return 0, false
}