Compare commits


13 Commits

Author SHA1 Message Date
Luis Pater
78ef04fcf1 fix(kimi): reduce redundant payload cloning and simplify translation calls 2026-02-07 08:51:48 +08:00
hkfires
b7e4f00c5f fix(translator): correct gemini-cli log prefix 2026-02-07 08:40:09 +08:00
Luis Pater
f7d0019df7 fix(kimi): update base URL and integrate ClaudeExecutor fallback
- Updated `KimiAPIBaseURL` to remove versioning from the root path.
- Integrated `ClaudeExecutor` fallback in `KimiExecutor` methods for compatibility with Claude requests.
- Simplified token counting by delegating to `ClaudeExecutor`.
2026-02-07 06:42:08 +08:00
Luis Pater
f410dd0440 Merge pull request #1390 from sususu98/fix/400-invalid-request-no-retry
fix(auth): return immediately on 400 invalid_request_error instead of retrying
2026-02-07 03:14:25 +08:00
Luis Pater
eb5582c17c Merge pull request #1386 from shenshuoyaoyouguang/sync-auth-changes
fix(auth): normalize model key for thinking suffix in selectors
2026-02-07 03:12:01 +08:00
Luis Pater
1c6cb2bec3 Merge pull request #1239 from ThanhNguyxn/fix/gitstore-gc-after-squash
fix(store): run GC after squashing history to prevent loose object accumulation
2026-02-07 02:51:27 +08:00
Luis Pater
80b5e79e75 fix(translator): normalize and restrict stop_reason/finish_reason usage
- Standardized the handling of `stop_reason` and `finish_reason` across Codex and Gemini responses.
- Restricted pass-through of specific reasons (`max_tokens`, `stop`) for consistency.
- Enhanced fallback logic for undefined reasons.
2026-02-07 02:07:51 +08:00
Luis Pater
394497fb2f Merge pull request #1465 from router-for-me/kimi-fix
fix(kimi): add OAuth model-alias channel support and cover OAuth excluded-models with tests
2026-02-07 01:27:30 +08:00
LTbinglingfeng
fc7b6ef086 fix(kimi): add OAuth model-alias channel support and cover OAuth excluded-models with tests 2026-02-07 01:16:39 +08:00
Luis Pater
1187aa8222 feat(translator): capture cached token count in usage metadata and handle prompt caching
- Added support to extract and include `cachedContentTokenCount` in `usage.prompt_tokens_details`.
- Logged a warning when setting the cached token count fails, to aid debugging.
2026-02-06 21:28:40 +08:00
sususu98
233be6272a fix(auth): return immediately on 400 invalid_request_error instead of retrying
When the upstream returns 400 Bad Request and the error message contains
invalid_request_error, the request itself is malformed, so switching
accounts will not change the result.

Changes:
- Add an isRequestInvalidError predicate
- The inner loop returns immediately on this error instead of trying the remaining accounts
- The outer loop no longer retries this class of error
2026-02-02 17:35:51 +08:00
chujian
47cb52385e sdk/cliproxy/auth: update selector tests 2026-02-02 05:26:04 +08:00
ThanhNguyxn
a406ca2d5a fix(store): add proper GC with Handler and interval gating
Address maintainer feedback on PR #1239:
- Add Handler: repo.DeleteObject to prevent nil panic in Prune
- Handle ErrLooseObjectsNotSupported gracefully
- Add 5-minute interval gating to avoid repack overhead on every write
- Remove sirupsen/logrus dependency (best-effort silent GC)

Fixes #1104
2026-02-01 11:19:43 +07:00
14 changed files with 466 additions and 245 deletions

View File

@@ -221,7 +221,7 @@ nonstream-keepalive-interval: 0
# Global OAuth model name aliases (per channel)
# These aliases rename model IDs for both model listing and request routing.
-# Supported channels: gemini-cli, vertex, aistudio, antigravity, claude, codex, qwen, iflow.
+# Supported channels: gemini-cli, vertex, aistudio, antigravity, claude, codex, qwen, iflow, kimi.
# NOTE: Aliases do not apply to gemini-api-key, codex-api-key, claude-api-key, openai-compatibility, vertex-api-key, or ampcode.
# You can repeat the same name with different aliases to expose multiple client model names.
oauth-model-alias:
@@ -262,6 +262,9 @@ oauth-model-alias:
# iflow:
# - name: "glm-4.7"
# alias: "glm-god"
+# kimi:
+# - name: "kimi-k2.5"
+# alias: "k2.5"
# OAuth provider excluded models
# oauth-excluded-models:
@@ -284,6 +287,8 @@ oauth-model-alias:
# - "vision-model"
# iflow:
# - "tstars2.0"
+# kimi:
+# - "kimi-k2-thinking"
# Optional payload configuration
# payload:

View File

@@ -30,7 +30,7 @@ const (
// kimiTokenURL is the endpoint for exchanging device codes for tokens.
kimiTokenURL = kimiOAuthHost + "/api/oauth/token"
// KimiAPIBaseURL is the base URL for Kimi API requests.
-KimiAPIBaseURL = "https://api.kimi.com/coding/v1"
+KimiAPIBaseURL = "https://api.kimi.com/coding"
// defaultPollInterval is the default interval for polling token endpoint.
defaultPollInterval = 5 * time.Second
// maxPollDuration is the maximum time to wait for user authorization.
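A minimal sketch of the net effect (illustrative only; the real call sites are in the executor diff below): the /v1 version segment moves from the base constant to the OpenAI-compatible call sites, while Claude-format requests are pointed at the unversioned root.

package main

import "fmt"

// KimiAPIBaseURL mirrors the new constant: no version segment in the root path.
const KimiAPIBaseURL = "https://api.kimi.com/coding"

func main() {
	// OpenAI-compatible chat completions now append the version segment themselves.
	fmt.Println(KimiAPIBaseURL + "/v1/chat/completions") // https://api.kimi.com/coding/v1/chat/completions
	// Claude-format requests reuse the unversioned root via auth.Attributes["base_url"].
}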

View File

@@ -25,6 +25,7 @@ import (
// KimiExecutor is a stateless executor for Kimi API using OpenAI-compatible chat completions.
type KimiExecutor struct {
+ClaudeExecutor
cfg *config.Config
}
@@ -64,6 +65,12 @@ func (e *KimiExecutor) HttpRequest(ctx context.Context, auth *cliproxyauth.Auth,
// Execute performs a non-streaming chat completion request to Kimi.
func (e *KimiExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (resp cliproxyexecutor.Response, err error) {
+from := opts.SourceFormat
+if from.String() == "claude" {
+auth.Attributes["base_url"] = kimiauth.KimiAPIBaseURL
+return e.ClaudeExecutor.Execute(ctx, auth, req, opts)
+}
baseModel := thinking.ParseSuffix(req.Model).ModelName
token := kimiCreds(auth)
@@ -71,12 +78,12 @@ func (e *KimiExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, req
reporter := newUsageReporter(ctx, e.Identifier(), baseModel, auth)
defer reporter.trackFailure(ctx, &err)
-from := opts.SourceFormat
to := sdktranslator.FromString("openai")
-originalPayload := bytes.Clone(req.Payload)
+originalPayloadSource := req.Payload
if len(opts.OriginalRequest) > 0 {
-originalPayload = bytes.Clone(opts.OriginalRequest)
+originalPayloadSource = opts.OriginalRequest
}
+originalPayload := bytes.Clone(originalPayloadSource)
originalTranslated := sdktranslator.TranslateRequest(from, to, baseModel, originalPayload, false)
body := sdktranslator.TranslateRequest(from, to, baseModel, bytes.Clone(req.Payload), false)
@@ -95,7 +102,7 @@ func (e *KimiExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, req
requestedModel := payloadRequestedModel(opts, req.Model)
body = applyPayloadConfigWithRoot(e.cfg, baseModel, to.String(), "", body, originalTranslated, requestedModel)
-url := kimiauth.KimiAPIBaseURL + "/chat/completions"
+url := kimiauth.KimiAPIBaseURL + "/v1/chat/completions"
httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body))
if err != nil {
return resp, err
@@ -148,26 +155,31 @@ func (e *KimiExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, req
var param any
// Note: TranslateNonStream uses req.Model (original with suffix) to preserve
// the original model name in the response for client compatibility.
-out := sdktranslator.TranslateNonStream(ctx, to, from, req.Model, bytes.Clone(opts.OriginalRequest), body, data, &param)
+out := sdktranslator.TranslateNonStream(ctx, to, from, req.Model, opts.OriginalRequest, body, data, &param)
resp = cliproxyexecutor.Response{Payload: []byte(out)}
return resp, nil
}
// ExecuteStream performs a streaming chat completion request to Kimi.
func (e *KimiExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (stream <-chan cliproxyexecutor.StreamChunk, err error) {
-baseModel := thinking.ParseSuffix(req.Model).ModelName
+from := opts.SourceFormat
+if from.String() == "claude" {
+auth.Attributes["base_url"] = kimiauth.KimiAPIBaseURL
+return e.ClaudeExecutor.ExecuteStream(ctx, auth, req, opts)
+}
+baseModel := thinking.ParseSuffix(req.Model).ModelName
token := kimiCreds(auth)
reporter := newUsageReporter(ctx, e.Identifier(), baseModel, auth)
defer reporter.trackFailure(ctx, &err)
-from := opts.SourceFormat
to := sdktranslator.FromString("openai")
-originalPayload := bytes.Clone(req.Payload)
+originalPayloadSource := req.Payload
if len(opts.OriginalRequest) > 0 {
-originalPayload = bytes.Clone(opts.OriginalRequest)
+originalPayloadSource = opts.OriginalRequest
}
+originalPayload := bytes.Clone(originalPayloadSource)
originalTranslated := sdktranslator.TranslateRequest(from, to, baseModel, originalPayload, true)
body := sdktranslator.TranslateRequest(from, to, baseModel, bytes.Clone(req.Payload), true)
@@ -190,7 +202,7 @@ func (e *KimiExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Aut
requestedModel := payloadRequestedModel(opts, req.Model)
body = applyPayloadConfigWithRoot(e.cfg, baseModel, to.String(), "", body, originalTranslated, requestedModel)
-url := kimiauth.KimiAPIBaseURL + "/chat/completions"
+url := kimiauth.KimiAPIBaseURL + "/v1/chat/completions"
httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body))
if err != nil {
return nil, err
@@ -249,12 +261,12 @@ func (e *KimiExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Aut
if detail, ok := parseOpenAIStreamUsage(line); ok {
reporter.publish(ctx, detail)
}
-chunks := sdktranslator.TranslateStream(ctx, to, from, req.Model, bytes.Clone(opts.OriginalRequest), body, bytes.Clone(line), &param)
+chunks := sdktranslator.TranslateStream(ctx, to, from, req.Model, opts.OriginalRequest, body, bytes.Clone(line), &param)
for i := range chunks {
out <- cliproxyexecutor.StreamChunk{Payload: []byte(chunks[i])}
}
}
-doneChunks := sdktranslator.TranslateStream(ctx, to, from, req.Model, bytes.Clone(opts.OriginalRequest), body, bytes.Clone([]byte("[DONE]")), &param)
+doneChunks := sdktranslator.TranslateStream(ctx, to, from, req.Model, opts.OriginalRequest, body, []byte("[DONE]"), &param)
for i := range doneChunks {
out <- cliproxyexecutor.StreamChunk{Payload: []byte(doneChunks[i])}
}
@@ -269,26 +281,8 @@ func (e *KimiExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Aut
// CountTokens estimates token count for Kimi requests.
func (e *KimiExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (cliproxyexecutor.Response, error) {
-baseModel := thinking.ParseSuffix(req.Model).ModelName
-from := opts.SourceFormat
-to := sdktranslator.FromString("openai")
-body := sdktranslator.TranslateRequest(from, to, baseModel, bytes.Clone(req.Payload), false)
-// Use a generic tokenizer for estimation
-enc, err := tokenizerForModel("gpt-4")
-if err != nil {
-return cliproxyexecutor.Response{}, fmt.Errorf("kimi executor: tokenizer init failed: %w", err)
-}
-count, err := countOpenAIChatTokens(enc, body)
-if err != nil {
-return cliproxyexecutor.Response{}, fmt.Errorf("kimi executor: token counting failed: %w", err)
-}
-usageJSON := buildOpenAIUsageJSON(count)
-translated := sdktranslator.TranslateTokenCount(ctx, to, from, count, usageJSON)
-return cliproxyexecutor.Response{Payload: []byte(translated)}, nil
+auth.Attributes["base_url"] = kimiauth.KimiAPIBaseURL
+return e.ClaudeExecutor.CountTokens(ctx, auth, req, opts)
}
// Refresh refreshes the Kimi token using the refresh token.
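The fallback hinges on Go struct embedding: KimiExecutor embeds ClaudeExecutor, so the overridden methods can hand Claude-format traffic straight to the embedded executor after setting the base URL. A simplified sketch of the pattern (types and signatures reduced for illustration; not the actual executor interfaces):

package main

import "fmt"

type ClaudeExecutor struct{}

func (ClaudeExecutor) Execute(format string) string { return "claude path: " + format }

// KimiExecutor embeds ClaudeExecutor; Claude-format requests delegate to it,
// everything else takes the OpenAI-compatible path.
type KimiExecutor struct {
	ClaudeExecutor
}

func (e KimiExecutor) Execute(format string) string {
	if format == "claude" {
		return e.ClaudeExecutor.Execute(format)
	}
	return "openai path: " + format
}

func main() {
	e := KimiExecutor{}
	fmt.Println(e.Execute("claude")) // claude path: claude
	fmt.Println(e.Execute("openai")) // openai path: openai
}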

View File

@@ -21,6 +21,9 @@ import (
cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)
+// gcInterval defines minimum time between garbage collection runs.
+const gcInterval = 5 * time.Minute
// GitTokenStore persists token records and auth metadata using git as the backing storage.
type GitTokenStore struct {
mu sync.Mutex
@@ -31,6 +34,7 @@ type GitTokenStore struct {
remote string
username string
password string
+lastGC time.Time
}
// NewGitTokenStore creates a token store that saves credentials to disk through the
@@ -613,6 +617,7 @@ func (s *GitTokenStore) commitAndPushLocked(message string, relPaths ...string)
} else if errRewrite := s.rewriteHeadAsSingleCommit(repo, headRef.Name(), commitHash, message, signature); errRewrite != nil {
return errRewrite
}
+s.maybeRunGC(repo)
if err = repo.Push(&git.PushOptions{Auth: s.gitAuth(), Force: true}); err != nil {
if errors.Is(err, git.NoErrAlreadyUpToDate) {
return nil
@@ -652,6 +657,23 @@ func (s *GitTokenStore) rewriteHeadAsSingleCommit(repo *git.Repository, branch p
return nil
}
+func (s *GitTokenStore) maybeRunGC(repo *git.Repository) {
+now := time.Now()
+if now.Sub(s.lastGC) < gcInterval {
+return
+}
+s.lastGC = now
+pruneOpts := git.PruneOptions{
+OnlyObjectsOlderThan: now,
+Handler: repo.DeleteObject,
+}
+if err := repo.Prune(pruneOpts); err != nil && !errors.Is(err, git.ErrLooseObjectsNotSupported) {
+return
+}
+_ = repo.RepackObjects(&git.RepackConfig{})
+}
// PersistConfig commits and pushes configuration changes to git.
func (s *GitTokenStore) PersistConfig(_ context.Context) error {
if err := s.EnsureRepository(); err != nil {

View File

@@ -113,10 +113,10 @@ func ConvertCodexResponseToClaude(_ context.Context, _ string, originalRequestRa
template = `{"type":"message_delta","delta":{"stop_reason":"tool_use","stop_sequence":null},"usage":{"input_tokens":0,"output_tokens":0}}`
p := (*param).(*ConvertCodexResponseToClaudeParams).HasToolCall
stopReason := rootResult.Get("response.stop_reason").String()
-if stopReason != "" {
-template, _ = sjson.Set(template, "delta.stop_reason", stopReason)
-} else if p {
+if p {
template, _ = sjson.Set(template, "delta.stop_reason", "tool_use")
+} else if stopReason == "max_tokens" || stopReason == "stop" {
+template, _ = sjson.Set(template, "delta.stop_reason", stopReason)
} else {
template, _ = sjson.Set(template, "delta.stop_reason", "end_turn")
}
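The branch logic above reduces to a small mapping; the helper below is an illustrative summary, not a function in the translator:

package main

import "fmt"

// normalizeStopReason summarizes the normalized mapping: tool calls win,
// only max_tokens/stop pass through, everything else falls back to end_turn.
func normalizeStopReason(hasToolCall bool, stopReason string) string {
	switch {
	case hasToolCall:
		return "tool_use"
	case stopReason == "max_tokens" || stopReason == "stop":
		return stopReason
	default:
		return "end_turn"
	}
}

func main() {
	fmt.Println(normalizeStopReason(true, "stop"))        // tool_use
	fmt.Println(normalizeStopReason(false, "max_tokens")) // max_tokens
	fmt.Println(normalizeStopReason(false, "refusal"))    // end_turn
}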

View File

@@ -14,6 +14,7 @@ import (
"time"
. "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/openai/chat-completions"
+log "github.com/sirupsen/logrus"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
)
@@ -77,14 +78,20 @@ func ConvertCliResponseToOpenAI(_ context.Context, _ string, originalRequestRawJ
template, _ = sjson.Set(template, "id", responseIDResult.String())
}
// Extract and set the finish reason.
-if finishReasonResult := gjson.GetBytes(rawJSON, "response.candidates.0.finishReason"); finishReasonResult.Exists() {
-template, _ = sjson.Set(template, "choices.0.finish_reason", strings.ToLower(finishReasonResult.String()))
-template, _ = sjson.Set(template, "choices.0.native_finish_reason", strings.ToLower(finishReasonResult.String()))
+finishReason := ""
+if stopReasonResult := gjson.GetBytes(rawJSON, "response.stop_reason"); stopReasonResult.Exists() {
+finishReason = stopReasonResult.String()
+}
+if finishReason == "" {
+if finishReasonResult := gjson.GetBytes(rawJSON, "response.candidates.0.finishReason"); finishReasonResult.Exists() {
+finishReason = finishReasonResult.String()
+}
+}
+finishReason = strings.ToLower(finishReason)
// Extract and set usage metadata (token counts).
if usageResult := gjson.GetBytes(rawJSON, "response.usageMetadata"); usageResult.Exists() {
+cachedTokenCount := usageResult.Get("cachedContentTokenCount").Int()
if candidatesTokenCountResult := usageResult.Get("candidatesTokenCount"); candidatesTokenCountResult.Exists() {
template, _ = sjson.Set(template, "usage.completion_tokens", candidatesTokenCountResult.Int())
}
@@ -97,6 +104,14 @@ func ConvertCliResponseToOpenAI(_ context.Context, _ string, originalRequestRawJ
if thoughtsTokenCount > 0 {
template, _ = sjson.Set(template, "usage.completion_tokens_details.reasoning_tokens", thoughtsTokenCount)
}
+// Include cached token count if present (indicates prompt caching is working)
+if cachedTokenCount > 0 {
+var err error
+template, err = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+if err != nil {
+log.Warnf("gemini-cli openai response: failed to set cached_tokens: %v", err)
+}
+}
}
// Process the main content part of the response.
@@ -187,6 +202,12 @@ func ConvertCliResponseToOpenAI(_ context.Context, _ string, originalRequestRawJ
if hasFunctionCall {
template, _ = sjson.Set(template, "choices.0.finish_reason", "tool_calls")
template, _ = sjson.Set(template, "choices.0.native_finish_reason", "tool_calls")
+} else if finishReason != "" && (*param).(*convertCliResponseToOpenAIChatParams).FunctionIndex == 0 {
+// Only pass through specific finish reasons
+if finishReason == "max_tokens" || finishReason == "stop" {
+template, _ = sjson.Set(template, "choices.0.finish_reason", finishReason)
+template, _ = sjson.Set(template, "choices.0.native_finish_reason", finishReason)
+}
}
return []string{template}
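For reference, a sketch of the usage shape this produces when cachedContentTokenCount is present (values illustrative; the field paths match the sjson calls above):

package main

import (
	"fmt"

	"github.com/tidwall/sjson"
)

func main() {
	usage := `{}`
	usage, _ = sjson.Set(usage, "usage.completion_tokens", 80)
	// Mirrors the translator: only written when cachedContentTokenCount > 0.
	usage, _ = sjson.Set(usage, "usage.prompt_tokens_details.cached_tokens", 1024)
	fmt.Println(usage)
	// {"usage":{"completion_tokens":80,"prompt_tokens_details":{"cached_tokens":1024}}}
}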

View File

@@ -129,11 +129,16 @@ func ConvertGeminiResponseToOpenAI(_ context.Context, _ string, originalRequestR
candidateIndex := int(candidate.Get("index").Int())
template, _ = sjson.Set(template, "choices.0.index", candidateIndex)
// Extract and set the finish reason.
-if finishReasonResult := candidate.Get("finishReason"); finishReasonResult.Exists() {
-template, _ = sjson.Set(template, "choices.0.finish_reason", strings.ToLower(finishReasonResult.String()))
-template, _ = sjson.Set(template, "choices.0.native_finish_reason", strings.ToLower(finishReasonResult.String()))
+finishReason := ""
+if stopReasonResult := gjson.GetBytes(rawJSON, "stop_reason"); stopReasonResult.Exists() {
+finishReason = stopReasonResult.String()
+}
+if finishReason == "" {
+if finishReasonResult := gjson.GetBytes(rawJSON, "candidates.0.finishReason"); finishReasonResult.Exists() {
+finishReason = finishReasonResult.String()
+}
+}
+finishReason = strings.ToLower(finishReason)
partsResult := candidate.Get("content.parts")
hasFunctionCall := false
@@ -225,6 +230,12 @@ func ConvertGeminiResponseToOpenAI(_ context.Context, _ string, originalRequestR
if hasFunctionCall {
template, _ = sjson.Set(template, "choices.0.finish_reason", "tool_calls")
template, _ = sjson.Set(template, "choices.0.native_finish_reason", "tool_calls")
+} else if finishReason != "" {
+// Only pass through specific finish reasons
+if finishReason == "max_tokens" || finishReason == "stop" {
+template, _ = sjson.Set(template, "choices.0.finish_reason", finishReason)
+template, _ = sjson.Set(template, "choices.0.native_finish_reason", finishReason)
+}
}
responseStrings = append(responseStrings, template)

View File

@@ -607,6 +607,9 @@ func (m *Manager) executeMixedOnce(ctx context.Context, providers []string, req
result.RetryAfter = ra
}
m.MarkResult(execCtx, result)
+if isRequestInvalidError(errExec) {
+return cliproxyexecutor.Response{}, errExec
+}
lastErr = errExec
continue
}
@@ -660,6 +663,9 @@ func (m *Manager) executeCountMixedOnce(ctx context.Context, providers []string,
result.RetryAfter = ra
}
m.MarkResult(execCtx, result)
+if isRequestInvalidError(errExec) {
+return cliproxyexecutor.Response{}, errExec
+}
lastErr = errExec
continue
}
@@ -711,6 +717,9 @@ func (m *Manager) executeStreamMixedOnce(ctx context.Context, providers []string
result := Result{AuthID: auth.ID, Provider: provider, Model: routeModel, Success: false, Error: rerr}
result.RetryAfter = retryAfterFromError(errStream)
m.MarkResult(execCtx, result)
+if isRequestInvalidError(errStream) {
+return nil, errStream
+}
lastErr = errStream
continue
}
@@ -1110,6 +1119,9 @@ func (m *Manager) shouldRetryAfterError(err error, attempt int, providers []stri
if status := statusCodeFromError(err); status == http.StatusOK {
return 0, false
}
+if isRequestInvalidError(err) {
+return 0, false
+}
wait, found := m.closestCooldownWait(providers, model, attempt)
if !found || wait > maxWait {
return 0, false
@@ -1299,7 +1311,7 @@ func updateAggregatedAvailability(auth *Auth, now time.Time) {
stateUnavailable = true
} else if state.Unavailable {
if state.NextRetryAfter.IsZero() {
-stateUnavailable = true
+stateUnavailable = false
} else if state.NextRetryAfter.After(now) {
stateUnavailable = true
if earliestRetry.IsZero() || state.NextRetryAfter.Before(earliestRetry) {
@@ -1430,6 +1442,21 @@ func statusCodeFromResult(err *Error) int {
return err.StatusCode()
}
+// isRequestInvalidError returns true if the error represents a client request
+// error that should not be retried. Specifically, it checks for 400 Bad Request
+// with "invalid_request_error" in the message, indicating the request itself is
+// malformed and switching to a different auth will not help.
+func isRequestInvalidError(err error) bool {
+if err == nil {
+return false
+}
+status := statusCodeFromError(err)
+if status != http.StatusBadRequest {
+return false
+}
+return strings.Contains(err.Error(), "invalid_request_error")
+}
func applyAuthFailureState(auth *Auth, resultErr *Error, retryAfter *time.Duration, now time.Time) {
if auth == nil {
return
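Schematically, the failover loop now distinguishes this error class from transient ones. A simplified, self-contained sketch (the statusError type is hypothetical; the real predicate uses statusCodeFromError as shown above):

package main

import (
	"errors"
	"fmt"
	"net/http"
	"strings"
)

// statusError is a stand-in for the manager's error type (hypothetical).
type statusError struct {
	code int
	msg  string
}

func (e *statusError) Error() string { return e.msg }

// isRequestInvalidError mirrors the predicate above: 400 plus "invalid_request_error".
func isRequestInvalidError(err error) bool {
	var se *statusError
	if !errors.As(err, &se) {
		return false
	}
	return se.code == http.StatusBadRequest && strings.Contains(se.msg, "invalid_request_error")
}

func main() {
	do := func(acct string) error {
		return &statusError{code: http.StatusBadRequest, msg: "invalid_request_error: malformed payload"}
	}
	for _, acct := range []string{"acct-1", "acct-2"} {
		if err := do(acct); err != nil {
			if isRequestInvalidError(err) {
				// The request itself is malformed; other accounts cannot help.
				fmt.Println("abort on", acct+":", err)
				return
			}
			continue // transient failure: try the next account
		}
	}
}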

View File

@@ -0,0 +1,61 @@
package auth
import (
"testing"
"time"
)
func TestUpdateAggregatedAvailability_UnavailableWithoutNextRetryDoesNotBlockAuth(t *testing.T) {
t.Parallel()
now := time.Now()
model := "test-model"
auth := &Auth{
ID: "a",
ModelStates: map[string]*ModelState{
model: {
Status: StatusError,
Unavailable: true,
},
},
}
updateAggregatedAvailability(auth, now)
if auth.Unavailable {
t.Fatalf("auth.Unavailable = true, want false")
}
if !auth.NextRetryAfter.IsZero() {
t.Fatalf("auth.NextRetryAfter = %v, want zero", auth.NextRetryAfter)
}
}
func TestUpdateAggregatedAvailability_FutureNextRetryBlocksAuth(t *testing.T) {
t.Parallel()
now := time.Now()
model := "test-model"
next := now.Add(5 * time.Minute)
auth := &Auth{
ID: "a",
ModelStates: map[string]*ModelState{
model: {
Status: StatusError,
Unavailable: true,
NextRetryAfter: next,
},
},
}
updateAggregatedAvailability(auth, now)
if !auth.Unavailable {
t.Fatalf("auth.Unavailable = false, want true")
}
if auth.NextRetryAfter.IsZero() {
t.Fatalf("auth.NextRetryAfter = zero, want %v", next)
}
if auth.NextRetryAfter.Sub(next) > time.Second || next.Sub(auth.NextRetryAfter) > time.Second {
t.Fatalf("auth.NextRetryAfter = %v, want %v", auth.NextRetryAfter, next)
}
}

View File

@@ -221,7 +221,7 @@ func modelAliasChannel(auth *Auth) string {
// and auth kind. Returns empty string if the provider/authKind combination doesn't support
// OAuth model alias (e.g., API key authentication).
//
-// Supported channels: gemini-cli, vertex, aistudio, antigravity, claude, codex, qwen, iflow.
+// Supported channels: gemini-cli, vertex, aistudio, antigravity, claude, codex, qwen, iflow, kimi.
func OAuthModelAliasChannel(provider, authKind string) string {
provider = strings.ToLower(strings.TrimSpace(provider))
authKind = strings.ToLower(strings.TrimSpace(authKind))
@@ -245,7 +245,7 @@ func OAuthModelAliasChannel(provider, authKind string) string {
return ""
}
return "codex"
-case "gemini-cli", "aistudio", "antigravity", "qwen", "iflow":
+case "gemini-cli", "aistudio", "antigravity", "qwen", "iflow", "kimi":
return provider
default:
return ""

View File

@@ -70,6 +70,15 @@ func TestResolveOAuthUpstreamModel_SuffixPreservation(t *testing.T) {
input: "gemini-2.5-pro(none)",
want: "gemini-2.5-pro-exp-03-25(none)",
},
+{
+name: "kimi suffix preserved",
+aliases: map[string][]internalconfig.OAuthModelAlias{
+"kimi": {{Name: "kimi-k2.5", Alias: "k2.5"}},
+},
+channel: "kimi",
+input: "k2.5(high)",
+want: "kimi-k2.5(high)",
+},
{
name: "case insensitive alias lookup with suffix",
aliases: map[string][]internalconfig.OAuthModelAlias{
@@ -152,11 +161,21 @@ func createAuthForChannel(channel string) *Auth {
return &Auth{Provider: "qwen"}
case "iflow":
return &Auth{Provider: "iflow"}
+case "kimi":
+return &Auth{Provider: "kimi"}
default:
return &Auth{Provider: channel}
}
}
+func TestOAuthModelAliasChannel_Kimi(t *testing.T) {
+t.Parallel()
+if got := OAuthModelAliasChannel("kimi", "oauth"); got != "kimi" {
+t.Fatalf("OAuthModelAliasChannel() = %q, want %q", got, "kimi")
+}
+}
func TestApplyOAuthModelAlias_SuffixPreservation(t *testing.T) {
t.Parallel()

View File

@@ -12,6 +12,7 @@ import (
"sync"
"time"
"github.com/router-for-me/CLIProxyAPI/v6/internal/thinking"
cliproxyexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
)
@@ -19,6 +20,7 @@ import (
type RoundRobinSelector struct {
mu sync.Mutex
cursors map[string]int
+maxKeys int
}
// FillFirstSelector selects the first available credential (deterministic ordering).
@@ -119,6 +121,19 @@ func authPriority(auth *Auth) int {
return parsed
}
+func canonicalModelKey(model string) string {
+model = strings.TrimSpace(model)
+if model == "" {
+return ""
+}
+parsed := thinking.ParseSuffix(model)
+modelName := strings.TrimSpace(parsed.ModelName)
+if modelName == "" {
+return model
+}
+return modelName
+}
func collectAvailableByPriority(auths []*Auth, model string, now time.Time) (available map[int][]*Auth, cooldownCount int, earliest time.Time) {
available = make(map[int][]*Auth)
for i := 0; i < len(auths); i++ {
@@ -185,11 +200,18 @@ func (s *RoundRobinSelector) Pick(ctx context.Context, provider, model string, o
if err != nil {
return nil, err
}
-key := provider + ":" + model
+key := provider + ":" + canonicalModelKey(model)
s.mu.Lock()
if s.cursors == nil {
s.cursors = make(map[string]int)
}
+limit := s.maxKeys
+if limit <= 0 {
+limit = 4096
+}
+if _, ok := s.cursors[key]; !ok && len(s.cursors) >= limit {
+s.cursors = make(map[string]int)
+}
index := s.cursors[key]
if index >= 2_147_483_640 {
@@ -223,7 +245,14 @@ func isAuthBlockedForModel(auth *Auth, model string, now time.Time) (bool, block
}
if model != "" {
if len(auth.ModelStates) > 0 {
-if state, ok := auth.ModelStates[model]; ok && state != nil {
+state, ok := auth.ModelStates[model]
+if (!ok || state == nil) && model != "" {
+baseModel := canonicalModelKey(model)
+if baseModel != "" && baseModel != model {
+state, ok = auth.ModelStates[baseModel]
+}
+}
+if ok && state != nil {
if state.Status == StatusDisabled {
return true, blockReasonDisabled, time.Time{}
}
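A sketch of the canonicalization this enables (the stand-in below strips a trailing "(...)" thinking suffix, which is the behavior of thinking.ParseSuffix that the tests below rely on):

package main

import (
	"fmt"
	"strings"
)

// baseModelName is a simplified stand-in for thinking.ParseSuffix(...).ModelName.
func baseModelName(model string) string {
	if i := strings.IndexByte(model, '('); i > 0 && strings.HasSuffix(model, ")") {
		return model[:i]
	}
	return model
}

func main() {
	for _, m := range []string{"test-model(high)", "test-model(low)", "test-model"} {
		// All three map to the same round-robin cursor key and model-state entry.
		fmt.Println("gemini:" + baseModelName(m))
	}
}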

View File

@@ -2,7 +2,9 @@ package auth
import (
"context"
"encoding/json"
"errors"
"net/http"
"sync"
"testing"
"time"
@@ -175,3 +177,228 @@ func TestRoundRobinSelectorPick_Concurrent(t *testing.T) {
default:
}
}
func TestSelectorPick_AllCooldownReturnsModelCooldownError(t *testing.T) {
t.Parallel()
model := "test-model"
now := time.Now()
next := now.Add(60 * time.Second)
auths := []*Auth{
{
ID: "a",
ModelStates: map[string]*ModelState{
model: {
Status: StatusActive,
Unavailable: true,
NextRetryAfter: next,
Quota: QuotaState{
Exceeded: true,
NextRecoverAt: next,
},
},
},
},
{
ID: "b",
ModelStates: map[string]*ModelState{
model: {
Status: StatusActive,
Unavailable: true,
NextRetryAfter: next,
Quota: QuotaState{
Exceeded: true,
NextRecoverAt: next,
},
},
},
},
}
t.Run("mixed provider redacts provider field", func(t *testing.T) {
t.Parallel()
selector := &FillFirstSelector{}
_, err := selector.Pick(context.Background(), "mixed", model, cliproxyexecutor.Options{}, auths)
if err == nil {
t.Fatalf("Pick() error = nil")
}
var mce *modelCooldownError
if !errors.As(err, &mce) {
t.Fatalf("Pick() error = %T, want *modelCooldownError", err)
}
if mce.StatusCode() != http.StatusTooManyRequests {
t.Fatalf("StatusCode() = %d, want %d", mce.StatusCode(), http.StatusTooManyRequests)
}
headers := mce.Headers()
if got := headers.Get("Retry-After"); got == "" {
t.Fatalf("Headers().Get(Retry-After) = empty")
}
var payload map[string]any
if err := json.Unmarshal([]byte(mce.Error()), &payload); err != nil {
t.Fatalf("json.Unmarshal(Error()) error = %v", err)
}
rawErr, ok := payload["error"].(map[string]any)
if !ok {
t.Fatalf("Error() payload missing error object: %v", payload)
}
if got, _ := rawErr["code"].(string); got != "model_cooldown" {
t.Fatalf("Error().error.code = %q, want %q", got, "model_cooldown")
}
if _, ok := rawErr["provider"]; ok {
t.Fatalf("Error().error.provider exists for mixed provider: %v", rawErr["provider"])
}
})
t.Run("non-mixed provider includes provider field", func(t *testing.T) {
t.Parallel()
selector := &FillFirstSelector{}
_, err := selector.Pick(context.Background(), "gemini", model, cliproxyexecutor.Options{}, auths)
if err == nil {
t.Fatalf("Pick() error = nil")
}
var mce *modelCooldownError
if !errors.As(err, &mce) {
t.Fatalf("Pick() error = %T, want *modelCooldownError", err)
}
var payload map[string]any
if err := json.Unmarshal([]byte(mce.Error()), &payload); err != nil {
t.Fatalf("json.Unmarshal(Error()) error = %v", err)
}
rawErr, ok := payload["error"].(map[string]any)
if !ok {
t.Fatalf("Error() payload missing error object: %v", payload)
}
if got, _ := rawErr["provider"].(string); got != "gemini" {
t.Fatalf("Error().error.provider = %q, want %q", got, "gemini")
}
})
}
func TestIsAuthBlockedForModel_UnavailableWithoutNextRetryIsNotBlocked(t *testing.T) {
t.Parallel()
now := time.Now()
model := "test-model"
auth := &Auth{
ID: "a",
ModelStates: map[string]*ModelState{
model: {
Status: StatusActive,
Unavailable: true,
Quota: QuotaState{
Exceeded: true,
},
},
},
}
blocked, reason, next := isAuthBlockedForModel(auth, model, now)
if blocked {
t.Fatalf("blocked = true, want false")
}
if reason != blockReasonNone {
t.Fatalf("reason = %v, want %v", reason, blockReasonNone)
}
if !next.IsZero() {
t.Fatalf("next = %v, want zero", next)
}
}
func TestFillFirstSelectorPick_ThinkingSuffixFallsBackToBaseModelState(t *testing.T) {
t.Parallel()
selector := &FillFirstSelector{}
now := time.Now()
baseModel := "test-model"
requestedModel := "test-model(high)"
high := &Auth{
ID: "high",
Attributes: map[string]string{"priority": "10"},
ModelStates: map[string]*ModelState{
baseModel: {
Status: StatusActive,
Unavailable: true,
NextRetryAfter: now.Add(30 * time.Minute),
Quota: QuotaState{
Exceeded: true,
},
},
},
}
low := &Auth{
ID: "low",
Attributes: map[string]string{"priority": "0"},
}
got, err := selector.Pick(context.Background(), "mixed", requestedModel, cliproxyexecutor.Options{}, []*Auth{high, low})
if err != nil {
t.Fatalf("Pick() error = %v", err)
}
if got == nil {
t.Fatalf("Pick() auth = nil")
}
if got.ID != "low" {
t.Fatalf("Pick() auth.ID = %q, want %q", got.ID, "low")
}
}
func TestRoundRobinSelectorPick_ThinkingSuffixSharesCursor(t *testing.T) {
t.Parallel()
selector := &RoundRobinSelector{}
auths := []*Auth{
{ID: "b"},
{ID: "a"},
}
first, err := selector.Pick(context.Background(), "gemini", "test-model(high)", cliproxyexecutor.Options{}, auths)
if err != nil {
t.Fatalf("Pick() first error = %v", err)
}
second, err := selector.Pick(context.Background(), "gemini", "test-model(low)", cliproxyexecutor.Options{}, auths)
if err != nil {
t.Fatalf("Pick() second error = %v", err)
}
if first == nil || second == nil {
t.Fatalf("Pick() returned nil auth")
}
if first.ID != "a" {
t.Fatalf("Pick() first auth.ID = %q, want %q", first.ID, "a")
}
if second.ID != "b" {
t.Fatalf("Pick() second auth.ID = %q, want %q", second.ID, "b")
}
}
func TestRoundRobinSelectorPick_CursorKeyCap(t *testing.T) {
t.Parallel()
selector := &RoundRobinSelector{maxKeys: 2}
auths := []*Auth{{ID: "a"}}
_, _ = selector.Pick(context.Background(), "gemini", "m1", cliproxyexecutor.Options{}, auths)
_, _ = selector.Pick(context.Background(), "gemini", "m2", cliproxyexecutor.Options{}, auths)
_, _ = selector.Pick(context.Background(), "gemini", "m3", cliproxyexecutor.Options{}, auths)
selector.mu.Lock()
defer selector.mu.Unlock()
if selector.cursors == nil {
t.Fatalf("selector.cursors = nil")
}
if len(selector.cursors) != 1 {
t.Fatalf("len(selector.cursors) = %d, want %d", len(selector.cursors), 1)
}
if _, ok := selector.cursors["gemini:m3"]; !ok {
t.Fatalf("selector.cursors missing key %q", "gemini:m3")
}
}

View File

@@ -1,195 +0,0 @@
package test
import (
"os"
"path/filepath"
"strings"
"testing"
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
)
func TestLegacyConfigMigration(t *testing.T) {
t.Run("onlyLegacyFields", func(t *testing.T) {
path := writeConfig(t, `
port: 8080
generative-language-api-key:
- "legacy-gemini-1"
openai-compatibility:
- name: "legacy-provider"
base-url: "https://example.com"
api-keys:
- "legacy-openai-1"
amp-upstream-url: "https://amp.example.com"
amp-upstream-api-key: "amp-legacy-key"
amp-restrict-management-to-localhost: false
amp-model-mappings:
- from: "old-model"
to: "new-model"
`)
cfg, err := config.LoadConfig(path)
if err != nil {
t.Fatalf("load legacy config: %v", err)
}
if got := len(cfg.GeminiKey); got != 1 || cfg.GeminiKey[0].APIKey != "legacy-gemini-1" {
t.Fatalf("gemini migration mismatch: %+v", cfg.GeminiKey)
}
if got := len(cfg.OpenAICompatibility); got != 1 {
t.Fatalf("expected 1 openai-compat provider, got %d", got)
}
if entries := cfg.OpenAICompatibility[0].APIKeyEntries; len(entries) != 1 || entries[0].APIKey != "legacy-openai-1" {
t.Fatalf("openai-compat migration mismatch: %+v", entries)
}
if cfg.AmpCode.UpstreamURL != "https://amp.example.com" || cfg.AmpCode.UpstreamAPIKey != "amp-legacy-key" {
t.Fatalf("amp migration failed: %+v", cfg.AmpCode)
}
if cfg.AmpCode.RestrictManagementToLocalhost {
t.Fatalf("expected amp restriction to be false after migration")
}
if got := len(cfg.AmpCode.ModelMappings); got != 1 || cfg.AmpCode.ModelMappings[0].From != "old-model" {
t.Fatalf("amp mappings migration mismatch: %+v", cfg.AmpCode.ModelMappings)
}
updated := readFile(t, path)
if strings.Contains(updated, "generative-language-api-key") {
t.Fatalf("legacy gemini key still present:\n%s", updated)
}
if strings.Contains(updated, "amp-upstream-url") || strings.Contains(updated, "amp-restrict-management-to-localhost") {
t.Fatalf("legacy amp keys still present:\n%s", updated)
}
if strings.Contains(updated, "\n api-keys:") {
t.Fatalf("legacy openai compat keys still present:\n%s", updated)
}
})
t.Run("mixedLegacyAndNewFields", func(t *testing.T) {
path := writeConfig(t, `
gemini-api-key:
- api-key: "new-gemini"
generative-language-api-key:
- "new-gemini"
- "legacy-gemini-only"
openai-compatibility:
- name: "mixed-provider"
base-url: "https://mixed.example.com"
api-key-entries:
- api-key: "new-entry"
api-keys:
- "legacy-entry"
- "new-entry"
`)
cfg, err := config.LoadConfig(path)
if err != nil {
t.Fatalf("load mixed config: %v", err)
}
if got := len(cfg.GeminiKey); got != 2 {
t.Fatalf("expected 2 gemini entries, got %d: %+v", got, cfg.GeminiKey)
}
seen := make(map[string]struct{}, len(cfg.GeminiKey))
for _, entry := range cfg.GeminiKey {
if _, exists := seen[entry.APIKey]; exists {
t.Fatalf("duplicate gemini key %q after migration", entry.APIKey)
}
seen[entry.APIKey] = struct{}{}
}
provider := cfg.OpenAICompatibility[0]
if got := len(provider.APIKeyEntries); got != 2 {
t.Fatalf("expected 2 openai entries, got %d: %+v", got, provider.APIKeyEntries)
}
entrySeen := make(map[string]struct{}, len(provider.APIKeyEntries))
for _, entry := range provider.APIKeyEntries {
if _, ok := entrySeen[entry.APIKey]; ok {
t.Fatalf("duplicate openai key %q after migration", entry.APIKey)
}
entrySeen[entry.APIKey] = struct{}{}
}
})
t.Run("onlyNewFields", func(t *testing.T) {
path := writeConfig(t, `
gemini-api-key:
- api-key: "new-only"
openai-compatibility:
- name: "new-only-provider"
base-url: "https://new-only.example.com"
api-key-entries:
- api-key: "new-only-entry"
ampcode:
upstream-url: "https://amp.new"
upstream-api-key: "new-amp-key"
restrict-management-to-localhost: true
model-mappings:
- from: "a"
to: "b"
`)
cfg, err := config.LoadConfig(path)
if err != nil {
t.Fatalf("load new config: %v", err)
}
if len(cfg.GeminiKey) != 1 || cfg.GeminiKey[0].APIKey != "new-only" {
t.Fatalf("unexpected gemini entries: %+v", cfg.GeminiKey)
}
if len(cfg.OpenAICompatibility) != 1 || len(cfg.OpenAICompatibility[0].APIKeyEntries) != 1 {
t.Fatalf("unexpected openai compat entries: %+v", cfg.OpenAICompatibility)
}
if cfg.AmpCode.UpstreamURL != "https://amp.new" || cfg.AmpCode.UpstreamAPIKey != "new-amp-key" {
t.Fatalf("unexpected amp config: %+v", cfg.AmpCode)
}
})
t.Run("duplicateNamesDifferentBase", func(t *testing.T) {
path := writeConfig(t, `
openai-compatibility:
- name: "dup-provider"
base-url: "https://provider-a"
api-keys:
- "key-a"
- name: "dup-provider"
base-url: "https://provider-b"
api-keys:
- "key-b"
`)
cfg, err := config.LoadConfig(path)
if err != nil {
t.Fatalf("load duplicate config: %v", err)
}
if len(cfg.OpenAICompatibility) != 2 {
t.Fatalf("expected 2 providers, got %d", len(cfg.OpenAICompatibility))
}
for _, entry := range cfg.OpenAICompatibility {
if len(entry.APIKeyEntries) != 1 {
t.Fatalf("expected 1 key entry per provider: %+v", entry)
}
switch entry.BaseURL {
case "https://provider-a":
if entry.APIKeyEntries[0].APIKey != "key-a" {
t.Fatalf("provider-a key mismatch: %+v", entry.APIKeyEntries)
}
case "https://provider-b":
if entry.APIKeyEntries[0].APIKey != "key-b" {
t.Fatalf("provider-b key mismatch: %+v", entry.APIKeyEntries)
}
default:
t.Fatalf("unexpected provider base url: %s", entry.BaseURL)
}
}
})
}
func writeConfig(t *testing.T, content string) string {
t.Helper()
dir := t.TempDir()
path := filepath.Join(dir, "config.yaml")
if err := os.WriteFile(path, []byte(strings.TrimSpace(content)+"\n"), 0o644); err != nil {
t.Fatalf("write temp config: %v", err)
}
return path
}
func readFile(t *testing.T, path string) string {
t.Helper()
data, err := os.ReadFile(path)
if err != nil {
t.Fatalf("read temp config: %v", err)
}
return string(data)
}