From bc6c4cdbfc68cecbf426742365f608af9be2c7d2 Mon Sep 17 00:00:00 2001
From: evann
Date: Fri, 19 Dec 2025 16:49:50 +0700
Subject: [PATCH] feat(antigravity): add logging for cached token setting errors in responses

---
 .../claude/antigravity_claude_response.go           | 13 +++++++++++--
 .../chat-completions/antigravity_openai_response.go |  8 +++++++-
 .../chat-completions/gemini_openai_response.go      | 13 +++++++++++--
 3 files changed, 29 insertions(+), 5 deletions(-)

diff --git a/internal/translator/antigravity/claude/antigravity_claude_response.go b/internal/translator/antigravity/claude/antigravity_claude_response.go
index 30d0b164..bb06eba9 100644
--- a/internal/translator/antigravity/claude/antigravity_claude_response.go
+++ b/internal/translator/antigravity/claude/antigravity_claude_response.go
@@ -14,6 +14,7 @@ import (
 	"sync/atomic"
 	"time"
 
+	log "github.com/sirupsen/logrus"
 	"github.com/tidwall/gjson"
 	"github.com/tidwall/sjson"
 )
@@ -306,7 +307,11 @@ func appendFinalEvents(params *Params, output *string, force bool) {
 	delta := fmt.Sprintf(`{"type":"message_delta","delta":{"stop_reason":"%s","stop_sequence":null},"usage":{"input_tokens":%d,"output_tokens":%d}}`, stopReason, params.PromptTokenCount, usageOutputTokens)
 	// Add cache_read_input_tokens if cached tokens are present (indicates prompt caching is working)
 	if params.CachedTokenCount > 0 {
-		delta, _ = sjson.Set(delta, "usage.cache_read_input_tokens", params.CachedTokenCount)
+		var err error
+		delta, err = sjson.Set(delta, "usage.cache_read_input_tokens", params.CachedTokenCount)
+		if err != nil {
+			log.Warnf("antigravity claude response: failed to set cache_read_input_tokens: %v", err)
+		}
 	}
 
 	*output = *output + delta + "\n\n\n"
@@ -363,7 +368,11 @@ func ConvertAntigravityResponseToClaudeNonStream(_ context.Context, _ string, or
 	responseJSON, _ = sjson.Set(responseJSON, "usage.output_tokens", outputTokens)
 	// Add cache_read_input_tokens if cached tokens are present (indicates prompt caching is working)
 	if cachedTokens > 0 {
-		responseJSON, _ = sjson.Set(responseJSON, "usage.cache_read_input_tokens", cachedTokens)
+		var err error
+		responseJSON, err = sjson.Set(responseJSON, "usage.cache_read_input_tokens", cachedTokens)
+		if err != nil {
+			log.Warnf("antigravity claude response: failed to set cache_read_input_tokens: %v", err)
+		}
 	}
 
 	contentArrayInitialized := false
diff --git a/internal/translator/antigravity/openai/chat-completions/antigravity_openai_response.go b/internal/translator/antigravity/openai/chat-completions/antigravity_openai_response.go
index 59a08621..f9f5dea4 100644
--- a/internal/translator/antigravity/openai/chat-completions/antigravity_openai_response.go
+++ b/internal/translator/antigravity/openai/chat-completions/antigravity_openai_response.go
@@ -14,6 +14,8 @@ import (
 	"sync/atomic"
 	"time"
 
+	log "github.com/sirupsen/logrus"
+
 	. "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/openai/chat-completions"
 	"github.com/tidwall/gjson"
 	"github.com/tidwall/sjson"
@@ -101,7 +103,11 @@ func ConvertAntigravityResponseToOpenAI(_ context.Context, _ string, originalReq
 		}
 		// Include cached token count if present (indicates prompt caching is working)
 		if cachedTokenCount > 0 {
-			template, _ = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			var err error
+			template, err = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			if err != nil {
+				log.Warnf("antigravity openai response: failed to set cached_tokens: %v", err)
+			}
 		}
 	}
 
diff --git a/internal/translator/gemini/openai/chat-completions/gemini_openai_response.go b/internal/translator/gemini/openai/chat-completions/gemini_openai_response.go
index e0ce4636..b2a44e9e 100644
--- a/internal/translator/gemini/openai/chat-completions/gemini_openai_response.go
+++ b/internal/translator/gemini/openai/chat-completions/gemini_openai_response.go
@@ -14,6 +14,7 @@ import (
 	"sync/atomic"
 	"time"
 
+	log "github.com/sirupsen/logrus"
 	"github.com/tidwall/gjson"
 	"github.com/tidwall/sjson"
 )
@@ -104,7 +105,11 @@ func ConvertGeminiResponseToOpenAI(_ context.Context, _ string, originalRequestR
 		}
 		// Include cached token count if present (indicates prompt caching is working)
 		if cachedTokenCount > 0 {
-			template, _ = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			var err error
+			template, err = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			if err != nil {
+				log.Warnf("gemini openai response: failed to set cached_tokens in streaming: %v", err)
+			}
 		}
 	}
 
@@ -260,7 +265,11 @@ func ConvertGeminiResponseToOpenAINonStream(_ context.Context, _ string, origina
 		}
 		// Include cached token count if present (indicates prompt caching is working)
 		if cachedTokenCount > 0 {
-			template, _ = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			var err error
+			template, err = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			if err != nil {
+				log.Warnf("gemini openai response: failed to set cached_tokens in non-streaming: %v", err)
+			}
 		}
 	}
 