feat(antigravity): add logging for cached token setting errors in responses

evann
2025-12-19 16:49:50 +07:00
parent 404546ce93
commit bc6c4cdbfc
3 changed files with 29 additions and 5 deletions
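The change is the same in each hunk: the sjson.Set call that writes the cached-token field no longer discards its error with a blank identifier, and a log.Warnf line reports any failure. A minimal standalone sketch of that pattern follows; setCachedTokens, the payload, and the token count are made up for illustration, and only sjson.Set and the logrus import mirror the diff below.

package main

import (
	log "github.com/sirupsen/logrus"
	"github.com/tidwall/sjson"
)

// setCachedTokens is a hypothetical helper showing the pattern this commit
// applies: keep the error returned by sjson.Set instead of dropping it, and
// log a warning when the JSON write fails.
func setCachedTokens(payload string, cached int64) string {
	updated, err := sjson.Set(payload, "usage.cache_read_input_tokens", cached)
	if err != nil {
		log.Warnf("failed to set cache_read_input_tokens: %v", err)
		return payload // fall back to the unmodified payload
	}
	return updated
}

func main() {
	out := setCachedTokens(`{"usage":{"input_tokens":12,"output_tokens":34}}`, 8)
	log.Infof("usage payload: %s", out)
}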

View File

@@ -14,6 +14,7 @@ import (
"sync/atomic"
"time"
log "github.com/sirupsen/logrus"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
)
@@ -306,7 +307,11 @@ func appendFinalEvents(params *Params, output *string, force bool) {
 	delta := fmt.Sprintf(`{"type":"message_delta","delta":{"stop_reason":"%s","stop_sequence":null},"usage":{"input_tokens":%d,"output_tokens":%d}}`, stopReason, params.PromptTokenCount, usageOutputTokens)
 	// Add cache_read_input_tokens if cached tokens are present (indicates prompt caching is working)
 	if params.CachedTokenCount > 0 {
-		delta, _ = sjson.Set(delta, "usage.cache_read_input_tokens", params.CachedTokenCount)
+		var err error
+		delta, err = sjson.Set(delta, "usage.cache_read_input_tokens", params.CachedTokenCount)
+		if err != nil {
+			log.Warnf("antigravity claude response: failed to set cache_read_input_tokens: %v", err)
+		}
 	}
 	*output = *output + delta + "\n\n\n"
@@ -363,7 +368,11 @@ func ConvertAntigravityResponseToClaudeNonStream(_ context.Context, _ string, or
 	responseJSON, _ = sjson.Set(responseJSON, "usage.output_tokens", outputTokens)
 	// Add cache_read_input_tokens if cached tokens are present (indicates prompt caching is working)
 	if cachedTokens > 0 {
-		responseJSON, _ = sjson.Set(responseJSON, "usage.cache_read_input_tokens", cachedTokens)
+		var err error
+		responseJSON, err = sjson.Set(responseJSON, "usage.cache_read_input_tokens", cachedTokens)
+		if err != nil {
+			log.Warnf("antigravity claude response: failed to set cache_read_input_tokens: %v", err)
+		}
 	}
 	contentArrayInitialized := false
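On the Claude paths above, the field ends up in the usage object of the response (the streaming message_delta event or the non-stream responseJSON). A small sketch of reading it back with gjson, which this file already imports; the event string and token counts below are illustrative, not taken from the repository.

package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	// Shape of the final message_delta event after cache_read_input_tokens
	// has been set; the concrete numbers here are made up.
	event := `{"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"input_tokens":200,"output_tokens":17,"cache_read_input_tokens":150}}`

	if cached := gjson.Get(event, "usage.cache_read_input_tokens"); cached.Exists() {
		fmt.Println("cache_read_input_tokens:", cached.Int())
	}
}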

View File

@@ -14,6 +14,8 @@ import (
"sync/atomic"
"time"
log "github.com/sirupsen/logrus"
. "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/openai/chat-completions"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
@@ -101,7 +103,11 @@ func ConvertAntigravityResponseToOpenAI(_ context.Context, _ string, originalReq
 		}
 		// Include cached token count if present (indicates prompt caching is working)
 		if cachedTokenCount > 0 {
-			template, _ = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			var err error
+			template, err = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			if err != nil {
+				log.Warnf("antigravity openai response: failed to set cached_tokens: %v", err)
+			}
 		}
 	}
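For the OpenAI-shaped response the commit sets usage.prompt_tokens_details.cached_tokens instead. A hedged sketch of what that sjson.Set call produces, starting from a made-up usage block:

package main

import (
	"fmt"

	"github.com/tidwall/sjson"
)

func main() {
	usage := `{"usage":{"prompt_tokens":120,"completion_tokens":40}}`

	// Same path as in the diff above, on an illustrative payload; sjson
	// creates the nested prompt_tokens_details object as needed.
	withCache, err := sjson.Set(usage, "usage.prompt_tokens_details.cached_tokens", 96)
	if err != nil {
		fmt.Println("failed to set cached_tokens:", err)
		return
	}
	fmt.Println(withCache)
	// Roughly: {"usage":{"prompt_tokens":120,"completion_tokens":40,"prompt_tokens_details":{"cached_tokens":96}}}
}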