feat(antigravity): add logging for cached token setting errors in responses

Author: evann
Date: 2025-12-19 16:49:50 +07:00
parent 404546ce93
commit bc6c4cdbfc
3 changed files with 29 additions and 5 deletions


@@ -14,6 +14,8 @@ import (
 	"sync/atomic"
 	"time"
+	log "github.com/sirupsen/logrus"
 	. "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/openai/chat-completions"
 	"github.com/tidwall/gjson"
 	"github.com/tidwall/sjson"
@@ -101,7 +103,11 @@ func ConvertAntigravityResponseToOpenAI(_ context.Context, _ string, originalReq
 		}
 		// Include cached token count if present (indicates prompt caching is working)
 		if cachedTokenCount > 0 {
-			template, _ = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			var err error
+			template, err = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			if err != nil {
+				log.Warnf("antigravity openai response: failed to set cached_tokens: %v", err)
+			}
 		}
 	}
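
For reference, a minimal, self-contained sketch of the same pattern outside the translator: capture the error returned by sjson.Set instead of discarding it, and surface failures through logrus. The package layout, sample JSON document, and token count below are illustrative assumptions, not code from this repository.

package main

import (
	log "github.com/sirupsen/logrus"
	"github.com/tidwall/sjson"
)

func main() {
	template := `{"usage":{"prompt_tokens":120,"completion_tokens":8}}`
	cachedTokenCount := 64

	if cachedTokenCount > 0 {
		var err error
		// sjson.Set returns the updated JSON string plus an error describing
		// why the path could not be written, if anything went wrong.
		template, err = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
		if err != nil {
			// Mirror the diff above: log a warning instead of silently dropping the error.
			log.Warnf("failed to set cached_tokens: %v", err)
		}
	}

	log.Infof("usage payload: %s", template)
}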