feat(antigravity): add logging for cached token setting errors in responses

Author: evann
Date:   2025-12-19 16:49:50 +07:00
Parent: 404546ce93
Commit: bc6c4cdbfc
3 changed files with 29 additions and 5 deletions


@@ -14,6 +14,7 @@ import (
 	"sync/atomic"
 	"time"
 
+	log "github.com/sirupsen/logrus"
 	"github.com/tidwall/gjson"
 	"github.com/tidwall/sjson"
 )
@@ -104,7 +105,11 @@ func ConvertGeminiResponseToOpenAI(_ context.Context, _ string, originalRequestR
 	}
 	// Include cached token count if present (indicates prompt caching is working)
 	if cachedTokenCount > 0 {
-		template, _ = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+		var err error
+		template, err = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+		if err != nil {
+			log.Warnf("gemini openai response: failed to set cached_tokens in streaming: %v", err)
+		}
 	}
 }
 
@@ -260,7 +265,11 @@ func ConvertGeminiResponseToOpenAINonStream(_ context.Context, _ string, origina
 	}
 	// Include cached token count if present (indicates prompt caching is working)
 	if cachedTokenCount > 0 {
-		template, _ = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+		var err error
+		template, err = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+		if err != nil {
+			log.Warnf("gemini openai response: failed to set cached_tokens in non-streaming: %v", err)
+		}
 	}
 }
 
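For reference, a minimal self-contained sketch of the pattern both hunks adopt. sjson.Set returns the updated JSON document and an error, so the old `template, _ = ...` silently discarded failures such as malformed paths. The template and token count below are illustrative stand-ins, not the converter's real payload.

package main

import (
	log "github.com/sirupsen/logrus"
	"github.com/tidwall/sjson"
)

func main() {
	// Illustrative stand-in; the real converter carries a full OpenAI
	// chat.completion template, not this trimmed usage object.
	template := `{"usage":{"prompt_tokens":120,"completion_tokens":15}}`
	cachedTokenCount := 100

	// sjson.Set returns (string, error); the error fires on problems such
	// as a malformed path, so logging it instead of discarding it with `_`
	// surfaces bugs that would otherwise vanish.
	if cachedTokenCount > 0 {
		var err error
		template, err = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
		if err != nil {
			log.Warnf("gemini openai response: failed to set cached_tokens: %v", err)
		}
	}

	log.Infof("converted usage block: %s", template)
}

Logging at warn level rather than returning the error matches the commit's intent: a missing cached_tokens field degrades observability but should not fail the whole response conversion.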