Mirror of https://github.com/router-for-me/CLIProxyAPI.git (synced 2026-02-03 04:50:52 +08:00)
feat(antigravity): add logging for cached token setting errors in responses
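The change is the same in every translator this commit touches: the blank-identifier assignment `x, _ = sjson.Set(...)` for the cached-token field is replaced by an explicit error check that emits a logrus warning when the write fails. Below is a minimal, self-contained sketch of that pattern using the same libraries the diff imports; the token values and the surrounding main function are illustrative, not taken from the repository.

package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
	"github.com/tidwall/sjson"
)

func main() {
	// Build a Claude-style message_delta payload, mirroring appendFinalEvents in the diff.
	stopReason := "end_turn"
	promptTokens, outputTokens, cachedTokens := 1200, 85, 900 // illustrative values

	delta := fmt.Sprintf(`{"type":"message_delta","delta":{"stop_reason":"%s","stop_sequence":null},"usage":{"input_tokens":%d,"output_tokens":%d}}`,
		stopReason, promptTokens, outputTokens)

	// Only report cache_read_input_tokens when prompt caching actually produced cached tokens.
	if cachedTokens > 0 {
		var err error
		delta, err = sjson.Set(delta, "usage.cache_read_input_tokens", cachedTokens)
		if err != nil {
			// Previously the error was discarded with `_`; the commit logs it instead.
			log.Warnf("failed to set cache_read_input_tokens: %v", err)
		}
	}

	fmt.Println(delta)
}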
@@ -14,6 +14,7 @@ import (
 	"sync/atomic"
 	"time"
 
+	log "github.com/sirupsen/logrus"
 	"github.com/tidwall/gjson"
 	"github.com/tidwall/sjson"
 )

@@ -306,7 +307,11 @@ func appendFinalEvents(params *Params, output *string, force bool) {
 	delta := fmt.Sprintf(`{"type":"message_delta","delta":{"stop_reason":"%s","stop_sequence":null},"usage":{"input_tokens":%d,"output_tokens":%d}}`, stopReason, params.PromptTokenCount, usageOutputTokens)
 	// Add cache_read_input_tokens if cached tokens are present (indicates prompt caching is working)
 	if params.CachedTokenCount > 0 {
-		delta, _ = sjson.Set(delta, "usage.cache_read_input_tokens", params.CachedTokenCount)
+		var err error
+		delta, err = sjson.Set(delta, "usage.cache_read_input_tokens", params.CachedTokenCount)
+		if err != nil {
+			log.Warnf("antigravity claude response: failed to set cache_read_input_tokens: %v", err)
+		}
 	}
 	*output = *output + delta + "\n\n\n"
 

@@ -363,7 +368,11 @@ func ConvertAntigravityResponseToClaudeNonStream(_ context.Context, _ string, or
 	responseJSON, _ = sjson.Set(responseJSON, "usage.output_tokens", outputTokens)
 	// Add cache_read_input_tokens if cached tokens are present (indicates prompt caching is working)
 	if cachedTokens > 0 {
-		responseJSON, _ = sjson.Set(responseJSON, "usage.cache_read_input_tokens", cachedTokens)
+		var err error
+		responseJSON, err = sjson.Set(responseJSON, "usage.cache_read_input_tokens", cachedTokens)
+		if err != nil {
+			log.Warnf("antigravity claude response: failed to set cache_read_input_tokens: %v", err)
+		}
 	}
 
 	contentArrayInitialized := false

@@ -14,6 +14,8 @@ import (
 	"sync/atomic"
 	"time"
 
+	log "github.com/sirupsen/logrus"
+
 	. "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/openai/chat-completions"
 	"github.com/tidwall/gjson"
 	"github.com/tidwall/sjson"

@@ -101,7 +103,11 @@ func ConvertAntigravityResponseToOpenAI(_ context.Context, _ string, originalReq
 		}
 		// Include cached token count if present (indicates prompt caching is working)
 		if cachedTokenCount > 0 {
-			template, _ = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			var err error
+			template, err = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			if err != nil {
+				log.Warnf("antigravity openai response: failed to set cached_tokens: %v", err)
+			}
 		}
 	}
 

@@ -14,6 +14,7 @@ import (
 	"sync/atomic"
 	"time"
 
+	log "github.com/sirupsen/logrus"
 	"github.com/tidwall/gjson"
 	"github.com/tidwall/sjson"
 )

@@ -104,7 +105,11 @@ func ConvertGeminiResponseToOpenAI(_ context.Context, _ string, originalRequestR
 		}
 		// Include cached token count if present (indicates prompt caching is working)
 		if cachedTokenCount > 0 {
-			template, _ = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			var err error
+			template, err = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			if err != nil {
+				log.Warnf("gemini openai response: failed to set cached_tokens in streaming: %v", err)
+			}
 		}
 	}
 

@@ -260,7 +265,11 @@ func ConvertGeminiResponseToOpenAINonStream(_ context.Context, _ string, origina
 		}
 		// Include cached token count if present (indicates prompt caching is working)
 		if cachedTokenCount > 0 {
-			template, _ = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			var err error
+			template, err = sjson.Set(template, "usage.prompt_tokens_details.cached_tokens", cachedTokenCount)
+			if err != nil {
+				log.Warnf("gemini openai response: failed to set cached_tokens in non-streaming: %v", err)
+			}
 		}
 	}
 

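The fields written above are what downstream clients read back: Anthropic-style responses expose usage.cache_read_input_tokens, while OpenAI-style responses expose usage.prompt_tokens_details.cached_tokens. A small sketch with gjson follows; the payloads are hand-written examples of the shapes the translators produce, not captured proxy output.

package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	// Hand-written examples of the usage shapes emitted when prompt caching is active.
	claudeUsage := `{"usage":{"input_tokens":1200,"output_tokens":85,"cache_read_input_tokens":900}}`
	openaiUsage := `{"usage":{"prompt_tokens":1200,"completion_tokens":85,"prompt_tokens_details":{"cached_tokens":900}}}`

	// gjson.Get reads the same dotted paths that sjson.Set writes in the diff.
	fmt.Println(gjson.Get(claudeUsage, "usage.cache_read_input_tokens").Int())             // 900
	fmt.Println(gjson.Get(openaiUsage, "usage.prompt_tokens_details.cached_tokens").Int()) // 900
}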