fix(antigravity): preserve finish_reason tool_calls across streaming chunks

When streaming responses with tool calls, the finish_reason was being
overwritten. The upstream sends functionCall in chunk 1, then
finishReason: STOP in chunk 2. The old code would set finish_reason
from every chunk, causing "tool_calls" to be overwritten by "stop".
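
For reference, the two upstream chunks look roughly like this (trimmed to the
relevant fields; these are the payloads exercised by the new unit tests):

    chunk 1: {"response":{"candidates":[{"content":{"parts":[{"functionCall":{"name":"list_files","args":{"path":"."}}}]}}]}}
    chunk 2: {"response":{"candidates":[{"finishReason":"STOP"}],"usageMetadata":{"promptTokenCount":10,"candidatesTokenCount":20,"totalTokenCount":30}}}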

This broke clients like Claude Code that rely on finish_reason to
detect when tool calls are complete.

Changes:
- Add a SawToolCall bool to track tool calls across the entire stream
- Add UpstreamFinishReason to cache the upstream finish reason
- Only emit finish_reason on the final chunk (the one with both finishReason and usage)
- Priority: tool_calls > max_tokens > stop (see the example below)
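
With this change, the final chunk of a tool-call stream ends up carrying (other
choice fields omitted; values taken from the assertions in the new tests):

    "finish_reason": "tool_calls",
    "native_finish_reason": "stop"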

Includes 5 unit tests covering:
- Tool calls not overwritten by subsequent STOP
- Normal text gets "stop"
- MAX_TOKENS without tool calls gets "max_tokens"
- Tool calls take priority over MAX_TOKENS
- Intermediate chunks have no finish_reason

Fixes streaming tool call detection for Claude Code + Gemini models.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: MohammadErfan Jabbari
Date:   2026-01-05 18:45:25 +01:00
Parent: 99478d13a8
Commit: fe6043aec7

2 changed files with 154 additions and 10 deletions


@@ -23,6 +23,8 @@ import (
 type convertCliResponseToOpenAIChatParams struct {
 	UnixTimestamp int64
 	FunctionIndex int
+	SawToolCall bool // Tracks if any tool call was seen in the entire stream
+	UpstreamFinishReason string // Caches the upstream finish reason for the final chunk
 }
 
 // functionCallIDCounter provides a process-wide unique counter for function call identifiers.
@@ -78,10 +80,9 @@ func ConvertAntigravityResponseToOpenAI(_ context.Context, _ string, originalReq
template, _ = sjson.Set(template, "id", responseIDResult.String())
}
// Extract and set the finish reason.
// Cache the finish reason - do NOT set it in output yet (will be set on final chunk)
if finishReasonResult := gjson.GetBytes(rawJSON, "response.candidates.0.finishReason"); finishReasonResult.Exists() {
template, _ = sjson.Set(template, "choices.0.finish_reason", strings.ToLower(finishReasonResult.String()))
template, _ = sjson.Set(template, "choices.0.native_finish_reason", strings.ToLower(finishReasonResult.String()))
(*param).(*convertCliResponseToOpenAIChatParams).UpstreamFinishReason = strings.ToUpper(finishReasonResult.String())
}
// Extract and set usage metadata (token counts).
@@ -102,7 +103,6 @@ func ConvertAntigravityResponseToOpenAI(_ context.Context, _ string, originalReq
 	// Process the main content part of the response.
 	partsResult := gjson.GetBytes(rawJSON, "response.candidates.0.content.parts")
-	hasFunctionCall := false
 	if partsResult.IsArray() {
 		partResults := partsResult.Array()
 		for i := 0; i < len(partResults); i++ {
@@ -138,7 +138,7 @@ func ConvertAntigravityResponseToOpenAI(_ context.Context, _ string, originalReq
template, _ = sjson.Set(template, "choices.0.delta.role", "assistant")
} else if functionCallResult.Exists() {
// Handle function call content.
hasFunctionCall = true
(*param).(*convertCliResponseToOpenAIChatParams).SawToolCall = true // Persist across chunks
toolCallsResult := gjson.Get(template, "choices.0.delta.tool_calls")
functionCallIndex := (*param).(*convertCliResponseToOpenAIChatParams).FunctionIndex
(*param).(*convertCliResponseToOpenAIChatParams).FunctionIndex++
@@ -190,9 +190,25 @@ func ConvertAntigravityResponseToOpenAI(_ context.Context, _ string, originalReq
 		}
 	}
 
-	if hasFunctionCall {
-		template, _ = sjson.Set(template, "choices.0.finish_reason", "tool_calls")
-		template, _ = sjson.Set(template, "choices.0.native_finish_reason", "tool_calls")
+	// Determine finish_reason only on the final chunk (has both finishReason and usage metadata)
+	params := (*param).(*convertCliResponseToOpenAIChatParams)
+	upstreamFinishReason := params.UpstreamFinishReason
+	sawToolCall := params.SawToolCall
+	usageExists := gjson.GetBytes(rawJSON, "response.usageMetadata").Exists()
+	isFinalChunk := upstreamFinishReason != "" && usageExists
+	if isFinalChunk {
+		var finishReason string
+		if sawToolCall {
+			finishReason = "tool_calls"
+		} else if upstreamFinishReason == "MAX_TOKENS" {
+			finishReason = "max_tokens"
+		} else {
+			finishReason = "stop"
+		}
+		template, _ = sjson.Set(template, "choices.0.finish_reason", finishReason)
+		template, _ = sjson.Set(template, "choices.0.native_finish_reason", strings.ToLower(upstreamFinishReason))
 	}
 
 	return []string{template}


@@ -0,0 +1,128 @@
package chat_completions

import (
	"context"
	"testing"

	"github.com/tidwall/gjson"
)

func TestFinishReasonToolCallsNotOverwritten(t *testing.T) {
	ctx := context.Background()
	var param any

	// Chunk 1: Contains functionCall - should set SawToolCall = true
	chunk1 := []byte(`{"response":{"candidates":[{"content":{"parts":[{"functionCall":{"name":"list_files","args":{"path":"."}}}]}}]}}`)
	result1 := ConvertAntigravityResponseToOpenAI(ctx, "model", nil, nil, chunk1, &param)

	// Verify chunk1 has no finish_reason (null)
	if len(result1) != 1 {
		t.Fatalf("Expected 1 result from chunk1, got %d", len(result1))
	}
	fr1 := gjson.Get(result1[0], "choices.0.finish_reason")
	if fr1.Exists() && fr1.String() != "" && fr1.Type.String() != "Null" {
		t.Errorf("Expected finish_reason to be null in chunk1, got: %v", fr1.String())
	}

	// Chunk 2: Contains finishReason STOP + usage (final chunk, no functionCall)
	// This simulates what the upstream sends AFTER the tool call chunk
	chunk2 := []byte(`{"response":{"candidates":[{"finishReason":"STOP"}],"usageMetadata":{"promptTokenCount":10,"candidatesTokenCount":20,"totalTokenCount":30}}}`)
	result2 := ConvertAntigravityResponseToOpenAI(ctx, "model", nil, nil, chunk2, &param)

	// Verify chunk2 has finish_reason: "tool_calls" (not "stop")
	if len(result2) != 1 {
		t.Fatalf("Expected 1 result from chunk2, got %d", len(result2))
	}
	fr2 := gjson.Get(result2[0], "choices.0.finish_reason").String()
	if fr2 != "tool_calls" {
		t.Errorf("Expected finish_reason 'tool_calls', got: %s", fr2)
	}

	// Verify native_finish_reason is the lowercase upstream value
	nfr2 := gjson.Get(result2[0], "choices.0.native_finish_reason").String()
	if nfr2 != "stop" {
		t.Errorf("Expected native_finish_reason 'stop', got: %s", nfr2)
	}
}

func TestFinishReasonStopForNormalText(t *testing.T) {
	ctx := context.Background()
	var param any

	// Chunk 1: Text content only
	chunk1 := []byte(`{"response":{"candidates":[{"content":{"parts":[{"text":"Hello world"}]}}]}}`)
	ConvertAntigravityResponseToOpenAI(ctx, "model", nil, nil, chunk1, &param)

	// Chunk 2: Final chunk with STOP
	chunk2 := []byte(`{"response":{"candidates":[{"finishReason":"STOP"}],"usageMetadata":{"promptTokenCount":10,"candidatesTokenCount":5,"totalTokenCount":15}}}`)
	result2 := ConvertAntigravityResponseToOpenAI(ctx, "model", nil, nil, chunk2, &param)

	// Verify finish_reason is "stop" (no tool calls were made)
	fr := gjson.Get(result2[0], "choices.0.finish_reason").String()
	if fr != "stop" {
		t.Errorf("Expected finish_reason 'stop', got: %s", fr)
	}
}

func TestFinishReasonMaxTokens(t *testing.T) {
	ctx := context.Background()
	var param any

	// Chunk 1: Text content
	chunk1 := []byte(`{"response":{"candidates":[{"content":{"parts":[{"text":"Hello"}]}}]}}`)
	ConvertAntigravityResponseToOpenAI(ctx, "model", nil, nil, chunk1, &param)

	// Chunk 2: Final chunk with MAX_TOKENS
	chunk2 := []byte(`{"response":{"candidates":[{"finishReason":"MAX_TOKENS"}],"usageMetadata":{"promptTokenCount":10,"candidatesTokenCount":100,"totalTokenCount":110}}}`)
	result2 := ConvertAntigravityResponseToOpenAI(ctx, "model", nil, nil, chunk2, &param)

	// Verify finish_reason is "max_tokens"
	fr := gjson.Get(result2[0], "choices.0.finish_reason").String()
	if fr != "max_tokens" {
		t.Errorf("Expected finish_reason 'max_tokens', got: %s", fr)
	}
}

func TestToolCallTakesPriorityOverMaxTokens(t *testing.T) {
	ctx := context.Background()
	var param any

	// Chunk 1: Contains functionCall
	chunk1 := []byte(`{"response":{"candidates":[{"content":{"parts":[{"functionCall":{"name":"test","args":{}}}]}}]}}`)
	ConvertAntigravityResponseToOpenAI(ctx, "model", nil, nil, chunk1, &param)

	// Chunk 2: Final chunk with MAX_TOKENS (but a tool call was seen, so tool_calls should win)
	chunk2 := []byte(`{"response":{"candidates":[{"finishReason":"MAX_TOKENS"}],"usageMetadata":{"promptTokenCount":10,"candidatesTokenCount":100,"totalTokenCount":110}}}`)
	result2 := ConvertAntigravityResponseToOpenAI(ctx, "model", nil, nil, chunk2, &param)

	// Verify finish_reason is "tool_calls" (takes priority over max_tokens)
	fr := gjson.Get(result2[0], "choices.0.finish_reason").String()
	if fr != "tool_calls" {
		t.Errorf("Expected finish_reason 'tool_calls', got: %s", fr)
	}
}

func TestNoFinishReasonOnIntermediateChunks(t *testing.T) {
	ctx := context.Background()
	var param any

	// Chunk 1: Text content (no finish reason, no usage)
	chunk1 := []byte(`{"response":{"candidates":[{"content":{"parts":[{"text":"Hello"}]}}]}}`)
	result1 := ConvertAntigravityResponseToOpenAI(ctx, "model", nil, nil, chunk1, &param)

	// Verify no finish_reason on intermediate chunk
	fr1 := gjson.Get(result1[0], "choices.0.finish_reason")
	if fr1.Exists() && fr1.String() != "" && fr1.Type.String() != "Null" {
		t.Errorf("Expected no finish_reason on intermediate chunk, got: %v", fr1)
	}

	// Chunk 2: More text (no finish reason, no usage)
	chunk2 := []byte(`{"response":{"candidates":[{"content":{"parts":[{"text":" world"}]}}]}}`)
	result2 := ConvertAntigravityResponseToOpenAI(ctx, "model", nil, nil, chunk2, &param)

	// Verify no finish_reason on intermediate chunk
	fr2 := gjson.Get(result2[0], "choices.0.finish_reason")
	if fr2.Exists() && fr2.String() != "" && fr2.Type.String() != "Null" {
		t.Errorf("Expected no finish_reason on intermediate chunk, got: %v", fr2)
	}
}