revert: translator changes (path guard)

CI blocks PRs that modify internal/translator. Revert translator edits and keep only the /v1/responses streaming error-chunk fix; file an issue for translator conformance work.
This commit is contained in:
canxin121
2026-02-23 13:30:43 +08:00
parent 5382764d8a
commit eb7571936c
6 changed files with 36 additions and 196 deletions

View File

@@ -109,7 +109,6 @@ func ConvertClaudeResponseToOpenAIResponses(ctx context.Context, modelName strin
created, _ = sjson.Set(created, "sequence_number", nextSeq())
created, _ = sjson.Set(created, "response.id", st.ResponseID)
created, _ = sjson.Set(created, "response.created_at", st.CreatedAt)
created, _ = sjson.Set(created, "response.model", modelName)
out = append(out, emitEvent("response.created", created))
// response.in_progress
inprog := `{"type":"response.in_progress","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"in_progress"}}`
@@ -413,6 +412,8 @@ func ConvertClaudeResponseToOpenAIResponses(ctx context.Context, modelName strin
if st.ReasoningBuf.Len() > 0 {
reasoningTokens = int64(st.ReasoningBuf.Len() / 4)
}
usagePresent := st.UsageSeen || reasoningTokens > 0
if usagePresent {
completed, _ = sjson.Set(completed, "response.usage.input_tokens", st.InputTokens)
completed, _ = sjson.Set(completed, "response.usage.input_tokens_details.cached_tokens", 0)
completed, _ = sjson.Set(completed, "response.usage.output_tokens", st.OutputTokens)
@@ -420,7 +421,10 @@ func ConvertClaudeResponseToOpenAIResponses(ctx context.Context, modelName strin
completed, _ = sjson.Set(completed, "response.usage.output_tokens_details.reasoning_tokens", reasoningTokens)
}
total := st.InputTokens + st.OutputTokens
if total > 0 || st.UsageSeen {
completed, _ = sjson.Set(completed, "response.usage.total_tokens", total)
}
}
out = append(out, emitEvent("response.completed", completed))
}

View File

@@ -1,67 +0,0 @@
package responses
import (
"context"
"strings"
"testing"
"github.com/tidwall/gjson"
)
// parseSSEEvent decodes one SSE chunk of the form "event: <name>\ndata: <json>"
// into the event name and the parsed JSON payload. It fails the test
// immediately when the chunk has fewer than two lines or the data line is not
// valid JSON.
func parseSSEEvent(t *testing.T, chunk string) (string, gjson.Result) {
	t.Helper()
	eventLine, rest, ok := strings.Cut(chunk, "\n")
	if !ok {
		// No newline at all means there is no data line to parse.
		t.Fatalf("unexpected SSE chunk: %q", chunk)
	}
	// Only the second line carries the payload; anything after it is ignored.
	dataLine, _, _ := strings.Cut(rest, "\n")
	name := strings.TrimSpace(strings.TrimPrefix(eventLine, "event:"))
	payload := strings.TrimSpace(strings.TrimPrefix(dataLine, "data:"))
	if !gjson.Valid(payload) {
		t.Fatalf("invalid SSE data JSON: %q", payload)
	}
	return name, gjson.Parse(payload)
}
// TestConvertClaudeResponseToOpenAIResponses_CreatedHasModelAndCompletedHasUsage
// feeds a minimal Claude stream (message_start then message_stop) through the
// translator and checks that response.created echoes the requested model name
// and that response.completed always carries usage token fields.
func TestConvertClaudeResponseToOpenAIResponses_CreatedHasModelAndCompletedHasUsage(t *testing.T) {
	inputs := []string{
		`data: {"type":"message_start","message":{"id":"msg_1"}}`,
		`data: {"type":"message_stop"}`,
	}
	var param any
	var chunks []string
	for _, line := range inputs {
		chunks = append(chunks, ConvertClaudeResponseToOpenAIResponses(context.Background(), "test-model", nil, nil, []byte(line), &param)...)
	}
	// requireUsage asserts that a completed event exposes both token counters.
	requireUsage := func(data gjson.Result) {
		if !data.Get("response.usage.input_tokens").Exists() {
			t.Fatalf("response.completed missing usage.input_tokens: %s", data.Raw)
		}
		if !data.Get("response.usage.output_tokens").Exists() {
			t.Fatalf("response.completed missing usage.output_tokens: %s", data.Raw)
		}
	}
	sawCreated := false
	sawCompleted := false
	modelFromCreated := ""
	for _, c := range chunks {
		ev, data := parseSSEEvent(t, c)
		switch ev {
		case "response.created":
			sawCreated = true
			modelFromCreated = data.Get("response.model").String()
		case "response.completed":
			sawCompleted = true
			requireUsage(data)
		}
	}
	if !sawCreated {
		t.Fatalf("missing response.created event")
	}
	if modelFromCreated != "test-model" {
		t.Fatalf("unexpected response.created model: got %q", modelFromCreated)
	}
	if !sawCompleted {
		t.Fatalf("missing response.completed event")
	}
}

View File

@@ -212,7 +212,6 @@ func ConvertGeminiResponseToOpenAIResponses(_ context.Context, modelName string,
created, _ = sjson.Set(created, "sequence_number", nextSeq())
created, _ = sjson.Set(created, "response.id", st.ResponseID)
created, _ = sjson.Set(created, "response.created_at", st.CreatedAt)
created, _ = sjson.Set(created, "response.model", modelName)
out = append(out, emitEvent("response.created", created))
inprog := `{"type":"response.in_progress","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"in_progress"}}`
@@ -530,36 +529,31 @@ func ConvertGeminiResponseToOpenAIResponses(_ context.Context, modelName string,
completed, _ = sjson.SetRaw(completed, "response.output", gjson.Get(outputsWrapper, "arr").Raw)
}
input := int64(0)
cached := int64(0)
output := int64(0)
reasoning := int64(0)
total := int64(0)
// usage mapping
if um := root.Get("usageMetadata"); um.Exists() {
// input tokens = prompt + thoughts
input = um.Get("promptTokenCount").Int() + um.Get("thoughtsTokenCount").Int()
input := um.Get("promptTokenCount").Int() + um.Get("thoughtsTokenCount").Int()
completed, _ = sjson.Set(completed, "response.usage.input_tokens", input)
// cached token details: align with OpenAI "cached_tokens" semantics.
cached = um.Get("cachedContentTokenCount").Int()
completed, _ = sjson.Set(completed, "response.usage.input_tokens_details.cached_tokens", um.Get("cachedContentTokenCount").Int())
// output tokens
if v := um.Get("candidatesTokenCount"); v.Exists() {
output = v.Int()
completed, _ = sjson.Set(completed, "response.usage.output_tokens", v.Int())
} else {
completed, _ = sjson.Set(completed, "response.usage.output_tokens", 0)
}
if v := um.Get("thoughtsTokenCount"); v.Exists() {
reasoning = v.Int()
completed, _ = sjson.Set(completed, "response.usage.output_tokens_details.reasoning_tokens", v.Int())
} else {
completed, _ = sjson.Set(completed, "response.usage.output_tokens_details.reasoning_tokens", 0)
}
if v := um.Get("totalTokenCount"); v.Exists() {
total = v.Int()
completed, _ = sjson.Set(completed, "response.usage.total_tokens", v.Int())
} else {
total = input + output
completed, _ = sjson.Set(completed, "response.usage.total_tokens", 0)
}
}
completed, _ = sjson.Set(completed, "response.usage.input_tokens", input)
completed, _ = sjson.Set(completed, "response.usage.input_tokens_details.cached_tokens", cached)
completed, _ = sjson.Set(completed, "response.usage.output_tokens", output)
completed, _ = sjson.Set(completed, "response.usage.output_tokens_details.reasoning_tokens", reasoning)
completed, _ = sjson.Set(completed, "response.usage.total_tokens", total)
out = append(out, emitEvent("response.completed", completed))
}

View File

@@ -53,7 +53,6 @@ func TestConvertGeminiResponseToOpenAIResponses_UnwrapAndAggregateText(t *testin
textDone string
messageText string
responseID string
createdModel string
instructions string
cachedTokens int64
@@ -69,8 +68,6 @@ func TestConvertGeminiResponseToOpenAIResponses_UnwrapAndAggregateText(t *testin
for i, chunk := range out {
ev, data := parseSSEEvent(t, chunk)
switch ev {
case "response.created":
createdModel = data.Get("response.model").String()
case "response.output_text.done":
gotTextDone = true
if posTextDone == -1 {
@@ -135,9 +132,6 @@ func TestConvertGeminiResponseToOpenAIResponses_UnwrapAndAggregateText(t *testin
if responseID != "resp_req_vrtx_1" {
t.Fatalf("unexpected response id: got %q", responseID)
}
if createdModel != "test-model" {
t.Fatalf("unexpected response.created model: got %q", createdModel)
}
if instructions != "test instructions" {
t.Fatalf("unexpected instructions echo: got %q", instructions)
}
@@ -159,31 +153,6 @@ func TestConvertGeminiResponseToOpenAIResponses_UnwrapAndAggregateText(t *testin
}
}
// TestConvertGeminiResponseToOpenAIResponses_CompletedAlwaysHasUsage checks
// that a Gemini payload without usageMetadata still produces a
// response.completed event containing usage input/output token fields.
func TestConvertGeminiResponseToOpenAIResponses_CompletedAlwaysHasUsage(t *testing.T) {
	payload := `data: {"response":{"candidates":[{"content":{"role":"model","parts":[{"text":"hi"}]},"finishReason":"STOP"}],"modelVersion":"test-model","responseId":"req_no_usage"},"traceId":"t1"}`
	var param any
	chunks := ConvertGeminiResponseToOpenAIResponses(context.Background(), "test-model", nil, nil, []byte(payload), &param)
	// requireUsage asserts both token counters are present on the event.
	requireUsage := func(data gjson.Result) {
		if !data.Get("response.usage.input_tokens").Exists() {
			t.Fatalf("response.completed missing usage.input_tokens: %s", data.Raw)
		}
		if !data.Get("response.usage.output_tokens").Exists() {
			t.Fatalf("response.completed missing usage.output_tokens: %s", data.Raw)
		}
	}
	sawCompleted := false
	for _, c := range chunks {
		ev, data := parseSSEEvent(t, c)
		if ev != "response.completed" {
			continue
		}
		sawCompleted = true
		requireUsage(data)
	}
	if !sawCompleted {
		t.Fatalf("missing response.completed event")
	}
}
func TestConvertGeminiResponseToOpenAIResponses_ReasoningEncryptedContent(t *testing.T) {
sig := "RXE0RENrZ0lDeEFDR0FJcVFOZDdjUzlleGFuRktRdFcvSzNyZ2MvWDNCcDQ4RmxSbGxOWUlOVU5kR1l1UHMrMGdkMVp0Vkg3ekdKU0g4YVljc2JjN3lNK0FrdGpTNUdqamI4T3Z0VVNETzdQd3pmcFhUOGl3U3hXUEJvTVFRQ09mWTFyMEtTWGZxUUlJakFqdmFGWk83RW1XRlBKckJVOVpkYzdDKw=="
in := []string{

View File

@@ -153,7 +153,6 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context,
created, _ = sjson.Set(created, "sequence_number", nextSeq())
created, _ = sjson.Set(created, "response.id", st.ResponseID)
created, _ = sjson.Set(created, "response.created_at", st.Created)
created, _ = sjson.Set(created, "response.model", modelName)
out = append(out, emitRespEvent("response.created", created))
inprog := `{"type":"response.in_progress","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"in_progress"}}`
@@ -579,6 +578,7 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context,
if gjson.Get(outputsWrapper, "arr.#").Int() > 0 {
completed, _ = sjson.SetRaw(completed, "response.output", gjson.Get(outputsWrapper, "arr").Raw)
}
if st.UsageSeen {
completed, _ = sjson.Set(completed, "response.usage.input_tokens", st.PromptTokens)
completed, _ = sjson.Set(completed, "response.usage.input_tokens_details.cached_tokens", st.CachedTokens)
completed, _ = sjson.Set(completed, "response.usage.output_tokens", st.CompletionTokens)
@@ -590,6 +590,7 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context,
total = st.PromptTokens + st.CompletionTokens
}
completed, _ = sjson.Set(completed, "response.usage.total_tokens", total)
}
out = append(out, emitRespEvent("response.completed", completed))
}

View File

@@ -1,61 +0,0 @@
package responses
import (
"context"
"strings"
"testing"
"github.com/tidwall/gjson"
)
// parseSSEEvent decodes one SSE chunk of the form "event: <name>\ndata: <json>"
// into the event name and the parsed JSON payload. It fails the test
// immediately when the chunk has fewer than two lines or the data line is not
// valid JSON.
func parseSSEEvent(t *testing.T, chunk string) (string, gjson.Result) {
	t.Helper()
	eventLine, rest, ok := strings.Cut(chunk, "\n")
	if !ok {
		// No newline at all means there is no data line to parse.
		t.Fatalf("unexpected SSE chunk: %q", chunk)
	}
	// Only the second line carries the payload; anything after it is ignored.
	dataLine, _, _ := strings.Cut(rest, "\n")
	name := strings.TrimSpace(strings.TrimPrefix(eventLine, "event:"))
	payload := strings.TrimSpace(strings.TrimPrefix(dataLine, "data:"))
	if !gjson.Valid(payload) {
		t.Fatalf("invalid SSE data JSON: %q", payload)
	}
	return name, gjson.Parse(payload)
}
// TestConvertOpenAIChatCompletionsResponseToOpenAIResponses_CreatedHasModelAndCompletedHasUsage
// feeds a single finished chat-completion chunk through the translator and
// checks that response.created echoes the requested model name and that
// response.completed always carries usage token fields.
func TestConvertOpenAIChatCompletionsResponseToOpenAIResponses_CreatedHasModelAndCompletedHasUsage(t *testing.T) {
	payload := `data: {"id":"chatcmpl-1","object":"chat.completion.chunk","created":1700000000,"choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}`
	var param any
	chunks := ConvertOpenAIChatCompletionsResponseToOpenAIResponses(context.Background(), "test-model", nil, nil, []byte(payload), &param)
	// requireUsage asserts that a completed event exposes both token counters.
	requireUsage := func(data gjson.Result) {
		if !data.Get("response.usage.input_tokens").Exists() {
			t.Fatalf("response.completed missing usage.input_tokens: %s", data.Raw)
		}
		if !data.Get("response.usage.output_tokens").Exists() {
			t.Fatalf("response.completed missing usage.output_tokens: %s", data.Raw)
		}
	}
	sawCreated := false
	sawCompleted := false
	modelFromCreated := ""
	for _, c := range chunks {
		ev, data := parseSSEEvent(t, c)
		switch ev {
		case "response.created":
			sawCreated = true
			modelFromCreated = data.Get("response.model").String()
		case "response.completed":
			sawCompleted = true
			requireUsage(data)
		}
	}
	if !sawCreated {
		t.Fatalf("missing response.created event")
	}
	if modelFromCreated != "test-model" {
		t.Fatalf("unexpected response.created model: got %q", modelFromCreated)
	}
	if !sawCompleted {
		t.Fatalf("missing response.completed event")
	}
}