diff --git a/internal/constant/constant.go b/internal/constant/constant.go index 963cdc31..58b388a1 100644 --- a/internal/constant/constant.go +++ b/internal/constant/constant.go @@ -21,4 +21,7 @@ const ( // OpenaiResponse represents the OpenAI response format identifier. OpenaiResponse = "openai-response" + + // Antigravity represents the Antigravity response format identifier. + Antigravity = "antigravity" ) diff --git a/internal/runtime/executor/antigravity_executor.go b/internal/runtime/executor/antigravity_executor.go index 3f990ba8..726b5202 100644 --- a/internal/runtime/executor/antigravity_executor.go +++ b/internal/runtime/executor/antigravity_executor.go @@ -70,7 +70,7 @@ func (e *AntigravityExecutor) Execute(ctx context.Context, auth *cliproxyauth.Au defer reporter.trackFailure(ctx, &err) from := opts.SourceFormat - to := sdktranslator.FromString("gemini-cli") + to := sdktranslator.FromString("antigravity") translated := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false) httpReq, errReq := e.buildRequest(ctx, auth, token, req.Model, translated, false, opts.Alt) @@ -127,7 +127,7 @@ func (e *AntigravityExecutor) ExecuteStream(ctx context.Context, auth *cliproxya defer reporter.trackFailure(ctx, &err) from := opts.SourceFormat - to := sdktranslator.FromString("gemini-cli") + to := sdktranslator.FromString("antigravity") translated := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), true) httpReq, errReq := e.buildRequest(ctx, auth, token, req.Model, translated, true, opts.Alt) diff --git a/internal/runtime/executor/usage_helpers.go b/internal/runtime/executor/usage_helpers.go index 8b79defe..501d05c5 100644 --- a/internal/runtime/executor/usage_helpers.go +++ b/internal/runtime/executor/usage_helpers.go @@ -453,6 +453,10 @@ func StripUsageMetadataFromJSON(rawJSON []byte) ([]byte, bool) { usageMetadata = gjson.GetBytes(jsonBytes, "response.usageMetadata") } + if hasNonZeroUsageMetadata(usageMetadata) { + return rawJSON, false + } + if !usageMetadata.Exists() { return rawJSON, false } @@ -475,3 +479,14 @@ func StripUsageMetadataFromJSON(rawJSON []byte) ([]byte, bool) { return cleaned, changed } + +// hasNonZeroUsageMetadata checks if any usage token counts are present. +func hasNonZeroUsageMetadata(node gjson.Result) bool { + if !node.Exists() { + return false + } + return node.Get("totalTokenCount").Int() > 0 || + node.Get("promptTokenCount").Int() > 0 || + node.Get("candidatesTokenCount").Int() > 0 || + node.Get("thoughtsTokenCount").Int() > 0 +} diff --git a/internal/translator/antigravity/claude/antigravity_claude_request.go b/internal/translator/antigravity/claude/antigravity_claude_request.go new file mode 100644 index 00000000..00893c14 --- /dev/null +++ b/internal/translator/antigravity/claude/antigravity_claude_request.go @@ -0,0 +1,188 @@ +// Package claude provides request translation functionality for Claude Code API compatibility. +// This package handles the conversion of Claude Code API requests into Gemini CLI-compatible +// JSON format, transforming message contents, system instructions, and tool declarations +// into the format expected by Gemini CLI API clients. It performs JSON data transformation +// to ensure compatibility between Claude Code API format and Gemini CLI API's expected format. 
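+// The Antigravity backend expects the same wrapped envelope as Gemini CLI (a top-level "model" field plus a
+// nested "request" object), so the converted output in this file mirrors the Gemini CLI request shape.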
+package claude + +import ( + "bytes" + "encoding/json" + "strings" + + client "github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces" + "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/common" + "github.com/router-for-me/CLIProxyAPI/v6/internal/util" + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" +) + +const geminiCLIClaudeThoughtSignature = "skip_thought_signature_validator" + +// ConvertClaudeRequestToAntigravity parses and transforms a Claude Code API request into Gemini CLI API format. +// It extracts the model name, system instruction, message contents, and tool declarations +// from the raw JSON request and returns them in the format expected by the Gemini CLI API. +// The function performs the following transformations: +// 1. Extracts the model information from the request +// 2. Restructures the JSON to match Gemini CLI API format +// 3. Converts system instructions to the expected format +// 4. Maps message contents with proper role transformations +// 5. Handles tool declarations and tool choices +// 6. Maps generation configuration parameters +// +// Parameters: +// - modelName: The name of the model to use for the request +// - rawJSON: The raw JSON request data from the Claude Code API +// - stream: A boolean indicating if the request is for a streaming response (unused in current implementation) +// +// Returns: +// - []byte: The transformed request data in Gemini CLI API format +func ConvertClaudeRequestToAntigravity(modelName string, inputRawJSON []byte, _ bool) []byte { + rawJSON := bytes.Clone(inputRawJSON) + rawJSON = bytes.Replace(rawJSON, []byte(`"url":{"type":"string","format":"uri",`), []byte(`"url":{"type":"string",`), -1) + + // system instruction + var systemInstruction *client.Content + systemResult := gjson.GetBytes(rawJSON, "system") + if systemResult.IsArray() { + systemResults := systemResult.Array() + systemInstruction = &client.Content{Role: "user", Parts: []client.Part{}} + for i := 0; i < len(systemResults); i++ { + systemPromptResult := systemResults[i] + systemTypePromptResult := systemPromptResult.Get("type") + if systemTypePromptResult.Type == gjson.String && systemTypePromptResult.String() == "text" { + systemPrompt := systemPromptResult.Get("text").String() + systemPart := client.Part{Text: systemPrompt} + systemInstruction.Parts = append(systemInstruction.Parts, systemPart) + } + } + if len(systemInstruction.Parts) == 0 { + systemInstruction = nil + } + } + + // contents + contents := make([]client.Content, 0) + messagesResult := gjson.GetBytes(rawJSON, "messages") + if messagesResult.IsArray() { + messageResults := messagesResult.Array() + for i := 0; i < len(messageResults); i++ { + messageResult := messageResults[i] + roleResult := messageResult.Get("role") + if roleResult.Type != gjson.String { + continue + } + role := roleResult.String() + if role == "assistant" { + role = "model" + } + clientContent := client.Content{Role: role, Parts: []client.Part{}} + contentsResult := messageResult.Get("content") + if contentsResult.IsArray() { + contentResults := contentsResult.Array() + for j := 0; j < len(contentResults); j++ { + contentResult := contentResults[j] + contentTypeResult := contentResult.Get("type") + if contentTypeResult.Type == gjson.String && contentTypeResult.String() == "text" { + prompt := contentResult.Get("text").String() + clientContent.Parts = append(clientContent.Parts, client.Part{Text: prompt}) + } else if contentTypeResult.Type == gjson.String && contentTypeResult.String() == "tool_use" { 
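+						// A tool_use block carries the tool name and its JSON "input"; it becomes a Gemini
+						// functionCall part tagged with the placeholder thought signature below.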
+ functionName := contentResult.Get("name").String() + functionArgs := contentResult.Get("input").String() + var args map[string]any + if err := json.Unmarshal([]byte(functionArgs), &args); err == nil { + clientContent.Parts = append(clientContent.Parts, client.Part{ + FunctionCall: &client.FunctionCall{Name: functionName, Args: args}, + ThoughtSignature: geminiCLIClaudeThoughtSignature, + }) + } + } else if contentTypeResult.Type == gjson.String && contentTypeResult.String() == "tool_result" { + toolCallID := contentResult.Get("tool_use_id").String() + if toolCallID != "" { + funcName := toolCallID + toolCallIDs := strings.Split(toolCallID, "-") + if len(toolCallIDs) > 1 { + funcName = strings.Join(toolCallIDs[0:len(toolCallIDs)-1], "-") + } + responseData := contentResult.Get("content").Raw + functionResponse := client.FunctionResponse{Name: funcName, Response: map[string]interface{}{"result": responseData}} + clientContent.Parts = append(clientContent.Parts, client.Part{FunctionResponse: &functionResponse}) + } + } + } + contents = append(contents, clientContent) + } else if contentsResult.Type == gjson.String { + prompt := contentsResult.String() + contents = append(contents, client.Content{Role: role, Parts: []client.Part{{Text: prompt}}}) + } + } + } + + // tools + var tools []client.ToolDeclaration + toolsResult := gjson.GetBytes(rawJSON, "tools") + if toolsResult.IsArray() { + tools = make([]client.ToolDeclaration, 1) + tools[0].FunctionDeclarations = make([]any, 0) + toolsResults := toolsResult.Array() + for i := 0; i < len(toolsResults); i++ { + toolResult := toolsResults[i] + inputSchemaResult := toolResult.Get("input_schema") + if inputSchemaResult.Exists() && inputSchemaResult.IsObject() { + inputSchema := inputSchemaResult.Raw + tool, _ := sjson.Delete(toolResult.Raw, "input_schema") + tool, _ = sjson.SetRaw(tool, "parametersJsonSchema", inputSchema) + tool, _ = sjson.Delete(tool, "strict") + tool, _ = sjson.Delete(tool, "input_examples") + var toolDeclaration any + if err := json.Unmarshal([]byte(tool), &toolDeclaration); err == nil { + tools[0].FunctionDeclarations = append(tools[0].FunctionDeclarations, toolDeclaration) + } + } + } + } else { + tools = make([]client.ToolDeclaration, 0) + } + + // Build output Gemini CLI request JSON + out := `{"model":"","request":{"contents":[]}}` + out, _ = sjson.Set(out, "model", modelName) + if systemInstruction != nil { + b, _ := json.Marshal(systemInstruction) + out, _ = sjson.SetRaw(out, "request.systemInstruction", string(b)) + } + if len(contents) > 0 { + b, _ := json.Marshal(contents) + out, _ = sjson.SetRaw(out, "request.contents", string(b)) + } + if len(tools) > 0 && len(tools[0].FunctionDeclarations) > 0 { + b, _ := json.Marshal(tools) + out, _ = sjson.SetRaw(out, "request.tools", string(b)) + } + + // Map Anthropic thinking -> Gemini thinkingBudget/include_thoughts when type==enabled + if t := gjson.GetBytes(rawJSON, "thinking"); t.Exists() && t.IsObject() && util.ModelSupportsThinking(modelName) { + if t.Get("type").String() == "enabled" { + if b := t.Get("budget_tokens"); b.Exists() && b.Type == gjson.Number { + budget := int(b.Int()) + budget = util.NormalizeThinkingBudget(modelName, budget) + out, _ = sjson.Set(out, "request.generationConfig.thinkingConfig.thinkingBudget", budget) + out, _ = sjson.Set(out, "request.generationConfig.thinkingConfig.include_thoughts", true) + } + } + } + if v := gjson.GetBytes(rawJSON, "temperature"); v.Exists() && v.Type == gjson.Number { + out, _ = sjson.Set(out, 
"request.generationConfig.temperature", v.Num) + } + if v := gjson.GetBytes(rawJSON, "top_p"); v.Exists() && v.Type == gjson.Number { + out, _ = sjson.Set(out, "request.generationConfig.topP", v.Num) + } + if v := gjson.GetBytes(rawJSON, "top_k"); v.Exists() && v.Type == gjson.Number { + out, _ = sjson.Set(out, "request.generationConfig.topK", v.Num) + } + + outBytes := []byte(out) + outBytes = common.AttachDefaultSafetySettings(outBytes, "request.safetySettings") + + return outBytes +} diff --git a/internal/translator/antigravity/claude/antigravity_claude_response.go b/internal/translator/antigravity/claude/antigravity_claude_response.go new file mode 100644 index 00000000..6c699e1a --- /dev/null +++ b/internal/translator/antigravity/claude/antigravity_claude_response.go @@ -0,0 +1,447 @@ +// Package claude provides response translation functionality for Claude Code API compatibility. +// This package handles the conversion of backend client responses into Claude Code-compatible +// Server-Sent Events (SSE) format, implementing a sophisticated state machine that manages +// different response types including text content, thinking processes, and function calls. +// The translation ensures proper sequencing of SSE events and maintains state across +// multiple response chunks to provide a seamless streaming experience. +package claude + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" +) + +// Params holds parameters for response conversion and maintains state across streaming chunks. +// This structure tracks the current state of the response translation process to ensure +// proper sequencing of SSE events and transitions between different content types. +type Params struct { + HasFirstResponse bool // Indicates if the initial message_start event has been sent + ResponseType int // Current response type: 0=none, 1=content, 2=thinking, 3=function + ResponseIndex int // Index counter for content blocks in the streaming response + HasFinishReason bool // Tracks whether a finish reason has been observed + FinishReason string // The finish reason string returned by the provider + HasUsageMetadata bool // Tracks whether usage metadata has been observed + PromptTokenCount int64 // Cached prompt token count from usage metadata + CandidatesTokenCount int64 // Cached candidate token count from usage metadata + ThoughtsTokenCount int64 // Cached thinking token count from usage metadata + TotalTokenCount int64 // Cached total token count from usage metadata + HasSentFinalEvents bool // Indicates if final content/message events have been sent + HasToolUse bool // Indicates if tool use was observed in the stream +} + +// ConvertAntigravityResponseToClaude performs sophisticated streaming response format conversion. +// This function implements a complex state machine that translates backend client responses +// into Claude Code-compatible Server-Sent Events (SSE) format. It manages different response types +// and handles state transitions between content blocks, thinking processes, and function calls. +// +// Response type states: 0=none, 1=content, 2=thinking, 3=function +// The function maintains state across multiple calls to ensure proper SSE event sequencing. 
+// +// Parameters: +// - ctx: The context for the request, used for cancellation and timeout handling +// - modelName: The name of the model being used for the response (unused in current implementation) +// - rawJSON: The raw JSON response from the Gemini CLI API +// - param: A pointer to a parameter object for maintaining state between calls +// +// Returns: +// - []string: A slice of strings, each containing a Claude Code-compatible JSON response +func ConvertAntigravityResponseToClaude(_ context.Context, _ string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, param *any) []string { + if *param == nil { + *param = &Params{ + HasFirstResponse: false, + ResponseType: 0, + ResponseIndex: 0, + } + } + + params := (*param).(*Params) + + if bytes.Equal(rawJSON, []byte("[DONE]")) { + output := "" + appendFinalEvents(params, &output, true) + + return []string{ + output + "event: message_stop\ndata: {\"type\":\"message_stop\"}\n\n\n", + } + } + + output := "" + + // Initialize the streaming session with a message_start event + // This is only sent for the very first response chunk to establish the streaming session + if !params.HasFirstResponse { + output = "event: message_start\n" + + // Create the initial message structure with default values according to Claude Code API specification + // This follows the Claude Code API specification for streaming message initialization + messageStartTemplate := `{"type": "message_start", "message": {"id": "msg_1nZdL29xx5MUA1yADyHTEsnR8uuvGzszyY", "type": "message", "role": "assistant", "content": [], "model": "claude-3-5-sonnet-20241022", "stop_reason": null, "stop_sequence": null, "usage": {"input_tokens": 0, "output_tokens": 0}}}` + + // Override default values with actual response metadata if available from the Gemini CLI response + if modelVersionResult := gjson.GetBytes(rawJSON, "response.modelVersion"); modelVersionResult.Exists() { + messageStartTemplate, _ = sjson.Set(messageStartTemplate, "message.model", modelVersionResult.String()) + } + if responseIDResult := gjson.GetBytes(rawJSON, "response.responseId"); responseIDResult.Exists() { + messageStartTemplate, _ = sjson.Set(messageStartTemplate, "message.id", responseIDResult.String()) + } + output = output + fmt.Sprintf("data: %s\n\n\n", messageStartTemplate) + + params.HasFirstResponse = true + } + + // Process the response parts array from the backend client + // Each part can contain text content, thinking content, or function calls + partsResult := gjson.GetBytes(rawJSON, "response.candidates.0.content.parts") + if partsResult.IsArray() { + partResults := partsResult.Array() + for i := 0; i < len(partResults); i++ { + partResult := partResults[i] + + // Extract the different types of content from each part + partTextResult := partResult.Get("text") + functionCallResult := partResult.Get("functionCall") + + // Handle text content (both regular content and thinking) + if partTextResult.Exists() { + // Process thinking content (internal reasoning) + if partResult.Get("thought").Bool() { + // Continue existing thinking block if already in thinking state + if params.ResponseType == 2 { + output = output + "event: content_block_delta\n" + data, _ := sjson.Set(fmt.Sprintf(`{"type":"content_block_delta","index":%d,"delta":{"type":"thinking_delta","thinking":""}}`, params.ResponseIndex), "delta.thinking", partTextResult.String()) + output = output + fmt.Sprintf("data: %s\n\n\n", data) + } else { + // Transition from another state to thinking + // First, close any existing content block + if 
params.ResponseType != 0 { + if params.ResponseType == 2 { + // output = output + "event: content_block_delta\n" + // output = output + fmt.Sprintf(`data: {"type":"content_block_delta","index":%d,"delta":{"type":"signature_delta","signature":null}}`, params.ResponseIndex) + // output = output + "\n\n\n" + } + output = output + "event: content_block_stop\n" + output = output + fmt.Sprintf(`data: {"type":"content_block_stop","index":%d}`, params.ResponseIndex) + output = output + "\n\n\n" + params.ResponseIndex++ + } + + // Start a new thinking content block + output = output + "event: content_block_start\n" + output = output + fmt.Sprintf(`data: {"type":"content_block_start","index":%d,"content_block":{"type":"thinking","thinking":""}}`, params.ResponseIndex) + output = output + "\n\n\n" + output = output + "event: content_block_delta\n" + data, _ := sjson.Set(fmt.Sprintf(`{"type":"content_block_delta","index":%d,"delta":{"type":"thinking_delta","thinking":""}}`, params.ResponseIndex), "delta.thinking", partTextResult.String()) + output = output + fmt.Sprintf("data: %s\n\n\n", data) + params.ResponseType = 2 // Set state to thinking + } + } else { + // Process regular text content (user-visible output) + // Continue existing text block if already in content state + if params.ResponseType == 1 { + output = output + "event: content_block_delta\n" + data, _ := sjson.Set(fmt.Sprintf(`{"type":"content_block_delta","index":%d,"delta":{"type":"text_delta","text":""}}`, params.ResponseIndex), "delta.text", partTextResult.String()) + output = output + fmt.Sprintf("data: %s\n\n\n", data) + } else { + // Transition from another state to text content + // First, close any existing content block + if params.ResponseType != 0 { + if params.ResponseType == 2 { + // output = output + "event: content_block_delta\n" + // output = output + fmt.Sprintf(`data: {"type":"content_block_delta","index":%d,"delta":{"type":"signature_delta","signature":null}}`, params.ResponseIndex) + // output = output + "\n\n\n" + } + output = output + "event: content_block_stop\n" + output = output + fmt.Sprintf(`data: {"type":"content_block_stop","index":%d}`, params.ResponseIndex) + output = output + "\n\n\n" + params.ResponseIndex++ + } + + // Start a new text content block + output = output + "event: content_block_start\n" + output = output + fmt.Sprintf(`data: {"type":"content_block_start","index":%d,"content_block":{"type":"text","text":""}}`, params.ResponseIndex) + output = output + "\n\n\n" + output = output + "event: content_block_delta\n" + data, _ := sjson.Set(fmt.Sprintf(`{"type":"content_block_delta","index":%d,"delta":{"type":"text_delta","text":""}}`, params.ResponseIndex), "delta.text", partTextResult.String()) + output = output + fmt.Sprintf("data: %s\n\n\n", data) + params.ResponseType = 1 // Set state to content + } + } + } else if functionCallResult.Exists() { + // Handle function/tool calls from the AI model + // This processes tool usage requests and formats them for Claude Code API compatibility + params.HasToolUse = true + fcName := functionCallResult.Get("name").String() + + // Handle state transitions when switching to function calls + // Close any existing function call block first + if params.ResponseType == 3 { + output = output + "event: content_block_stop\n" + output = output + fmt.Sprintf(`data: {"type":"content_block_stop","index":%d}`, params.ResponseIndex) + output = output + "\n\n\n" + params.ResponseIndex++ + params.ResponseType = 0 + } + + // Special handling for thinking state transition + if 
params.ResponseType == 2 { + // output = output + "event: content_block_delta\n" + // output = output + fmt.Sprintf(`data: {"type":"content_block_delta","index":%d,"delta":{"type":"signature_delta","signature":null}}`, params.ResponseIndex) + // output = output + "\n\n\n" + } + + // Close any other existing content block + if params.ResponseType != 0 { + output = output + "event: content_block_stop\n" + output = output + fmt.Sprintf(`data: {"type":"content_block_stop","index":%d}`, params.ResponseIndex) + output = output + "\n\n\n" + params.ResponseIndex++ + } + + // Start a new tool use content block + // This creates the structure for a function call in Claude Code format + output = output + "event: content_block_start\n" + + // Create the tool use block with unique ID and function details + data := fmt.Sprintf(`{"type":"content_block_start","index":%d,"content_block":{"type":"tool_use","id":"","name":"","input":{}}}`, params.ResponseIndex) + data, _ = sjson.Set(data, "content_block.id", fmt.Sprintf("%s-%d", fcName, time.Now().UnixNano())) + data, _ = sjson.Set(data, "content_block.name", fcName) + output = output + fmt.Sprintf("data: %s\n\n\n", data) + + if fcArgsResult := functionCallResult.Get("args"); fcArgsResult.Exists() { + output = output + "event: content_block_delta\n" + data, _ = sjson.Set(fmt.Sprintf(`{"type":"content_block_delta","index":%d,"delta":{"type":"input_json_delta","partial_json":""}}`, params.ResponseIndex), "delta.partial_json", fcArgsResult.Raw) + output = output + fmt.Sprintf("data: %s\n\n\n", data) + } + params.ResponseType = 3 + } + } + } + + if finishReasonResult := gjson.GetBytes(rawJSON, "response.candidates.0.finishReason"); finishReasonResult.Exists() { + params.HasFinishReason = true + params.FinishReason = finishReasonResult.String() + } + + if usageResult := gjson.GetBytes(rawJSON, "response.usageMetadata"); usageResult.Exists() { + params.HasUsageMetadata = true + params.PromptTokenCount = usageResult.Get("promptTokenCount").Int() + params.CandidatesTokenCount = usageResult.Get("candidatesTokenCount").Int() + params.ThoughtsTokenCount = usageResult.Get("thoughtsTokenCount").Int() + params.TotalTokenCount = usageResult.Get("totalTokenCount").Int() + if params.CandidatesTokenCount == 0 && params.TotalTokenCount > 0 { + params.CandidatesTokenCount = params.TotalTokenCount - params.PromptTokenCount - params.ThoughtsTokenCount + if params.CandidatesTokenCount < 0 { + params.CandidatesTokenCount = 0 + } + } + } + + if params.HasUsageMetadata && params.HasFinishReason { + appendFinalEvents(params, &output, false) + } + + return []string{output} +} + +func appendFinalEvents(params *Params, output *string, force bool) { + if params.HasSentFinalEvents { + return + } + + if !params.HasUsageMetadata && !force { + return + } + + if params.ResponseType != 0 { + *output = *output + "event: content_block_stop\n" + *output = *output + fmt.Sprintf(`data: {"type":"content_block_stop","index":%d}`, params.ResponseIndex) + *output = *output + "\n\n\n" + params.ResponseType = 0 + } + + stopReason := resolveStopReason(params) + usageOutputTokens := params.CandidatesTokenCount + params.ThoughtsTokenCount + if usageOutputTokens == 0 && params.TotalTokenCount > 0 { + usageOutputTokens = params.TotalTokenCount - params.PromptTokenCount + if usageOutputTokens < 0 { + usageOutputTokens = 0 + } + } + + *output = *output + "event: message_delta\n" + *output = *output + "data: " + delta := 
fmt.Sprintf(`{"type":"message_delta","delta":{"stop_reason":"%s","stop_sequence":null},"usage":{"input_tokens":%d,"output_tokens":%d}}`, stopReason, params.PromptTokenCount, usageOutputTokens) + *output = *output + delta + "\n\n\n" + + params.HasSentFinalEvents = true +} + +func resolveStopReason(params *Params) string { + if params.HasToolUse { + return "tool_use" + } + + switch params.FinishReason { + case "MAX_TOKENS": + return "max_tokens" + case "STOP", "FINISH_REASON_UNSPECIFIED", "UNKNOWN": + return "end_turn" + } + + return "end_turn" +} + +// ConvertAntigravityResponseToClaudeNonStream converts a non-streaming Gemini CLI response to a non-streaming Claude response. +// +// Parameters: +// - ctx: The context for the request. +// - modelName: The name of the model. +// - rawJSON: The raw JSON response from the Gemini CLI API. +// - param: A pointer to a parameter object for the conversion. +// +// Returns: +// - string: A Claude-compatible JSON response. +func ConvertAntigravityResponseToClaudeNonStream(_ context.Context, _ string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, _ *any) string { + _ = originalRequestRawJSON + _ = requestRawJSON + + root := gjson.ParseBytes(rawJSON) + promptTokens := root.Get("response.usageMetadata.promptTokenCount").Int() + candidateTokens := root.Get("response.usageMetadata.candidatesTokenCount").Int() + thoughtTokens := root.Get("response.usageMetadata.thoughtsTokenCount").Int() + totalTokens := root.Get("response.usageMetadata.totalTokenCount").Int() + outputTokens := candidateTokens + thoughtTokens + if outputTokens == 0 && totalTokens > 0 { + outputTokens = totalTokens - promptTokens + if outputTokens < 0 { + outputTokens = 0 + } + } + + response := map[string]interface{}{ + "id": root.Get("response.responseId").String(), + "type": "message", + "role": "assistant", + "model": root.Get("response.modelVersion").String(), + "content": []interface{}{}, + "stop_reason": nil, + "stop_sequence": nil, + "usage": map[string]interface{}{ + "input_tokens": promptTokens, + "output_tokens": outputTokens, + }, + } + + parts := root.Get("response.candidates.0.content.parts") + var contentBlocks []interface{} + textBuilder := strings.Builder{} + thinkingBuilder := strings.Builder{} + toolIDCounter := 0 + hasToolCall := false + + flushText := func() { + if textBuilder.Len() == 0 { + return + } + contentBlocks = append(contentBlocks, map[string]interface{}{ + "type": "text", + "text": textBuilder.String(), + }) + textBuilder.Reset() + } + + flushThinking := func() { + if thinkingBuilder.Len() == 0 { + return + } + contentBlocks = append(contentBlocks, map[string]interface{}{ + "type": "thinking", + "thinking": thinkingBuilder.String(), + }) + thinkingBuilder.Reset() + } + + if parts.IsArray() { + for _, part := range parts.Array() { + if text := part.Get("text"); text.Exists() && text.String() != "" { + if part.Get("thought").Bool() { + flushText() + thinkingBuilder.WriteString(text.String()) + continue + } + flushThinking() + textBuilder.WriteString(text.String()) + continue + } + + if functionCall := part.Get("functionCall"); functionCall.Exists() { + flushThinking() + flushText() + hasToolCall = true + + name := functionCall.Get("name").String() + toolIDCounter++ + toolBlock := map[string]interface{}{ + "type": "tool_use", + "id": fmt.Sprintf("tool_%d", toolIDCounter), + "name": name, + "input": map[string]interface{}{}, + } + + if args := functionCall.Get("args"); args.Exists() { + var parsed interface{} + if err := json.Unmarshal([]byte(args.Raw), 
&parsed); err == nil { + toolBlock["input"] = parsed + } + } + + contentBlocks = append(contentBlocks, toolBlock) + continue + } + } + } + + flushThinking() + flushText() + + response["content"] = contentBlocks + + stopReason := "end_turn" + if hasToolCall { + stopReason = "tool_use" + } else { + if finish := root.Get("response.candidates.0.finishReason"); finish.Exists() { + switch finish.String() { + case "MAX_TOKENS": + stopReason = "max_tokens" + case "STOP", "FINISH_REASON_UNSPECIFIED", "UNKNOWN": + stopReason = "end_turn" + default: + stopReason = "end_turn" + } + } + } + response["stop_reason"] = stopReason + + if usage := response["usage"].(map[string]interface{}); usage["input_tokens"] == int64(0) && usage["output_tokens"] == int64(0) { + if usageMeta := root.Get("response.usageMetadata"); !usageMeta.Exists() { + delete(response, "usage") + } + } + + encoded, err := json.Marshal(response) + if err != nil { + return "" + } + return string(encoded) +} + +func ClaudeTokenCount(ctx context.Context, count int64) string { + return fmt.Sprintf(`{"input_tokens":%d}`, count) +} diff --git a/internal/translator/antigravity/claude/init.go b/internal/translator/antigravity/claude/init.go new file mode 100644 index 00000000..21fe0b26 --- /dev/null +++ b/internal/translator/antigravity/claude/init.go @@ -0,0 +1,20 @@ +package claude + +import ( + . "github.com/router-for-me/CLIProxyAPI/v6/internal/constant" + "github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces" + "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/translator" +) + +func init() { + translator.Register( + Claude, + Antigravity, + ConvertClaudeRequestToAntigravity, + interfaces.TranslateResponse{ + Stream: ConvertAntigravityResponseToClaude, + NonStream: ConvertAntigravityResponseToClaudeNonStream, + TokenCount: ClaudeTokenCount, + }, + ) +} diff --git a/internal/translator/antigravity/gemini/antigravity_gemini_request.go b/internal/translator/antigravity/gemini/antigravity_gemini_request.go new file mode 100644 index 00000000..b79e9a44 --- /dev/null +++ b/internal/translator/antigravity/gemini/antigravity_gemini_request.go @@ -0,0 +1,279 @@ +// Package gemini provides request translation functionality for Gemini CLI to Gemini API compatibility. +// It handles parsing and transforming Gemini CLI API requests into Gemini API format, +// extracting model information, system instructions, message contents, and tool declarations. +// The package performs JSON data transformation to ensure compatibility +// between Gemini CLI API format and Gemini API's expected format. +package gemini + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/common" + "github.com/router-for-me/CLIProxyAPI/v6/internal/util" + log "github.com/sirupsen/logrus" + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" +) + +// ConvertGeminiRequestToAntigravity parses and transforms a Gemini CLI API request into Gemini API format. +// It extracts the model name, system instruction, message contents, and tool declarations +// from the raw JSON request and returns them in the format expected by the Gemini API. +// The function performs the following transformations: +// 1. Extracts the model information from the request +// 2. Restructures the JSON to match Gemini API format +// 3. Converts system instructions to the expected format +// 4. 
Fixes CLI tool response format and grouping +// +// Parameters: +// - modelName: The name of the model to use for the request (unused in current implementation) +// - rawJSON: The raw JSON request data from the Gemini CLI API +// - stream: A boolean indicating if the request is for a streaming response (unused in current implementation) +// +// Returns: +// - []byte: The transformed request data in Gemini API format +func ConvertGeminiRequestToAntigravity(_ string, inputRawJSON []byte, _ bool) []byte { + rawJSON := bytes.Clone(inputRawJSON) + template := "" + template = `{"project":"","request":{},"model":""}` + template, _ = sjson.SetRaw(template, "request", string(rawJSON)) + template, _ = sjson.Set(template, "model", gjson.Get(template, "request.model").String()) + template, _ = sjson.Delete(template, "request.model") + + template, errFixCLIToolResponse := fixCLIToolResponse(template) + if errFixCLIToolResponse != nil { + return []byte{} + } + + systemInstructionResult := gjson.Get(template, "request.system_instruction") + if systemInstructionResult.Exists() { + template, _ = sjson.SetRaw(template, "request.systemInstruction", systemInstructionResult.Raw) + template, _ = sjson.Delete(template, "request.system_instruction") + } + rawJSON = []byte(template) + + // Normalize roles in request.contents: default to valid values if missing/invalid + contents := gjson.GetBytes(rawJSON, "request.contents") + if contents.Exists() { + prevRole := "" + idx := 0 + contents.ForEach(func(_ gjson.Result, value gjson.Result) bool { + role := value.Get("role").String() + valid := role == "user" || role == "model" + if role == "" || !valid { + var newRole string + if prevRole == "" { + newRole = "user" + } else if prevRole == "user" { + newRole = "model" + } else { + newRole = "user" + } + path := fmt.Sprintf("request.contents.%d.role", idx) + rawJSON, _ = sjson.SetBytes(rawJSON, path, newRole) + role = newRole + } + prevRole = role + idx++ + return true + }) + } + + toolsResult := gjson.GetBytes(rawJSON, "request.tools") + if toolsResult.Exists() && toolsResult.IsArray() { + toolResults := toolsResult.Array() + for i := 0; i < len(toolResults); i++ { + functionDeclarationsResult := gjson.GetBytes(rawJSON, fmt.Sprintf("request.tools.%d.function_declarations", i)) + if functionDeclarationsResult.Exists() && functionDeclarationsResult.IsArray() { + functionDeclarationsResults := functionDeclarationsResult.Array() + for j := 0; j < len(functionDeclarationsResults); j++ { + parametersResult := gjson.GetBytes(rawJSON, fmt.Sprintf("request.tools.%d.function_declarations.%d.parameters", i, j)) + if parametersResult.Exists() { + strJson, _ := util.RenameKey(string(rawJSON), fmt.Sprintf("request.tools.%d.function_declarations.%d.parameters", i, j), fmt.Sprintf("request.tools.%d.function_declarations.%d.parametersJsonSchema", i, j)) + rawJSON = []byte(strJson) + } + } + } + } + } + + return common.AttachDefaultSafetySettings(rawJSON, "request.safetySettings") +} + +// FunctionCallGroup represents a group of function calls and their responses +type FunctionCallGroup struct { + ModelContent map[string]interface{} + FunctionCalls []gjson.Result + ResponsesNeeded int +} + +// fixCLIToolResponse performs sophisticated tool response format conversion and grouping. +// This function transforms the CLI tool response format by intelligently grouping function calls +// with their corresponding responses, ensuring proper conversation flow and API compatibility. 
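+// For example, a model turn containing two functionCall parts is later followed by a single role:"function"
+// turn that carries both matching functionResponse parts.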
+// It converts from a linear format (1.json) to a grouped format (2.json) where function calls +// and their responses are properly associated and structured. +// +// Parameters: +// - input: The input JSON string to be processed +// +// Returns: +// - string: The processed JSON string with grouped function calls and responses +// - error: An error if the processing fails +func fixCLIToolResponse(input string) (string, error) { + // Parse the input JSON to extract the conversation structure + parsed := gjson.Parse(input) + + // Extract the contents array which contains the conversation messages + contents := parsed.Get("request.contents") + if !contents.Exists() { + // log.Debugf(input) + return input, fmt.Errorf("contents not found in input") + } + + // Initialize data structures for processing and grouping + var newContents []interface{} // Final processed contents array + var pendingGroups []*FunctionCallGroup // Groups awaiting completion with responses + var collectedResponses []gjson.Result // Standalone responses to be matched + + // Process each content object in the conversation + // This iterates through messages and groups function calls with their responses + contents.ForEach(func(key, value gjson.Result) bool { + role := value.Get("role").String() + parts := value.Get("parts") + + // Check if this content has function responses + var responsePartsInThisContent []gjson.Result + parts.ForEach(func(_, part gjson.Result) bool { + if part.Get("functionResponse").Exists() { + responsePartsInThisContent = append(responsePartsInThisContent, part) + } + return true + }) + + // If this content has function responses, collect them + if len(responsePartsInThisContent) > 0 { + collectedResponses = append(collectedResponses, responsePartsInThisContent...) + + // Check if any pending groups can be satisfied + for i := len(pendingGroups) - 1; i >= 0; i-- { + group := pendingGroups[i] + if len(collectedResponses) >= group.ResponsesNeeded { + // Take the needed responses for this group + groupResponses := collectedResponses[:group.ResponsesNeeded] + collectedResponses = collectedResponses[group.ResponsesNeeded:] + + // Create merged function response content + var responseParts []interface{} + for _, response := range groupResponses { + var responseMap map[string]interface{} + errUnmarshal := json.Unmarshal([]byte(response.Raw), &responseMap) + if errUnmarshal != nil { + log.Warnf("failed to unmarshal function response: %v\n", errUnmarshal) + continue + } + responseParts = append(responseParts, responseMap) + } + + if len(responseParts) > 0 { + functionResponseContent := map[string]interface{}{ + "parts": responseParts, + "role": "function", + } + newContents = append(newContents, functionResponseContent) + } + + // Remove this group as it's been satisfied + pendingGroups = append(pendingGroups[:i], pendingGroups[i+1:]...) 
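+					// At most one group is completed per tool-response content; stop scanning once its responses are consumed.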
+ break + } + } + + return true // Skip adding this content, responses are merged + } + + // If this is a model with function calls, create a new group + if role == "model" { + var functionCallsInThisModel []gjson.Result + parts.ForEach(func(_, part gjson.Result) bool { + if part.Get("functionCall").Exists() { + functionCallsInThisModel = append(functionCallsInThisModel, part) + } + return true + }) + + if len(functionCallsInThisModel) > 0 { + // Add the model content + var contentMap map[string]interface{} + errUnmarshal := json.Unmarshal([]byte(value.Raw), &contentMap) + if errUnmarshal != nil { + log.Warnf("failed to unmarshal model content: %v\n", errUnmarshal) + return true + } + newContents = append(newContents, contentMap) + + // Create a new group for tracking responses + group := &FunctionCallGroup{ + ModelContent: contentMap, + FunctionCalls: functionCallsInThisModel, + ResponsesNeeded: len(functionCallsInThisModel), + } + pendingGroups = append(pendingGroups, group) + } else { + // Regular model content without function calls + var contentMap map[string]interface{} + errUnmarshal := json.Unmarshal([]byte(value.Raw), &contentMap) + if errUnmarshal != nil { + log.Warnf("failed to unmarshal content: %v\n", errUnmarshal) + return true + } + newContents = append(newContents, contentMap) + } + } else { + // Non-model content (user, etc.) + var contentMap map[string]interface{} + errUnmarshal := json.Unmarshal([]byte(value.Raw), &contentMap) + if errUnmarshal != nil { + log.Warnf("failed to unmarshal content: %v\n", errUnmarshal) + return true + } + newContents = append(newContents, contentMap) + } + + return true + }) + + // Handle any remaining pending groups with remaining responses + for _, group := range pendingGroups { + if len(collectedResponses) >= group.ResponsesNeeded { + groupResponses := collectedResponses[:group.ResponsesNeeded] + collectedResponses = collectedResponses[group.ResponsesNeeded:] + + var responseParts []interface{} + for _, response := range groupResponses { + var responseMap map[string]interface{} + errUnmarshal := json.Unmarshal([]byte(response.Raw), &responseMap) + if errUnmarshal != nil { + log.Warnf("failed to unmarshal function response: %v\n", errUnmarshal) + continue + } + responseParts = append(responseParts, responseMap) + } + + if len(responseParts) > 0 { + functionResponseContent := map[string]interface{}{ + "parts": responseParts, + "role": "function", + } + newContents = append(newContents, functionResponseContent) + } + } + } + + // Update the original JSON with the new contents + result := input + newContentsJSON, _ := json.Marshal(newContents) + result, _ = sjson.Set(result, "request.contents", json.RawMessage(newContentsJSON)) + + return result, nil +} diff --git a/internal/translator/antigravity/gemini/antigravity_gemini_response.go b/internal/translator/antigravity/gemini/antigravity_gemini_response.go new file mode 100644 index 00000000..6f9d9791 --- /dev/null +++ b/internal/translator/antigravity/gemini/antigravity_gemini_response.go @@ -0,0 +1,86 @@ +// Package gemini provides request translation functionality for Gemini to Gemini CLI API compatibility. +// It handles parsing and transforming Gemini API requests into Gemini CLI API format, +// extracting model information, system instructions, message contents, and tool declarations. +// The package performs JSON data transformation to ensure compatibility +// between Gemini API format and Gemini CLI API's expected format. 
+package gemini
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
+// ConvertAntigravityResponseToGemini converts a streaming Antigravity (Gemini CLI-style) response chunk
+// into the Gemini API response format. It unwraps the nested response object and returns it in the
+// format expected by Gemini API clients.
+// The function performs the following transformations:
+// 1. Strips any leading SSE "data:" prefix from the chunk
+// 2. For the default alt mode, extracts the nested "response" object
+// 3. For alternative alt modes, collects the "response" objects from an array payload
+//
+// Parameters:
+//   - ctx: The context for the request, used for cancellation and timeout handling
+//   - modelName: The name of the model being used for the response (unused in current implementation)
+//   - rawJSON: The raw JSON response data from the Antigravity upstream
+//   - param: A pointer to a parameter object for the conversion (unused in current implementation)
+//
+// Returns:
+//   - []string: The transformed response data in Gemini API format
+func ConvertAntigravityResponseToGemini(ctx context.Context, _ string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, _ *any) []string {
+	if bytes.HasPrefix(rawJSON, []byte("data:")) {
+		rawJSON = bytes.TrimSpace(rawJSON[5:])
+	}
+
+	if alt, ok := ctx.Value("alt").(string); ok {
+		var chunk []byte
+		if alt == "" {
+			responseResult := gjson.GetBytes(rawJSON, "response")
+			if responseResult.Exists() {
+				chunk = []byte(responseResult.Raw)
+			}
+		} else {
+			chunkTemplate := "[]"
+			responseResult := gjson.ParseBytes(rawJSON)
+			if responseResult.IsArray() {
+				responseResultItems := responseResult.Array()
+				for i := 0; i < len(responseResultItems); i++ {
+					responseResultItem := responseResultItems[i]
+					if responseResultItem.Get("response").Exists() {
+						chunkTemplate, _ = sjson.SetRaw(chunkTemplate, "-1", responseResultItem.Get("response").Raw)
+					}
+				}
+			}
+			chunk = []byte(chunkTemplate)
+		}
+		return []string{string(chunk)}
+	}
+	return []string{}
+}
+
+// ConvertAntigravityResponseToGeminiNonStream converts a non-streaming Antigravity (Gemini CLI-style) response
+// into a non-streaming Gemini response. This function processes the complete upstream response and returns
+// the nested "response" object in the expected format.
+// +// Parameters: +// - ctx: The context for the request, used for cancellation and timeout handling +// - modelName: The name of the model being used for the response (unused in current implementation) +// - rawJSON: The raw JSON request data from the Gemini CLI API +// - param: A pointer to a parameter object for the conversion (unused in current implementation) +// +// Returns: +// - string: A Gemini-compatible JSON response containing the response data +func ConvertAntigravityResponseToGeminiNonStream(_ context.Context, _ string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, _ *any) string { + responseResult := gjson.GetBytes(rawJSON, "response") + if responseResult.Exists() { + return responseResult.Raw + } + return string(rawJSON) +} + +func GeminiTokenCount(ctx context.Context, count int64) string { + return fmt.Sprintf(`{"totalTokens":%d,"promptTokensDetails":[{"modality":"TEXT","tokenCount":%d}]}`, count, count) +} diff --git a/internal/translator/antigravity/gemini/init.go b/internal/translator/antigravity/gemini/init.go new file mode 100644 index 00000000..39558248 --- /dev/null +++ b/internal/translator/antigravity/gemini/init.go @@ -0,0 +1,20 @@ +package gemini + +import ( + . "github.com/router-for-me/CLIProxyAPI/v6/internal/constant" + "github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces" + "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/translator" +) + +func init() { + translator.Register( + Gemini, + Antigravity, + ConvertGeminiRequestToAntigravity, + interfaces.TranslateResponse{ + Stream: ConvertAntigravityResponseToGemini, + NonStream: ConvertAntigravityResponseToGeminiNonStream, + TokenCount: GeminiTokenCount, + }, + ) +} diff --git a/internal/translator/antigravity/openai/chat-completions/antigravity_openai_request.go b/internal/translator/antigravity/openai/chat-completions/antigravity_openai_request.go new file mode 100644 index 00000000..98a9d2e0 --- /dev/null +++ b/internal/translator/antigravity/openai/chat-completions/antigravity_openai_request.go @@ -0,0 +1,356 @@ +// Package openai provides request translation functionality for OpenAI to Gemini CLI API compatibility. +// It converts OpenAI Chat Completions requests into Gemini CLI compatible JSON using gjson/sjson only. +package chat_completions + +import ( + "bytes" + "fmt" + "strings" + + "github.com/router-for-me/CLIProxyAPI/v6/internal/misc" + "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/common" + "github.com/router-for-me/CLIProxyAPI/v6/internal/util" + log "github.com/sirupsen/logrus" + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" +) + +const geminiCLIFunctionThoughtSignature = "skip_thought_signature_validator" + +// ConvertOpenAIRequestToAntigravity converts an OpenAI Chat Completions request (raw JSON) +// into a complete Gemini CLI request JSON. All JSON construction uses sjson and lookups use gjson. 
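+// For example, "reasoning_effort":"low" maps to request.generationConfig.thinkingConfig.thinkingBudget = 1024
+// (normalized per model) together with include_thoughts set to true.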
+// +// Parameters: +// - modelName: The name of the model to use for the request +// - rawJSON: The raw JSON request data from the OpenAI API +// - stream: A boolean indicating if the request is for a streaming response (unused in current implementation) +// +// Returns: +// - []byte: The transformed request data in Gemini CLI API format +func ConvertOpenAIRequestToAntigravity(modelName string, inputRawJSON []byte, _ bool) []byte { + rawJSON := bytes.Clone(inputRawJSON) + // Base envelope (no default thinkingConfig) + out := []byte(`{"project":"","request":{"contents":[]},"model":"gemini-2.5-pro"}`) + + // Model + out, _ = sjson.SetBytes(out, "model", modelName) + + // Reasoning effort -> thinkingBudget/include_thoughts + // Note: OpenAI official fields take precedence over extra_body.google.thinking_config + re := gjson.GetBytes(rawJSON, "reasoning_effort") + hasOfficialThinking := re.Exists() + if hasOfficialThinking && util.ModelSupportsThinking(modelName) { + switch re.String() { + case "none": + out, _ = sjson.DeleteBytes(out, "request.generationConfig.thinkingConfig.include_thoughts") + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.thinkingBudget", 0) + case "auto": + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.thinkingBudget", -1) + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.include_thoughts", true) + case "low": + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.thinkingBudget", util.NormalizeThinkingBudget(modelName, 1024)) + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.include_thoughts", true) + case "medium": + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.thinkingBudget", util.NormalizeThinkingBudget(modelName, 8192)) + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.include_thoughts", true) + case "high": + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.thinkingBudget", util.NormalizeThinkingBudget(modelName, 32768)) + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.include_thoughts", true) + default: + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.thinkingBudget", -1) + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.include_thoughts", true) + } + } + + // Cherry Studio extension extra_body.google.thinking_config (effective only when official fields are absent) + if !hasOfficialThinking && util.ModelSupportsThinking(modelName) { + if tc := gjson.GetBytes(rawJSON, "extra_body.google.thinking_config"); tc.Exists() && tc.IsObject() { + var setBudget bool + var normalized int + + if v := tc.Get("thinkingBudget"); v.Exists() { + normalized = util.NormalizeThinkingBudget(modelName, int(v.Int())) + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.thinkingBudget", normalized) + setBudget = true + } else if v := tc.Get("thinking_budget"); v.Exists() { + normalized = util.NormalizeThinkingBudget(modelName, int(v.Int())) + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.thinkingBudget", normalized) + setBudget = true + } + + if v := tc.Get("includeThoughts"); v.Exists() { + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.include_thoughts", v.Bool()) + } else if v := tc.Get("include_thoughts"); v.Exists() { + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.include_thoughts", v.Bool()) + } else if setBudget && normalized != 0 { + out, _ = 
sjson.SetBytes(out, "request.generationConfig.thinkingConfig.include_thoughts", true) + } + } + } + + // For gemini-3-pro-preview, always send default thinkingConfig when none specified. + // This matches the official Gemini CLI behavior which always sends: + // { thinkingBudget: -1, includeThoughts: true } + // See: ai-gemini-cli/packages/core/src/config/defaultModelConfigs.ts + if !gjson.GetBytes(out, "request.generationConfig.thinkingConfig").Exists() && modelName == "gemini-3-pro-preview" { + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.thinkingBudget", -1) + out, _ = sjson.SetBytes(out, "request.generationConfig.thinkingConfig.include_thoughts", true) + } + + // Temperature/top_p/top_k + if tr := gjson.GetBytes(rawJSON, "temperature"); tr.Exists() && tr.Type == gjson.Number { + out, _ = sjson.SetBytes(out, "request.generationConfig.temperature", tr.Num) + } + if tpr := gjson.GetBytes(rawJSON, "top_p"); tpr.Exists() && tpr.Type == gjson.Number { + out, _ = sjson.SetBytes(out, "request.generationConfig.topP", tpr.Num) + } + if tkr := gjson.GetBytes(rawJSON, "top_k"); tkr.Exists() && tkr.Type == gjson.Number { + out, _ = sjson.SetBytes(out, "request.generationConfig.topK", tkr.Num) + } + + // Map OpenAI modalities -> Gemini CLI request.generationConfig.responseModalities + // e.g. "modalities": ["image", "text"] -> ["IMAGE", "TEXT"] + if mods := gjson.GetBytes(rawJSON, "modalities"); mods.Exists() && mods.IsArray() { + var responseMods []string + for _, m := range mods.Array() { + switch strings.ToLower(m.String()) { + case "text": + responseMods = append(responseMods, "TEXT") + case "image": + responseMods = append(responseMods, "IMAGE") + } + } + if len(responseMods) > 0 { + out, _ = sjson.SetBytes(out, "request.generationConfig.responseModalities", responseMods) + } + } + + // OpenRouter-style image_config support + // If the input uses top-level image_config.aspect_ratio, map it into request.generationConfig.imageConfig.aspectRatio. 
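+	// e.g. {"image_config":{"aspect_ratio":"16:9"}} -> "generationConfig":{"imageConfig":{"aspectRatio":"16:9"}}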
+	if imgCfg := gjson.GetBytes(rawJSON, "image_config"); imgCfg.Exists() && imgCfg.IsObject() {
+		if ar := imgCfg.Get("aspect_ratio"); ar.Exists() && ar.Type == gjson.String {
+			out, _ = sjson.SetBytes(out, "request.generationConfig.imageConfig.aspectRatio", ar.Str)
+		}
+	}
+
+	// messages -> systemInstruction + contents
+	messages := gjson.GetBytes(rawJSON, "messages")
+	if messages.IsArray() {
+		arr := messages.Array()
+		// First pass: assistant tool_calls id->name map
+		tcID2Name := map[string]string{}
+		for i := 0; i < len(arr); i++ {
+			m := arr[i]
+			if m.Get("role").String() == "assistant" {
+				tcs := m.Get("tool_calls")
+				if tcs.IsArray() {
+					for _, tc := range tcs.Array() {
+						if tc.Get("type").String() == "function" {
+							id := tc.Get("id").String()
+							name := tc.Get("function.name").String()
+							if id != "" && name != "" {
+								tcID2Name[id] = name
+							}
+						}
+					}
+				}
+			}
+		}
+
+		// Second pass: cache tool responses keyed by tool_call_id
+		toolResponses := map[string]string{} // tool_call_id -> response text
+		for i := 0; i < len(arr); i++ {
+			m := arr[i]
+			role := m.Get("role").String()
+			if role == "tool" {
+				toolCallID := m.Get("tool_call_id").String()
+				if toolCallID != "" {
+					c := m.Get("content")
+					toolResponses[toolCallID] = c.Raw
+				}
+			}
+		}
+
+		for i := 0; i < len(arr); i++ {
+			m := arr[i]
+			role := m.Get("role").String()
+			content := m.Get("content")
+
+			if role == "system" && len(arr) > 1 {
+				// system -> request.systemInstruction as a user message style
+				if content.Type == gjson.String {
+					out, _ = sjson.SetBytes(out, "request.systemInstruction.role", "user")
+					out, _ = sjson.SetBytes(out, "request.systemInstruction.parts.0.text", content.String())
+				} else if content.IsObject() && content.Get("type").String() == "text" {
+					out, _ = sjson.SetBytes(out, "request.systemInstruction.role", "user")
+					out, _ = sjson.SetBytes(out, "request.systemInstruction.parts.0.text", content.Get("text").String())
+				}
+			} else if role == "user" || (role == "system" && len(arr) == 1) {
+				// Build single user content node to avoid splitting into multiple contents
+				node := []byte(`{"role":"user","parts":[]}`)
+				if content.Type == gjson.String {
+					node, _ = sjson.SetBytes(node, "parts.0.text", content.String())
+				} else if content.IsArray() {
+					items := content.Array()
+					p := 0
+					for _, item := range items {
+						switch item.Get("type").String() {
+						case "text":
+							node, _ = sjson.SetBytes(node, "parts."+itoa(p)+".text", item.Get("text").String())
+							p++
+						case "image_url":
+							imageURL := item.Get("image_url.url").String()
+							if len(imageURL) > 5 {
+								pieces := strings.SplitN(imageURL[5:], ";", 2) // imageURL[5:] strips the leading "data:"
+								if len(pieces) == 2 && len(pieces[1]) > 7 {
+									mime := pieces[0]
+									data := pieces[1][7:] // pieces[1][7:] strips the leading "base64,"
+									node, _ = sjson.SetBytes(node, "parts."+itoa(p)+".inlineData.mime_type", mime)
+									node, _ = sjson.SetBytes(node, "parts."+itoa(p)+".inlineData.data", data)
+									p++
+								}
+							}
+						case "file":
+							filename := item.Get("file.filename").String()
+							fileData := item.Get("file.file_data").String()
+							ext := ""
+							if sp := strings.Split(filename, "."); len(sp) > 1 {
+								ext = sp[len(sp)-1]
+							}
+							if mimeType, ok := misc.MimeTypes[ext]; ok {
+								node, _ = sjson.SetBytes(node, "parts."+itoa(p)+".inlineData.mime_type", mimeType)
+								node, _ = sjson.SetBytes(node, "parts."+itoa(p)+".inlineData.data", fileData)
+								p++
+							} else {
+								log.Warnf("Unknown file name extension '%s' in user message, skipping", ext)
+							}
+						}
+					}
+				}
+				out, _ = sjson.SetRawBytes(out, "request.contents.-1", node)
+			} else if role == "assistant" {
+				if content.Type == gjson.String {
+					// Assistant text -> single model content
+					node := []byte(`{"role":"model","parts":[{"text":""}]}`)
+					node, _ = sjson.SetBytes(node, "parts.0.text", content.String())
+					out, _ = sjson.SetRawBytes(out, "request.contents.-1", node)
+				} else if !content.Exists() || content.Type == gjson.Null {
+					// Tool calls -> single model content with functionCall parts
+					tcs := m.Get("tool_calls")
+					if tcs.IsArray() {
+						node := []byte(`{"role":"model","parts":[]}`)
+						p := 0
+						fIDs := make([]string, 0)
+						for _, tc := range tcs.Array() {
+							if tc.Get("type").String() != "function" {
+								continue
+							}
+							fid := tc.Get("id").String()
+							fname := tc.Get("function.name").String()
+							fargs := tc.Get("function.arguments").String()
+							node, _ = sjson.SetBytes(node, "parts."+itoa(p)+".functionCall.name", fname)
+							node, _ = sjson.SetRawBytes(node, "parts."+itoa(p)+".functionCall.args", []byte(fargs))
+							node, _ = sjson.SetBytes(node, "parts."+itoa(p)+".thoughtSignature", geminiCLIFunctionThoughtSignature)
+							p++
+							if fid != "" {
+								fIDs = append(fIDs, fid)
+							}
+						}
+						out, _ = sjson.SetRawBytes(out, "request.contents.-1", node)
+
+						// Append a single tool content combining name + response per function
+						toolNode := []byte(`{"role":"tool","parts":[]}`)
+						pp := 0
+						for _, fid := range fIDs {
+							if name, ok := tcID2Name[fid]; ok {
+								toolNode, _ = sjson.SetBytes(toolNode, "parts."+itoa(pp)+".functionResponse.name", name)
+								resp := toolResponses[fid]
+								if resp == "" {
+									resp = "{}"
+								}
+								toolNode, _ = sjson.SetBytes(toolNode, "parts."+itoa(pp)+".functionResponse.response.result", []byte(resp))
+								pp++
+							}
+						}
+						if pp > 0 {
+							out, _ = sjson.SetRawBytes(out, "request.contents.-1", toolNode)
+						}
+					}
+				}
+			}
+		}
+	}
+
+	// tools -> request.tools[0].functionDeclarations
+	tools := gjson.GetBytes(rawJSON, "tools")
+	if tools.IsArray() && len(tools.Array()) > 0 {
+		out, _ = sjson.SetRawBytes(out, "request.tools", []byte(`[{"functionDeclarations":[]}]`))
+		fdPath := "request.tools.0.functionDeclarations"
+		for _, t := range tools.Array() {
+			if t.Get("type").String() == "function" {
+				fn := t.Get("function")
+				if fn.Exists() && fn.IsObject() {
+					fnRaw := fn.Raw
+					if fn.Get("parameters").Exists() {
+						renamed, errRename := util.RenameKey(fnRaw, "parameters", "parametersJsonSchema")
+						if errRename != nil {
+							log.Warnf("Failed to rename parameters for tool '%s': %v", fn.Get("name").String(), errRename)
+							var errSet error
+							fnRaw, errSet = sjson.Set(fnRaw, "parametersJsonSchema.type", "object")
+							if errSet != nil {
+								log.Warnf("Failed to set default schema type for tool '%s': %v", fn.Get("name").String(), errSet)
+								continue
+							}
+							fnRaw, errSet = sjson.Set(fnRaw, "parametersJsonSchema.properties", map[string]interface{}{})
+							if errSet != nil {
+								log.Warnf("Failed to set default schema properties for tool '%s': %v", fn.Get("name").String(), errSet)
+								continue
+							}
+						} else {
+							fnRaw = renamed
+						}
+					} else {
+						var errSet error
+						fnRaw, errSet = sjson.Set(fnRaw, "parametersJsonSchema.type", "object")
+						if errSet != nil {
+							log.Warnf("Failed to set default schema type for tool '%s': %v", fn.Get("name").String(), errSet)
+							continue
+						}
+						fnRaw, errSet = sjson.Set(fnRaw, "parametersJsonSchema.properties", map[string]interface{}{})
+						if errSet != nil {
+							log.Warnf("Failed to set default schema properties for tool '%s': %v", fn.Get("name").String(), errSet)
+							continue
+						}
+					}
+					fnRaw, _ = sjson.Delete(fnRaw, "strict")
+					tmp, errSet := sjson.SetRawBytes(out, fdPath+".-1", []byte(fnRaw))
+					if errSet != nil {
+						log.Warnf("Failed to append tool declaration for '%s': %v", fn.Get("name").String(), errSet)
+						continue
+					}
+					out = tmp
+				}
+			}
+		}
+	}
+
+	return common.AttachDefaultSafetySettings(out, "request.safetySettings")
+}
+
+// itoa converts an int to its decimal string form; fmt is already imported, so strconv is avoided for these few call sites.
+func itoa(i int) string { return fmt.Sprintf("%d", i) }
+
+// quoteIfNeeded ensures a string is a valid JSON value: plain text is quoted, while JSON objects/arrays pass through unchanged.
+func quoteIfNeeded(s string) string {
+	s = strings.TrimSpace(s)
+	if s == "" {
+		return "\"\""
+	}
+	if len(s) > 0 && (s[0] == '{' || s[0] == '[') {
+		return s
+	}
+	// escape backslashes and quotes minimally
+	s = strings.ReplaceAll(s, "\\", "\\\\")
+	s = strings.ReplaceAll(s, "\"", "\\\"")
+	return "\"" + s + "\""
+}
diff --git a/internal/translator/antigravity/openai/chat-completions/antigravity_openai_response.go b/internal/translator/antigravity/openai/chat-completions/antigravity_openai_response.go
new file mode 100644
index 00000000..cd6f4043
--- /dev/null
+++ b/internal/translator/antigravity/openai/chat-completions/antigravity_openai_response.go
@@ -0,0 +1,222 @@
+// Package chat_completions provides response translation functionality for Antigravity to OpenAI API compatibility.
+// This package handles the conversion of Antigravity (Gemini CLI-style) responses into OpenAI Chat Completions-compatible
+// JSON format, transforming streaming events and non-streaming responses into the format
+// expected by OpenAI API clients. It supports both streaming and non-streaming modes,
+// handling text content, tool calls, reasoning content, and usage metadata appropriately.
+package chat_completions
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	. "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/openai/chat-completions"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
+// convertCliResponseToOpenAIChatParams holds parameters for response conversion.
+type convertCliResponseToOpenAIChatParams struct {
+	UnixTimestamp int64
+	FunctionIndex int
+}
+
+// ConvertAntigravityResponseToOpenAI translates a single chunk of a streaming response from the
+// Antigravity (Gemini CLI-style) format to the OpenAI Chat Completions streaming format.
+// It processes the various Antigravity event types and transforms them into OpenAI-compatible JSON responses.
+// The function handles text content, tool calls, reasoning content, and usage metadata, outputting
+// responses that match the OpenAI API format. It supports incremental updates for streaming responses.
+//
+// Parameters:
+//   - ctx: The context for the request, used for cancellation and timeout handling (unused in current implementation)
+//   - modelName: The name of the model being used for the response (unused in current implementation)
+//   - rawJSON: The raw JSON response chunk from the Antigravity API
+//   - param: A pointer to a parameter object for maintaining state between calls
+//
+// Returns:
+//   - []string: A slice of strings, each containing an OpenAI-compatible JSON response
+func ConvertAntigravityResponseToOpenAI(_ context.Context, _ string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, param *any) []string {
+	if *param == nil {
+		*param = &convertCliResponseToOpenAIChatParams{
+			UnixTimestamp: 0,
+			FunctionIndex: 0,
+		}
+	}
+
+	if bytes.Equal(rawJSON, []byte("[DONE]")) {
+		return []string{}
+	}
+
+	// Initialize the OpenAI SSE template.
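+	// The template mirrors a single OpenAI chat.completion.chunk; the null delta fields are
+	// only filled in below when the corresponding data is present in the Antigravity event.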
+	template := `{"id":"","object":"chat.completion.chunk","created":12345,"model":"model","choices":[{"index":0,"delta":{"role":null,"content":null,"reasoning_content":null,"tool_calls":null},"finish_reason":null,"native_finish_reason":null}]}`
+
+	// Extract and set the model version.
+	if modelVersionResult := gjson.GetBytes(rawJSON, "response.modelVersion"); modelVersionResult.Exists() {
+		template, _ = sjson.Set(template, "model", modelVersionResult.String())
+	}
+
+	// Extract and set the creation timestamp.
+	if createTimeResult := gjson.GetBytes(rawJSON, "response.createTime"); createTimeResult.Exists() {
+		t, err := time.Parse(time.RFC3339Nano, createTimeResult.String())
+		if err == nil {
+			(*param).(*convertCliResponseToOpenAIChatParams).UnixTimestamp = t.Unix()
+		}
+		template, _ = sjson.Set(template, "created", (*param).(*convertCliResponseToOpenAIChatParams).UnixTimestamp)
+	} else {
+		template, _ = sjson.Set(template, "created", (*param).(*convertCliResponseToOpenAIChatParams).UnixTimestamp)
+	}
+
+	// Extract and set the response ID.
+	if responseIDResult := gjson.GetBytes(rawJSON, "response.responseId"); responseIDResult.Exists() {
+		template, _ = sjson.Set(template, "id", responseIDResult.String())
+	}
+
+	// Extract and set the finish reason.
+	if finishReasonResult := gjson.GetBytes(rawJSON, "response.candidates.0.finishReason"); finishReasonResult.Exists() {
+		template, _ = sjson.Set(template, "choices.0.finish_reason", finishReasonResult.String())
+		template, _ = sjson.Set(template, "choices.0.native_finish_reason", finishReasonResult.String())
+	}
+
+	// Extract and set usage metadata (token counts).
+	if usageResult := gjson.GetBytes(rawJSON, "response.usageMetadata"); usageResult.Exists() {
+		if candidatesTokenCountResult := usageResult.Get("candidatesTokenCount"); candidatesTokenCountResult.Exists() {
+			template, _ = sjson.Set(template, "usage.completion_tokens", candidatesTokenCountResult.Int())
+		}
+		if totalTokenCountResult := usageResult.Get("totalTokenCount"); totalTokenCountResult.Exists() {
+			template, _ = sjson.Set(template, "usage.total_tokens", totalTokenCountResult.Int())
+		}
+		promptTokenCount := usageResult.Get("promptTokenCount").Int()
+		thoughtsTokenCount := usageResult.Get("thoughtsTokenCount").Int()
+		template, _ = sjson.Set(template, "usage.prompt_tokens", promptTokenCount+thoughtsTokenCount)
+		if thoughtsTokenCount > 0 {
+			template, _ = sjson.Set(template, "usage.completion_tokens_details.reasoning_tokens", thoughtsTokenCount)
+		}
+	}
+
+	// Process the main content part of the response.
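+	// A single candidate may interleave several part kinds: plain text, thought (reasoning)
+	// text, functionCall entries, and inlineData images. Parts that carry a thoughtSignature
+	// are skipped entirely, since the signature is opaque encrypted data not meant for clients.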
+	partsResult := gjson.GetBytes(rawJSON, "response.candidates.0.content.parts")
+	hasFunctionCall := false
+	hasValidContent := false
+	if partsResult.IsArray() {
+		partResults := partsResult.Array()
+		for i := 0; i < len(partResults); i++ {
+			partResult := partResults[i]
+			partTextResult := partResult.Get("text")
+			functionCallResult := partResult.Get("functionCall")
+			thoughtSignatureResult := partResult.Get("thoughtSignature")
+			inlineDataResult := partResult.Get("inlineData")
+			if !inlineDataResult.Exists() {
+				inlineDataResult = partResult.Get("inline_data")
+			}
+
+			// Handle thoughtSignature - this is encrypted reasoning content that should not be exposed to the client
+			if thoughtSignatureResult.Exists() && thoughtSignatureResult.String() != "" {
+				// Skip thoughtSignature processing - it's internal encrypted data
+				continue
+			}
+
+			if partTextResult.Exists() {
+				textContent := partTextResult.String()
+				// Skip empty text content to avoid generating unnecessary chunks
+				if textContent == "" {
+					continue
+				}
+
+				// Handle text content, distinguishing between regular content and reasoning/thoughts.
+				if partResult.Get("thought").Bool() {
+					template, _ = sjson.Set(template, "choices.0.delta.reasoning_content", textContent)
+				} else {
+					template, _ = sjson.Set(template, "choices.0.delta.content", textContent)
+				}
+				template, _ = sjson.Set(template, "choices.0.delta.role", "assistant")
+				hasValidContent = true
+			} else if functionCallResult.Exists() {
+				// Handle function call content.
+				hasFunctionCall = true
+				toolCallsResult := gjson.Get(template, "choices.0.delta.tool_calls")
+				functionCallIndex := (*param).(*convertCliResponseToOpenAIChatParams).FunctionIndex
+				(*param).(*convertCliResponseToOpenAIChatParams).FunctionIndex++
+				if toolCallsResult.Exists() && toolCallsResult.IsArray() {
+					functionCallIndex = len(toolCallsResult.Array())
+				} else {
+					template, _ = sjson.SetRaw(template, "choices.0.delta.tool_calls", `[]`)
+				}
+
+				functionCallTemplate := `{"id": "","index": 0,"type": "function","function": {"name": "","arguments": ""}}`
+				fcName := functionCallResult.Get("name").String()
+				functionCallTemplate, _ = sjson.Set(functionCallTemplate, "id", fmt.Sprintf("%s-%d", fcName, time.Now().UnixNano()))
+				functionCallTemplate, _ = sjson.Set(functionCallTemplate, "index", functionCallIndex)
+				functionCallTemplate, _ = sjson.Set(functionCallTemplate, "function.name", fcName)
+				if fcArgsResult := functionCallResult.Get("args"); fcArgsResult.Exists() {
+					functionCallTemplate, _ = sjson.Set(functionCallTemplate, "function.arguments", fcArgsResult.Raw)
+				}
+				template, _ = sjson.Set(template, "choices.0.delta.role", "assistant")
+				template, _ = sjson.SetRaw(template, "choices.0.delta.tool_calls.-1", functionCallTemplate)
+			} else if inlineDataResult.Exists() {
+				data := inlineDataResult.Get("data").String()
+				if data == "" {
+					continue
+				}
+				mimeType := inlineDataResult.Get("mimeType").String()
+				if mimeType == "" {
+					mimeType = inlineDataResult.Get("mime_type").String()
+				}
+				if mimeType == "" {
+					mimeType = "image/png"
+				}
+				imageURL := fmt.Sprintf("data:%s;base64,%s", mimeType, data)
+				imagePayload, err := json.Marshal(map[string]any{
+					"type": "image_url",
+					"image_url": map[string]string{
+						"url": imageURL,
+					},
+				})
+				if err != nil {
+					continue
+				}
+				imagesResult := gjson.Get(template, "choices.0.delta.images")
+				if !imagesResult.Exists() || !imagesResult.IsArray() {
+					template, _ = sjson.SetRaw(template, "choices.0.delta.images", `[]`)
+				}
+				template, _ = sjson.Set(template, "choices.0.delta.role", "assistant")
+				template, _ = sjson.SetRaw(template, "choices.0.delta.images.-1", string(imagePayload))
+			}
+		}
+	}
+
+	if hasFunctionCall {
+		template, _ = sjson.Set(template, "choices.0.finish_reason", "tool_calls")
+		template, _ = sjson.Set(template, "choices.0.native_finish_reason", "tool_calls")
+	}
+
+	// Only return a chunk if there's actual content or a finish reason
+	finishReason := gjson.GetBytes(rawJSON, "response.candidates.0.finishReason")
+	if !hasValidContent && !finishReason.Exists() {
+		return []string{}
+	}
+
+	return []string{template}
+}
+
+// ConvertAntigravityResponseToOpenAINonStream converts a non-streaming Antigravity (Gemini CLI-style) response to a non-streaming OpenAI response.
+// This function unwraps the complete Antigravity response and delegates to the shared Gemini translator, producing a single OpenAI-compatible
+// JSON response. It handles message content, tool calls, reasoning content, and usage metadata, combining all
+// the information into a single response that matches the OpenAI API format.
+//
+// Parameters:
+//   - ctx: The context for the request, used for cancellation and timeout handling
+//   - modelName: The name of the model being used for the response
+//   - rawJSON: The raw JSON response from the Antigravity API
+//   - param: A pointer to a parameter object for the conversion
+//
+// Returns:
+//   - string: An OpenAI-compatible JSON response containing all message content and metadata
+func ConvertAntigravityResponseToOpenAINonStream(ctx context.Context, modelName string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, param *any) string {
+	responseResult := gjson.GetBytes(rawJSON, "response")
+	if responseResult.Exists() {
+		return ConvertGeminiResponseToOpenAINonStream(ctx, modelName, originalRequestRawJSON, requestRawJSON, []byte(responseResult.Raw), param)
+	}
+	return ""
+}
diff --git a/internal/translator/antigravity/openai/chat-completions/init.go b/internal/translator/antigravity/openai/chat-completions/init.go
new file mode 100644
index 00000000..5c5c71e4
--- /dev/null
+++ b/internal/translator/antigravity/openai/chat-completions/init.go
@@ -0,0 +1,19 @@
+package chat_completions
+
+import (
+	. "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/translator/translator"
+)
+
+func init() {
+	translator.Register(
+		OpenAI,
+		Antigravity,
+		ConvertOpenAIRequestToAntigravity,
+		interfaces.TranslateResponse{
+			Stream:    ConvertAntigravityResponseToOpenAI,
+			NonStream: ConvertAntigravityResponseToOpenAINonStream,
+		},
+	)
+}
diff --git a/internal/translator/antigravity/openai/responses/antigravity_openai-responses_request.go b/internal/translator/antigravity/openai/responses/antigravity_openai-responses_request.go
new file mode 100644
index 00000000..cbe4757c
--- /dev/null
+++ b/internal/translator/antigravity/openai/responses/antigravity_openai-responses_request.go
@@ -0,0 +1,14 @@
+package responses
+
+import (
+	"bytes"
+
+	. "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini-cli/gemini"
+	. "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/openai/responses"
+)
+
+func ConvertOpenAIResponsesRequestToAntigravity(modelName string, inputRawJSON []byte, stream bool) []byte {
+	rawJSON := bytes.Clone(inputRawJSON)
+	rawJSON = ConvertOpenAIResponsesRequestToGemini(modelName, rawJSON, stream)
+	return ConvertGeminiRequestToGeminiCLI(modelName, rawJSON, stream)
+}
diff --git a/internal/translator/antigravity/openai/responses/antigravity_openai-responses_response.go b/internal/translator/antigravity/openai/responses/antigravity_openai-responses_response.go
new file mode 100644
index 00000000..7c416c1f
--- /dev/null
+++ b/internal/translator/antigravity/openai/responses/antigravity_openai-responses_response.go
@@ -0,0 +1,35 @@
+package responses
+
+import (
+	"context"
+
+	. "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/openai/responses"
+	"github.com/tidwall/gjson"
+)
+
+func ConvertAntigravityResponseToOpenAIResponses(ctx context.Context, modelName string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, param *any) []string {
+	responseResult := gjson.GetBytes(rawJSON, "response")
+	if responseResult.Exists() {
+		rawJSON = []byte(responseResult.Raw)
+	}
+	return ConvertGeminiResponseToOpenAIResponses(ctx, modelName, originalRequestRawJSON, requestRawJSON, rawJSON, param)
+}
+
+func ConvertAntigravityResponseToOpenAIResponsesNonStream(ctx context.Context, modelName string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, param *any) string {
+	responseResult := gjson.GetBytes(rawJSON, "response")
+	if responseResult.Exists() {
+		rawJSON = []byte(responseResult.Raw)
+	}
+
+	requestResult := gjson.GetBytes(originalRequestRawJSON, "request")
+	if requestResult.Exists() {
+		originalRequestRawJSON = []byte(requestResult.Raw)
+	}
+
+	requestResult = gjson.GetBytes(requestRawJSON, "request")
+	if requestResult.Exists() {
+		requestRawJSON = []byte(requestResult.Raw)
+	}
+
+	return ConvertGeminiResponseToOpenAIResponsesNonStream(ctx, modelName, originalRequestRawJSON, requestRawJSON, rawJSON, param)
+}
diff --git a/internal/translator/antigravity/openai/responses/init.go b/internal/translator/antigravity/openai/responses/init.go
new file mode 100644
index 00000000..8d137032
--- /dev/null
+++ b/internal/translator/antigravity/openai/responses/init.go
@@ -0,0 +1,19 @@
+package responses
+
+import (
+	. "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/translator/translator"
+)
+
+func init() {
+	translator.Register(
+		OpenaiResponse,
+		Antigravity,
+		ConvertOpenAIResponsesRequestToAntigravity,
+		interfaces.TranslateResponse{
+			Stream:    ConvertAntigravityResponseToOpenAIResponses,
+			NonStream: ConvertAntigravityResponseToOpenAIResponsesNonStream,
+		},
+	)
+}
diff --git a/internal/translator/init.go b/internal/translator/init.go
index 1f430457..084ea7ac 100644
--- a/internal/translator/init.go
+++ b/internal/translator/init.go
@@ -28,4 +28,9 @@ import (
 	_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/openai/gemini-cli"
 	_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/openai/openai/chat-completions"
 	_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/openai/openai/responses"
+
+	_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/antigravity/claude"
+	_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/antigravity/gemini"
+	_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/antigravity/openai/chat-completions"
+	_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/antigravity/openai/responses"
 )