v6 version first commit

Luis Pater
2025-09-22 01:40:24 +08:00
parent d42384cdb7
commit 4999fce7f4
171 changed files with 7626 additions and 7494 deletions

@@ -0,0 +1,19 @@
package chat_completions

import (
	. "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/translator/translator"
)

func init() {
	translator.Register(
		OPENAI,
		OPENAI,
		ConvertOpenAIRequestToOpenAI,
		interfaces.TranslateResponse{
			Stream:    ConvertOpenAIResponseToOpenAI,
			NonStream: ConvertOpenAIResponseToOpenAINonStream,
		},
	)
}
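The Register call above implies a translator registry keyed by the (source, target) format pair. As a minimal sketch of that shape (the type and function names below approximate internal/translator/translator and internal/interfaces, whose real definitions are not part of this diff):

package translator

import "context"

// TranslateRequest mirrors the request-converter signature registered above.
type TranslateRequest func(modelName string, rawJSON []byte, stream bool) []byte

// TranslateResponse mirrors interfaces.TranslateResponse: one converter per delivery mode.
type TranslateResponse struct {
	Stream    func(ctx context.Context, modelName string, originalReq, req, raw []byte, param *any) []string
	NonStream func(ctx context.Context, modelName string, originalReq, req, raw []byte, param *any) string
}

type pair struct{ from, to string }

type entry struct {
	Request  TranslateRequest
	Response TranslateResponse
}

var registry = map[pair]entry{}

// Register stores the converters for a (from, to) format pair; registering
// OPENAI -> OPENAI installs the identity passthrough added in this commit.
func Register(from, to string, request TranslateRequest, response TranslateResponse) {
	registry[pair{from, to}] = entry{Request: request, Response: response}
}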

@@ -0,0 +1,24 @@
// Package chat_completions provides request translation functionality for OpenAI Chat Completions passthrough.
// Because the source and target formats are identical, the request is forwarded unchanged.
package chat_completions

import (
	"bytes"

	log "github.com/sirupsen/logrus"
)

// ConvertOpenAIRequestToOpenAI is the identity request translator: it returns the OpenAI
// Chat Completions request (raw JSON) unchanged, cloned so the caller's buffer is never aliased.
//
// Parameters:
//   - modelName: The name of the model to use for the request (unused in current implementation)
//   - inputRawJSON: The raw JSON request data from the OpenAI API
//   - stream: A boolean indicating if the request is for a streaming response (unused in current implementation)
//
// Returns:
//   - []byte: The request data, unchanged, in OpenAI Chat Completions format
func ConvertOpenAIRequestToOpenAI(modelName string, inputRawJSON []byte, _ bool) []byte {
	log.Debug("ConvertOpenAIRequestToOpenAI")
	return bytes.Clone(inputRawJSON)
}
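The clone is what makes this passthrough safe to register: bytes.Clone (Go 1.20+) copies the slice, so later mutation of the caller's buffer cannot leak into the forwarded request. A small standalone illustration:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	in := []byte(`{"model":"example"}`)
	out := bytes.Clone(in)   // independent backing array
	in[2] = 'X'              // mutate the original buffer
	fmt.Println(string(out)) // still {"model":"example"}
}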

@@ -0,0 +1,56 @@
// Package chat_completions provides response translation functionality for OpenAI Chat Completions passthrough.
// Because the upstream already answers in the OpenAI Chat Completions format, both the
// streaming and non-streaming translators forward the payload unchanged; the streaming
// translator only normalizes SSE framing, stripping the "data:" prefix and dropping the
// terminal "[DONE]" sentinel.
package chat_completions

import (
	"bytes"
	"context"

	log "github.com/sirupsen/logrus"
)

// ConvertOpenAIResponseToOpenAI translates a single chunk of a streaming response. Since
// source and target formats are both OpenAI Chat Completions, the chunk passes through
// unchanged after SSE normalization: a leading "data:" prefix is stripped, and the
// "[DONE]" sentinel yields no output.
//
// Parameters:
//   - ctx: The context for the request (unused in current implementation)
//   - modelName: The name of the model being used for the response (unused in current implementation)
//   - originalRequestRawJSON: The original client request JSON (unused in current implementation)
//   - requestRawJSON: The translated upstream request JSON (unused in current implementation)
//   - rawJSON: The raw streaming chunk, possibly prefixed with "data:"
//   - param: A pointer to a parameter object for maintaining state between calls (unused in current implementation)
//
// Returns:
//   - []string: Zero or one OpenAI-compatible JSON chunks
func ConvertOpenAIResponseToOpenAI(_ context.Context, _ string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, param *any) []string {
	log.Debug("ConvertOpenAIResponseToOpenAI")
	if bytes.HasPrefix(rawJSON, []byte("data:")) {
		rawJSON = bytes.TrimSpace(rawJSON[5:])
	}
	if bytes.Equal(rawJSON, []byte("[DONE]")) {
		return []string{}
	}
	return []string{string(rawJSON)}
}
// ConvertOpenAIResponseToOpenAINonStream converts a complete non-streaming response by
// returning it unchanged: the upstream payload is already in the OpenAI Chat Completions
// format expected by the client.
//
// Parameters:
//   - ctx: The context for the request (unused in current implementation)
//   - modelName: The name of the model being used for the response (unused in current implementation)
//   - originalRequestRawJSON: The original client request JSON (unused in current implementation)
//   - requestRawJSON: The translated upstream request JSON (unused in current implementation)
//   - rawJSON: The complete raw JSON response
//   - param: A pointer to a parameter object for the conversion (unused in current implementation)
//
// Returns:
//   - string: The OpenAI-compatible JSON response, unchanged
func ConvertOpenAIResponseToOpenAINonStream(ctx context.Context, modelName string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, param *any) string {
	log.Debug("ConvertOpenAIResponseToOpenAINonStream")
	return string(rawJSON)
}
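A hedged usage sketch of the streaming passthrough, with invented chunk payloads (context and fmt imports assumed):

chunks := [][]byte{
	[]byte(`data: {"object":"chat.completion.chunk","choices":[]}`),
	[]byte(`data: [DONE]`),
}
var param any
for _, chunk := range chunks {
	for _, out := range ConvertOpenAIResponseToOpenAI(context.Background(), "model", nil, nil, chunk, &param) {
		fmt.Println(out) // the JSON chunk passes through; [DONE] yields no output
	}
}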

@@ -1,9 +1,9 @@
 package responses

 import (
-	. "github.com/luispater/CLIProxyAPI/v5/internal/constant"
-	"github.com/luispater/CLIProxyAPI/v5/internal/interfaces"
-	"github.com/luispater/CLIProxyAPI/v5/internal/translator/translator"
+	. "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/translator/translator"
 )

 func init() {

@@ -3,6 +3,7 @@ package responses
 import (
 	"bytes"

+	log "github.com/sirupsen/logrus"
 	"github.com/tidwall/gjson"
 	"github.com/tidwall/sjson"
 )
@@ -27,6 +28,7 @@ import (
 // Returns:
 //   - []byte: The transformed request data in OpenAI chat completions format
 func ConvertOpenAIResponsesRequestToOpenAIChatCompletions(modelName string, inputRawJSON []byte, stream bool) []byte {
+	log.Debug("ConvertOpenAIResponsesRequestToOpenAIChatCompletions")
 	rawJSON := bytes.Clone(inputRawJSON)
 	// Base OpenAI chat completions template with default values
 	out := `{"model":"","messages":[],"stream":false}`
@@ -1,11 +1,13 @@
 package responses

 import (
+	"bytes"
 	"context"
 	"fmt"
 	"strings"
 	"time"

+	log "github.com/sirupsen/logrus"
 	"github.com/tidwall/gjson"
 	"github.com/tidwall/sjson"
 )
@@ -34,12 +36,13 @@ type oaiToResponsesState struct {
 }

 func emitRespEvent(event string, payload string) string {
-	return fmt.Sprintf("event: %s\ndata: %s\n\n", event, payload)
+	return fmt.Sprintf("event: %s\ndata: %s", event, payload)
 }
 // ConvertOpenAIChatCompletionsResponseToOpenAIResponses converts OpenAI Chat Completions streaming chunks
 // to OpenAI Responses SSE events (response.*).
 func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context, modelName string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, param *any) []string {
+	log.Debug("ConvertOpenAIChatCompletionsResponseToOpenAIResponses")
 	if *param == nil {
 		*param = &oaiToResponsesState{
 			FuncArgsBuf: make(map[int]*strings.Builder),
@@ -55,6 +58,10 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context,
 	}
 	st := (*param).(*oaiToResponsesState)
+	if bytes.HasPrefix(rawJSON, []byte("data:")) {
+		rawJSON = bytes.TrimSpace(rawJSON[5:])
+	}
 	root := gjson.ParseBytes(rawJSON)
 	obj := root.Get("object").String()
 	if obj != "chat.completion.chunk" {
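Note the emitRespEvent change above: dropping the trailing "\n\n" shifts responsibility for the SSE frame separator to whoever writes the event out. A minimal sketch of that writer side, assuming the HTTP layer appends the blank line when flushing each event:

// Emit one SSE frame; the blank-line separator required by the SSE protocol
// is appended here rather than inside emitRespEvent.
func writeSSE(w io.Writer, frame string) error {
	_, err := io.WriteString(w, frame+"\n\n")
	return err
}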
@@ -511,6 +518,7 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context,
 // ConvertOpenAIChatCompletionsResponseToOpenAIResponsesNonStream builds a single Responses JSON
 // from a non-streaming OpenAI Chat Completions response.
 func ConvertOpenAIChatCompletionsResponseToOpenAIResponsesNonStream(_ context.Context, _ string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, _ *any) string {
+	log.Debug("ConvertOpenAIChatCompletionsResponseToOpenAIResponsesNonStream")
 	root := gjson.ParseBytes(rawJSON)
 	// Basic response scaffold
@@ -540,7 +548,7 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponsesNonStream(_ context.Co
 		resp, _ = sjson.Set(resp, "max_output_tokens", v.Int())
 	} else {
 		// Also support max_tokens from chat completion style
-		if v := req.Get("max_tokens"); v.Exists() {
+		if v = req.Get("max_tokens"); v.Exists() {
 			resp, _ = sjson.Set(resp, "max_output_tokens", v.Int())
 		}
 	}
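The := to = switch in this hunk comes down to Go's if-statement scoping: inside the else branch, := would declare a fresh v shadowing the one from the outer init statement, while = reuses it. Both forms compile; an illustration of the difference:

if v := req.Get("max_output_tokens"); v.Exists() {
	_ = v // the v declared in the init statement
} else {
	if v = req.Get("max_tokens"); v.Exists() { // '=' reassigns that same v
		_ = v
	}
	// 'v := req.Get("max_tokens")' here would declare a second v that
	// shadows the outer one: a classic lint warning, though still valid Go
}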
@@ -549,7 +557,7 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponsesNonStream(_ context.Co
 	}
 	if v := req.Get("model"); v.Exists() {
 		resp, _ = sjson.Set(resp, "model", v.String())
-	} else if v := root.Get("model"); v.Exists() {
+	} else if v = root.Get("model"); v.Exists() {
 		resp, _ = sjson.Set(resp, "model", v.String())
 	}
 	if v := req.Get("parallel_tool_calls"); v.Exists() {