Mirror of https://github.com/router-for-me/CLIProxyAPI.git (synced 2026-02-02 20:40:52 +08:00)
- Modularized handlers into dedicated packages (`gemini`, `claude`, `cli`) for better structure.
- Centralized `ErrorResponse` and `ErrorDetail` types under the `handlers` package for reuse.
- Updated all handlers to use the shared `ErrorResponse` model.
- Introduced specialized handler structs (`GeminiAPIHandlers`, `ClaudeCodeAPIHandlers`, `GeminiCLIAPIHandlers`) for improved clarity and separation of concerns.
- Refactored `getClient` logic with additional properties and better state management.

Refactor `translator` package by modularizing code for `claude` and `gemini`:

- Moved Claude-specific logic (`PrepareClaudeRequest`, `ConvertCliToClaude`) to `translator/claude/code`.
- Moved Gemini-specific logic (`FixCLIToolResponse`) to `translator/gemini/cli` for better package structure.
- Updated affected handler imports and method references.

Add comprehensive package-level documentation across key modules:

- Introduced detailed package-level documentation for core modules: `auth`, `client`, `cmd`, `handlers`, `util`, `watcher`, `config`, `translator`, and `api`.
- Enhanced code readability and maintainability by clarifying the purpose and functionality of each package.
- Aligned documentation style and tone with existing codebase conventions.

Refactor API handlers and translator modules for improved clarity and consistency:

- Standardized handler struct names (`GeminiAPIHandlers`, `ClaudeCodeAPIHandlers`, `GeminiCLIAPIHandlers`, `OpenAIAPIHandlers`) and updated related comments.
- Fixed unnecessary `else` blocks in streaming logic for cleaner error handling.
- Renamed variables for better readability (`responseIdResult` to `responseIDResult`, `activationUrl` to `activationURL`, etc.).
- Addressed minor inconsistencies in API handler comments and SSE header initialization.
- Improved modularization of `claude` and `gemini` translator components.

Standardize configuration field naming for consistency across modules:

- Renamed `ProxyUrl` to `ProxyURL`, `ApiKeys` to `APIKeys`, and `ConfigQuotaExceeded` to `QuotaExceeded`.
- Updated all relevant references and comments in `config`, `auth`, `api`, `util`, and `watcher`.
- Ensured consistent casing for `GlAPIKey` debug logs.
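The shared error model mentioned above is consumed by the handlers in this file roughly as follows. This is a sketch only: the field set is inferred from the usage below, the JSON tags are assumed, and the actual definitions live in the `handlers` package.

// Sketch only: fields inferred from usage in this file; JSON tags assumed.
type ErrorDetail struct {
	Message string `json:"message"`
	Type    string `json:"type"`
}

type ErrorResponse struct {
	Error ErrorDetail `json:"error"`
}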
265 lines
8.3 KiB
Go
// Package openai provides HTTP handlers for OpenAI API endpoints.
//
// This package implements the OpenAI-compatible API interface, including model listing
// and chat completion functionality. It supports both streaming and non-streaming responses,
// and manages a pool of clients to interact with backend services.
// The handlers translate OpenAI API requests to the appropriate backend format and
// convert responses back to OpenAI-compatible format.
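//
// A minimal wiring sketch (illustrative only; the router setup and the
// construction of *handlers.APIHandlers are assumed, and the routes are
// taken from the handler comments below):
//
//	h := openai.NewOpenAIAPIHandlers(apiHandlers)
//	r := gin.Default()
//	r.GET("/v1/models", h.Models)
//	r.POST("/v1/chat/completions", h.ChatCompletions)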
package openai

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/luispater/CLIProxyAPI/internal/api/handlers"
	"github.com/luispater/CLIProxyAPI/internal/api/translator/openai"
	"github.com/luispater/CLIProxyAPI/internal/client"
	log "github.com/sirupsen/logrus"
	"github.com/tidwall/gjson"
)

// OpenAIAPIHandlers contains the handlers for OpenAI API endpoints.
// It holds a pool of clients to interact with the backend service.
type OpenAIAPIHandlers struct {
	*handlers.APIHandlers
}

// NewOpenAIAPIHandlers creates a new OpenAI API handlers instance.
// It takes an APIHandlers instance as input and returns an OpenAIAPIHandlers.
func NewOpenAIAPIHandlers(apiHandlers *handlers.APIHandlers) *OpenAIAPIHandlers {
	return &OpenAIAPIHandlers{
		APIHandlers: apiHandlers,
	}
}

// Models handles the /v1/models endpoint.
// It returns a hardcoded list of available AI models.
func (h *OpenAIAPIHandlers) Models(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{
		"data": []map[string]any{
			{
				"id":                    "gemini-2.5-pro",
				"object":                "model",
				"version":               "2.5",
				"name":                  "Gemini 2.5 Pro",
				"description":           "Stable release (June 17th, 2025) of Gemini 2.5 Pro",
				"context_length":        1048576,
				"max_completion_tokens": 65536,
				"supported_parameters": []string{
					"tools",
					"temperature",
					"top_p",
					"top_k",
				},
				"temperature":    1,
				"topP":           0.95,
				"topK":           64,
				"maxTemperature": 2,
				"thinking":       true,
			},
			{
				"id":                    "gemini-2.5-flash",
				"object":                "model",
				"version":               "001",
				"name":                  "Gemini 2.5 Flash",
				"description":           "Stable version of Gemini 2.5 Flash, our mid-size multimodal model that supports up to 1 million tokens, released in June of 2025.",
				"context_length":        1048576,
				"max_completion_tokens": 65536,
				"supported_parameters": []string{
					"tools",
					"temperature",
					"top_p",
					"top_k",
				},
				"temperature":    1,
				"topP":           0.95,
				"topK":           64,
				"maxTemperature": 2,
				"thinking":       true,
			},
		},
	})
}

// ChatCompletions handles the /v1/chat/completions endpoint.
// It determines whether the request is for a streaming or non-streaming response
// and calls the appropriate handler.
func (h *OpenAIAPIHandlers) ChatCompletions(c *gin.Context) {
	rawJSON, err := c.GetRawData()
	// If data retrieval fails, return a 400 Bad Request error.
	if err != nil {
		c.JSON(http.StatusBadRequest, handlers.ErrorResponse{
			Error: handlers.ErrorDetail{
				Message: fmt.Sprintf("Invalid request: %v", err),
				Type:    "invalid_request_error",
			},
		})
		return
	}

	// Check if the client requested a streaming response.
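	// For example, a hypothetical request body (only the "stream" flag is
	// inspected here; the rest is parsed later by openai.PrepareRequest):
	//
	//	{"model": "gemini-2.5-pro", "stream": true, "messages": [...]}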
	streamResult := gjson.GetBytes(rawJSON, "stream")
	if streamResult.Type == gjson.True {
		h.handleStreamingResponse(c, rawJSON)
	} else {
		h.handleNonStreamingResponse(c, rawJSON)
	}
}

// handleNonStreamingResponse handles non-streaming chat completion responses.
// It selects a client from the pool, sends the request, and aggregates the response
// before sending it back to the client.
func (h *OpenAIAPIHandlers) handleNonStreamingResponse(c *gin.Context, rawJSON []byte) {
	c.Header("Content-Type", "application/json")

	modelName, systemInstruction, contents, tools := openai.PrepareRequest(rawJSON)
	cliCtx, cliCancel := context.WithCancel(context.Background())
	var cliClient *client.Client
	defer func() {
		if cliClient != nil {
			cliClient.RequestMutex.Unlock()
		}
	}()

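	// Client-selection loop: on a 429 quota error with SwitchProject enabled,
	// rotate to another account/project and retry with a different client.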
	for {
		var errorResponse *client.ErrorMessage
		cliClient, errorResponse = h.GetClient(modelName)
		if errorResponse != nil {
			c.Status(errorResponse.StatusCode)
			_, _ = fmt.Fprint(c.Writer, errorResponse.Error)
			cliCancel()
			return
		}

		isGlAPIKey := false
		if glAPIKey := cliClient.GetGenerativeLanguageAPIKey(); glAPIKey != "" {
			log.Debugf("Request uses generative language API key: %s", glAPIKey)
			isGlAPIKey = true
		} else {
			log.Debugf("Request uses account: %s, project id: %s", cliClient.GetEmail(), cliClient.GetProjectID())
		}

		resp, err := cliClient.SendMessage(cliCtx, rawJSON, modelName, systemInstruction, contents, tools)
		if err != nil {
			if err.StatusCode == 429 && h.Cfg.QuotaExceeded.SwitchProject {
				// GetClient appears to hand back the client with RequestMutex
				// held (the deferred unlock above pairs with it), so release
				// the current client before rotating to another one; the
				// deferred unlock only covers the last client acquired.
				cliClient.RequestMutex.Unlock()
				cliClient = nil
				continue
			}
			c.Status(err.StatusCode)
			_, _ = c.Writer.Write([]byte(err.Error.Error()))
			cliCancel()
			break
		}

		// Convert the aggregated backend response to the OpenAI format.
		openAIFormat := openai.ConvertCliToOpenAINonStream(resp, time.Now().Unix(), isGlAPIKey)
		if openAIFormat != "" {
			_, _ = c.Writer.Write([]byte(openAIFormat))
		}
		cliCancel()
		break
	}
}

// handleStreamingResponse handles streaming chat completion responses.
// It selects a client, streams converted response chunks to the caller as
// Server-Sent Events, and emits periodic keep-alive comments while waiting.
func (h *OpenAIAPIHandlers) handleStreamingResponse(c *gin.Context, rawJSON []byte) {
	c.Header("Content-Type", "text/event-stream")
	c.Header("Cache-Control", "no-cache")
	c.Header("Connection", "keep-alive")
	c.Header("Access-Control-Allow-Origin", "*")

	// Get the http.Flusher interface to manually flush the response.
	flusher, ok := c.Writer.(http.Flusher)
	if !ok {
		c.JSON(http.StatusInternalServerError, handlers.ErrorResponse{
			Error: handlers.ErrorDetail{
				Message: "Streaming not supported",
				Type:    "server_error",
			},
		})
		return
	}

	// Prepare the request for the backend client.
	modelName, systemInstruction, contents, tools := openai.PrepareRequest(rawJSON)
	cliCtx, cliCancel := context.WithCancel(context.Background())
	var cliClient *client.Client
	defer func() {
		// Ensure the client's mutex is unlocked on function exit.
		if cliClient != nil {
			cliClient.RequestMutex.Unlock()
		}
	}()

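	// outLoop labels the client-selection loop so the select below can restart
	// it (rotating to another account) after a 429 quota error.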
outLoop:
	for {
		var errorResponse *client.ErrorMessage
		cliClient, errorResponse = h.GetClient(modelName)
		if errorResponse != nil {
			c.Status(errorResponse.StatusCode)
			_, _ = fmt.Fprint(c.Writer, errorResponse.Error)
			flusher.Flush()
			cliCancel()
			return
		}

		isGlAPIKey := false
		if glAPIKey := cliClient.GetGenerativeLanguageAPIKey(); glAPIKey != "" {
			log.Debugf("Request uses generative language API key: %s", glAPIKey)
			isGlAPIKey = true
		} else {
			log.Debugf("Request uses account: %s, project id: %s", cliClient.GetEmail(), cliClient.GetProjectID())
		}

		// Send the message and receive response chunks and errors via channels.
		respChan, errChan := cliClient.SendMessageStream(cliCtx, rawJSON, modelName, systemInstruction, contents, tools)
		hasFirstResponse := false
		for {
			select {
			// Handle client disconnection.
			case <-c.Request.Context().Done():
				if c.Request.Context().Err() == context.Canceled {
					log.Debugf("Client disconnected: %v", c.Request.Context().Err())
					cliCancel() // Cancel the backend request.
					return
				}
			// Process incoming response chunks.
			case chunk, okStream := <-respChan:
				if !okStream {
					// Stream is closed, send the final [DONE] message.
					_, _ = fmt.Fprint(c.Writer, "data: [DONE]\n\n")
					flusher.Flush()
					cliCancel()
					return
				}
				// Convert the chunk to OpenAI format and send it to the client.
				hasFirstResponse = true
				openAIFormat := openai.ConvertCliToOpenAI(chunk, time.Now().Unix(), isGlAPIKey)
				if openAIFormat != "" {
					_, _ = fmt.Fprintf(c.Writer, "data: %s\n\n", openAIFormat)
					flusher.Flush()
				}
			// Handle errors from the backend.
			case err, okError := <-errChan:
				if okError {
					if err.StatusCode == 429 && h.Cfg.QuotaExceeded.SwitchProject {
						// Release the current client before rotating to another
						// one; the deferred unlock only covers the last client
						// acquired (GetClient appears to return it locked).
						cliClient.RequestMutex.Unlock()
						cliClient = nil
						continue outLoop
					}
					c.Status(err.StatusCode)
					_, _ = fmt.Fprint(c.Writer, err.Error.Error())
					flusher.Flush()
					cliCancel()
					return
				}
			// Send a keep-alive signal to the client.
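			// An SSE line starting with ':' is a comment frame, which
			// conforming clients ignore.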
			case <-time.After(500 * time.Millisecond):
				if hasFirstResponse {
					_, _ = c.Writer.Write([]byte(": CLI-PROXY-API PROCESSING\n\n"))
					flusher.Flush()
				}
			}
		}
	}
}