Compare commits


10 Commits

Author SHA1 Message Date
Luis Pater
3b2ab0d7bd Fix SSE headers initialization for geminiStreamGenerateContent and internalStreamGenerateContent
- Added conditional logic to properly initialize SSE headers only when `alt` is empty.
- Ensured headers like `Content-Type`, `Cache-Control`, and `Access-Control-Allow-Origin` are set for better compatibility.
2025-07-26 17:16:55 +08:00
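A minimal sketch of the guard this commit introduces, assuming a gin handler as in the diffs below; `writeStreamHeaders` is an illustrative helper name, not the repository's:

```go
package handlers // illustrative package name

import "github.com/gin-gonic/gin"

// writeStreamHeaders sets SSE headers only on the default streaming path (alt == "");
// when the caller asked for another response format via alt/$alt, the headers are
// left for the downstream response to define.
func writeStreamHeaders(c *gin.Context, alt string) {
	if alt != "" {
		return
	}
	c.Header("Content-Type", "text/event-stream")
	c.Header("Cache-Control", "no-cache")
	c.Header("Connection", "keep-alive")
	c.Header("Access-Control-Allow-Origin", "*")
}
```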
Luis Pater
e64fa48823 Enhance Gemini request handling with fallback support for contents
- Added conditional logic to support `contents` as a fallback to `generateContentRequest`.
- Improved template construction and ensured proper cleanup of request fields.
- Introduced debug logging for troubleshooting request generation.
2025-07-26 17:04:14 +08:00
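The fallback amounts to wrapping whichever field is present into the upstream `{"request": ...}` envelope. A minimal sketch using the same gjson/sjson helpers as the diff further down; `buildCountTokensBody` is a hypothetical name:

```go
package handlers // illustrative package name

import (
	"github.com/tidwall/gjson"
	"github.com/tidwall/sjson"
)

// buildCountTokensBody prefers `generateContentRequest` and falls back to a bare
// `contents` array, mirroring the conditional logic described above.
func buildCountTokensBody(rawJSON []byte) []byte {
	tmpl := `{"request":{}}`
	if v := gjson.GetBytes(rawJSON, "generateContentRequest"); v.Exists() {
		tmpl, _ = sjson.SetRaw(tmpl, "request", v.Raw)
	} else if v := gjson.GetBytes(rawJSON, "contents"); v.Exists() {
		tmpl, _ = sjson.SetRaw(tmpl, "request.contents", v.Raw)
	}
	return []byte(tmpl)
}
```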
Luis Pater
beff9282f6 Fix alt parameter handling in URL construction
- Ensured `alt` parameter is only appended when non-empty.
- Added debug logging for constructed URLs.
2025-07-26 15:51:04 +08:00
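A minimal sketch of the URL rule, assuming only two query variants (`alt=sse` for default streaming, `$alt=...` when the caller supplied a value); `buildUpstreamURL` is a hypothetical name:

```go
package client // illustrative package name

import "fmt"

// buildUpstreamURL appends query parameters only when they carry a value:
// the default streaming path gets ?alt=sse, an explicit alt is passed through
// as $alt, and nothing is appended otherwise.
func buildUpstreamURL(base, version, endpoint, alt string, stream bool) string {
	url := fmt.Sprintf("%s/%s:%s", base, version, endpoint)
	switch {
	case alt == "" && stream:
		url += "?alt=sse"
	case alt != "":
		url += fmt.Sprintf("?$alt=%s", alt)
	}
	return url
}
```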
Luis Pater
31a9e2d11f Add GeminiGetHandler, enhance Gemini functionality, and enable token counting
- Added `GeminiGetHandler` for handling GET requests with extended Gemini model support.
- Introduced `geminiCountTokens` function to calculate token usage.
- Refactored `APIRequest` and related methods to support `alt` parameter for enhanced flexibility.
- Updated routes and request processing to integrate new handler and functions.
2025-07-26 06:51:49 +08:00
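A hedged usage sketch for the new token-counting route; the host, port, and API key are the placeholders used elsewhere in the README, and the body shape relies on the `contents` fallback described earlier in this list:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Gemini-style countTokens request against the proxy's v1beta route.
	body := []byte(`{"contents":[{"role":"user","parts":[{"text":"Hello"}]}]}`)
	url := "http://localhost:8317/v1beta/models/gemini-2.5-pro:countTokens?key=your-api-key-1"

	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```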
Luis Pater
423faae3da Add GeminiModels handler and enhance API key validation
- Introduced `GeminiModels` handler to serve Gemini model information under `/v1beta/models`.
- Updated `AuthMiddleware` to validate API keys from query parameters for improved flexibility.
- Adjusted route to use the new handler for model retrieval.
2025-07-26 04:41:55 +08:00
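In code, the middleware change boils down to one extra credential source: the `?key=` query parameter used by official Gemini clients. A minimal sketch (gin assumed; helper name illustrative):

```go
package api // illustrative package name

import "github.com/gin-gonic/gin"

// apiKeyFromQuery reports whether the ?key=... query parameter matches one of the
// configured API keys; the middleware checks this in addition to the Authorization,
// X-Goog-Api-Key and X-Api-Key headers.
func apiKeyFromQuery(c *gin.Context, apiKeys []string) bool {
	key, ok := c.GetQuery("key")
	if !ok {
		return false
	}
	for _, k := range apiKeys {
		if k == key {
			return true
		}
	}
	return false
}
```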
Luis Pater
ead71fb7ef Improve error logging and add user guidance for issue reporting
- Added fatal log in `login.go` for Cloud AI API enablement check failures, prompting users to report issues.
- Enhanced error logging in `client.go` with warning messages directing users to copy and provide error details when creating issues.
2025-07-24 04:51:09 +08:00
Luis Pater
58b7afdf1e Enhance HTTP server with custom multiplexer in Auth flow
- Replaced default `http` handler with `http.ServeMux` for improved routing control.
- Refactored callback handling to utilize the custom multiplexer.
2025-07-23 05:09:05 +08:00
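A minimal sketch of the server construction this commit describes, which keeps the OAuth callback route off the global `http.DefaultServeMux`; `newCallbackServer` is a hypothetical name, and the path and port follow the diff shown later in this compare:

```go
package auth // illustrative package name

import "net/http"

// newCallbackServer builds an HTTP server with its own multiplexer that only
// serves the OAuth callback used during login.
func newCallbackServer(handleCallback http.HandlerFunc) *http.Server {
	mux := http.NewServeMux()
	mux.HandleFunc("/oauth2callback", handleCallback)
	return &http.Server{Addr: ":8085", Handler: mux}
}
```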
Luis Pater
c86545d7e1 Add Chinese README and update project files
- Introduced `README_CN.md` to provide detailed documentation in Chinese.
- Updated `.goreleaser.yml` to include the new README file in release assets.
- Enhanced `README.md` with a language toggle link for improved accessibility.
2025-07-21 11:23:13 +08:00
Luis Pater
f49a530c1a Refactor client handling and improve error responses
- Centralized client retrieval logic with `getClient` function for reduced redundancy.
- Simplified client rotation and error handling by removing excessive load balancing logic.
- Updated server address in `auth.go` to use dynamic binding (`:8085`).
2025-07-15 17:03:18 +08:00
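A self-contained sketch of the round-robin selection that `getClient` centralizes, using a stand-in interface for `*client.Client`; the repository's locking and fallback details may differ:

```go
package api // illustrative, self-contained sketch

import "sync"

// quotaAware is a stand-in for the repository's *client.Client.
type quotaAware interface {
	IsModelQuotaExceeded(model string) bool
}

type rotation struct {
	mu       sync.Mutex
	lastUsed int
}

// pick rotates through clients round-robin and returns the first whose quota for
// the requested model is not exhausted; nil means every client is exhausted and
// the caller should answer 429 RESOURCE_EXHAUSTED, as the handlers below do.
func (r *rotation) pick(clients []quotaAware, model string) quotaAware {
	if len(clients) == 0 {
		return nil
	}
	r.mu.Lock()
	start := (r.lastUsed + 1) % len(clients)
	r.lastUsed = start
	r.mu.Unlock()

	for i := 0; i < len(clients); i++ {
		if c := clients[(start+i)%len(clients)]; !c.IsModelQuotaExceeded(model) {
			return c
		}
	}
	return nil
}
```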
Luis Pater
368796349e Add Docker support with CI/CD workflow and usage instructions
- Added `.github/workflows/docker-image.yml` for automated Docker image build and push on version tags.
- Created `Dockerfile` to containerize the application.
- Updated README with instructions for running the application using Docker.
2025-07-14 16:50:51 +08:00
13 changed files with 666 additions and 111 deletions

42
.github/workflows/docker-image.yml vendored Normal file
View File

@@ -0,0 +1,42 @@
name: docker-image
on:
  push:
    tags:
      - v*
env:
  APP_NAME: CLIProxyAPI
  DOCKERHUB_REPO: eceasy/cli-proxy-api
jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Generate App Version
        run: echo APP_VERSION=`git describe --tags --always` >> $GITHUB_ENV
      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          context: .
          platforms: |
            linux/amd64
            linux/arm64
          push: true
          build-args: |
            APP_NAME=${{ env.APP_NAME }}
            APP_VERSION=${{ env.APP_VERSION }}
          tags: |
            ${{ env.DOCKERHUB_REPO }}:latest
            ${{ env.DOCKERHUB_REPO }}:${{ env.APP_VERSION }}

View File

@@ -14,4 +14,5 @@ archives:
files:
- LICENSE
- README.md
- README_CN.md
- config.yaml

23
Dockerfile Normal file
View File

@@ -0,0 +1,23 @@
FROM golang:1.24-alpine AS builder
WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go build -o ./CLIProxyAPI ./cmd/server/
FROM alpine:3.22.0
RUN mkdir /CLIProxyAPI
COPY --from=builder ./app/CLIProxyAPI /CLIProxyAPI/CLIProxyAPI
WORKDIR /CLIProxyAPI
EXPOSE 8317
CMD ["./CLIProxyAPI"]

View File

@@ -1,5 +1,7 @@
# CLI Proxy API
English | [中文](README_CN.md)
A proxy server that provides an OpenAI/Gemini/Claude compatible API interface for CLI. This allows you to use CLI models with tools and libraries designed for the OpenAI/Gemini/Claude API.
## Features
@@ -148,14 +150,17 @@ The server uses a YAML configuration file (`config.yaml`) located in the project
### Configuration Options
| Parameter | Type | Default | Description |
|-------------------------------|----------|--------------------|----------------------------------------------------------------------------------------------|
| `port` | integer | 8317 | The port number on which the server will listen |
| `auth-dir` | string | "~/.cli-proxy-api" | Directory where authentication tokens are stored. Supports using `~` for home directory |
| `proxy-url` | string | "" | Proxy url, support socks5/http/https protocol, example: socks5://user:pass@192.168.1.1:1080/ |
| `debug` | boolean | false | Enable debug mode for verbose logging |
| `api-keys` | string[] | [] | List of API keys that can be used to authenticate requests |
| `generative-language-api-key` | string[] | [] | List of Generative Language API keys |
| Parameter | Type | Default | Description |
|---------------------------------------|----------|--------------------|----------------------------------------------------------------------------------------------|
| `port` | integer | 8317 | The port number on which the server will listen |
| `auth-dir` | string | "~/.cli-proxy-api" | Directory where authentication tokens are stored. Supports using `~` for home directory |
| `proxy-url` | string | "" | Proxy url, support socks5/http/https protocol, example: socks5://user:pass@192.168.1.1:1080/ |
| `quota-exceeded` | object | {} | Configuration for handling quota exceeded |
| `quota-exceeded.switch-project` | boolean | true | Whether to automatically switch to another project when a quota is exceeded |
| `quota-exceeded.switch-preview-model` | boolean | true | Whether to automatically switch to a preview model when a quota is exceeded |
| `debug` | boolean | false | Enable debug mode for verbose logging |
| `api-keys` | string[] | [] | List of API keys that can be used to authenticate requests |
| `generative-language-api-key` | string[] | [] | List of Generative Language API keys |
### Example Configuration File
@@ -169,6 +174,14 @@ auth-dir: "~/.cli-proxy-api"
# Enable debug logging
debug: false
# Proxy url, support socks5/http/https protocol, example: socks5://user:pass@192.168.1.1:1080/
proxy-url: ""
# Quota exceeded behavior
quota-exceeded:
switch-project: true # Whether to automatically switch to another project when a quota is exceeded
switch-preview-model: true # Whether to automatically switch to a preview model when a quota is exceeded
# API keys for authentication
api-keys:
- "your-api-key-1"
@@ -212,6 +225,20 @@ The server will relay the `loadCodeAssist`, `onboardUser`, and `countTokens` req
> This feature only allows local access because I couldn't find a way to authenticate the requests.
> I hardcoded `127.0.0.1` into the load balancing.
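A hedged sketch of what that local-only restriction could look like; this only illustrates the idea and is not the repository's actual check:

```go
package api // illustrative package name

import "net"

// isLoopback reports whether a request's RemoteAddr is a loopback peer, so that
// the unauthenticated CLI relay endpoints can be limited to 127.0.0.1/::1.
func isLoopback(remoteAddr string) bool {
	host, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		host = remoteAddr
	}
	ip := net.ParseIP(host)
	return ip != nil && ip.IsLoopback()
}
```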
## Run with Docker
Run the following command to login:
```bash
docker run --rm -p 8085:8085 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --login
```
Run the following command to start the server:
```bash
docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest
```
## Contributing
Contributions are welcome! Please feel free to submit a Pull Request.

254
README_CN.md Normal file
View File

@@ -0,0 +1,254 @@
# CLI 代理 API
[English](README.md) | 中文
一个为 CLI 提供 OpenAI/Gemini/Claude 兼容 API 接口的代理服务器。这让您可以摆脱终端界面的束缚,将 Gemini 的强大能力以 API 的形式轻松接入到任何您喜爱的客户端或应用中。
## 功能特性
- 为 CLI 模型提供 OpenAI/Gemini/Claude 兼容的 API 端点
- 支持流式和非流式响应
- 函数调用/工具支持
- 多模态输入支持(文本和图像)
- 多账户支持与负载均衡
- 简单的 CLI 身份验证流程
- 支持 Gemini AIStudio API 密钥
- 支持 Gemini CLI 多账户轮询
## 安装
### 前置要求
- Go 1.24 或更高版本
- 有权访问 CLI 模型的 Google 账户
### 从源码构建
1. 克隆仓库:
```bash
git clone https://github.com/luispater/CLIProxyAPI.git
cd CLIProxyAPI
```
2. 构建应用程序:
```bash
go build -o cli-proxy-api ./cmd/server
```
## 使用方法
### 身份验证
在使用 API 之前,您需要使用 Google 账户进行身份验证:
```bash
./cli-proxy-api --login
```
如果您是旧版 gemini code 用户,可能需要指定项目 ID
```bash
./cli-proxy-api --login --project_id <your_project_id>
```
### 启动服务器
身份验证完成后,启动服务器:
```bash
./cli-proxy-api
```
默认情况下,服务器在端口 8317 上运行。
### API 端点
#### 列出模型
```
GET http://localhost:8317/v1/models
```
#### 聊天补全
```
POST http://localhost:8317/v1/chat/completions
```
请求体示例:
```json
{
"model": "gemini-2.5-pro",
"messages": [
{
"role": "user",
"content": "你好,你好吗?"
}
],
"stream": true
}
```
### 与 OpenAI 库一起使用
您可以通过将基础 URL 设置为本地服务器来将此代理与任何 OpenAI 兼容的库一起使用:
#### Python(使用 OpenAI 库)
```python
from openai import OpenAI
client = OpenAI(
api_key="dummy", # 不使用但必需
base_url="http://localhost:8317/v1"
)
response = client.chat.completions.create(
model="gemini-2.5-pro",
messages=[
{"role": "user", "content": "你好,你好吗?"}
]
)
print(response.choices[0].message.content)
```
#### JavaScript/TypeScript
```javascript
import OpenAI from 'openai';
const openai = new OpenAI({
apiKey: 'dummy', // 不使用但必需
baseURL: 'http://localhost:8317/v1',
});
const response = await openai.chat.completions.create({
model: 'gemini-2.5-pro',
messages: [
{ role: 'user', content: '你好,你好吗?' }
],
});
console.log(response.choices[0].message.content);
```
## 支持的模型
- gemini-2.5-pro
- gemini-2.5-flash
- 并且自动切换到之前的预览版本
## 配置
服务器默认使用位于项目根目录的 YAML 配置文件(`config.yaml`)。您可以使用 `--config` 标志指定不同的配置文件路径:
```bash
./cli-proxy --config /path/to/your/config.yaml
```
### 配置选项
| 参数 | 类型 | 默认值 | 描述 |
|---------------------------------------|----------|--------------------|------------------------------------------------------------------------|
| `port` | integer | 8317 | 服务器监听的端口号 |
| `auth-dir` | string | "~/.cli-proxy-api" | 存储身份验证令牌的目录。支持使用 `~` 表示主目录 |
| `proxy-url`                           | string   | ""                 | 代理 URL,支持 socks5/http/https 协议,示例:socks5://user:pass@192.168.1.1:1080/ |
| `quota-exceeded` | object | {} | 处理配额超限的配置 |
| `quota-exceeded.switch-project` | boolean | true | 当配额超限时是否自动切换到另一个项目 |
| `quota-exceeded.switch-preview-model` | boolean | true | 当配额超限时是否自动切换到预览模型 |
| `debug` | boolean | false | 启用调试模式以进行详细日志记录 |
| `api-keys` | string[] | [] | 可用于验证请求的 API 密钥列表 |
| `generative-language-api-key` | string[] | [] | 生成式语言 API 密钥列表 |
### 配置文件示例
```yaml
# 服务器端口
port: 8317
# 身份验证目录(支持 ~ 表示主目录)
auth-dir: "~/.cli-proxy-api"
# 启用调试日志
debug: false
# 代理 URL,支持 socks5/http/https 协议,示例:socks5://user:pass@192.168.1.1:1080/
proxy-url: ""
# 配额超限行为
quota-exceeded:
switch-project: true # 当配额超限时是否自动切换到另一个项目
switch-preview-model: true # 当配额超限时是否自动切换到预览模型
# 用于本地身份验证的 API 密钥
api-keys:
- "your-api-key-1"
- "your-api-key-2"
# AIStudio Gemini API 的 API 密钥
generative-language-api-key:
- "AIzaSy...01"
- "AIzaSy...02"
- "AIzaSy...03"
- "AIzaSy...04"
```
### 身份验证目录
`auth-dir` 参数指定身份验证令牌的存储位置。当您运行登录命令时,应用程序将在此目录中创建包含 Google 账户身份验证令牌的 JSON 文件。多个账户可用于轮询。
### API 密钥
`api-keys` 参数允许您定义可用于验证对代理服务器请求的 API 密钥列表。在向 API 发出请求时,您可以在 `Authorization` 标头中包含其中一个密钥:
```
Authorization: Bearer your-api-key-1
```
### 官方生成式语言 API
`generative-language-api-key` 参数允许您定义可用于验证对官方 AIStudio Gemini API 请求的 API 密钥列表。
## Gemini CLI 多账户负载均衡
启动 CLI 代理 API 服务器,然后将 `CODE_ASSIST_ENDPOINT` 环境变量设置为 CLI 代理 API 服务器的 URL。
```bash
export CODE_ASSIST_ENDPOINT="http://127.0.0.1:8317"
```
服务器将中继 `loadCodeAssist`、`onboardUser` 和 `countTokens` 请求。并自动在多个账户之间轮询文本生成请求。
> [!NOTE]
> 此功能仅允许本地访问,因为找不到一个可以验证请求的方法。
> 所以只能强制只有 `127.0.0.1` 可以访问。
## 使用 Docker 运行
运行以下命令进行登录:
```bash
docker run --rm -p 8085:8085 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --login
```
运行以下命令启动服务器:
```bash
docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest
```
## 贡献
欢迎贡献!请随时提交 Pull Request。
1. Fork 仓库
2. 创建您的功能分支(`git checkout -b feature/amazing-feature`)
3. 提交您的更改(`git commit -m 'Add some amazing feature'`)
4. 推送到分支(`git push origin feature/amazing-feature`)
5. 打开 Pull Request
## 许可证
此项目根据 MIT 许可证授权 - 有关详细信息,请参阅 [LICENSE](LICENSE) 文件。

View File

@@ -78,62 +78,16 @@ func (h *APIHandlers) ClaudeMessages(c *gin.Context) {
// This loop implements a sophisticated load balancing and failover mechanism
outLoop:
for {
// Thread-safe client index rotation to distribute load evenly
// This ensures fair usage across all available clients
mutex.Lock()
startIndex := lastUsedClientIndex
currentIndex := (startIndex + 1) % len(h.cliClients)
lastUsedClientIndex = currentIndex
mutex.Unlock()
// Build a list of available clients, starting from the next client in rotation
// This implements round-robin load balancing while filtering out quota-exceeded clients
reorderedClients := make([]*client.Client, 0)
for i := 0; i < len(h.cliClients); i++ {
cliClient = h.cliClients[(startIndex+1+i)%len(h.cliClients)]
// Skip clients that have exceeded their quota for the requested model
if cliClient.IsModelQuotaExceeded(modelName) {
// Log different messages based on authentication method (API key vs account)
if cliClient.GetGenerativeLanguageAPIKey() == "" {
log.Debugf("Model %s is quota exceeded for account %s, project id: %s", modelName, cliClient.GetEmail(), cliClient.GetProjectID())
} else {
log.Debugf("Model %s is quota exceeded for generative language API Key: %s", modelName, cliClient.GetGenerativeLanguageAPIKey())
}
cliClient = nil
continue
}
reorderedClients = append(reorderedClients, cliClient)
}
// If all clients have exceeded quota, return a 429 Too Many Requests error
if len(reorderedClients) == 0 {
c.Status(429)
_, _ = fmt.Fprint(c.Writer, fmt.Sprintf(`{"error":{"code":429,"message":"All the models of '%s' are quota exceeded","status":"RESOURCE_EXHAUSTED"}}`, modelName))
var errorResponse *client.ErrorMessage
cliClient, errorResponse = h.getClient(modelName)
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
_, _ = fmt.Fprint(c.Writer, errorResponse.Error)
flusher.Flush()
cliCancel()
return
}
// Attempt to acquire a lock on an available client using non-blocking TryLock
// This prevents blocking when a client is busy with another request
locked := false
for i := 0; i < len(reorderedClients); i++ {
cliClient = reorderedClients[i]
if cliClient.RequestMutex.TryLock() {
locked = true
break
}
}
// If no client is immediately available, fall back to blocking on the first client
// This ensures the request will eventually be processed
if !locked {
cliClient = h.cliClients[0]
cliClient.RequestMutex.Lock()
}
// Determine the authentication method being used by the selected client
// This affects how responses are formatted and logged
isGlAPIKey := false

View File

@@ -99,6 +99,15 @@ func (h *APIHandlers) CLIHandler(c *gin.Context) {
}
func (h *APIHandlers) internalStreamGenerateContent(c *gin.Context, rawJson []byte) {
alt := h.getAlt(c)
if alt == "" {
c.Header("Content-Type", "text/event-stream")
c.Header("Cache-Control", "no-cache")
c.Header("Connection", "keep-alive")
c.Header("Access-Control-Allow-Origin", "*")
}
// Get the http.Flusher interface to manually flush the response.
flusher, ok := c.Writer.(http.Flusher)
if !ok {
@@ -141,7 +150,7 @@ outLoop:
log.Debugf("Request use account: %s, project id: %s", cliClient.GetEmail(), cliClient.GetProjectID())
}
// Send the message and receive response chunks and errors via channels.
respChan, errChan := cliClient.SendRawMessageStream(cliCtx, rawJson)
respChan, errChan := cliClient.SendRawMessageStream(cliCtx, rawJson, "")
hasFirstResponse := false
for {
select {
@@ -220,7 +229,7 @@ func (h *APIHandlers) internalGenerateContent(c *gin.Context, rawJson []byte) {
log.Debugf("Request use account: %s, project id: %s", cliClient.GetEmail(), cliClient.GetProjectID())
}
resp, err := cliClient.SendRawMessage(cliCtx, rawJson)
resp, err := cliClient.SendRawMessage(cliCtx, rawJson, "")
if err != nil {
if err.StatusCode == 429 && h.cfg.QuotaExceeded.SwitchProject {
continue

View File

@@ -14,11 +14,27 @@ import (
"time"
)
func (h *APIHandlers) GeminiHandler(c *gin.Context) {
var person struct {
func (h *APIHandlers) GeminiModels(c *gin.Context) {
c.Status(http.StatusOK)
c.Header("Content-Type", "application/json; charset=UTF-8")
_, _ = c.Writer.Write([]byte(`{"models":[{"name":"models/gemini-2.5-flash","version":"001","displayName":"Gemini `))
_, _ = c.Writer.Write([]byte(`2.5 Flash","description":"Stable version of Gemini 2.5 Flash, our mid-size multimod`))
_, _ = c.Writer.Write([]byte(`al model that supports up to 1 million tokens, released in June of 2025.","inputTok`))
_, _ = c.Writer.Write([]byte(`enLimit":1048576,"outputTokenLimit":65536,"supportedGenerationMethods":["generateCo`))
_, _ = c.Writer.Write([]byte(`ntent","countTokens","createCachedContent","batchGenerateContent"],"temperature":1,`))
_, _ = c.Writer.Write([]byte(`"topP":0.95,"topK":64,"maxTemperature":2,"thinking":true},{"name":"models/gemini-2.`))
_, _ = c.Writer.Write([]byte(`5-pro","version":"2.5","displayName":"Gemini 2.5 Pro","description":"Stable release`))
_, _ = c.Writer.Write([]byte(` (June 17th, 2025) of Gemini 2.5 Pro","inputTokenLimit":1048576,"outputTokenLimit":`))
_, _ = c.Writer.Write([]byte(`65536,"supportedGenerationMethods":["generateContent","countTokens","createCachedCo`))
_, _ = c.Writer.Write([]byte(`ntent","batchGenerateContent"],"temperature":1,"topP":0.95,"topK":64,"maxTemperatur`))
_, _ = c.Writer.Write([]byte(`e":2,"thinking":true}],"nextPageToken":""}`))
}
func (h *APIHandlers) GeminiGetHandler(c *gin.Context) {
var request struct {
Action string `uri:"action" binding:"required"`
}
if err := c.ShouldBindUri(&person); err != nil {
if err := c.ShouldBindUri(&request); err != nil {
c.JSON(http.StatusBadRequest, ErrorResponse{
Error: ErrorDetail{
Message: fmt.Sprintf("Invalid request: %v", err),
@@ -27,7 +43,45 @@ func (h *APIHandlers) GeminiHandler(c *gin.Context) {
})
return
}
action := strings.Split(person.Action, ":")
if request.Action == "gemini-2.5-pro" {
c.Status(http.StatusOK)
c.Header("Content-Type", "application/json; charset=UTF-8")
_, _ = c.Writer.Write([]byte(`{"name":"models/gemini-2.5-pro","version":"2.5","displayName":"Gemini 2.5 Pro",`))
_, _ = c.Writer.Write([]byte(`"description":"Stable release (June 17th, 2025) of Gemini 2.5 Pro","inputTokenL`))
_, _ = c.Writer.Write([]byte(`imit":1048576,"outputTokenLimit":65536,"supportedGenerationMethods":["generateC`))
_, _ = c.Writer.Write([]byte(`ontent","countTokens","createCachedContent","batchGenerateContent"],"temperatur`))
_, _ = c.Writer.Write([]byte(`e":1,"topP":0.95,"topK":64,"maxTemperature":2,"thinking":true}`))
} else if request.Action == "gemini-2.5-flash" {
c.Status(http.StatusOK)
c.Header("Content-Type", "application/json; charset=UTF-8")
_, _ = c.Writer.Write([]byte(`{"name":"models/gemini-2.5-flash","version":"001","displayName":"Gemini 2.5 Fla`))
_, _ = c.Writer.Write([]byte(`sh","description":"Stable version of Gemini 2.5 Flash, our mid-size multimodal `))
_, _ = c.Writer.Write([]byte(`model that supports up to 1 million tokens, released in June of 2025.","inputTo`))
_, _ = c.Writer.Write([]byte(`kenLimit":1048576,"outputTokenLimit":65536,"supportedGenerationMethods":["gener`))
_, _ = c.Writer.Write([]byte(`ateContent","countTokens","createCachedContent","batchGenerateContent"],"temper`))
_, _ = c.Writer.Write([]byte(`ature":1,"topP":0.95,"topK":64,"maxTemperature":2,"thinking":true}`))
} else {
c.Status(http.StatusNotFound)
_, _ = c.Writer.Write([]byte(
`{"error":{"message":"Not Found","code":404,"status":"NOT_FOUND"}}`,
))
}
}
func (h *APIHandlers) GeminiHandler(c *gin.Context) {
var request struct {
Action string `uri:"action" binding:"required"`
}
if err := c.ShouldBindUri(&request); err != nil {
c.JSON(http.StatusBadRequest, ErrorResponse{
Error: ErrorDetail{
Message: fmt.Sprintf("Invalid request: %v", err),
Type: "invalid_request_error",
},
})
return
}
action := strings.Split(request.Action, ":")
if len(action) != 2 {
c.JSON(http.StatusNotFound, ErrorResponse{
Error: ErrorDetail{
@@ -47,10 +101,21 @@ func (h *APIHandlers) GeminiHandler(c *gin.Context) {
h.geminiGenerateContent(c, rawJson)
} else if method == "streamGenerateContent" {
h.geminiStreamGenerateContent(c, rawJson)
} else if method == "countTokens" {
h.geminiCountTokens(c, rawJson)
}
}
func (h *APIHandlers) geminiStreamGenerateContent(c *gin.Context, rawJson []byte) {
alt := h.getAlt(c)
if alt == "" {
c.Header("Content-Type", "text/event-stream")
c.Header("Cache-Control", "no-cache")
c.Header("Connection", "keep-alive")
c.Header("Access-Control-Allow-Origin", "*")
}
// Get the http.Flusher interface to manually flush the response.
flusher, ok := c.Writer.(http.Flusher)
if !ok {
@@ -118,7 +183,7 @@ outLoop:
}
// Send the message and receive response chunks and errors via channels.
respChan, errChan := cliClient.SendRawMessageStream(cliCtx, rawJson)
respChan, errChan := cliClient.SendRawMessageStream(cliCtx, rawJson, alt)
for {
select {
// Handle client disconnection.
@@ -135,14 +200,33 @@ outLoop:
return
} else {
if cliClient.GetGenerativeLanguageAPIKey() == "" {
responseResult := gjson.GetBytes(chunk, "response")
if responseResult.Exists() {
chunk = []byte(responseResult.Raw)
if alt == "" {
responseResult := gjson.GetBytes(chunk, "response")
if responseResult.Exists() {
chunk = []byte(responseResult.Raw)
}
} else {
chunkTemplate := "[]"
responseResult := gjson.ParseBytes(chunk)
if responseResult.IsArray() {
responseResultItems := responseResult.Array()
for i := 0; i < len(responseResultItems); i++ {
responseResultItem := responseResultItems[i]
if responseResultItem.Get("response").Exists() {
chunkTemplate, _ = sjson.SetRaw(chunkTemplate, "-1", responseResultItem.Get("response").Raw)
}
}
}
chunk = []byte(chunkTemplate)
}
}
_, _ = c.Writer.Write([]byte("data: "))
_, _ = c.Writer.Write(chunk)
_, _ = c.Writer.Write([]byte("\n\n"))
if alt == "" {
_, _ = c.Writer.Write([]byte("data: "))
_, _ = c.Writer.Write(chunk)
_, _ = c.Writer.Write([]byte("\n\n"))
} else {
_, _ = c.Writer.Write(chunk)
}
flusher.Flush()
}
// Handle errors from the backend.
@@ -165,9 +249,79 @@ outLoop:
}
}
func (h *APIHandlers) geminiCountTokens(c *gin.Context, rawJson []byte) {
c.Header("Content-Type", "application/json")
alt := h.getAlt(c)
// orgRawJson := rawJson
modelResult := gjson.GetBytes(rawJson, "model")
modelName := modelResult.String()
cliCtx, cliCancel := context.WithCancel(context.Background())
var cliClient *client.Client
defer func() {
if cliClient != nil {
cliClient.RequestMutex.Unlock()
}
}()
for {
var errorResponse *client.ErrorMessage
cliClient, errorResponse = h.getClient(modelName)
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
_, _ = fmt.Fprint(c.Writer, errorResponse.Error)
cliCancel()
return
}
if glAPIKey := cliClient.GetGenerativeLanguageAPIKey(); glAPIKey != "" {
log.Debugf("Request use generative language API Key: %s", glAPIKey)
} else {
log.Debugf("Request use account: %s, project id: %s", cliClient.GetEmail(), cliClient.GetProjectID())
template := `{"request":{}}`
if gjson.GetBytes(rawJson, "generateContentRequest").Exists() {
template, _ = sjson.SetRaw(template, "request", gjson.GetBytes(rawJson, "generateContentRequest").Raw)
template, _ = sjson.Delete(template, "generateContentRequest")
} else if gjson.GetBytes(rawJson, "contents").Exists() {
template, _ = sjson.SetRaw(template, "request.contents", gjson.GetBytes(rawJson, "contents").Raw)
template, _ = sjson.Delete(template, "contents")
}
rawJson = []byte(template)
}
resp, err := cliClient.SendRawTokenCount(cliCtx, rawJson, alt)
if err != nil {
if err.StatusCode == 429 && h.cfg.QuotaExceeded.SwitchProject {
continue
} else {
c.Status(err.StatusCode)
_, _ = c.Writer.Write([]byte(err.Error.Error()))
cliCancel()
// log.Debugf(err.Error.Error())
// log.Debugf(string(rawJson))
// log.Debugf(string(orgRawJson))
}
break
} else {
if cliClient.GetGenerativeLanguageAPIKey() == "" {
responseResult := gjson.GetBytes(resp, "response")
if responseResult.Exists() {
resp = []byte(responseResult.Raw)
}
}
_, _ = c.Writer.Write(resp)
cliCancel()
break
}
}
}
func (h *APIHandlers) geminiGenerateContent(c *gin.Context, rawJson []byte) {
c.Header("Content-Type", "application/json")
alt := h.getAlt(c)
modelResult := gjson.GetBytes(rawJson, "model")
modelName := modelResult.String()
cliCtx, cliCancel := context.WithCancel(context.Background())
@@ -217,7 +371,7 @@ func (h *APIHandlers) geminiGenerateContent(c *gin.Context, rawJson []byte) {
} else {
log.Debugf("Request use account: %s, project id: %s", cliClient.GetEmail(), cliClient.GetProjectID())
}
resp, err := cliClient.SendRawMessage(cliCtx, rawJson)
resp, err := cliClient.SendRawMessage(cliCtx, rawJson, alt)
if err != nil {
if err.StatusCode == 429 && h.cfg.QuotaExceeded.SwitchProject {
continue
@@ -240,3 +394,16 @@ func (h *APIHandlers) geminiGenerateContent(c *gin.Context, rawJson []byte) {
}
}
}
func (h *APIHandlers) getAlt(c *gin.Context) string {
var alt string
var hasAlt bool
alt, hasAlt = c.GetQuery("alt")
if !hasAlt {
alt, _ = c.GetQuery("$alt")
}
if alt == "sse" {
return ""
}
return alt
}

View File

@@ -86,6 +86,10 @@ func (h *APIHandlers) Models(c *gin.Context) {
}
func (h *APIHandlers) getClient(modelName string) (*client.Client, *client.ErrorMessage) {
if len(h.cliClients) == 0 {
return nil, &client.ErrorMessage{StatusCode: 500, Error: fmt.Errorf("no clients available")}
}
var cliClient *client.Client
// Lock the mutex to update the last used client index

View File

@@ -75,8 +75,9 @@ func (s *Server) setupRoutes() {
v1beta := s.engine.Group("/v1beta")
v1beta.Use(AuthMiddleware(s.cfg))
{
v1beta.GET("/models", s.handlers.Models)
v1beta.GET("/models", s.handlers.GeminiModels)
v1beta.POST("/models/:action", s.handlers.GeminiHandler)
v1beta.GET("/models/:action", s.handlers.GeminiGetHandler)
}
// Root endpoint
@@ -151,7 +152,11 @@ func AuthMiddleware(cfg *config.Config) gin.HandlerFunc {
authHeader := c.GetHeader("Authorization")
authHeaderGoogle := c.GetHeader("X-Goog-Api-Key")
authHeaderAnthropic := c.GetHeader("X-Api-Key")
if authHeader == "" && authHeaderGoogle == "" && authHeaderAnthropic == "" {
// Get the API key from the query parameter
apiKeyQuery, _ := c.GetQuery("key")
if authHeader == "" && authHeaderGoogle == "" && authHeaderAnthropic == "" && apiKeyQuery == "" {
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
"error": "Missing API key",
})
@@ -170,7 +175,7 @@ func AuthMiddleware(cfg *config.Config) gin.HandlerFunc {
// Find the API key in the in-memory list
var foundKey string
for i := range cfg.ApiKeys {
if cfg.ApiKeys[i] == apiKey || cfg.ApiKeys[i] == authHeaderGoogle || cfg.ApiKeys[i] == authHeaderAnthropic {
if cfg.ApiKeys[i] == apiKey || cfg.ApiKeys[i] == authHeaderGoogle || cfg.ApiKeys[i] == authHeaderAnthropic || cfg.ApiKeys[i] == apiKeyQuery {
foundKey = cfg.ApiKeys[i]
break
}

View File

@@ -168,11 +168,12 @@ func getTokenFromWeb(ctx context.Context, config *oauth2.Config) (*oauth2.Token,
codeChan := make(chan string)
errChan := make(chan error)
// Create a new HTTP server.
server := &http.Server{Addr: "localhost:8085"}
// Create a new HTTP server with its own multiplexer.
mux := http.NewServeMux()
server := &http.Server{Addr: ":8085", Handler: mux}
config.RedirectURL = "http://localhost:8085/oauth2callback"
http.HandleFunc("/oauth2callback", func(w http.ResponseWriter, r *http.Request) {
mux.HandleFunc("/oauth2callback", func(w http.ResponseWriter, r *http.Request) {
if err := r.URL.Query().Get("error"); err != "" {
_, _ = fmt.Fprintf(w, "Authentication failed: %s", err)
errChan <- fmt.Errorf("authentication failed via callback: %s", err)

View File

@@ -28,7 +28,7 @@ const (
apiVersion = "v1internal"
pluginVersion = "0.1.9"
glEndPoint = "https://generativelanguage.googleapis.com/"
glEndPoint = "https://generativelanguage.googleapis.com"
glApiVersion = "v1beta"
)
@@ -241,7 +241,7 @@ func (c *Client) makeAPIRequest(ctx context.Context, endpoint, method string, bo
}
// APIRequest handles making requests to the CLI API endpoints.
func (c *Client) APIRequest(ctx context.Context, endpoint string, body interface{}, stream bool) (io.ReadCloser, *ErrorMessage) {
func (c *Client) APIRequest(ctx context.Context, endpoint string, body interface{}, alt string, stream bool) (io.ReadCloser, *ErrorMessage) {
var jsonBody []byte
var err error
if byteBody, ok := body.([]byte); ok {
@@ -257,25 +257,39 @@ func (c *Client) APIRequest(ctx context.Context, endpoint string, body interface
if c.glAPIKey == "" {
// Add alt=sse for streaming
url = fmt.Sprintf("%s/%s:%s", codeAssistEndpoint, apiVersion, endpoint)
if stream {
if alt == "" && stream {
url = url + "?alt=sse"
} else {
if alt != "" {
url = url + fmt.Sprintf("?$alt=%s", alt)
}
}
} else {
modelResult := gjson.GetBytes(jsonBody, "model")
url = fmt.Sprintf("%s/%s/models/%s:%s", glEndPoint, glApiVersion, modelResult.String(), endpoint)
if stream {
url = url + "?alt=sse"
}
jsonBody = []byte(gjson.GetBytes(jsonBody, "request").Raw)
systemInstructionResult := gjson.GetBytes(jsonBody, "systemInstruction")
if systemInstructionResult.Exists() {
jsonBody, _ = sjson.SetRawBytes(jsonBody, "system_instruction", []byte(systemInstructionResult.Raw))
jsonBody, _ = sjson.DeleteBytes(jsonBody, "systemInstruction")
jsonBody, _ = sjson.DeleteBytes(jsonBody, "session_id")
if endpoint == "countTokens" {
modelResult := gjson.GetBytes(jsonBody, "model")
url = fmt.Sprintf("%s/%s/models/%s:%s", glEndPoint, glApiVersion, modelResult.String(), endpoint)
} else {
modelResult := gjson.GetBytes(jsonBody, "model")
url = fmt.Sprintf("%s/%s/models/%s:%s", glEndPoint, glApiVersion, modelResult.String(), endpoint)
if alt == "" && stream {
url = url + "?alt=sse"
} else {
if alt != "" {
url = url + fmt.Sprintf("?$alt=%s", alt)
}
}
jsonBody = []byte(gjson.GetBytes(jsonBody, "request").Raw)
systemInstructionResult := gjson.GetBytes(jsonBody, "systemInstruction")
if systemInstructionResult.Exists() {
jsonBody, _ = sjson.SetRawBytes(jsonBody, "system_instruction", []byte(systemInstructionResult.Raw))
jsonBody, _ = sjson.DeleteBytes(jsonBody, "systemInstruction")
jsonBody, _ = sjson.DeleteBytes(jsonBody, "session_id")
}
}
}
// log.Debug(string(jsonBody))
// log.Debug(url)
reqBody := bytes.NewBuffer(jsonBody)
req, err := http.NewRequestWithContext(ctx, "POST", url, reqBody)
@@ -392,7 +406,7 @@ func (c *Client) SendMessage(ctx context.Context, rawJson []byte, model string,
}
}
respBody, err := c.APIRequest(ctx, "generateContent", byteRequestBody, false)
respBody, err := c.APIRequest(ctx, "generateContent", byteRequestBody, "", false)
if err != nil {
if err.StatusCode == 429 {
now := time.Now()
@@ -544,7 +558,7 @@ func (c *Client) SendMessageStream(ctx context.Context, rawJson []byte, model st
// Attempt to establish a streaming connection with the API
var err *ErrorMessage
stream, err = c.APIRequest(ctx, "streamGenerateContent", byteRequestBody, true)
stream, err = c.APIRequest(ctx, "streamGenerateContent", byteRequestBody, "", true)
if err != nil {
// Handle quota exceeded errors by marking the model and potentially retrying
if err.StatusCode == 429 {
@@ -593,8 +607,49 @@ func (c *Client) SendMessageStream(ctx context.Context, rawJson []byte, model st
return dataChan, errChan
}
// SendRawTokenCount handles a token count.
func (c *Client) SendRawTokenCount(ctx context.Context, rawJson []byte, alt string) ([]byte, *ErrorMessage) {
modelResult := gjson.GetBytes(rawJson, "model")
model := modelResult.String()
modelName := model
for {
if c.isModelQuotaExceeded(modelName) {
if c.cfg.QuotaExceeded.SwitchPreviewModel && c.glAPIKey == "" {
modelName = c.getPreviewModel(model)
if modelName != "" {
log.Debugf("Model %s is quota exceeded. Switch to preview model %s", model, modelName)
rawJson, _ = sjson.SetBytes(rawJson, "model", modelName)
continue
}
}
return nil, &ErrorMessage{
StatusCode: 429,
Error: fmt.Errorf(`{"error":{"code":429,"message":"All the models of '%s' are quota exceeded","status":"RESOURCE_EXHAUSTED"}}`, model),
}
}
respBody, err := c.APIRequest(ctx, "countTokens", rawJson, alt, false)
if err != nil {
if err.StatusCode == 429 {
now := time.Now()
c.modelQuotaExceeded[modelName] = &now
if c.cfg.QuotaExceeded.SwitchPreviewModel && c.glAPIKey == "" {
continue
}
}
return nil, err
}
delete(c.modelQuotaExceeded, modelName)
bodyBytes, errReadAll := io.ReadAll(respBody)
if errReadAll != nil {
return nil, &ErrorMessage{StatusCode: 500, Error: errReadAll}
}
return bodyBytes, nil
}
}
// SendRawMessage handles a single conversational turn, including tool calls.
func (c *Client) SendRawMessage(ctx context.Context, rawJson []byte) ([]byte, *ErrorMessage) {
func (c *Client) SendRawMessage(ctx context.Context, rawJson []byte, alt string) ([]byte, *ErrorMessage) {
if c.glAPIKey == "" {
rawJson, _ = sjson.SetBytes(rawJson, "project", c.GetProjectID())
}
@@ -618,7 +673,7 @@ func (c *Client) SendRawMessage(ctx context.Context, rawJson []byte) ([]byte, *E
}
}
respBody, err := c.APIRequest(ctx, "generateContent", rawJson, false)
respBody, err := c.APIRequest(ctx, "generateContent", rawJson, alt, false)
if err != nil {
if err.StatusCode == 429 {
now := time.Now()
@@ -639,7 +694,7 @@ func (c *Client) SendRawMessage(ctx context.Context, rawJson []byte) ([]byte, *E
}
// SendRawMessageStream handles a single conversational turn, including tool calls.
func (c *Client) SendRawMessageStream(ctx context.Context, rawJson []byte) (<-chan []byte, <-chan *ErrorMessage) {
func (c *Client) SendRawMessageStream(ctx context.Context, rawJson []byte, alt string) (<-chan []byte, <-chan *ErrorMessage) {
dataTag := []byte("data: ")
errChan := make(chan *ErrorMessage)
dataChan := make(chan []byte)
@@ -672,7 +727,7 @@ func (c *Client) SendRawMessageStream(ctx context.Context, rawJson []byte) (<-ch
return
}
var err *ErrorMessage
stream, err = c.APIRequest(ctx, "streamGenerateContent", rawJson, true)
stream, err = c.APIRequest(ctx, "streamGenerateContent", rawJson, alt, true)
if err != nil {
if err.StatusCode == 429 {
now := time.Now()
@@ -688,21 +743,32 @@ func (c *Client) SendRawMessageStream(ctx context.Context, rawJson []byte) (<-ch
break
}
scanner := bufio.NewScanner(stream)
for scanner.Scan() {
line := scanner.Bytes()
if bytes.HasPrefix(line, dataTag) {
dataChan <- line[6:]
if alt == "" {
scanner := bufio.NewScanner(stream)
for scanner.Scan() {
line := scanner.Bytes()
if bytes.HasPrefix(line, dataTag) {
dataChan <- line[6:]
}
}
}
if errScanner := scanner.Err(); errScanner != nil {
errChan <- &ErrorMessage{500, errScanner}
_ = stream.Close()
return
}
if errScanner := scanner.Err(); errScanner != nil {
errChan <- &ErrorMessage{500, errScanner}
_ = stream.Close()
return
}
} else {
data, err := io.ReadAll(stream)
if err != nil {
errChan <- &ErrorMessage{500, err}
_ = stream.Close()
return
}
dataChan <- data
}
_ = stream.Close()
}()
return dataChan, errChan
@@ -754,7 +820,7 @@ func (c *Client) CheckCloudAPIIsEnabled() (bool, error) {
// A simple request to test the API endpoint.
requestBody := fmt.Sprintf(`{"project":"%s","request":{"contents":[{"role":"user","parts":[{"text":"Be concise. What is the capital of France?"}]}],"generationConfig":{"thinkingConfig":{"include_thoughts":false,"thinkingBudget":0}}},"model":"gemini-2.5-flash"}`, c.tokenStorage.ProjectID)
stream, err := c.APIRequest(ctx, "streamGenerateContent", []byte(requestBody), true)
stream, err := c.APIRequest(ctx, "streamGenerateContent", []byte(requestBody), "", true)
if err != nil {
// If a 403 Forbidden error occurs, it likely means the API is not enabled.
if err.StatusCode == 403 {
@@ -771,6 +837,7 @@ func (c *Client) CheckCloudAPIIsEnabled() (bool, error) {
)
}
}
log.Warnf("\n\nPlease copy this message and create an issue.\n\n%s\n\n", errJson)
return false, nil
}
return false, err.Error

View File

@@ -73,6 +73,7 @@ func DoLogin(cfg *config.Config, projectID string) {
// If the check fails (returns false), the CheckCloudAPIIsEnabled function
// will have already printed instructions, so we can just exit.
if !isChecked {
log.Fatal("Failed to check if Cloud AI API is enabled. If you encounter an error message, please create an issue.")
return
}
}