Mirror of https://github.com/router-for-me/CLIProxyAPI.git (synced 2026-02-02 12:30:50 +08:00)

Compare commits: 144 commits
@@ -17,9 +17,6 @@ MANAGEMENT_API.md
MANAGEMENT_API_CN.md
LICENSE

# Example configuration
config.example.yaml

# Runtime data folders (should be mounted as volumes)
auths/*
logs/*

.gitignore (vendored) - 5 changes
@@ -10,5 +10,8 @@ auths/*
.serena/*
AGENTS.md
CLAUDE.md
GEMINI.md
*.exe
temp/*
temp/*
cli-proxy-api
static/*
Dockerfile
@@ -22,6 +22,8 @@ RUN mkdir /CLIProxyAPI

COPY --from=builder ./app/CLIProxyAPI /CLIProxyAPI/CLIProxyAPI

COPY config.example.yaml /CLIProxyAPI/config.example.yaml

WORKDIR /CLIProxyAPI

EXPOSE 8317

LICENSE - 3 changes
@@ -1,6 +1,7 @@
MIT License

Copyright (c) 2025 Luis Pater
Copyright (c) 2025-2025.9 Luis Pater
Copyright (c) 2025.9-present Router-For.ME

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

MANAGEMENT_API.md
@@ -95,7 +95,7 @@ If a plaintext key is detected in the config at startup, it will be bcrypt‑has
```
- Response:
```json
{"debug":true,"proxy-url":"","api-keys":["1...5","JS...W"],"quota-exceeded":{"switch-project":true,"switch-preview-model":true},"generative-language-api-key":["AI...01", "AI...02", "AI...03"],"request-log":true,"request-retry":3,"claude-api-key":[{"api-key":"cr...56","base-url":"https://example.com/api"},{"api-key":"cr...e3","base-url":"http://example.com:3000/api"},{"api-key":"sk-...q2","base-url":"https://example.com"}],"codex-api-key":[{"api-key":"sk...01","base-url":"https://example/v1"}],"openai-compatibility":[{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":["sk...01"],"models":[{"name":"moonshotai/kimi-k2:free","alias":"kimi-k2"}]},{"name":"iflow","base-url":"https://apis.iflow.cn/v1","api-keys":["sk...7e"],"models":[{"name":"deepseek-v3.1","alias":"deepseek-v3.1"},{"name":"glm-4.5","alias":"glm-4.5"},{"name":"kimi-k2","alias":"kimi-k2"}]}]}
{"debug":true,"proxy-url":"","api-keys":["1...5","JS...W"],"quota-exceeded":{"switch-project":true,"switch-preview-model":true},"generative-language-api-key":["AI...01","AI...02","AI...03"],"request-log":true,"request-retry":3,"claude-api-key":[{"api-key":"cr...56","base-url":"https://example.com/api","proxy-url":"socks5://proxy.example.com:1080"},{"api-key":"cr...e3","base-url":"http://example.com:3000/api","proxy-url":""},{"api-key":"sk-...q2","base-url":"https://example.com","proxy-url":""}],"codex-api-key":[{"api-key":"sk...01","base-url":"https://example/v1","proxy-url":""}],"openai-compatibility":[{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-key-entries":[{"api-key":"sk...01","proxy-url":""}],"models":[{"name":"moonshotai/kimi-k2:free","alias":"kimi-k2"}]},{"name":"iflow","base-url":"https://apis.iflow.cn/v1","api-key-entries":[{"api-key":"sk...7e","proxy-url":"socks5://proxy.example.com:1080"}],"models":[{"name":"deepseek-v3.1","alias":"deepseek-v3.1"},{"name":"glm-4.5","alias":"glm-4.5"},{"name":"kimi-k2","alias":"kimi-k2"}]}]}
```

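The hunk above notes that a plaintext `remote-management.secret-key` found in the config is bcrypt-hashed at startup and persisted back. A minimal, illustrative sketch of that kind of behaviour using `golang.org/x/crypto/bcrypt` (this is an assumption about the approach, not the project's actual startup code):

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/crypto/bcrypt"
)

// hashIfPlaintext returns a bcrypt hash for a plaintext secret key.
// bcrypt hashes start with "$2", so an already-hashed value is returned unchanged.
func hashIfPlaintext(secretKey string) (string, error) {
	if strings.HasPrefix(secretKey, "$2") {
		return secretKey, nil // already looks like a bcrypt hash
	}
	hashed, err := bcrypt.GenerateFromPassword([]byte(secretKey), bcrypt.DefaultCost)
	if err != nil {
		return "", err
	}
	return string(hashed), nil
}

func main() {
	hashed, err := hashIfPlaintext("my-management-key")
	if err != nil {
		panic(err)
	}
	fmt.Println(hashed)

	// Verifying a presented management key against the stored hash.
	err = bcrypt.CompareHashAndPassword([]byte(hashed), []byte("my-management-key"))
	fmt.Println("key accepted:", err == nil)
}
```
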
### Debug
@@ -335,14 +335,14 @@ These endpoints update the inline `config-api-key` provider inside the `auth.pro
```
- Response:
```json
{ "codex-api-key": [ { "api-key": "sk-a", "base-url": "" } ] }
{ "codex-api-key": [ { "api-key": "sk-a", "base-url": "", "proxy-url": "" } ] }
```
- PUT `/codex-api-key` — Replace the list
- Request:
```bash
curl -X PUT -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
  -d '[{"api-key":"sk-a"},{"api-key":"sk-b","base-url":"https://c.example.com"}]' \
  -d '[{"api-key":"sk-a","proxy-url":"socks5://proxy.example.com:1080"},{"api-key":"sk-b","base-url":"https://c.example.com","proxy-url":""}]' \
  http://localhost:8317/v0/management/codex-api-key
```
- Response:
@@ -354,14 +354,14 @@ These endpoints update the inline `config-api-key` provider inside the `auth.pro
```bash
curl -X PATCH -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
  -d '{"index":1,"value":{"api-key":"sk-b2","base-url":"https://c.example.com"}}' \
  -d '{"index":1,"value":{"api-key":"sk-b2","base-url":"https://c.example.com","proxy-url":""}}' \
  http://localhost:8317/v0/management/codex-api-key
```
- Request (by match):
```bash
curl -X PATCH -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
  -d '{"match":"sk-a","value":{"api-key":"sk-a","base-url":""}}' \
  -d '{"match":"sk-a","value":{"api-key":"sk-a","base-url":"","proxy-url":"socks5://proxy.example.com:1080"}}' \
  http://localhost:8317/v0/management/codex-api-key
```
- Response:
@@ -430,22 +430,22 @@ These endpoints update the inline `config-api-key` provider inside the `auth.pro

### Claude API KEY (object array)
- GET `/claude-api-key` — List all
- Request:
```bash
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/claude-api-key
```
- Response:
```json
{ "claude-api-key": [ { "api-key": "sk-a", "base-url": "" } ] }
```
- Request:
```bash
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/claude-api-key
```
- Response:
```json
{ "claude-api-key": [ { "api-key": "sk-a", "base-url": "", "proxy-url": "" } ] }
```
- PUT `/claude-api-key` — Replace the list
- Request:
```bash
curl -X PUT -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
  -d '[{"api-key":"sk-a"},{"api-key":"sk-b","base-url":"https://c.example.com"}]' \
  http://localhost:8317/v0/management/claude-api-key
```
- Request:
```bash
curl -X PUT -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
  -d '[{"api-key":"sk-a","proxy-url":"socks5://proxy.example.com:1080"},{"api-key":"sk-b","base-url":"https://c.example.com","proxy-url":""}]' \
  http://localhost:8317/v0/management/claude-api-key
```
- Response:
```json
{ "status": "ok" }
@@ -455,16 +455,16 @@ These endpoints update the inline `config-api-key` provider inside the `auth.pro
```bash
curl -X PATCH -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
  -d '{"index":1,"value":{"api-key":"sk-b2","base-url":"https://c.example.com"}}' \
  http://localhost:8317/v0/management/claude-api-key
```
  -d '{"index":1,"value":{"api-key":"sk-b2","base-url":"https://c.example.com","proxy-url":""}}' \
  http://localhost:8317/v0/management/claude-api-key
```
- Request (by match):
```bash
curl -X PATCH -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
  -d '{"match":"sk-a","value":{"api-key":"sk-a","base-url":""}}' \
  http://localhost:8317/v0/management/claude-api-key
```
  -d '{"match":"sk-a","value":{"api-key":"sk-a","base-url":"","proxy-url":"socks5://proxy.example.com:1080"}}' \
  http://localhost:8317/v0/management/claude-api-key
```
- Response:
```json
{ "status": "ok" }
@@ -491,14 +491,14 @@ These endpoints update the inline `config-api-key` provider inside the `auth.pro
```
- Response:
```json
{ "openai-compatibility": [ { "name": "openrouter", "base-url": "https://openrouter.ai/api/v1", "api-keys": [], "models": [] } ] }
{ "openai-compatibility": [ { "name": "openrouter", "base-url": "https://openrouter.ai/api/v1", "api-key-entries": [ { "api-key": "sk", "proxy-url": "" } ], "models": [] } ] }
```
- PUT `/openai-compatibility` — Replace the list
- Request:
```bash
curl -X PUT -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
  -d '[{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":["sk"],"models":[{"name":"m","alias":"a"}]}]' \
  -d '[{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-key-entries":[{"api-key":"sk","proxy-url":""}],"models":[{"name":"m","alias":"a"}]}]' \
  http://localhost:8317/v0/management/openai-compatibility
```
- Response:
@@ -510,20 +510,23 @@ These endpoints update the inline `config-api-key` provider inside the `auth.pro
```bash
curl -X PATCH -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
  -d '{"name":"openrouter","value":{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":[],"models":[]}}' \
  -d '{"name":"openrouter","value":{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-key-entries":[{"api-key":"sk","proxy-url":""}],"models":[]}}' \
  http://localhost:8317/v0/management/openai-compatibility
```
- Request (by index):
```bash
curl -X PATCH -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
  -d '{"index":0,"value":{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":[],"models":[]}}' \
  -d '{"index":0,"value":{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-key-entries":[{"api-key":"sk","proxy-url":""}],"models":[]}}' \
  http://localhost:8317/v0/management/openai-compatibility
```
- Response:
```json
{ "status": "ok" }
```

- Notes:
  - Legacy `api-keys` input remains accepted; keys are migrated into `api-key-entries` automatically so the legacy field will eventually remain empty in responses.
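The note above says legacy `api-keys` values are migrated into `api-key-entries`. A minimal sketch of what such a migration could look like; the struct and field names here mirror the JSON shown above but are assumptions for illustration, not the project's actual types:

```go
package main

import "fmt"

// APIKeyEntry pairs a key with an optional per-key proxy override.
type APIKeyEntry struct {
	APIKey   string `json:"api-key"`
	ProxyURL string `json:"proxy-url"`
}

// Provider is a reduced view of one openai-compatibility entry.
type Provider struct {
	Name          string        `json:"name"`
	APIKeys       []string      `json:"api-keys,omitempty"`        // legacy field
	APIKeyEntries []APIKeyEntry `json:"api-key-entries,omitempty"` // preferred field
}

// migrateLegacyKeys moves legacy api-keys into api-key-entries, skipping keys
// that already have an entry, and clears the legacy list so it stays empty in responses.
func migrateLegacyKeys(p *Provider) {
	seen := make(map[string]bool, len(p.APIKeyEntries))
	for _, e := range p.APIKeyEntries {
		seen[e.APIKey] = true
	}
	for _, k := range p.APIKeys {
		if !seen[k] {
			p.APIKeyEntries = append(p.APIKeyEntries, APIKeyEntry{APIKey: k})
			seen[k] = true
		}
	}
	p.APIKeys = nil
}

func main() {
	p := Provider{Name: "openrouter", APIKeys: []string{"sk...01", "sk...02"}}
	migrateLegacyKeys(&p)
	fmt.Printf("%+v\n", p)
}
```
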
- DELETE `/openai-compatibility` — Delete (`?name=` or `?index=`)
- Request (by name):
```bash
@@ -636,19 +639,6 @@ These endpoints initiate provider login flows and return a URL to open in a brow
{ "status": "ok", "url": "https://..." }
```

- POST `/gemini-web-token` — Save Gemini Web cookies directly
- Request:
```bash
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
  -H 'Content-Type: application/json' \
  -d '{"secure_1psid": "<__Secure-1PSID>", "secure_1psidts": "<__Secure-1PSIDTS>", "label": "<LABEL>"}' \
  http://localhost:8317/v0/management/gemini-web-token
```
- Response:
```json
{ "status": "ok", "file": "gemini-web-<hash>.json" }
```

- GET `/qwen-auth-url` — Start Qwen login (device flow)
- Request:
```bash
@@ -660,6 +650,17 @@ These endpoints initiate provider login flows and return a URL to open in a brow
{ "status": "ok", "url": "https://..." }
```

- GET `/iflow-auth-url` — Start iFlow login
- Request:
```bash
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
  http://localhost:8317/v0/management/iflow-auth-url
```
- Response:
```json
{ "status": "ok", "url": "https://..." }
```

- GET `/get-auth-status?state=<state>` — Poll OAuth flow status
- Request:
```bash

MANAGEMENT_API_CN.md
The Chinese management API document receives the same edits as MANAGEMENT_API.md above: `proxy-url` fields added to the config, `codex-api-key`, and `claude-api-key` response and request examples, `api-keys` replaced by `api-key-entries` in the `openai-compatibility` examples, the note about automatic migration of legacy `api-keys` added, the `POST /gemini-web-token` example removed, and the `GET /iflow-auth-url` endpoint documented.
README.md - 248 changes
@@ -8,7 +8,7 @@ It now also supports OpenAI Codex (GPT models) and Claude Code via OAuth.

So you can use local or multi-account CLI access with OpenAI (including Responses)/Gemini/Claude-compatible clients and SDKs.

The first Chinese provider has now been added: [Qwen Code](https://github.com/QwenLM/qwen-code).
Chinese providers have now been added: [Qwen Code](https://github.com/QwenLM/qwen-code), [iFlow](https://iflow.cn/).

## Features

@@ -16,19 +16,20 @@ The first Chinese provider has now been added: [Qwen Code](https://github.com/Qw
- OpenAI Codex support (GPT models) via OAuth login
- Claude Code support via OAuth login
- Qwen Code support via OAuth login
- Gemini Web support via cookie-based login
- iFlow support via OAuth login
- Streaming and non-streaming responses
- Function calling/tools support
- Multimodal input support (text and images)
- Multiple accounts with round-robin load balancing (Gemini, OpenAI, Claude and Qwen)
- Simple CLI authentication flows (Gemini, OpenAI, Claude and Qwen)
- Multiple accounts with round-robin load balancing (Gemini, OpenAI, Claude, Qwen and iFlow)
- Simple CLI authentication flows (Gemini, OpenAI, Claude, Qwen and iFlow)
- Generative Language API Key support
- Gemini CLI multi-account load balancing
- Claude Code multi-account load balancing
- Qwen Code multi-account load balancing
- iFlow multi-account load balancing
- OpenAI Codex multi-account load balancing
- OpenAI-compatible upstream providers via config (e.g., OpenRouter)
- Reusable Go SDK for embedding the proxy (see `docs/sdk-usage.md`, Chinese: `docs/sdk-usage_CN.md`)
- Reusable Go SDK for embedding the proxy (see `docs/sdk-usage.md`)

## Installation

@@ -39,6 +40,7 @@ The first Chinese provider has now been added: [Qwen Code](https://github.com/Qw
- An OpenAI account for Codex/GPT access (optional)
- An Anthropic account for Claude Code access (optional)
- A Qwen Chat account for Qwen Code access (optional)
- An iFlow account for iFlow access (optional)

### Building from Source

@@ -59,6 +61,12 @@ The first Chinese provider has now been added: [Qwen Code](https://github.com/Qw
go build -o cli-proxy-api.exe ./cmd/server
```

### Installation via Homebrew

```bash
brew install cliproxyapi
brew services start cliproxyapi
```

## Usage

@@ -72,9 +80,11 @@ A cross-platform desktop GUI client for CLIProxyAPI.

A web-based management center for CLIProxyAPI.

Set `remote-management.disable-control-panel` to `true` if you prefer to host the management UI elsewhere; the server will skip downloading `management.html` and `/management.html` will return 404.

### Authentication

You can authenticate for Gemini, OpenAI, and/or Claude. All can coexist in the same `auth-dir` and will be load balanced.
You can authenticate for Gemini, OpenAI, Claude, Qwen, and/or iFlow. All can coexist in the same `auth-dir` and will be load balanced.

- Gemini (Google):
```bash
@@ -88,13 +98,6 @@ You can authenticate for Gemini, OpenAI, and/or Claude. All can coexist in the s

Options: add `--no-browser` to print the login URL instead of opening a browser. The local OAuth callback uses port `8085`.

- Gemini Web (via Cookies):
This method authenticates by simulating a browser, using cookies obtained from the Gemini website.
```bash
./cli-proxy-api --gemini-web-auth
```
You will be prompted to enter your `__Secure-1PSID` and `__Secure-1PSIDTS` values. Please retrieve these cookies from your browser's developer tools.

- OpenAI (Codex/GPT via OAuth):
```bash
./cli-proxy-api --codex-login
@@ -113,6 +116,12 @@ You can authenticate for Gemini, OpenAI, and/or Claude. All can coexist in the s
```
Options: add `--no-browser` to print the login URL instead of opening a browser. Uses Qwen Chat's OAuth device flow.

- iFlow (iFlow via OAuth):
```bash
./cli-proxy-api --iflow-login
```
Options: add `--no-browser` to print the login URL instead of opening a browser. The local OAuth callback uses port `11451`.

### Starting the Server

@@ -154,7 +163,7 @@ Request body example:
```

Notes:
- Use a `gemini-*` model for Gemini (e.g., "gemini-2.5-pro"), a `gpt-*` model for OpenAI (e.g., "gpt-5"), a `claude-*` model for Claude (e.g., "claude-3-5-sonnet-20241022"), or a `qwen-*` model for Qwen (e.g., "qwen3-coder-plus"). The proxy will route to the correct provider automatically.
- Use a `gemini-*` model for Gemini (e.g., "gemini-2.5-pro"), a `gpt-*` model for OpenAI (e.g., "gpt-5"), a `claude-*` model for Claude (e.g., "claude-3-5-sonnet-20241022"), a `qwen-*` model for Qwen (e.g., "qwen3-coder-plus"), or an iFlow-supported model (e.g., "tstars2.0", "deepseek-v3.1", "kimi-k2", etc.). The proxy will route to the correct provider automatically.

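The note above describes routing by model-name prefix (`gemini-*`, `gpt-*`, `claude-*`, `qwen-*`, with iFlow models matched by name). A hedged sketch of that dispatch idea; the provider labels, the iFlow model subset, and the fallback are assumptions, not the proxy's actual router:

```go
package main

import (
	"fmt"
	"strings"
)

// providerForModel guesses the upstream provider from the requested model name,
// mirroring the routing rule described in the note above.
func providerForModel(model string) string {
	switch {
	case strings.HasPrefix(model, "gemini-"):
		return "gemini"
	case strings.HasPrefix(model, "gpt-"):
		return "openai"
	case strings.HasPrefix(model, "claude-"):
		return "claude"
	case strings.HasPrefix(model, "qwen"):
		return "qwen"
	}
	// Small illustrative subset of iFlow model names.
	iflowModels := map[string]bool{"tstars2.0": true, "deepseek-v3.1": true, "kimi-k2": true, "glm-4.6": true}
	if iflowModels[model] {
		return "iflow"
	}
	return "openai-compatibility" // e.g. an aliased model from the config
}

func main() {
	for _, m := range []string{"gemini-2.5-pro", "gpt-5", "claude-sonnet-4-5-20250929", "qwen3-coder-plus", "kimi-k2"} {
		fmt.Println(m, "->", providerForModel(m))
	}
}
```
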
#### Claude Messages (SSE-compatible)

@@ -247,15 +256,29 @@ console.log(await claudeResponse.json());
- gemini-2.5-pro
- gemini-2.5-flash
- gemini-2.5-flash-lite
- gemini-2.5-flash-image
- gemini-2.5-flash-image-preview
- gpt-5
- gpt-5-codex
- claude-opus-4-1-20250805
- claude-opus-4-20250514
- claude-sonnet-4-20250514
- claude-sonnet-4-5-20250929
- claude-3-7-sonnet-20250219
- claude-3-5-haiku-20241022
- qwen3-coder-plus
- qwen3-coder-flash
- qwen3-max
- qwen3-vl-plus
- deepseek-v3.2
- deepseek-v3.1
- deepseek-r1
- deepseek-v3
- kimi-k2
- glm-4.5
- glm-4.6
- tstars2.0
- And other iFlow-supported models
- Gemini models auto-switch to preview variants when needed

## Configuration

@@ -276,37 +299,33 @@ The server uses a YAML configuration file (`config.yaml`) located in the project
| `request-retry` | integer | 0 | Number of times to retry a request. Retries will occur if the HTTP response code is 403, 408, 500, 502, 503, or 504. |
| `remote-management.allow-remote` | boolean | false | Whether to allow remote (non-localhost) access to the management API. If false, only localhost can access. A management key is still required for localhost. |
| `remote-management.secret-key` | string | "" | Management key. If a plaintext value is provided, it will be hashed on startup using bcrypt and persisted back to the config file. If empty, the entire management API is disabled (404). |
| `remote-management.disable-control-panel` | boolean | false | When true, skip downloading `management.html` and return 404 for `/management.html`, effectively disabling the bundled management UI. |
| `quota-exceeded` | object | {} | Configuration for handling quota exceeded. |
| `quota-exceeded.switch-project` | boolean | true | Whether to automatically switch to another project when a quota is exceeded. |
| `quota-exceeded.switch-preview-model` | boolean | true | Whether to automatically switch to a preview model when a quota is exceeded. |
| `debug` | boolean | false | Enable debug mode for verbose logging. |
| `logging-to-file` | boolean | true | Write application logs to rotating files instead of stdout. Set to `false` to log to stdout/stderr. |
| `usage-statistics-enabled` | boolean | true | Enable in-memory usage aggregation for management APIs. Disable to drop all collected usage metrics. |
| `auth` | object | {} | Request authentication configuration. |
| `auth.providers` | object[] | [] | Authentication providers. Includes built-in `config-api-key` for inline keys. |
| `auth.providers.*.name` | string | "" | Provider instance name. |
| `auth.providers.*.type` | string | "" | Provider implementation identifier (for example `config-api-key`). |
| `auth.providers.*.api-keys` | string[] | [] | Inline API keys consumed by the `config-api-key` provider. |
| `api-keys` | string[] | [] | Legacy shorthand for inline API keys. Values are mirrored into the `config-api-key` provider for backwards compatibility. |
| `generative-language-api-key` | string[] | [] | List of Generative Language API keys. |
| `codex-api-key` | object | {} | List of Codex API keys. |
| `codex-api-key.api-key` | string | "" | Codex API key. |
| `codex-api-key.base-url` | string | "" | Custom Codex API endpoint, if you use a third-party API endpoint. |
| `claude-api-key` | object | {} | List of Claude API keys. |
| `claude-api-key.api-key` | string | "" | Claude API key. |
| `claude-api-key.base-url` | string | "" | Custom Claude API endpoint, if you use a third-party API endpoint. |
| `openai-compatibility` | object[] | [] | Upstream OpenAI-compatible providers configuration (name, base-url, api-keys, models). |
| `openai-compatibility.*.name` | string | "" | The name of the provider. It will be used in the user agent and other places. |
| `openai-compatibility.*.base-url` | string | "" | The base URL of the provider. |
| `openai-compatibility.*.api-keys` | string[] | [] | The API keys for the provider. Add multiple keys if needed. Omit if unauthenticated access is allowed. |
| `openai-compatibility.*.models` | object[] | [] | The actual model name. |
| `openai-compatibility.*.models.*.name` | string | "" | The models supported by the provider. |
| `openai-compatibility.*.models.*.alias` | string | "" | The alias used in the API. |
| `gemini-web` | object | {} | Configuration specific to the Gemini Web client. |
| `gemini-web.context` | boolean | true | Enables conversation context reuse for continuous dialogue. |
| `gemini-web.code-mode` | boolean | false | Enables code mode for optimized responses in coding-related tasks. |
| `gemini-web.max-chars-per-request` | integer | 1,000,000 | The maximum number of characters to send to Gemini Web in a single request. |
| `gemini-web.disable-continuation-hint` | boolean | false | Disables the continuation hint for split prompts. |
| `codex-api-key` | object | {} | List of Codex API keys. |
| `codex-api-key.api-key` | string | "" | Codex API key. |
| `codex-api-key.base-url` | string | "" | Custom Codex API endpoint, if you use a third-party API endpoint. |
| `codex-api-key.proxy-url` | string | "" | Proxy URL for this specific API key. Overrides the global proxy-url setting. Supports socks5/http/https protocols. |
| `claude-api-key` | object | {} | List of Claude API keys. |
| `claude-api-key.api-key` | string | "" | Claude API key. |
| `claude-api-key.base-url` | string | "" | Custom Claude API endpoint, if you use a third-party API endpoint. |
| `claude-api-key.proxy-url` | string | "" | Proxy URL for this specific API key. Overrides the global proxy-url setting. Supports socks5/http/https protocols. |
| `openai-compatibility` | object[] | [] | Upstream OpenAI-compatible providers configuration (name, base-url, api-keys, models). |
| `openai-compatibility.*.name` | string | "" | The name of the provider. It will be used in the user agent and other places. |
| `openai-compatibility.*.base-url` | string | "" | The base URL of the provider. |
| `openai-compatibility.*.api-keys` | string[] | [] | (Deprecated) The API keys for the provider. Use api-key-entries instead for per-key proxy support. |
| `openai-compatibility.*.api-key-entries` | object[] | [] | API key entries with optional per-key proxy configuration. Preferred over api-keys. |
| `openai-compatibility.*.api-key-entries.*.api-key` | string | "" | The API key for this entry. |
| `openai-compatibility.*.api-key-entries.*.proxy-url` | string | "" | Proxy URL for this specific API key. Overrides the global proxy-url setting. Supports socks5/http/https protocols. |
| `openai-compatibility.*.models` | object[] | [] | The actual model name. |
| `openai-compatibility.*.models.*.name` | string | "" | The models supported by the provider. |
| `openai-compatibility.*.models.*.alias` | string | "" | The alias used in the API. |
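
The table above documents a per-key `proxy-url` that overrides the global `proxy-url` and supports socks5/http/https. A sketch of how such an override could be turned into a per-key HTTP client in Go; the real project may wire its transports differently, so treat this as illustrative only (`net/http` understands http, https and socks5 proxy URLs):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"time"
)

// clientForKey builds an HTTP client for one API key entry. A non-empty per-key
// proxy overrides the global one; an empty string means "use no proxy".
func clientForKey(globalProxy, keyProxy string) (*http.Client, error) {
	proxyURL := globalProxy
	if keyProxy != "" {
		proxyURL = keyProxy
	}
	transport := &http.Transport{}
	if proxyURL != "" {
		u, err := url.Parse(proxyURL)
		if err != nil {
			return nil, err
		}
		transport.Proxy = http.ProxyURL(u)
	}
	return &http.Client{Transport: transport, Timeout: 60 * time.Second}, nil
}

func main() {
	c, err := clientForKey("", "socks5://proxy.example.com:1080")
	if err != nil {
		panic(err)
	}
	fmt.Printf("per-key client ready: %T\n", c.Transport)
}
```
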
### Example Configuration File

@@ -325,9 +344,17 @@ remote-management:
  # Leave empty to disable the Management API entirely (404 for all /v0/management routes).
  secret-key: ""

  # Disable the bundled management control panel asset download and HTTP route when true.
  disable-control-panel: false

# Authentication directory (supports ~ for home directory). If you use Windows, please set the directory like this: `C:/cli-proxy-api/`
auth-dir: "~/.cli-proxy-api"

# API keys for authentication
api-keys:
  - "your-api-key-1"
  - "your-api-key-2"

# Enable debug logging
debug: false

@@ -348,21 +375,6 @@ quota-exceeded:
  switch-project: true # Whether to automatically switch to another project when a quota is exceeded
  switch-preview-model: true # Whether to automatically switch to a preview model when a quota is exceeded

# Gemini Web client configuration
gemini-web:
  context: true # Enable conversation context reuse
  code-mode: false # Enable code mode
  max-chars-per-request: 1000000 # Max characters per request

# Request authentication providers
auth:
  providers:
    - name: "default"
      type: "config-api-key"
      api-keys:
        - "your-api-key-1"
        - "your-api-key-2"

# API keys for official Generative Language API
generative-language-api-key:
  - "AIzaSy...01"
@@ -374,35 +386,81 @@ generative-language-api-key:
codex-api-key:
  - api-key: "sk-atSM..."
    base-url: "https://www.example.com" # use the custom codex API endpoint
    proxy-url: "socks5://proxy.example.com:1080" # optional: per-key proxy override

# Claude API keys
claude-api-key:
  - api-key: "sk-atSM..." # use the official claude API key, no need to set the base url
  - api-key: "sk-atSM..."
    base-url: "https://www.example.com" # use the custom claude API endpoint
    proxy-url: "socks5://proxy.example.com:1080" # optional: per-key proxy override

# OpenAI compatibility providers
openai-compatibility:
  - name: "openrouter" # The name of the provider; it will be used in the user agent and other places.
    base-url: "https://openrouter.ai/api/v1" # The base URL of the provider.
    api-keys: # The API keys for the provider. Add multiple keys if needed. Omit if unauthenticated access is allowed.
      - "sk-or-v1-...b780"
      - "sk-or-v1-...b781"
    # New format with per-key proxy support (recommended):
    api-key-entries:
      - api-key: "sk-or-v1-...b780"
        proxy-url: "socks5://proxy.example.com:1080" # optional: per-key proxy override
      - api-key: "sk-or-v1-...b781" # without proxy-url
    # Legacy format (still supported, but cannot specify proxy per key):
    # api-keys:
    #   - "sk-or-v1-...b780"
    #   - "sk-or-v1-...b781"
    models: # The models supported by the provider.
      - name: "moonshotai/kimi-k2:free" # The actual model name.
        alias: "kimi-k2" # The alias used in the API.
```

### Git-backed Configuration and Token Store

The application can be configured to use a Git repository as a backend for storing both the `config.yaml` file and the authentication tokens from the `auth-dir`. This allows for centralized management and versioning of your configuration.

To enable this feature, set the `GITSTORE_GIT_URL` environment variable to the URL of your Git repository.

**Environment Variables**

| Variable | Required | Default | Description |
| --- | --- | --- | --- |
| `GITSTORE_GIT_URL` | Yes | | The HTTPS URL of the Git repository to use. |
| `GITSTORE_LOCAL_PATH` | No | Current working directory | The local path where the Git repository will be cloned. Inside Docker, this defaults to `/CLIProxyAPI`. |
| `GITSTORE_GIT_USERNAME` | No | | The username for Git authentication. |
| `GITSTORE_GIT_TOKEN` | No | | The personal access token (or password) for Git authentication. |

**How it Works**

1. **Cloning:** On startup, the application clones the remote Git repository to the `GITSTORE_LOCAL_PATH`.
2. **Configuration:** It then looks for a `config.yaml` inside a `config` directory within the cloned repository.
3. **Bootstrapping:** If `config/config.yaml` does not exist in the repository, the application will copy the local `config.example.yaml` to that location, commit, and push it to the remote repository as an initial configuration. You must have `config.example.yaml` available.
4. **Token Sync:** The `auth-dir` is also managed within this repository. Any changes to authentication tokens (e.g., through a new login) are automatically committed and pushed to the remote Git repository.

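A rough sketch of step 1 (cloning) using the go-git library. The environment variable names come from the table above; the use of go-git, the username fallback, and the option layout are assumptions about how such a store could be implemented, not the project's actual code:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	git "github.com/go-git/go-git/v5"
	githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
)

// cloneGitStore clones the configured repository into localPath, using the
// optional username/token for HTTPS auth. Illustrative only.
func cloneGitStore(localPath string) (*git.Repository, error) {
	opts := &git.CloneOptions{URL: os.Getenv("GITSTORE_GIT_URL")}
	if token := os.Getenv("GITSTORE_GIT_TOKEN"); token != "" {
		username := os.Getenv("GITSTORE_GIT_USERNAME")
		if username == "" {
			username = "git" // assumption: most hosts accept any non-empty username with a token
		}
		opts.Auth = &githttp.BasicAuth{Username: username, Password: token}
	}
	repo, err := git.PlainClone(localPath, false, opts)
	if err == git.ErrRepositoryAlreadyExists {
		return git.PlainOpen(localPath) // reuse an existing clone
	}
	return repo, err
}

func main() {
	path := os.Getenv("GITSTORE_LOCAL_PATH")
	if path == "" {
		path, _ = os.Getwd() // default: current working directory
	}
	if _, err := cloneGitStore(path); err != nil {
		panic(err)
	}
	fmt.Println("git store ready, config expected at", filepath.Join(path, "config", "config.yaml"))
}
```
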
### OpenAI Compatibility Providers

Configure upstream OpenAI-compatible providers (e.g., OpenRouter) via `openai-compatibility`.

- name: provider identifier used internally
- base-url: provider base URL
- api-keys: optional list of API keys (omit if provider allows unauthenticated requests)
- api-key-entries: list of API key entries with optional per-key proxy configuration (recommended)
- api-keys: (deprecated) simple list of API keys without proxy support
- models: list of mappings from upstream model `name` to local `alias`

Example:
Example with per-key proxy support:

```yaml
openai-compatibility:
  - name: "openrouter"
    base-url: "https://openrouter.ai/api/v1"
    api-key-entries:
      - api-key: "sk-or-v1-...b780"
        proxy-url: "socks5://proxy.example.com:1080"
      - api-key: "sk-or-v1-...b781"
    models:
      - name: "moonshotai/kimi-k2:free"
        alias: "kimi-k2"
```

Legacy format (still supported):

```yaml
openai-compatibility:
@@ -510,6 +568,14 @@ export ANTHROPIC_MODEL=qwen3-coder-plus
export ANTHROPIC_SMALL_FAST_MODEL=qwen3-coder-flash
```

Using iFlow models:
```bash
export ANTHROPIC_BASE_URL=http://127.0.0.1:8317
export ANTHROPIC_AUTH_TOKEN=sk-dummy
export ANTHROPIC_MODEL=qwen3-max
export ANTHROPIC_SMALL_FAST_MODEL=qwen3-235b-a22b-instruct
```

## Codex with multiple account load balancing

Start CLI Proxy API server, and then edit the `~/.codex/config.toml` and `~/.codex/auth.json` files.
@@ -541,12 +607,6 @@ Run the following command to login (Gemini OAuth on port 8085):
docker run --rm -p 8085:8085 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --login
```

Run the following command to login (Gemini Web Cookies):

```bash
docker run -it --rm -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --gemini-web-auth
```

Run the following command to login (OpenAI OAuth on port 1455):

```bash
@@ -565,12 +625,30 @@ Run the following command to login (Qwen OAuth):
docker run -it --rm -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --qwen-login
```

Run the following command to login (iFlow OAuth on port 11451):

```bash
docker run --rm -p 11451:11451 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --iflow-login
```

Run the following command to start the server:

```bash
docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest
```

> [!NOTE]
> To use the Git-backed configuration store with Docker, you can pass the `GITSTORE_*` environment variables using the `-e` flag. For example:
>
> ```bash
> docker run --rm -p 8317:8317 \
>   -e GITSTORE_GIT_URL="https://github.com/your/config-repo.git" \
>   -e GITSTORE_GIT_TOKEN="your_personal_access_token" \
>   -v /path/to/your/git-store:/CLIProxyAPI/remote \
>   eceasy/cli-proxy-api:latest
> ```
> In this case, you may not need to mount `config.yaml` or `auth-dir` directly, as they will be managed by the Git store inside the container at the `GITSTORE_LOCAL_PATH` (which defaults to `/CLIProxyAPI` and we are setting it to `/CLIProxyAPI/remote` in this example).

## Run with Docker Compose

1. Clone the repository and navigate into the directory:
@@ -586,6 +664,27 @@ docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.ya
```
*(Note for Windows users: You can use `copy config.example.yaml config.yaml` in CMD or PowerShell.)*

To use the Git-backed configuration store, you can add the `GITSTORE_*` environment variables to your `docker-compose.yml` file under the `cli-proxy-api` service definition. For example:
```yaml
services:
  cli-proxy-api:
    image: eceasy/cli-proxy-api:latest
    container_name: cli-proxy-api
    ports:
      - "8317:8317"
      - "8085:8085"
      - "1455:1455"
      - "54545:54545"
      - "11451:11451"
    environment:
      - GITSTORE_GIT_URL=https://github.com/your/config-repo.git
      - GITSTORE_GIT_TOKEN=your_personal_access_token
    volumes:
      - ./git-store:/CLIProxyAPI/remote # GITSTORE_LOCAL_PATH
    restart: unless-stopped
```
When using the Git store, you may not need to mount `config.yaml` or `auth-dir` directly.

3. Start the service:
- **For most users (recommended):**
Run the following command to start the service using the pre-built image from Docker Hub. The service will run in the background.
@@ -611,10 +710,6 @@ docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.ya
```bash
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --login
```
- **Gemini Web**:
```bash
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI --gemini-web-auth
```
- **OpenAI (Codex)**:
```bash
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --codex-login
@@ -623,10 +718,14 @@ docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.ya
```bash
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --claude-login
```
- **Qwen**:
- **Qwen**:
```bash
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --qwen-login
```
- **iFlow**:
```bash
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --iflow-login
```

5. To view the server logs:
```bash
@@ -660,6 +759,17 @@ Contributions are welcome! Please feel free to submit a Pull Request.
4. Push to the branch (`git push origin feature/amazing-feature`)
5. Open a Pull Request

## Who is with us?

These projects are based on CLIProxyAPI:

### [vibeproxy](https://github.com/automazeio/vibeproxy)

Native macOS menu bar app to use your Claude Code & ChatGPT subscriptions with AI coding tools - no API keys needed

> [!NOTE]
> If you developed a project based on CLIProxyAPI, please open a PR to add it to this list.

## License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

248
README_CN.md
248
README_CN.md
@@ -28,7 +28,7 @@
|
||||
|
||||
您可以使用本地或多账户的CLI方式,通过任何与 OpenAI(包括Responses)/Gemini/Claude 兼容的客户端和SDK进行访问。
|
||||
|
||||
现已新增首个中国提供商:[Qwen Code](https://github.com/QwenLM/qwen-code)。
|
||||
现已新增国内提供商:[Qwen Code](https://github.com/QwenLM/qwen-code)、[iFlow](https://iflow.cn/)。
|
||||
|
||||
## 功能特性
|
||||
|
||||
@@ -36,19 +36,20 @@
|
||||
- 新增 OpenAI Codex(GPT 系列)支持(OAuth 登录)
|
||||
- 新增 Claude Code 支持(OAuth 登录)
|
||||
- 新增 Qwen Code 支持(OAuth 登录)
|
||||
- 新增 Gemini Web 支持(通过 Cookie 登录)
|
||||
- 新增 iFlow 支持(OAuth 登录)
|
||||
- 支持流式与非流式响应
|
||||
- 函数调用/工具支持
|
||||
- 多模态输入(文本、图片)
|
||||
- 多账户支持与轮询负载均衡(Gemini、OpenAI、Claude 与 Qwen)
|
||||
- 简单的 CLI 身份验证流程(Gemini、OpenAI、Claude 与 Qwen)
|
||||
- 多账户支持与轮询负载均衡(Gemini、OpenAI、Claude、Qwen 与 iFlow)
|
||||
- 简单的 CLI 身份验证流程(Gemini、OpenAI、Claude、Qwen 与 iFlow)
|
||||
- 支持 Gemini AIStudio API 密钥
|
||||
- 支持 Gemini CLI 多账户轮询
|
||||
- 支持 Claude Code 多账户轮询
|
||||
- 支持 Qwen Code 多账户轮询
|
||||
- 支持 iFlow 多账户轮询
|
||||
- 支持 OpenAI Codex 多账户轮询
|
||||
- 通过配置接入上游 OpenAI 兼容提供商(例如 OpenRouter)
|
||||
- 可复用的 Go SDK(见 `docs/sdk-usage.md`)
|
||||
- 可复用的 Go SDK(见 `docs/sdk-usage_CN.md`)
|
||||
|
||||
## 安装
|
||||
|
||||
@@ -59,6 +60,7 @@
|
||||
- 有权访问 OpenAI Codex/GPT 的 OpenAI 账户(可选)
|
||||
- 有权访问 Claude Code 的 Anthropic 账户(可选)
|
||||
- 有权访问 Qwen Code 的 Qwen Chat 账户(可选)
|
||||
- 有权访问 iFlow 的 iFlow 账户(可选)
|
||||
|
||||
### 从源码构建
|
||||
|
||||
@@ -73,6 +75,13 @@
|
||||
go build -o cli-proxy-api ./cmd/server
|
||||
```
|
||||
|
||||
### 通过 Homebrew 安装
|
||||
|
||||
```bash
|
||||
brew install cliproxyapi
|
||||
brew services start cliproxyapi
|
||||
```
|
||||
|
||||
## 使用方法
|
||||
|
||||
### 图形客户端与官方 WebUI
|
||||
@@ -85,9 +94,11 @@ CLIProxyAPI 的跨平台桌面图形客户端。
|
||||
|
||||
CLIProxyAPI 的基于 Web 的管理中心。
|
||||
|
||||
如果希望自行托管管理页面,可在配置中将 `remote-management.disable-control-panel` 设为 `true`,服务器将停止下载 `management.html`,并让 `/management.html` 返回 404。
|
||||
|
||||
### 身份验证
|
||||
|
||||
您可以分别为 Gemini、OpenAI 和 Claude 进行身份验证,三者可同时存在于同一个 `auth-dir` 中并参与负载均衡。
|
||||
您可以分别为 Gemini、OpenAI、Claude、Qwen 和 iFlow 进行身份验证,它们可同时存在于同一个 `auth-dir` 中并参与负载均衡。
|
||||
|
||||
- Gemini(Google):
|
||||
```bash
|
||||
@@ -101,13 +112,6 @@ CLIProxyAPI 的基于 Web 的管理中心。
|
||||
|
||||
选项:加上 `--no-browser` 可打印登录地址而不自动打开浏览器。本地 OAuth 回调端口为 `8085`。
|
||||
|
||||
- Gemini Web (通过 Cookie):
|
||||
此方法通过模拟浏览器行为,使用从 Gemini 网站获取的 Cookie 进行身份验证。
|
||||
```bash
|
||||
./cli-proxy-api --gemini-web-auth
|
||||
```
|
||||
程序将提示您输入 `__Secure-1PSID` 和 `__Secure-1PSIDTS` 的值。请从您的浏览器开发者工具中获取这些 Cookie。
|
||||
|
||||
- OpenAI(Codex/GPT,OAuth):
|
||||
```bash
|
||||
./cli-proxy-api --codex-login
|
||||
@@ -126,6 +130,12 @@ CLIProxyAPI 的基于 Web 的管理中心。
|
||||
```
|
||||
选项:加上 `--no-browser` 可打印登录地址而不自动打开浏览器。使用 Qwen Chat 的 OAuth 设备登录流程。
|
||||
|
||||
- iFlow(iFlow,OAuth):
|
||||
```bash
|
||||
./cli-proxy-api --iflow-login
|
||||
```
|
||||
选项:加上 `--no-browser` 可打印登录地址而不自动打开浏览器。本地 OAuth 回调端口为 `11451`。
|
||||
|
||||
### 启动服务器
|
||||
|
||||
身份验证完成后,启动服务器:
|
||||
@@ -166,7 +176,7 @@ POST http://localhost:8317/v1/chat/completions
|
||||
```
|
||||
|
||||
说明:
|
||||
- 使用 "gemini-*" 模型(例如 "gemini-2.5-pro")来调用 Gemini,使用 "gpt-*" 模型(例如 "gpt-5")来调用 OpenAI,使用 "claude-*" 模型(例如 "claude-3-5-sonnet-20241022")来调用 Claude,或者使用 "qwen-*" 模型(例如 "qwen3-coder-plus")来调用 Qwen。代理服务会自动将请求路由到相应的提供商。
|
||||
- 使用 "gemini-*" 模型(例如 "gemini-2.5-pro")来调用 Gemini,使用 "gpt-*" 模型(例如 "gpt-5")来调用 OpenAI,使用 "claude-*" 模型(例如 "claude-3-5-sonnet-20241022")来调用 Claude,使用 "qwen-*" 模型(例如 "qwen3-coder-plus")来调用 Qwen,或者使用 iFlow 支持的模型(例如 "tstars2.0"、"deepseek-v3.1"、"kimi-k2" 等)来调用 iFlow。代理服务会自动将请求路由到相应的提供商。
|
||||
|
||||
#### Claude 消息(SSE 兼容)
|
||||
|
||||
@@ -259,15 +269,29 @@ console.log(await claudeResponse.json());
|
||||
- gemini-2.5-pro
|
||||
- gemini-2.5-flash
|
||||
- gemini-2.5-flash-lite
|
||||
- gemini-2.5-flash-image
|
||||
- gemini-2.5-flash-image-preview
|
||||
- gpt-5
|
||||
- gpt-5-codex
|
||||
- claude-opus-4-1-20250805
|
||||
- claude-opus-4-20250514
|
||||
- claude-sonnet-4-20250514
|
||||
- claude-sonnet-4-5-20250929
|
||||
- claude-3-7-sonnet-20250219
|
||||
- claude-3-5-haiku-20241022
|
||||
- qwen3-coder-plus
|
||||
- qwen3-coder-flash
|
||||
- qwen3-max
|
||||
- qwen3-vl-plus
|
||||
- deepseek-v3.2
|
||||
- deepseek-v3.1
|
||||
- deepseek-r1
|
||||
- deepseek-v3
|
||||
- kimi-k2
|
||||
- glm-4.5
|
||||
- glm-4.6
|
||||
- tstars2.0
|
||||
- 以及其他 iFlow 支持的模型
|
||||
- Gemini 模型在需要时自动切换到对应的 preview 版本
|
||||
|
||||
## Configuration

@@ -288,37 +312,33 @@ console.log(await claudeResponse.json());

| `request-retry` | integer | 0 | Number of request retries. A retry is triggered when the HTTP response code is 403, 408, 500, 502, 503, or 504. |
| `remote-management.allow-remote` | boolean | false | Whether remote (non-localhost) access to the Management API is allowed. When false, only local access is permitted; local access still requires the management key. |
| `remote-management.secret-key` | string | "" | Management key. If configured as plaintext, it is bcrypt-hashed at startup and written back to the config file. If empty, the Management API is disabled entirely (404). |
| `remote-management.disable-control-panel` | boolean | false | When true, `management.html` is no longer downloaded and `/management.html` returns 404, disabling the built-in management UI. |
| `quota-exceeded` | object | {} | Configuration for handling quota-exceeded conditions. |
| `quota-exceeded.switch-project` | boolean | true | Whether to automatically switch to another project when a quota is exceeded. |
| `quota-exceeded.switch-preview-model` | boolean | true | Whether to automatically switch to a preview model when a quota is exceeded. |
| `debug` | boolean | false | Enable debug mode for verbose logging. |
| `logging-to-file` | boolean | true | Whether to write application logs to rotating files; when false, logs go to stdout/stderr. |
| `usage-statistics-enabled` | boolean | true | Whether to keep in-memory usage statistics; when false, all statistics are discarded. |
| `auth` | object | {} | Request authentication configuration. |
| `auth.providers` | object[] | [] | List of authentication providers; the built-in `config-api-key` provider supports inline keys. |
| `auth.providers.*.name` | string | "" | Provider instance name. |
| `auth.providers.*.type` | string | "" | Provider implementation identifier (for example `config-api-key`). |
| `auth.providers.*.api-keys` | string[] | [] | Inline keys used by the `config-api-key` provider. |
| `api-keys` | string[] | [] | Legacy shorthand kept for backward compatibility; automatically synced to the default `config-api-key` provider. |
| `generative-language-api-key` | string[] | [] | List of Generative Language API keys. |
| `codex-api-key` | object | {} | List of Codex API keys. |
| `codex-api-key.api-key` | string | "" | Codex API key. |
| `codex-api-key.base-url` | string | "" | Custom Codex API endpoint. |
| `claude-api-key` | object | {} | List of Claude API keys. |
| `claude-api-key.api-key` | string | "" | Claude API key. |
| `claude-api-key.base-url` | string | "" | Custom Claude API endpoint, for third-party endpoints. |
| `openai-compatibility` | object[] | [] | Configuration for upstream OpenAI-compatible providers (name, base URL, API keys, models). |
| `openai-compatibility.*.name` | string | "" | Name of the provider. It is used in the User Agent and elsewhere. |
| `openai-compatibility.*.base-url` | string | "" | Base URL of the provider. |
| `openai-compatibility.*.api-keys` | string[] | [] | API keys for the provider. Add multiple keys if needed; omit if unauthenticated access is allowed. |
| `openai-compatibility.*.models` | object[] | [] | The actual model names. |
| `openai-compatibility.*.models.*.name` | string | "" | A model supported by the provider. |
| `openai-compatibility.*.models.*.alias` | string | "" | The alias exposed through the API. |
| `gemini-web` | object | {} | Gemini Web client specific configuration. |
| `gemini-web.context` | boolean | true | Whether to reuse conversation context for continuous conversations. |
| `gemini-web.code-mode` | boolean | false | Whether to enable code mode, which optimizes responses for code-related tasks. |
| `gemini-web.max-chars-per-request` | integer | 1,000,000 | Maximum number of characters sent to Gemini Web in a single request. |
| `gemini-web.disable-continuation-hint` | boolean | false | Whether to disable the continuation hint when a prompt is split. |
| `codex-api-key` | object | {} | List of Codex API keys. |
| `codex-api-key.api-key` | string | "" | Codex API key. |
| `codex-api-key.base-url` | string | "" | Custom Codex API endpoint. |
| `codex-api-key.proxy-url` | string | "" | Proxy URL for this specific API key. Overrides the global proxy-url setting. Supports socks5/http/https. |
| `claude-api-key` | object | {} | List of Claude API keys. |
| `claude-api-key.api-key` | string | "" | Claude API key. |
| `claude-api-key.base-url` | string | "" | Custom Claude API endpoint, for third-party endpoints. |
| `claude-api-key.proxy-url` | string | "" | Proxy URL for this specific API key. Overrides the global proxy-url setting. Supports socks5/http/https. |
| `openai-compatibility` | object[] | [] | Configuration for upstream OpenAI-compatible providers (name, base URL, API keys, models). |
| `openai-compatibility.*.name` | string | "" | Name of the provider. It is used in the User Agent and elsewhere. |
| `openai-compatibility.*.base-url` | string | "" | Base URL of the provider. |
| `openai-compatibility.*.api-keys` | string[] | [] | (Deprecated) API keys for the provider. Prefer `api-key-entries` for per-key proxy support. |
| `openai-compatibility.*.api-key-entries` | object[] | [] | API key entries with optional per-key proxy configuration. Takes precedence over `api-keys`. |
| `openai-compatibility.*.api-key-entries.*.api-key` | string | "" | The API key for this entry. |
| `openai-compatibility.*.api-key-entries.*.proxy-url` | string | "" | Proxy URL for this specific API key. Overrides the global proxy-url setting. Supports socks5/http/https. |
| `openai-compatibility.*.models` | object[] | [] | The actual model names. |
| `openai-compatibility.*.models.*.name` | string | "" | A model supported by the provider. |
| `openai-compatibility.*.models.*.alias` | string | "" | The alias exposed through the API. |

### Example configuration file

@@ -336,9 +356,17 @@ remote-management:
  # If empty, the whole /v0/management surface is disabled (404).
  secret-key: ""

  # When true, the management panel file is not downloaded and /management.html returns 404.
  disable-control-panel: false

# Authentication directory (supports ~ for the home directory). On Windows, a path such as `C:/cli-proxy-api/` is recommended.
auth-dir: "~/.cli-proxy-api"

# API keys used for request authentication
api-keys:
  - "your-api-key-1"
  - "your-api-key-2"

# Enable debug logging
debug: false

@@ -360,21 +388,6 @@ quota-exceeded:
  switch-project: true # Whether to automatically switch to another project when a quota is exceeded
  switch-preview-model: true # Whether to automatically switch to a preview model when a quota is exceeded

# Gemini Web client configuration
gemini-web:
  context: true # Enable conversation context reuse
  code-mode: false # Enable code mode
  max-chars-per-request: 1000000 # Maximum characters per request

# Request authentication providers
auth:
  providers:
    - name: "default"
      type: "config-api-key"
      api-keys:
        - "your-api-key-1"
        - "your-api-key-2"

# API keys for the AI Studio Gemini API
generative-language-api-key:
  - "AIzaSy...01"
@@ -386,35 +399,81 @@ generative-language-api-key:
codex-api-key:
  - api-key: "sk-atSM..."
    base-url: "https://www.example.com" # Third-party Codex API relay endpoint
    proxy-url: "socks5://proxy.example.com:1080" # Optional: per-key proxy override

# Claude API keys
claude-api-key:
  - api-key: "sk-atSM..." # No base-url is needed when using the official Claude API
  - api-key: "sk-atSM..." # No base-url is needed when using the official Claude API
  - api-key: "sk-atSM..."
    base-url: "https://www.example.com" # Third-party Claude API relay endpoint
    proxy-url: "socks5://proxy.example.com:1080" # Optional: per-key proxy override

# OpenAI-compatible providers
openai-compatibility:
  - name: "openrouter" # Name of the provider; it is used in the user agent and elsewhere.
    base-url: "https://openrouter.ai/api/v1" # API keys for the provider; see below.
    api-keys: # API keys for the provider. Add multiple keys if needed; omit if unauthenticated access is allowed.
      - "sk-or-v1-...b780"
      - "sk-or-v1-...b781"
    # New format with per-key proxy support (recommended):
    api-key-entries:
      - api-key: "sk-or-v1-...b780"
        proxy-url: "socks5://proxy.example.com:1080" # Optional: per-key proxy override
      - api-key: "sk-or-v1-...b781" # Without an extra proxy override
    # Legacy format (still supported, but cannot specify a proxy per key):
    # api-keys:
    #   - "sk-or-v1-...b780"
    #   - "sk-or-v1-...b781"
    models: # The models supported by the provider.
      - name: "moonshotai/kimi-k2:free" # The actual model name.
        alias: "kimi-k2" # The alias used in the API.
```

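To run the proxy against an edited copy of this example, pass the file via the `--config` flag. This is a minimal sketch; it assumes the binary was built locally as `./cli-proxy-api` in the repository root, so adjust the name and path to match your build.

```bash
# Minimal local run against a copy of the example configuration.
# Assumption: the server binary was built locally as ./cli-proxy-api.
cp config.example.yaml config.yaml    # then edit api-keys, auth-dir, etc.
./cli-proxy-api --config ./config.yaml
```
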
### Git-backed configuration and token storage

The application can be configured to use a Git repository as the backend for storing the `config.yaml` configuration file and the authentication tokens from the `auth-dir` directory. This allows centralized, version-controlled management of your configuration.

To enable this feature, set the `GITSTORE_GIT_URL` environment variable to the URL of your Git repository.

**Environment variables**

| Variable | Required | Default | Description |
| ----------------------- | -------- | ------------------------- | ----------------------------------------------------------------------------------------------- |
| `GITSTORE_GIT_URL` | Yes | | HTTPS URL of the Git repository to use. |
| `GITSTORE_LOCAL_PATH` | No | Current working directory | Local path where the Git repository is cloned. Inside Docker this defaults to `/CLIProxyAPI`. |
| `GITSTORE_GIT_USERNAME` | No | | Username used for Git authentication. |
| `GITSTORE_GIT_TOKEN` | No | | Personal access token (or password) used for Git authentication. |

**How it works**

1. **Clone:** On startup, the application clones the remote Git repository into `GITSTORE_LOCAL_PATH`.
2. **Configuration:** It then looks for a `config.yaml` file in the `config` directory inside the cloned repository.
3. **Bootstrap:** If `config/config.yaml` does not exist in the repository, the application copies the local `config.example.yaml` to that location, then commits and pushes it to the remote repository as the initial configuration. You must make sure the `config.example.yaml` file is available.
4. **Token sync:** The `auth-dir` is also managed inside this repository. Any change to the authentication tokens (for example, through a new login) is automatically committed and pushed to the remote Git repository.

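For a non-Docker run, the same variables can simply be exported before starting the server. This is a sketch under the assumption that the binary is named `cli-proxy-api`; the token value is a placeholder.

```bash
# Sketch: enable the Git-backed store for a local (non-Docker) run.
# GITSTORE_GIT_USERNAME and GITSTORE_LOCAL_PATH are optional (see the table above).
export GITSTORE_GIT_URL="https://github.com/your/config-repo.git"
export GITSTORE_GIT_TOKEN="your_personal_access_token"
export GITSTORE_LOCAL_PATH="$HOME/cli-proxy-api-store"  # falls back to the working directory when unset
./cli-proxy-api  # assumed binary name; the config is then read from the cloned repository
```
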
### OpenAI-compatible upstream providers

Configure upstream OpenAI-compatible providers (for example OpenRouter) via `openai-compatibility`.

- name: internal identifier
- base-url: base URL of the provider
- api-keys: optional, multiple keys rotated in turn (may be omitted if the provider allows unauthenticated access)
- api-key-entries: list of API key entries with optional per-key proxy configuration (recommended)
- api-keys: (deprecated) plain list of API keys, without per-key proxy support
- models: map the upstream model `name` to a locally exposed `alias`

Example with per-key proxy configuration:

```yaml
openai-compatibility:
  - name: "openrouter"
    base-url: "https://openrouter.ai/api/v1"
    api-key-entries:
      - api-key: "sk-or-v1-...b780"
        proxy-url: "socks5://proxy.example.com:1080"
      - api-key: "sk-or-v1-...b781"
    models:
      - name: "moonshotai/kimi-k2:free"
        alias: "kimi-k2"
```

Legacy format (still supported):

```yaml
openai-compatibility:
@@ -518,6 +577,14 @@ export ANTHROPIC_MODEL=qwen3-coder-plus
export ANTHROPIC_SMALL_FAST_MODEL=qwen3-coder-flash
```

Using iFlow models:
```bash
export ANTHROPIC_BASE_URL=http://127.0.0.1:8317
export ANTHROPIC_AUTH_TOKEN=sk-dummy
export ANTHROPIC_MODEL=qwen3-max
export ANTHROPIC_SMALL_FAST_MODEL=qwen3-235b-a22b-instruct
```

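Before starting Claude Code with these variables, it can be useful to confirm that the proxy actually answers on the configured base URL. The check below is only a sketch: it assumes the default port 8317 and an OpenAI-style `/v1/models` listing route, both of which may differ in your deployment.

```bash
# Reachability check for the proxy that Claude Code will talk to.
# Assumptions: proxy on 127.0.0.1:8317, an OpenAI-compatible /v1/models route,
# and a key accepted by the configured auth provider (sk-dummy is a placeholder).
curl -sS -H "Authorization: Bearer sk-dummy" http://127.0.0.1:8317/v1/models
```
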
## Codex multi-account load balancing

Start the CLI Proxy API server, then modify the `~/.codex/config.toml` and `~/.codex/auth.json` files.

@@ -549,12 +616,6 @@ auth.json:
docker run --rm -p 8085:8085 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --login
```

Run the following command to log in (Gemini Web cookies):

```bash
docker run -it --rm -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --gemini-web-auth
```

Run the following command to log in (OpenAI OAuth, port 1455):

```bash
@@ -573,6 +634,12 @@ docker run --rm -p 54545:54545 -v /path/to/your/config.yaml:/CLIProxyAPI/config.
docker run -it --rm -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --qwen-login
```

Run the following command to log in (iFlow OAuth, port 11451):

```bash
docker run --rm -p 11451:11451 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --iflow-login
```


Run the following command to start the server:

@@ -580,6 +647,18 @@ docker run -it -rm -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /pat
docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest
```

> [!NOTE]
> To use the Git-backed configuration store with Docker, pass the `GITSTORE_*` environment variables with the `-e` flag. For example:
>
> ```bash
> docker run --rm -p 8317:8317 \
>   -e GITSTORE_GIT_URL="https://github.com/your/config-repo.git" \
>   -e GITSTORE_GIT_TOKEN="your_personal_access_token" \
>   -v /path/to/your/git-store:/CLIProxyAPI/remote \
>   eceasy/cli-proxy-api:latest
> ```
> In this case you usually do not need to mount `config.yaml` or `auth-dir` directly, because they are managed by the Git store inside the container at `GITSTORE_LOCAL_PATH` (default `/CLIProxyAPI`; in this example it resolves to `/CLIProxyAPI/remote`).

## Running with Docker Compose

1. Clone the repository and enter the directory:

@@ -595,6 +674,27 @@ docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.ya
   ```
   *(Note for Windows users: you can use `copy config.example.yaml config.yaml` in CMD or PowerShell.)*

   To use the Git-backed configuration store with Docker Compose, add the `GITSTORE_*` environment variables under the `cli-proxy-api` service definition in `docker-compose.yml`. For example:
   ```yaml
   services:
     cli-proxy-api:
       image: eceasy/cli-proxy-api:latest
       container_name: cli-proxy-api
       ports:
         - "8317:8317"
         - "8085:8085"
         - "1455:1455"
         - "54545:54545"
         - "11451:11451"
       environment:
         - GITSTORE_GIT_URL=https://github.com/your/config-repo.git
         - GITSTORE_GIT_TOKEN=your_personal_access_token
       volumes:
         - ./git-store:/CLIProxyAPI/remote # GITSTORE_LOCAL_PATH
       restart: unless-stopped
   ```
   When using the Git store you usually do not need to mount `config.yaml` or `auth-dir` directly.

3. Start the services:
   - **For most users (recommended):**
     Run the following command to start the services using the pre-built image from Docker Hub. The services run in the background.
@@ -620,10 +720,6 @@ docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.ya
  ```bash
  docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --login
  ```
- **Gemini Web**:
  ```bash
  docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI --gemini-web-auth
  ```
- **OpenAI (Codex)**:
  ```bash
  docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --codex-login
@@ -636,6 +732,10 @@ docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.ya
  ```bash
  docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --qwen-login
  ```
- **iFlow**:
  ```bash
  docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --iflow-login
  ```

5. View the server logs:
   ```bash
@@ -669,6 +769,18 @@ docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.ya
4. Push to the branch (`git push origin feature/amazing-feature`)
5. Open a Pull Request

## Who is with us?

These projects are built on top of CLIProxyAPI:

### [vibeproxy](https://github.com/automazeio/vibeproxy)

A native macOS menu bar app that lets you use your Claude Code & ChatGPT subscriptions with AI coding tools, without API keys.

> [!NOTE]
> If you have built a project based on CLIProxyAPI, please submit a PR to add it to this list.


## License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

@@ -4,15 +4,21 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
configaccess "github.com/router-for-me/CLIProxyAPI/v6/internal/access/config_access"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/cmd"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/logging"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/store"
|
||||
_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/usage"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
@@ -21,9 +27,10 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
Version = "dev"
|
||||
Commit = "none"
|
||||
BuildDate = "unknown"
|
||||
Version = "dev"
|
||||
Commit = "none"
|
||||
BuildDate = "unknown"
|
||||
DefaultConfigPath = ""
|
||||
)
|
||||
|
||||
// init initializes the shared logger setup.
|
||||
@@ -42,7 +49,7 @@ func main() {
|
||||
var codexLogin bool
|
||||
var claudeLogin bool
|
||||
var qwenLogin bool
|
||||
var geminiWebAuth bool
|
||||
var iflowLogin bool
|
||||
var noBrowser bool
|
||||
var projectID string
|
||||
var configPath string
|
||||
@@ -53,10 +60,10 @@ func main() {
|
||||
flag.BoolVar(&codexLogin, "codex-login", false, "Login to Codex using OAuth")
|
||||
flag.BoolVar(&claudeLogin, "claude-login", false, "Login to Claude using OAuth")
|
||||
flag.BoolVar(&qwenLogin, "qwen-login", false, "Login to Qwen using OAuth")
|
||||
flag.BoolVar(&geminiWebAuth, "gemini-web-auth", false, "Auth Gemini Web using cookies")
|
||||
flag.BoolVar(&iflowLogin, "iflow-login", false, "Login to iFlow using OAuth")
|
||||
flag.BoolVar(&noBrowser, "no-browser", false, "Don't open browser automatically for OAuth")
|
||||
flag.StringVar(&projectID, "project_id", "", "Project ID (Gemini only, not required)")
|
||||
flag.StringVar(&configPath, "config", "", "Configure File Path")
|
||||
flag.StringVar(&configPath, "config", DefaultConfigPath, "Configure File Path")
|
||||
flag.StringVar(&password, "password", "", "")
|
||||
|
||||
flag.CommandLine.Usage = func() {
|
||||
@@ -67,7 +74,7 @@ func main() {
|
||||
return
|
||||
}
|
||||
s := fmt.Sprintf(" -%s", f.Name)
|
||||
name, usage := flag.UnquoteUsage(f)
|
||||
name, unquoteUsage := flag.UnquoteUsage(f)
|
||||
if name != "" {
|
||||
s += " " + name
|
||||
}
|
||||
@@ -76,8 +83,8 @@ func main() {
|
||||
} else {
|
||||
s += "\n "
|
||||
}
|
||||
if usage != "" {
|
||||
s += usage
|
||||
if unquoteUsage != "" {
|
||||
s += unquoteUsage
|
||||
}
|
||||
if f.DefValue != "" && f.DefValue != "false" && f.DefValue != "0" {
|
||||
s += fmt.Sprintf(" (default %s)", f.DefValue)
|
||||
@@ -92,26 +99,129 @@ func main() {
|
||||
// Core application variables.
|
||||
var err error
|
||||
var cfg *config.Config
|
||||
var wd string
|
||||
var isCloudDeploy bool
|
||||
var (
|
||||
gitStoreLocalPath string
|
||||
useGitStore bool
|
||||
gitStoreRemoteURL string
|
||||
gitStoreUser string
|
||||
gitStorePassword string
|
||||
gitStoreInst *store.GitTokenStore
|
||||
gitStoreRoot string
|
||||
)
|
||||
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to get working directory: %v", err)
|
||||
}
|
||||
|
||||
lookupEnv := func(keys ...string) (string, bool) {
|
||||
for _, key := range keys {
|
||||
if value, ok := os.LookupEnv(key); ok {
|
||||
if trimmed := strings.TrimSpace(value); trimmed != "" {
|
||||
return trimmed, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
if value, ok := lookupEnv("GITSTORE_GIT_URL", "gitstore_git_url"); ok {
|
||||
useGitStore = true
|
||||
gitStoreRemoteURL = value
|
||||
}
|
||||
if value, ok := lookupEnv("GITSTORE_GIT_USERNAME", "gitstore_git_username"); ok {
|
||||
gitStoreUser = value
|
||||
}
|
||||
if value, ok := lookupEnv("GITSTORE_GIT_TOKEN", "gitstore_git_token"); ok {
|
||||
gitStorePassword = value
|
||||
}
|
||||
if value, ok := lookupEnv("GITSTORE_LOCAL_PATH", "gitstore_local_path"); ok {
|
||||
gitStoreLocalPath = value
|
||||
}
|
||||
|
||||
// Check for cloud deploy mode only on first execution
|
||||
// Read env var name in uppercase: DEPLOY
|
||||
deployEnv := os.Getenv("DEPLOY")
|
||||
if deployEnv == "cloud" {
|
||||
isCloudDeploy = true
|
||||
}
|
||||
|
||||
// Determine and load the configuration file.
|
||||
// If a config path is provided via flags, it is used directly.
|
||||
// Otherwise, it defaults to "config.yaml" in the current working directory.
|
||||
// If gitstore is configured, load from the cloned repository; otherwise use the provided path or default.
|
||||
var configFilePath string
|
||||
if configPath != "" {
|
||||
if useGitStore {
|
||||
if gitStoreLocalPath == "" {
|
||||
gitStoreLocalPath = wd
|
||||
}
|
||||
gitStoreRoot = filepath.Join(gitStoreLocalPath, "remote")
|
||||
authDir := filepath.Join(gitStoreRoot, "auths")
|
||||
gitStoreInst = store.NewGitTokenStore(gitStoreRemoteURL, gitStoreUser, gitStorePassword)
|
||||
gitStoreInst.SetBaseDir(authDir)
|
||||
if errRepo := gitStoreInst.EnsureRepository(); errRepo != nil {
|
||||
log.Fatalf("failed to prepare git token store: %v", errRepo)
|
||||
}
|
||||
configFilePath = gitStoreInst.ConfigPath()
|
||||
if configFilePath == "" {
|
||||
configFilePath = filepath.Join(gitStoreRoot, "config", "config.yaml")
|
||||
}
|
||||
if _, statErr := os.Stat(configFilePath); errors.Is(statErr, fs.ErrNotExist) {
|
||||
examplePath := filepath.Join(wd, "config.example.yaml")
|
||||
if _, errExample := os.Stat(examplePath); errExample != nil {
|
||||
log.Fatalf("failed to find template config file: %v", errExample)
|
||||
}
|
||||
if errCopy := misc.CopyConfigTemplate(examplePath, configFilePath); errCopy != nil {
|
||||
log.Fatalf("failed to bootstrap git-backed config: %v", errCopy)
|
||||
}
|
||||
if errCommit := gitStoreInst.CommitConfig(context.Background()); errCommit != nil {
|
||||
log.Fatalf("failed to commit initial git-backed config: %v", errCommit)
|
||||
}
|
||||
log.Infof("git-backed config initialized from template: %s", configFilePath)
|
||||
} else if statErr != nil {
|
||||
log.Fatalf("failed to inspect git-backed config: %v", statErr)
|
||||
}
|
||||
cfg, err = config.LoadConfigOptional(configFilePath, isCloudDeploy)
|
||||
if err == nil {
|
||||
cfg.AuthDir = gitStoreInst.AuthDir()
|
||||
log.Infof("git-backed token store enabled, repository path: %s", gitStoreRoot)
|
||||
}
|
||||
} else if configPath != "" {
|
||||
configFilePath = configPath
|
||||
cfg, err = config.LoadConfig(configPath)
|
||||
cfg, err = config.LoadConfigOptional(configPath, isCloudDeploy)
|
||||
} else {
|
||||
wd, err = os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to get working directory: %v", err)
|
||||
}
|
||||
configFilePath = filepath.Join(wd, "config.yaml")
|
||||
cfg, err = config.LoadConfig(configFilePath)
|
||||
cfg, err = config.LoadConfigOptional(configFilePath, isCloudDeploy)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("failed to load config: %v", err)
|
||||
}
|
||||
if cfg == nil {
|
||||
cfg = &config.Config{}
|
||||
}
|
||||
|
||||
// In cloud deploy mode, check if we have a valid configuration
|
||||
var configFileExists bool
|
||||
if isCloudDeploy {
|
||||
if info, errStat := os.Stat(configFilePath); errStat != nil {
|
||||
// Don't mislead: API server will not start until configuration is provided.
|
||||
log.Info("Cloud deploy mode: No configuration file detected; standing by for configuration")
|
||||
configFileExists = false
|
||||
} else if info.IsDir() {
|
||||
log.Info("Cloud deploy mode: Config path is a directory; standing by for configuration")
|
||||
configFileExists = false
|
||||
} else if cfg.Port == 0 {
|
||||
// LoadConfigOptional returns empty config when file is empty or invalid.
|
||||
// Config file exists but is empty or invalid; treat as missing config
|
||||
log.Info("Cloud deploy mode: Configuration file is empty or invalid; standing by for valid configuration")
|
||||
configFileExists = false
|
||||
} else {
|
||||
log.Info("Cloud deploy mode: Configuration file detected; starting service")
|
||||
configFileExists = true
|
||||
}
|
||||
}
|
||||
usage.SetStatisticsEnabled(cfg.UsageStatisticsEnabled)
|
||||
|
||||
if err = logging.ConfigureLogOutput(cfg.LoggingToFile); err != nil {
|
||||
@@ -123,22 +233,10 @@ func main() {
|
||||
// Set the log level based on the configuration.
|
||||
util.SetLogLevel(cfg)
|
||||
|
||||
// Expand the tilde (~) in the auth directory path to the user's home directory.
|
||||
if strings.HasPrefix(cfg.AuthDir, "~") {
|
||||
home, errUserHomeDir := os.UserHomeDir()
|
||||
if errUserHomeDir != nil {
|
||||
log.Fatalf("failed to get home directory: %v", errUserHomeDir)
|
||||
}
|
||||
// Reconstruct the path by replacing the tilde with the user's home directory.
|
||||
remainder := strings.TrimPrefix(cfg.AuthDir, "~")
|
||||
remainder = strings.TrimLeft(remainder, "/\\")
|
||||
if remainder == "" {
|
||||
cfg.AuthDir = home
|
||||
} else {
|
||||
// Normalize any slash style in the remainder so Windows paths keep nested directories.
|
||||
normalized := strings.ReplaceAll(remainder, "\\", "/")
|
||||
cfg.AuthDir = filepath.Join(home, filepath.FromSlash(normalized))
|
||||
}
|
||||
if resolvedAuthDir, errResolveAuthDir := util.ResolveAuthDir(cfg.AuthDir); errResolveAuthDir != nil {
|
||||
log.Fatalf("failed to resolve auth directory: %v", errResolveAuthDir)
|
||||
} else {
|
||||
cfg.AuthDir = resolvedAuthDir
|
||||
}
|
||||
|
||||
// Create login options to be used in authentication flows.
|
||||
@@ -147,7 +245,14 @@ func main() {
|
||||
}
|
||||
|
||||
// Register the shared token store once so all components use the same persistence backend.
|
||||
sdkAuth.RegisterTokenStore(sdkAuth.NewFileTokenStore())
|
||||
if useGitStore {
|
||||
sdkAuth.RegisterTokenStore(gitStoreInst)
|
||||
} else {
|
||||
sdkAuth.RegisterTokenStore(sdkAuth.NewFileTokenStore())
|
||||
}
|
||||
|
||||
// Register built-in access providers before constructing services.
|
||||
configaccess.Register()
|
||||
|
||||
// Handle different command modes based on the provided flags.
|
||||
|
||||
@@ -162,9 +267,15 @@ func main() {
|
||||
cmd.DoClaudeLogin(cfg, options)
|
||||
} else if qwenLogin {
|
||||
cmd.DoQwenLogin(cfg, options)
|
||||
} else if geminiWebAuth {
|
||||
cmd.DoGeminiWebAuth(cfg)
|
||||
} else if iflowLogin {
|
||||
cmd.DoIFlowLogin(cfg, options)
|
||||
} else {
|
||||
// In cloud deploy mode without config file, just wait for shutdown signals
|
||||
if isCloudDeploy && !configFileExists {
|
||||
// No config file available, just wait for shutdown
|
||||
cmd.WaitForCloudDeploy()
|
||||
return
|
||||
}
|
||||
// Start the main proxy service
|
||||
cmd.StartService(cfg, configFilePath, password)
|
||||
}
|
||||
|
||||
@@ -12,17 +12,25 @@ remote-management:
|
||||
# Leave empty to disable the Management API entirely (404 for all /v0/management routes).
|
||||
secret-key: ""
|
||||
|
||||
# Disable the bundled management control panel asset download and HTTP route when true.
|
||||
disable-control-panel: false
|
||||
|
||||
# Authentication directory (supports ~ for home directory)
|
||||
auth-dir: "~/.cli-proxy-api"
|
||||
|
||||
# API keys for authentication
|
||||
api-keys:
|
||||
- "your-api-key-1"
|
||||
- "your-api-key-2"
|
||||
|
||||
# Enable debug logging
|
||||
debug: false
|
||||
|
||||
# When true, write application logs to rotating files instead of stdout
|
||||
logging-to-file: true
|
||||
logging-to-file: false
|
||||
|
||||
# When false, disable in-memory usage statistics aggregation
|
||||
usage-statistics-enabled: true
|
||||
usage-statistics-enabled: false
|
||||
|
||||
# Proxy URL. Supports socks5/http/https protocols. Example: socks5://user:pass@192.168.1.1:1080/
|
||||
proxy-url: ""
|
||||
@@ -35,58 +43,39 @@ quota-exceeded:
|
||||
switch-project: true # Whether to automatically switch to another project when a quota is exceeded
|
||||
switch-preview-model: true # Whether to automatically switch to a preview model when a quota is exceeded
|
||||
|
||||
# Request authentication providers
|
||||
auth:
|
||||
providers:
|
||||
- name: "default"
|
||||
type: "config-api-key"
|
||||
api-keys:
|
||||
- "your-api-key-1"
|
||||
- "your-api-key-2"
|
||||
|
||||
# API keys for official Generative Language API
|
||||
generative-language-api-key:
|
||||
- "AIzaSy...01"
|
||||
- "AIzaSy...02"
|
||||
- "AIzaSy...03"
|
||||
- "AIzaSy...04"
|
||||
#generative-language-api-key:
|
||||
# - "AIzaSy...01"
|
||||
# - "AIzaSy...02"
|
||||
# - "AIzaSy...03"
|
||||
# - "AIzaSy...04"
|
||||
|
||||
# Codex API keys
|
||||
codex-api-key:
|
||||
- api-key: "sk-atSM..."
|
||||
base-url: "https://www.example.com" # use the custom codex API endpoint
|
||||
#codex-api-key:
|
||||
# - api-key: "sk-atSM..."
|
||||
# base-url: "https://www.example.com" # use the custom codex API endpoint
|
||||
# proxy-url: "socks5://proxy.example.com:1080" # optional: per-key proxy override
|
||||
|
||||
# Claude API keys
|
||||
claude-api-key:
|
||||
- api-key: "sk-atSM..." # use the official claude API key, no need to set the base url
|
||||
- api-key: "sk-atSM..."
|
||||
base-url: "https://www.example.com" # use the custom claude API endpoint
|
||||
#claude-api-key:
|
||||
# - api-key: "sk-atSM..." # use the official claude API key, no need to set the base url
|
||||
# - api-key: "sk-atSM..."
|
||||
# base-url: "https://www.example.com" # use the custom claude API endpoint
|
||||
# proxy-url: "socks5://proxy.example.com:1080" # optional: per-key proxy override
|
||||
|
||||
# OpenAI compatibility providers
|
||||
openai-compatibility:
|
||||
- name: "openrouter" # The name of the provider; it will be used in the user agent and other places.
|
||||
base-url: "https://openrouter.ai/api/v1" # The base URL of the provider.
|
||||
api-keys: # The API keys for the provider. Add multiple keys if needed. Omit if unauthenticated access is allowed.
|
||||
- "sk-or-v1-...b780"
|
||||
- "sk-or-v1-...b781"
|
||||
models: # The models supported by the provider.
|
||||
- name: "moonshotai/kimi-k2:free" # The actual model name.
|
||||
alias: "kimi-k2" # The alias used in the API.
|
||||
|
||||
# Gemini Web settings
|
||||
gemini-web:
|
||||
# Conversation reuse: set to true to enable (default), false to disable.
|
||||
context: true
|
||||
# Maximum characters per single request to Gemini Web. Requests exceeding this
|
||||
# size split into chunks. Only the last chunk carries files and yields the final answer.
|
||||
max-chars-per-request: 1000000
|
||||
# Disable the short continuation hint appended to intermediate chunks
|
||||
# when splitting long prompts. Default is false (hint enabled by default).
|
||||
disable-continuation-hint: false
|
||||
# Code mode:
|
||||
# - true: enable XML wrapping hint and attach the coding-partner Gem.
|
||||
# Thought merging (<think> into visible content) applies to STREAMING only;
|
||||
# non-stream responses keep reasoning/thought parts separate for clients
|
||||
# that expect explicit reasoning fields.
|
||||
# - false: disable XML hint and keep <think> separate
|
||||
code-mode: false
|
||||
#openai-compatibility:
|
||||
# - name: "openrouter" # The name of the provider; it will be used in the user agent and other places.
|
||||
# base-url: "https://openrouter.ai/api/v1" # The base URL of the provider.
|
||||
# # New format with per-key proxy support (recommended):
|
||||
# api-key-entries:
|
||||
# - api-key: "sk-or-v1-...b780"
|
||||
# proxy-url: "socks5://proxy.example.com:1080" # optional: per-key proxy override
|
||||
# - api-key: "sk-or-v1-...b781" # without proxy-url
|
||||
# # Legacy format (still supported, but cannot specify proxy per key):
|
||||
# # api-keys:
|
||||
# # - "sk-or-v1-...b780"
|
||||
# # - "sk-or-v1-...b781"
|
||||
# models: # The models supported by the provider.
|
||||
# - name: "moonshotai/kimi-k2:free" # The actual model name.
|
||||
# alias: "kimi-k2" # The alias used in the API.
|
||||
|
||||
@@ -10,14 +10,16 @@ services:
|
||||
COMMIT: ${COMMIT:-none}
|
||||
BUILD_DATE: ${BUILD_DATE:-unknown}
|
||||
container_name: cli-proxy-api
|
||||
environment:
|
||||
DEPLOY: ${DEPLOY:-}
|
||||
ports:
|
||||
- "8317:8317"
|
||||
- "8085:8085"
|
||||
- "1455:1455"
|
||||
- "54545:54545"
|
||||
- "11451:11451"
|
||||
volumes:
|
||||
- ./config.yaml:/CLIProxyAPI/config.yaml
|
||||
- ./auths:/root/.cli-proxy-api
|
||||
- ./logs:/CLIProxyAPI/logs
|
||||
- ./conv:/CLIProxyAPI/conv
|
||||
restart: unless-stopped
|
||||
restart: unless-stopped
|
||||
|
||||
@@ -160,11 +160,7 @@ func main() {
|
||||
if dirSetter, ok := tokenStore.(interface{ SetBaseDir(string) }); ok {
|
||||
dirSetter.SetBaseDir(cfg.AuthDir)
|
||||
}
|
||||
store, ok := tokenStore.(coreauth.Store)
|
||||
if !ok {
|
||||
panic("token store does not implement coreauth.Store")
|
||||
}
|
||||
core := coreauth.NewManager(store, nil, nil)
|
||||
core := coreauth.NewManager(tokenStore, nil, nil)
|
||||
core.RegisterExecutor(MyExecutor{})
|
||||
|
||||
hooks := cliproxy.Hooks{
|
||||
|
||||
go.mod (29 lines changed)
@@ -1,49 +1,60 @@
|
||||
module github.com/router-for-me/CLIProxyAPI/v6
|
||||
|
||||
go 1.24
|
||||
go 1.24.0
|
||||
|
||||
require (
|
||||
github.com/fsnotify/fsnotify v1.9.0
|
||||
github.com/gin-gonic/gin v1.10.1
|
||||
github.com/go-git/go-git/v6 v6.0.0-20251009132922-75a182125145
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/klauspost/compress v1.17.3
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
|
||||
github.com/tidwall/gjson v1.18.0
|
||||
github.com/tidwall/sjson v1.2.5
|
||||
go.etcd.io/bbolt v1.3.8
|
||||
golang.org/x/crypto v0.36.0
|
||||
golang.org/x/net v0.37.1-0.20250305215238-2914f4677317
|
||||
golang.org/x/crypto v0.43.0
|
||||
golang.org/x/net v0.46.0
|
||||
golang.org/x/oauth2 v0.30.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/compute/metadata v0.3.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.3.0 // indirect
|
||||
github.com/bytedance/sonic v1.11.6 // indirect
|
||||
github.com/bytedance/sonic/loader v0.1.1 // indirect
|
||||
github.com/cloudflare/circl v1.6.1 // indirect
|
||||
github.com/cloudwego/base64x v0.1.4 // indirect
|
||||
github.com/cloudwego/iasm v0.2.0 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
|
||||
github.com/emirpasic/gods v1.18.1 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/go-git/gcfg/v2 v2.0.2 // indirect
|
||||
github.com/go-git/go-billy/v6 v6.0.0-20250627091229-31e2a16eef30 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.20.0 // indirect
|
||||
github.com/goccy/go-json v0.10.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.3 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
|
||||
github.com/kevinburke/ssh_config v1.4.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||
github.com/pjbgf/sha1cd v0.5.0 // indirect
|
||||
github.com/sergi/go-diff v1.4.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.0 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.12 // indirect
|
||||
golang.org/x/arch v0.8.0 // indirect
|
||||
golang.org/x/sys v0.31.0 // indirect
|
||||
golang.org/x/text v0.23.0 // indirect
|
||||
golang.org/x/sys v0.37.0 // indirect
|
||||
golang.org/x/text v0.30.0 // indirect
|
||||
google.golang.org/protobuf v1.34.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
)
|
||||
|
||||
go.sum (77 lines changed)
@@ -1,16 +1,32 @@
|
||||
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
|
||||
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
|
||||
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
|
||||
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
|
||||
github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
|
||||
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
|
||||
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
|
||||
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
|
||||
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
|
||||
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
|
||||
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
|
||||
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
|
||||
github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=
|
||||
github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
|
||||
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
|
||||
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
|
||||
@@ -19,6 +35,16 @@ github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.10.1 h1:T0ujvqyCSqRopADpgPgiTT63DUQVSfojyME59Ei63pQ=
|
||||
github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
|
||||
github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
|
||||
github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU=
|
||||
github.com/go-git/gcfg/v2 v2.0.2 h1:MY5SIIfTGGEMhdA7d7JePuVVxtKL7Hp+ApGDJAJ7dpo=
|
||||
github.com/go-git/gcfg/v2 v2.0.2/go.mod h1:/lv2NsxvhepuMrldsFilrgct6pxzpGdSRC13ydTLSLs=
|
||||
github.com/go-git/go-billy/v6 v6.0.0-20250627091229-31e2a16eef30 h1:4KqVJTL5eanN8Sgg3BV6f2/QzfZEFbCd+rTak1fGRRA=
|
||||
github.com/go-git/go-billy/v6 v6.0.0-20250627091229-31e2a16eef30/go.mod h1:snwvGrbywVFy2d6KJdQ132zapq4aLyzLMgpo79XdEfM=
|
||||
github.com/go-git/go-git-fixtures/v5 v5.1.1 h1:OH8i1ojV9bWfr0ZfasfpgtUXQHQyVS8HXik/V1C099w=
|
||||
github.com/go-git/go-git-fixtures/v5 v5.1.1/go.mod h1:Altk43lx3b1ks+dVoAG2300o5WWUnktvfY3VI6bcaXU=
|
||||
github.com/go-git/go-git/v6 v6.0.0-20251009132922-75a182125145 h1:C/oVxHd6KkkuvthQ/StZfHzZK07gl6xjfCfT3derko0=
|
||||
github.com/go-git/go-git/v6 v6.0.0-20251009132922-75a182125145/go.mod h1:gR+xpbL+o1wuJJDwRN4pOkpNwDS0D24Eo4AD5Aau2DY=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
@@ -29,6 +55,8 @@ github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBEx
|
||||
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
@@ -36,12 +64,20 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kevinburke/ssh_config v1.4.0 h1:6xxtP5bZ2E4NF5tuQulISpTO2z8XbtH8cg1PWkxoFkQ=
|
||||
github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M=
|
||||
github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
|
||||
github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
@@ -53,8 +89,14 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||
github.com/pjbgf/sha1cd v0.5.0 h1:a+UkboSi1znleCDUNT3M5YxjOnN1fz2FhN48FlwCxs0=
|
||||
github.com/pjbgf/sha1cd v0.5.0/go.mod h1:lhpGlyHLpQZoxMv8HcgXvZEhcGs0PG/vsZnEJ7H0iCM=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
|
||||
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
|
||||
@@ -64,13 +106,15 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
|
||||
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
@@ -84,32 +128,35 @@ github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
|
||||
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
|
||||
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
|
||||
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
|
||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/net v0.37.1-0.20250305215238-2914f4677317 h1:wneCP+2d9NUmndnyTmY7VwUNYiP26xiN/AtdcojQ1lI=
|
||||
golang.org/x/net v0.37.1-0.20250305215238-2914f4677317/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
|
||||
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
|
||||
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
|
||||
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
|
||||
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
|
||||
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
|
||||
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
|
||||
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
|
||||
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
@@ -1,27 +1,33 @@
|
||||
package configapikey
|
||||
package configaccess
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
||||
sdkconfig "github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
|
||||
)
|
||||
|
||||
var registerOnce sync.Once
|
||||
|
||||
// Register ensures the config-access provider is available to the access manager.
|
||||
func Register() {
|
||||
registerOnce.Do(func() {
|
||||
sdkaccess.RegisterProvider(sdkconfig.AccessProviderTypeConfigAPIKey, newProvider)
|
||||
})
|
||||
}
|
||||
|
||||
type provider struct {
|
||||
name string
|
||||
keys map[string]struct{}
|
||||
}
|
||||
|
||||
func init() {
|
||||
sdkaccess.RegisterProvider(config.AccessProviderTypeConfigAPIKey, newProvider)
|
||||
}
|
||||
|
||||
func newProvider(cfg *config.AccessProvider, _ *config.Config) (sdkaccess.Provider, error) {
|
||||
func newProvider(cfg *sdkconfig.AccessProvider, _ *sdkconfig.SDKConfig) (sdkaccess.Provider, error) {
|
||||
name := cfg.Name
|
||||
if name == "" {
|
||||
name = config.DefaultAccessProviderName
|
||||
name = sdkconfig.DefaultAccessProviderName
|
||||
}
|
||||
keys := make(map[string]struct{}, len(cfg.APIKeys))
|
||||
for _, key := range cfg.APIKeys {
|
||||
@@ -35,7 +41,7 @@ func newProvider(cfg *config.AccessProvider, _ *config.Config) (sdkaccess.Provid
|
||||
|
||||
func (p *provider) Identifier() string {
|
||||
if p == nil || p.name == "" {
|
||||
return config.DefaultAccessProviderName
|
||||
return sdkconfig.DefaultAccessProviderName
|
||||
}
|
||||
return p.name
|
||||
}
|
||||
internal/access/reconcile.go (new file, 270 lines)
@@ -0,0 +1,270 @@
|
||||
package access
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
||||
sdkConfig "github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ReconcileProviders builds the desired provider list by reusing existing providers when possible
|
||||
// and creating or removing providers only when their configuration changed. It returns the final
|
||||
// ordered provider slice along with the identifiers of providers that were added, updated, or
|
||||
// removed compared to the previous configuration.
|
||||
func ReconcileProviders(oldCfg, newCfg *config.Config, existing []sdkaccess.Provider) (result []sdkaccess.Provider, added, updated, removed []string, err error) {
|
||||
if newCfg == nil {
|
||||
return nil, nil, nil, nil, nil
|
||||
}
|
||||
|
||||
existingMap := make(map[string]sdkaccess.Provider, len(existing))
|
||||
for _, provider := range existing {
|
||||
if provider == nil {
|
||||
continue
|
||||
}
|
||||
existingMap[provider.Identifier()] = provider
|
||||
}
|
||||
|
||||
oldCfgMap := accessProviderMap(oldCfg)
|
||||
newEntries := collectProviderEntries(newCfg)
|
||||
|
||||
result = make([]sdkaccess.Provider, 0, len(newEntries))
|
||||
finalIDs := make(map[string]struct{}, len(newEntries))
|
||||
|
||||
isInlineProvider := func(id string) bool {
|
||||
return strings.EqualFold(id, sdkConfig.DefaultAccessProviderName)
|
||||
}
|
||||
appendChange := func(list *[]string, id string) {
|
||||
if isInlineProvider(id) {
|
||||
return
|
||||
}
|
||||
*list = append(*list, id)
|
||||
}
|
||||
|
||||
for _, providerCfg := range newEntries {
|
||||
key := providerIdentifier(providerCfg)
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
forceRebuild := strings.EqualFold(strings.TrimSpace(providerCfg.Type), sdkConfig.AccessProviderTypeConfigAPIKey)
|
||||
if oldCfgProvider, ok := oldCfgMap[key]; ok {
|
||||
isAliased := oldCfgProvider == providerCfg
|
||||
if !forceRebuild && !isAliased && providerConfigEqual(oldCfgProvider, providerCfg) {
|
||||
if existingProvider, okExisting := existingMap[key]; okExisting {
|
||||
result = append(result, existingProvider)
|
||||
finalIDs[key] = struct{}{}
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider, buildErr := sdkaccess.BuildProvider(providerCfg, &newCfg.SDKConfig)
|
||||
if buildErr != nil {
|
||||
return nil, nil, nil, nil, buildErr
|
||||
}
|
||||
if _, ok := oldCfgMap[key]; ok {
|
||||
if _, existed := existingMap[key]; existed {
|
||||
appendChange(&updated, key)
|
||||
} else {
|
||||
appendChange(&added, key)
|
||||
}
|
||||
} else {
|
||||
appendChange(&added, key)
|
||||
}
|
||||
result = append(result, provider)
|
||||
finalIDs[key] = struct{}{}
|
||||
}
|
||||
|
||||
if len(result) == 0 {
|
||||
if inline := sdkConfig.MakeInlineAPIKeyProvider(newCfg.APIKeys); inline != nil {
|
||||
key := providerIdentifier(inline)
|
||||
if key != "" {
|
||||
if oldCfgProvider, ok := oldCfgMap[key]; ok {
|
||||
if providerConfigEqual(oldCfgProvider, inline) {
|
||||
if existingProvider, okExisting := existingMap[key]; okExisting {
|
||||
result = append(result, existingProvider)
|
||||
finalIDs[key] = struct{}{}
|
||||
goto inlineDone
|
||||
}
|
||||
}
|
||||
}
|
||||
provider, buildErr := sdkaccess.BuildProvider(inline, &newCfg.SDKConfig)
|
||||
if buildErr != nil {
|
||||
return nil, nil, nil, nil, buildErr
|
||||
}
|
||||
if _, existed := existingMap[key]; existed {
|
||||
appendChange(&updated, key)
|
||||
} else if _, hadOld := oldCfgMap[key]; hadOld {
|
||||
appendChange(&updated, key)
|
||||
} else {
|
||||
appendChange(&added, key)
|
||||
}
|
||||
result = append(result, provider)
|
||||
finalIDs[key] = struct{}{}
|
||||
}
|
||||
}
|
||||
inlineDone:
|
||||
}
|
||||
|
||||
removedSet := make(map[string]struct{})
|
||||
for id := range existingMap {
|
||||
if _, ok := finalIDs[id]; !ok {
|
||||
if isInlineProvider(id) {
|
||||
continue
|
||||
}
|
||||
removedSet[id] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
removed = make([]string, 0, len(removedSet))
|
||||
for id := range removedSet {
|
||||
removed = append(removed, id)
|
||||
}
|
||||
|
||||
sort.Strings(added)
|
||||
sort.Strings(updated)
|
||||
sort.Strings(removed)
|
||||
|
||||
return result, added, updated, removed, nil
|
||||
}
|
||||
|
||||
// ApplyAccessProviders reconciles the configured access providers against the
|
||||
// currently registered providers and updates the manager. It logs a concise
|
||||
// summary of the detected changes and returns whether any provider changed.
|
||||
func ApplyAccessProviders(manager *sdkaccess.Manager, oldCfg, newCfg *config.Config) (bool, error) {
|
||||
if manager == nil || newCfg == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
existing := manager.Providers()
|
||||
providers, added, updated, removed, err := ReconcileProviders(oldCfg, newCfg, existing)
|
||||
if err != nil {
|
||||
log.Errorf("failed to reconcile request auth providers: %v", err)
|
||||
return false, fmt.Errorf("reconciling access providers: %w", err)
|
||||
}
|
||||
|
||||
manager.SetProviders(providers)
|
||||
|
||||
if len(added)+len(updated)+len(removed) > 0 {
|
||||
log.Debugf("auth providers reconciled (added=%d updated=%d removed=%d)", len(added), len(updated), len(removed))
|
||||
log.Debugf("auth providers changes details - added=%v updated=%v removed=%v", added, updated, removed)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
log.Debug("auth providers unchanged after config update")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func accessProviderMap(cfg *config.Config) map[string]*sdkConfig.AccessProvider {
|
||||
result := make(map[string]*sdkConfig.AccessProvider)
|
||||
if cfg == nil {
|
||||
return result
|
||||
}
|
||||
for i := range cfg.Access.Providers {
|
||||
providerCfg := &cfg.Access.Providers[i]
|
||||
if providerCfg.Type == "" {
|
||||
continue
|
||||
}
|
||||
key := providerIdentifier(providerCfg)
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
result[key] = providerCfg
|
||||
}
|
||||
if len(result) == 0 && len(cfg.APIKeys) > 0 {
|
||||
if provider := sdkConfig.MakeInlineAPIKeyProvider(cfg.APIKeys); provider != nil {
|
||||
if key := providerIdentifier(provider); key != "" {
|
||||
result[key] = provider
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func collectProviderEntries(cfg *config.Config) []*sdkConfig.AccessProvider {
|
||||
entries := make([]*sdkConfig.AccessProvider, 0, len(cfg.Access.Providers))
|
||||
for i := range cfg.Access.Providers {
|
||||
providerCfg := &cfg.Access.Providers[i]
|
||||
if providerCfg.Type == "" {
|
||||
continue
|
||||
}
|
||||
if key := providerIdentifier(providerCfg); key != "" {
|
||||
entries = append(entries, providerCfg)
|
||||
}
|
||||
}
|
||||
if len(entries) == 0 && len(cfg.APIKeys) > 0 {
|
||||
if inline := sdkConfig.MakeInlineAPIKeyProvider(cfg.APIKeys); inline != nil {
|
||||
entries = append(entries, inline)
|
||||
}
|
||||
}
|
||||
return entries
|
||||
}
|
||||
|
||||
func providerIdentifier(provider *sdkConfig.AccessProvider) string {
|
||||
if provider == nil {
|
||||
return ""
|
||||
}
|
||||
if name := strings.TrimSpace(provider.Name); name != "" {
|
||||
return name
|
||||
}
|
||||
typ := strings.TrimSpace(provider.Type)
|
||||
if typ == "" {
|
||||
return ""
|
||||
}
|
||||
if strings.EqualFold(typ, sdkConfig.AccessProviderTypeConfigAPIKey) {
|
||||
return sdkConfig.DefaultAccessProviderName
|
||||
}
|
||||
return typ
|
||||
}

func providerConfigEqual(a, b *sdkConfig.AccessProvider) bool {
    if a == nil || b == nil {
        return a == nil && b == nil
    }
    if !strings.EqualFold(strings.TrimSpace(a.Type), strings.TrimSpace(b.Type)) {
        return false
    }
    if strings.TrimSpace(a.SDK) != strings.TrimSpace(b.SDK) {
        return false
    }
    if !stringSetEqual(a.APIKeys, b.APIKeys) {
        return false
    }
    if len(a.Config) != len(b.Config) {
        return false
    }
    if len(a.Config) > 0 && !reflect.DeepEqual(a.Config, b.Config) {
        return false
    }
    return true
}

func stringSetEqual(a, b []string) bool {
    if len(a) != len(b) {
        return false
    }
    if len(a) == 0 {
        return true
    }
    seen := make(map[string]int, len(a))
    for _, val := range a {
        seen[val]++
    }
    for _, val := range b {
        count := seen[val]
        if count == 0 {
            return false
        }
        if count == 1 {
            delete(seen, val)
        } else {
            seen[val] = count - 1
        }
    }
    return len(seen) == 0
}
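
// Illustrative sketch (not part of the diff): stringSetEqual compares the two
// slices as multisets, so ordering is ignored but duplicate counts must match.
//
//     stringSetEqual([]string{"a", "b", "b"}, []string{"b", "a", "b"}) // true
//     stringSetEqual([]string{"a", "b", "b"}, []string{"a", "a", "b"}) // false: duplicate counts differ
//     stringSetEqual([]string{"a"}, []string{"a", "a"})                // false: lengths differ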
|
||||
@@ -1,26 +1,29 @@
|
||||
package management
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/claude"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/codex"
|
||||
geminiAuth "github.com/router-for-me/CLIProxyAPI/v6/internal/auth/gemini"
|
||||
iflowauth "github.com/router-for-me/CLIProxyAPI/v6/internal/auth/iflow"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/qwen"
|
||||
// legacy client removed
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
|
||||
@@ -37,6 +40,28 @@ var (
|
||||
|
||||
var lastRefreshKeys = []string{"last_refresh", "lastRefresh", "last_refreshed_at", "lastRefreshedAt"}
|
||||
|
||||
const (
    anthropicCallbackPort   = 54545
    geminiCallbackPort      = 8085
    codexCallbackPort       = 1455
    geminiCLIEndpoint       = "https://cloudcode-pa.googleapis.com"
    geminiCLIVersion        = "v1internal"
    geminiCLIUserAgent      = "google-api-nodejs-client/9.15.1"
    geminiCLIApiClient      = "gl-node/22.17.0"
    geminiCLIClientMetadata = "ideType=IDE_UNSPECIFIED,platform=PLATFORM_UNSPECIFIED,pluginType=GEMINI"
)

type callbackForwarder struct {
    provider string
    server   *http.Server
    done     chan struct{}
}

var (
    callbackForwardersMu sync.Mutex
    callbackForwarders   = make(map[int]*callbackForwarder)
)
|
||||
|
||||
func extractLastRefreshTimestamp(meta map[string]any) (time.Time, bool) {
|
||||
if len(meta) == 0 {
|
||||
return time.Time{}, false
|
||||
@@ -90,6 +115,120 @@ func parseLastRefreshValue(v any) (time.Time, bool) {
|
||||
return time.Time{}, false
|
||||
}
|
||||
|
||||
func isWebUIRequest(c *gin.Context) bool {
    raw := strings.TrimSpace(c.Query("is_webui"))
    if raw == "" {
        return false
    }
    switch strings.ToLower(raw) {
    case "1", "true", "yes", "on":
        return true
    default:
        return false
    }
}

func startCallbackForwarder(port int, provider, targetBase string) (*callbackForwarder, error) {
    callbackForwardersMu.Lock()
    prev := callbackForwarders[port]
    if prev != nil {
        delete(callbackForwarders, port)
    }
    callbackForwardersMu.Unlock()

    if prev != nil {
        stopForwarderInstance(port, prev)
    }

    addr := fmt.Sprintf("127.0.0.1:%d", port)
    ln, err := net.Listen("tcp", addr)
    if err != nil {
        return nil, fmt.Errorf("failed to listen on %s: %w", addr, err)
    }

    handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        target := targetBase
        if raw := r.URL.RawQuery; raw != "" {
            if strings.Contains(target, "?") {
                target = target + "&" + raw
            } else {
                target = target + "?" + raw
            }
        }
        w.Header().Set("Cache-Control", "no-store")
        http.Redirect(w, r, target, http.StatusFound)
    })

    srv := &http.Server{
        Handler:           handler,
        ReadHeaderTimeout: 5 * time.Second,
        WriteTimeout:      5 * time.Second,
    }
    done := make(chan struct{})

    go func() {
        if errServe := srv.Serve(ln); errServe != nil && !errors.Is(errServe, http.ErrServerClosed) {
            log.WithError(errServe).Warnf("callback forwarder for %s stopped unexpectedly", provider)
        }
        close(done)
    }()

    forwarder := &callbackForwarder{
        provider: provider,
        server:   srv,
        done:     done,
    }

    callbackForwardersMu.Lock()
    callbackForwarders[port] = forwarder
    callbackForwardersMu.Unlock()

    log.Infof("callback forwarder for %s listening on %s", provider, addr)

    return forwarder, nil
}

func stopCallbackForwarder(port int) {
    callbackForwardersMu.Lock()
    forwarder := callbackForwarders[port]
    if forwarder != nil {
        delete(callbackForwarders, port)
    }
    callbackForwardersMu.Unlock()

    stopForwarderInstance(port, forwarder)
}

func stopForwarderInstance(port int, forwarder *callbackForwarder) {
    if forwarder == nil || forwarder.server == nil {
        return
    }

    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()

    if err := forwarder.server.Shutdown(ctx); err != nil && !errors.Is(err, http.ErrServerClosed) {
        log.WithError(err).Warnf("failed to shut down callback forwarder on port %d", port)
    }

    select {
    case <-forwarder.done:
    case <-time.After(2 * time.Second):
    }

    log.Infof("callback forwarder on port %d stopped", port)
}

func (h *Handler) managementCallbackURL(path string) (string, error) {
    if h == nil || h.cfg == nil || h.cfg.Port <= 0 {
        return "", fmt.Errorf("server port is not configured")
    }
    if !strings.HasPrefix(path, "/") {
        path = "/" + path
    }
    return fmt.Sprintf("http://127.0.0.1:%d%s", h.cfg.Port, path), nil
}
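
// Illustrative sketch (not part of the diff): the pattern the Request*Token
// handlers follow when the request comes from the web UI. Port and path values
// mirror the constants above; the surrounding handler wiring is simplified.
//
// target, err := h.managementCallbackURL("/anthropic/callback")
// if err != nil {
//     return // management port not configured
// }
// if _, err = startCallbackForwarder(anthropicCallbackPort, "anthropic", target); err != nil {
//     return // port 54545 could not be bound
// }
// defer stopCallbackForwarder(anthropicCallbackPort)
// // The forwarder redirects http://127.0.0.1:54545/... (query string included)
// // to the management server's /anthropic/callback route.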
|
||||
|
||||
// List auth files
|
||||
func (h *Handler) ListAuthFiles(c *gin.Context) {
|
||||
entries, err := os.ReadDir(h.cfg.AuthDir)
|
||||
@@ -113,7 +252,9 @@ func (h *Handler) ListAuthFiles(c *gin.Context) {
|
||||
full := filepath.Join(h.cfg.AuthDir, name)
|
||||
if data, errRead := os.ReadFile(full); errRead == nil {
|
||||
typeValue := gjson.GetBytes(data, "type").String()
|
||||
emailValue := gjson.GetBytes(data, "email").String()
|
||||
fileData["type"] = typeValue
|
||||
fileData["email"] = emailValue
|
||||
}
|
||||
|
||||
files = append(files, fileData)
|
||||
@@ -344,7 +485,7 @@ func (h *Handler) disableAuth(ctx context.Context, id string) {
    }
}

func (h *Handler) saveTokenRecord(ctx context.Context, record *sdkAuth.TokenRecord) (string, error) {
func (h *Handler) saveTokenRecord(ctx context.Context, record *coreauth.Auth) (string, error) {
    if record == nil {
        return "", fmt.Errorf("token record is nil")
    }
@@ -353,7 +494,12 @@ func (h *Handler) saveTokenRecord(ctx context.Context, record *sdkAuth.TokenReco
        store = sdkAuth.GetTokenStore()
        h.tokenStore = store
    }
    return store.Save(ctx, h.cfg, record)
    if h.cfg != nil {
        if dirSetter, ok := store.(interface{ SetBaseDir(string) }); ok {
            dirSetter.SetBaseDir(h.cfg.AuthDir)
        }
    }
    return store.Save(ctx, record)
}
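
// Illustrative sketch (not part of the diff): the store is probed for an optional
// SetBaseDir method so a file-backed implementation can be re-pointed at
// cfg.AuthDir without widening the coreauth.Store interface. Any store that adds
// the method opts in automatically, e.g. (made-up type name):
//
// func (s *fileTokenStore) SetBaseDir(dir string) { s.baseDir = dir }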
|
||||
|
||||
func (h *Handler) RequestAnthropicToken(c *gin.Context) {
|
||||
@@ -384,9 +530,27 @@ func (h *Handler) RequestAnthropicToken(c *gin.Context) {
|
||||
log.Fatalf("Failed to generate authorization URL: %v", err)
|
||||
return
|
||||
}
|
||||
// Override redirect_uri in authorization URL to current server port
|
||||
|
||||
isWebUI := isWebUIRequest(c)
|
||||
if isWebUI {
|
||||
targetURL, errTarget := h.managementCallbackURL("/anthropic/callback")
|
||||
if errTarget != nil {
|
||||
log.WithError(errTarget).Error("failed to compute anthropic callback target")
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "callback server unavailable"})
|
||||
return
|
||||
}
|
||||
if _, errStart := startCallbackForwarder(anthropicCallbackPort, "anthropic", targetURL); errStart != nil {
|
||||
log.WithError(errStart).Error("failed to start anthropic callback forwarder")
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to start callback server"})
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
if isWebUI {
|
||||
defer stopCallbackForwarder(anthropicCallbackPort)
|
||||
}
|
||||
|
||||
// Helper: wait for callback file
|
||||
waitFile := filepath.Join(h.cfg.AuthDir, fmt.Sprintf(".oauth-anthropic-%s.oauth", state))
|
||||
waitForFile := func(path string, timeout time.Duration) (map[string]string, error) {
|
||||
@@ -449,7 +613,7 @@ func (h *Handler) RequestAnthropicToken(c *gin.Context) {
|
||||
}
|
||||
bodyJSON, _ := json.Marshal(bodyMap)
|
||||
|
||||
httpClient := util.SetProxy(h.cfg, &http.Client{})
|
||||
httpClient := util.SetProxy(&h.cfg.SDKConfig, &http.Client{})
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", "https://console.anthropic.com/v1/oauth/token", strings.NewReader(string(bodyJSON)))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
@@ -496,11 +660,12 @@ func (h *Handler) RequestAnthropicToken(c *gin.Context) {
|
||||
|
||||
// Create token storage
|
||||
tokenStorage := anthropicAuth.CreateTokenStorage(bundle)
|
||||
record := &sdkAuth.TokenRecord{
|
||||
record := &coreauth.Auth{
|
||||
ID: fmt.Sprintf("claude-%s.json", tokenStorage.Email),
|
||||
Provider: "claude",
|
||||
FileName: fmt.Sprintf("claude-%s.json", tokenStorage.Email),
|
||||
Storage: tokenStorage,
|
||||
Metadata: map[string]string{"email": tokenStorage.Email},
|
||||
Metadata: map[string]any{"email": tokenStorage.Email},
|
||||
}
|
||||
savedPath, errSave := h.saveTokenRecord(ctx, record)
|
||||
if errSave != nil {
|
||||
@@ -546,7 +711,26 @@ func (h *Handler) RequestGeminiCLIToken(c *gin.Context) {
|
||||
state := fmt.Sprintf("gem-%d", time.Now().UnixNano())
|
||||
authURL := conf.AuthCodeURL(state, oauth2.AccessTypeOffline, oauth2.SetAuthURLParam("prompt", "consent"))
|
||||
|
||||
isWebUI := isWebUIRequest(c)
|
||||
if isWebUI {
|
||||
targetURL, errTarget := h.managementCallbackURL("/google/callback")
|
||||
if errTarget != nil {
|
||||
log.WithError(errTarget).Error("failed to compute gemini callback target")
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "callback server unavailable"})
|
||||
return
|
||||
}
|
||||
if _, errStart := startCallbackForwarder(geminiCallbackPort, "gemini", targetURL); errStart != nil {
|
||||
log.WithError(errStart).Error("failed to start gemini callback forwarder")
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to start callback server"})
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
if isWebUI {
|
||||
defer stopCallbackForwarder(geminiCallbackPort)
|
||||
}
|
||||
|
||||
// Wait for callback file written by server route
|
||||
waitFile := filepath.Join(h.cfg.AuthDir, fmt.Sprintf(".oauth-gemini-%s.oauth", state))
|
||||
fmt.Println("Waiting for authentication callback...")
|
||||
@@ -586,6 +770,8 @@ func (h *Handler) RequestGeminiCLIToken(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
requestedProjectID := strings.TrimSpace(projectID)
|
||||
|
||||
// Create token storage (mirrors internal/auth/gemini createTokenStorage)
|
||||
httpClient := conf.Client(ctx, token)
|
||||
req, errNewRequest := http.NewRequestWithContext(ctx, "GET", "https://www.googleapis.com/oauth2/v1/userinfo?alt=json", nil)
|
||||
@@ -645,13 +831,14 @@ func (h *Handler) RequestGeminiCLIToken(c *gin.Context) {
|
||||
|
||||
ts := geminiAuth.GeminiTokenStorage{
|
||||
Token: ifToken,
|
||||
ProjectID: projectID,
|
||||
ProjectID: requestedProjectID,
|
||||
Email: email,
|
||||
Auto: requestedProjectID == "",
|
||||
}
|
||||
|
||||
// Initialize authenticated HTTP client via GeminiAuth to honor proxy settings
|
||||
gemAuth := geminiAuth.NewGeminiAuth()
|
||||
_, errGetClient := gemAuth.GetAuthenticatedClient(ctx, &ts, h.cfg, true)
|
||||
gemClient, errGetClient := gemAuth.GetAuthenticatedClient(ctx, &ts, h.cfg, true)
|
||||
if errGetClient != nil {
|
||||
log.Fatalf("failed to get authenticated client: %v", errGetClient)
|
||||
oauthStatus[state] = "Failed to get authenticated client"
|
||||
@@ -659,14 +846,44 @@ func (h *Handler) RequestGeminiCLIToken(c *gin.Context) {
|
||||
}
|
||||
fmt.Println("Authentication successful.")
|
||||
|
||||
record := &sdkAuth.TokenRecord{
|
||||
if errEnsure := ensureGeminiProjectAndOnboard(ctx, gemClient, &ts, requestedProjectID); errEnsure != nil {
|
||||
log.Errorf("Failed to complete Gemini CLI onboarding: %v", errEnsure)
|
||||
oauthStatus[state] = "Failed to complete Gemini CLI onboarding"
|
||||
return
|
||||
}
|
||||
|
||||
if strings.TrimSpace(ts.ProjectID) == "" {
|
||||
log.Error("Onboarding did not return a project ID")
|
||||
oauthStatus[state] = "Failed to resolve project ID"
|
||||
return
|
||||
}
|
||||
|
||||
isChecked, errCheck := checkCloudAPIIsEnabled(ctx, gemClient, ts.ProjectID)
|
||||
if errCheck != nil {
|
||||
log.Errorf("Failed to verify Cloud AI API status: %v", errCheck)
|
||||
oauthStatus[state] = "Failed to verify Cloud AI API status"
|
||||
return
|
||||
}
|
||||
ts.Checked = isChecked
|
||||
if !isChecked {
|
||||
log.Error("Cloud AI API is not enabled for the selected project")
|
||||
oauthStatus[state] = "Cloud AI API not enabled"
|
||||
return
|
||||
}
|
||||
|
||||
recordMetadata := map[string]any{
|
||||
"email": ts.Email,
|
||||
"project_id": ts.ProjectID,
|
||||
"auto": ts.Auto,
|
||||
"checked": ts.Checked,
|
||||
}
|
||||
|
||||
record := &coreauth.Auth{
|
||||
ID: fmt.Sprintf("gemini-%s-%s.json", ts.Email, ts.ProjectID),
|
||||
Provider: "gemini",
|
||||
FileName: fmt.Sprintf("gemini-%s.json", ts.Email),
|
||||
FileName: fmt.Sprintf("gemini-%s-%s.json", ts.Email, ts.ProjectID),
|
||||
Storage: &ts,
|
||||
Metadata: map[string]string{
|
||||
"email": ts.Email,
|
||||
"project_id": ts.ProjectID,
|
||||
},
|
||||
Metadata: recordMetadata,
|
||||
}
|
||||
savedPath, errSave := h.saveTokenRecord(ctx, record)
|
||||
if errSave != nil {
|
||||
@@ -683,64 +900,6 @@ func (h *Handler) RequestGeminiCLIToken(c *gin.Context) {
|
||||
c.JSON(200, gin.H{"status": "ok", "url": authURL, "state": state})
|
||||
}
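
// Illustrative sketch (not part of the diff): how a management client might drive
// one of these OAuth endpoints. The routes match the registrations further below;
// the polling behaviour is summarized and simplified for the example.
//
// 1. GET /v0/management/gemini-cli-auth-url?is_webui=true
//    -> {"status":"ok","url":"<Google consent URL>","state":"gem-..."}
// 2. Open the returned url in a browser and complete the consent screen.
// 3. Poll GET /v0/management/get-auth-status?state=gem-... until the recorded
//    status clears on success or carries a failure message.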
|
||||
|
||||
func (h *Handler) CreateGeminiWebToken(c *gin.Context) {
|
||||
ctx := c.Request.Context()
|
||||
|
||||
var payload struct {
|
||||
Secure1PSID string `json:"secure_1psid"`
|
||||
Secure1PSIDTS string `json:"secure_1psidts"`
|
||||
Label string `json:"label"`
|
||||
}
|
||||
if err := c.ShouldBindJSON(&payload); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid body"})
|
||||
return
|
||||
}
|
||||
payload.Secure1PSID = strings.TrimSpace(payload.Secure1PSID)
|
||||
payload.Secure1PSIDTS = strings.TrimSpace(payload.Secure1PSIDTS)
|
||||
payload.Label = strings.TrimSpace(payload.Label)
|
||||
if payload.Secure1PSID == "" {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "secure_1psid is required"})
|
||||
return
|
||||
}
|
||||
if payload.Secure1PSIDTS == "" {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "secure_1psidts is required"})
|
||||
return
|
||||
}
|
||||
if payload.Label == "" {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "label is required"})
|
||||
return
|
||||
}
|
||||
|
||||
sha := sha256.New()
|
||||
sha.Write([]byte(payload.Secure1PSID))
|
||||
hash := hex.EncodeToString(sha.Sum(nil))
|
||||
fileName := fmt.Sprintf("gemini-web-%s.json", hash[:16])
|
||||
|
||||
tokenStorage := &geminiAuth.GeminiWebTokenStorage{
|
||||
Secure1PSID: payload.Secure1PSID,
|
||||
Secure1PSIDTS: payload.Secure1PSIDTS,
|
||||
Label: payload.Label,
|
||||
}
|
||||
// Provide a stable label (gemini-web-<hash>) for logging and identification
|
||||
tokenStorage.Label = strings.TrimSuffix(fileName, ".json")
|
||||
|
||||
record := &sdkAuth.TokenRecord{
|
||||
Provider: "gemini-web",
|
||||
FileName: fileName,
|
||||
Storage: tokenStorage,
|
||||
}
|
||||
|
||||
savedPath, errSave := h.saveTokenRecord(ctx, record)
|
||||
if errSave != nil {
|
||||
log.Errorf("Failed to save Gemini Web token: %v", errSave)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to save token"})
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully saved Gemini Web token to: %s\n", savedPath)
|
||||
c.JSON(http.StatusOK, gin.H{"status": "ok", "file": filepath.Base(savedPath)})
|
||||
}
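
// Illustrative sketch (not part of the diff): request body accepted by
// POST /v0/management/gemini-web-token. Cookie values are placeholders.
//
// {
//   "secure_1psid":   "<SECURE_1PSID cookie>",
//   "secure_1psidts": "<SECURE_1PSIDTS cookie>",
//   "label":          "personal-account"
// }
//
// The label field is required in the payload, but the stored label is then
// replaced with the stable gemini-web-<hash> identifier derived from the
// SECURE_1PSID value.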
|
||||
|
||||
func (h *Handler) RequestCodexToken(c *gin.Context) {
|
||||
ctx := context.Background()
|
||||
|
||||
@@ -770,7 +929,26 @@ func (h *Handler) RequestCodexToken(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
isWebUI := isWebUIRequest(c)
|
||||
if isWebUI {
|
||||
targetURL, errTarget := h.managementCallbackURL("/codex/callback")
|
||||
if errTarget != nil {
|
||||
log.WithError(errTarget).Error("failed to compute codex callback target")
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "callback server unavailable"})
|
||||
return
|
||||
}
|
||||
if _, errStart := startCallbackForwarder(codexCallbackPort, "codex", targetURL); errStart != nil {
|
||||
log.WithError(errStart).Error("failed to start codex callback forwarder")
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to start callback server"})
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
if isWebUI {
|
||||
defer stopCallbackForwarder(codexCallbackPort)
|
||||
}
|
||||
|
||||
// Wait for callback file
|
||||
waitFile := filepath.Join(h.cfg.AuthDir, fmt.Sprintf(".oauth-codex-%s.oauth", state))
|
||||
deadline := time.Now().Add(5 * time.Minute)
|
||||
@@ -818,7 +996,7 @@ func (h *Handler) RequestCodexToken(c *gin.Context) {
|
||||
"redirect_uri": {"http://localhost:1455/auth/callback"},
|
||||
"code_verifier": {pkceCodes.CodeVerifier},
|
||||
}
|
||||
httpClient := util.SetProxy(h.cfg, &http.Client{})
|
||||
httpClient := util.SetProxy(&h.cfg.SDKConfig, &http.Client{})
|
||||
req, _ := http.NewRequestWithContext(ctx, "POST", "https://auth.openai.com/oauth/token", strings.NewReader(form.Encode()))
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
@@ -869,11 +1047,12 @@ func (h *Handler) RequestCodexToken(c *gin.Context) {
|
||||
|
||||
// Create token storage and persist
|
||||
tokenStorage := openaiAuth.CreateTokenStorage(bundle)
|
||||
record := &sdkAuth.TokenRecord{
|
||||
record := &coreauth.Auth{
|
||||
ID: fmt.Sprintf("codex-%s.json", tokenStorage.Email),
|
||||
Provider: "codex",
|
||||
FileName: fmt.Sprintf("codex-%s.json", tokenStorage.Email),
|
||||
Storage: tokenStorage,
|
||||
Metadata: map[string]string{
|
||||
Metadata: map[string]any{
|
||||
"email": tokenStorage.Email,
|
||||
"account_id": tokenStorage.AccountID,
|
||||
},
|
||||
@@ -926,11 +1105,12 @@ func (h *Handler) RequestQwenToken(c *gin.Context) {
|
||||
tokenStorage := qwenAuth.CreateTokenStorage(tokenData)
|
||||
|
||||
tokenStorage.Email = fmt.Sprintf("qwen-%d", time.Now().UnixMilli())
|
||||
record := &sdkAuth.TokenRecord{
|
||||
record := &coreauth.Auth{
|
||||
ID: fmt.Sprintf("qwen-%s.json", tokenStorage.Email),
|
||||
Provider: "qwen",
|
||||
FileName: fmt.Sprintf("qwen-%s.json", tokenStorage.Email),
|
||||
Storage: tokenStorage,
|
||||
Metadata: map[string]string{"email": tokenStorage.Email},
|
||||
Metadata: map[string]any{"email": tokenStorage.Email},
|
||||
}
|
||||
savedPath, errSave := h.saveTokenRecord(ctx, record)
|
||||
if errSave != nil {
|
||||
@@ -948,6 +1128,474 @@ func (h *Handler) RequestQwenToken(c *gin.Context) {
|
||||
c.JSON(200, gin.H{"status": "ok", "url": authURL, "state": state})
|
||||
}
|
||||
|
||||
func (h *Handler) RequestIFlowToken(c *gin.Context) {
|
||||
ctx := context.Background()
|
||||
|
||||
fmt.Println("Initializing iFlow authentication...")
|
||||
|
||||
state := fmt.Sprintf("ifl-%d", time.Now().UnixNano())
|
||||
authSvc := iflowauth.NewIFlowAuth(h.cfg)
|
||||
authURL, redirectURI := authSvc.AuthorizationURL(state, iflowauth.CallbackPort)
|
||||
|
||||
isWebUI := isWebUIRequest(c)
|
||||
if isWebUI {
|
||||
targetURL, errTarget := h.managementCallbackURL("/iflow/callback")
|
||||
if errTarget != nil {
|
||||
log.WithError(errTarget).Error("failed to compute iflow callback target")
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"status": "error", "error": "callback server unavailable"})
|
||||
return
|
||||
}
|
||||
if _, errStart := startCallbackForwarder(iflowauth.CallbackPort, "iflow", targetURL); errStart != nil {
|
||||
log.WithError(errStart).Error("failed to start iflow callback forwarder")
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"status": "error", "error": "failed to start callback server"})
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer stopCallbackForwarder(iflowauth.CallbackPort)
|
||||
fmt.Println("Waiting for authentication...")
|
||||
|
||||
waitFile := filepath.Join(h.cfg.AuthDir, fmt.Sprintf(".oauth-iflow-%s.oauth", state))
|
||||
deadline := time.Now().Add(5 * time.Minute)
|
||||
var resultMap map[string]string
|
||||
for {
|
||||
if time.Now().After(deadline) {
|
||||
oauthStatus[state] = "Authentication failed"
|
||||
fmt.Println("Authentication failed: timeout waiting for callback")
|
||||
return
|
||||
}
|
||||
if data, errR := os.ReadFile(waitFile); errR == nil {
|
||||
_ = os.Remove(waitFile)
|
||||
_ = json.Unmarshal(data, &resultMap)
|
||||
break
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
|
||||
if errStr := strings.TrimSpace(resultMap["error"]); errStr != "" {
|
||||
oauthStatus[state] = "Authentication failed"
|
||||
fmt.Printf("Authentication failed: %s\n", errStr)
|
||||
return
|
||||
}
|
||||
if resultState := strings.TrimSpace(resultMap["state"]); resultState != state {
|
||||
oauthStatus[state] = "Authentication failed"
|
||||
fmt.Println("Authentication failed: state mismatch")
|
||||
return
|
||||
}
|
||||
|
||||
code := strings.TrimSpace(resultMap["code"])
|
||||
if code == "" {
|
||||
oauthStatus[state] = "Authentication failed"
|
||||
fmt.Println("Authentication failed: code missing")
|
||||
return
|
||||
}
|
||||
|
||||
tokenData, errExchange := authSvc.ExchangeCodeForTokens(ctx, code, redirectURI)
|
||||
if errExchange != nil {
|
||||
oauthStatus[state] = "Authentication failed"
|
||||
fmt.Printf("Authentication failed: %v\n", errExchange)
|
||||
return
|
||||
}
|
||||
|
||||
tokenStorage := authSvc.CreateTokenStorage(tokenData)
|
||||
identifier := strings.TrimSpace(tokenStorage.Email)
|
||||
if identifier == "" {
|
||||
identifier = fmt.Sprintf("iflow-%d", time.Now().UnixMilli())
|
||||
tokenStorage.Email = identifier
|
||||
}
|
||||
record := &coreauth.Auth{
|
||||
ID: fmt.Sprintf("iflow-%s.json", identifier),
|
||||
Provider: "iflow",
|
||||
FileName: fmt.Sprintf("iflow-%s.json", identifier),
|
||||
Storage: tokenStorage,
|
||||
Metadata: map[string]any{"email": identifier, "api_key": tokenStorage.APIKey},
|
||||
Attributes: map[string]string{"api_key": tokenStorage.APIKey},
|
||||
}
|
||||
|
||||
savedPath, errSave := h.saveTokenRecord(ctx, record)
|
||||
if errSave != nil {
|
||||
oauthStatus[state] = "Failed to save authentication tokens"
|
||||
log.Fatalf("Failed to save authentication tokens: %v", errSave)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("Authentication successful! Token saved to %s\n", savedPath)
|
||||
if tokenStorage.APIKey != "" {
|
||||
fmt.Println("API key obtained and saved")
|
||||
}
|
||||
fmt.Println("You can now use iFlow services through this CLI")
|
||||
delete(oauthStatus, state)
|
||||
}()
|
||||
|
||||
oauthStatus[state] = ""
|
||||
c.JSON(http.StatusOK, gin.H{"status": "ok", "url": authURL, "state": state})
|
||||
return
|
||||
}
|
||||
|
||||
oauthServer := iflowauth.NewOAuthServer(iflowauth.CallbackPort)
|
||||
if err := oauthServer.Start(); err != nil {
|
||||
oauthStatus[state] = "Failed to start authentication server"
|
||||
log.Errorf("Failed to start iFlow OAuth server: %v", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"status": "error", "error": "failed to start local oauth server"})
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
fmt.Println("Waiting for authentication...")
|
||||
defer func() {
|
||||
stopCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
if err := oauthServer.Stop(stopCtx); err != nil {
|
||||
log.Warnf("Failed to stop iFlow OAuth server: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
result, err := oauthServer.WaitForCallback(5 * time.Minute)
|
||||
if err != nil {
|
||||
oauthStatus[state] = "Authentication failed"
|
||||
fmt.Printf("Authentication failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
if result.Error != "" {
|
||||
oauthStatus[state] = "Authentication failed"
|
||||
fmt.Printf("Authentication failed: %s\n", result.Error)
|
||||
return
|
||||
}
|
||||
|
||||
if result.State != state {
|
||||
oauthStatus[state] = "Authentication failed"
|
||||
fmt.Println("Authentication failed: state mismatch")
|
||||
return
|
||||
}
|
||||
|
||||
tokenData, errExchange := authSvc.ExchangeCodeForTokens(ctx, result.Code, redirectURI)
|
||||
if errExchange != nil {
|
||||
oauthStatus[state] = "Authentication failed"
|
||||
fmt.Printf("Authentication failed: %v\n", errExchange)
|
||||
return
|
||||
}
|
||||
|
||||
tokenStorage := authSvc.CreateTokenStorage(tokenData)
|
||||
identifier := strings.TrimSpace(tokenStorage.Email)
|
||||
if identifier == "" {
|
||||
identifier = fmt.Sprintf("iflow-%d", time.Now().UnixMilli())
|
||||
tokenStorage.Email = identifier
|
||||
}
|
||||
record := &coreauth.Auth{
|
||||
ID: fmt.Sprintf("iflow-%s.json", identifier),
|
||||
Provider: "iflow",
|
||||
FileName: fmt.Sprintf("iflow-%s.json", identifier),
|
||||
Storage: tokenStorage,
|
||||
Metadata: map[string]any{"email": identifier, "api_key": tokenStorage.APIKey},
|
||||
Attributes: map[string]string{"api_key": tokenStorage.APIKey},
|
||||
}
|
||||
|
||||
savedPath, errSave := h.saveTokenRecord(ctx, record)
|
||||
if errSave != nil {
|
||||
oauthStatus[state] = "Failed to save authentication tokens"
|
||||
log.Fatalf("Failed to save authentication tokens: %v", errSave)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("Authentication successful! Token saved to %s\n", savedPath)
|
||||
if tokenStorage.APIKey != "" {
|
||||
fmt.Println("API key obtained and saved")
|
||||
}
|
||||
fmt.Println("You can now use iFlow services through this CLI")
|
||||
delete(oauthStatus, state)
|
||||
}()
|
||||
|
||||
oauthStatus[state] = ""
|
||||
c.JSON(http.StatusOK, gin.H{"status": "ok", "url": authURL, "state": state})
|
||||
}
|
||||
|
||||
type projectSelectionRequiredError struct{}
|
||||
|
||||
func (e *projectSelectionRequiredError) Error() string {
|
||||
return "gemini cli: project selection required"
|
||||
}
|
||||
|
||||
func ensureGeminiProjectAndOnboard(ctx context.Context, httpClient *http.Client, storage *geminiAuth.GeminiTokenStorage, requestedProject string) error {
|
||||
if storage == nil {
|
||||
return fmt.Errorf("gemini storage is nil")
|
||||
}
|
||||
|
||||
trimmedRequest := strings.TrimSpace(requestedProject)
|
||||
if trimmedRequest == "" {
|
||||
projects, errProjects := fetchGCPProjects(ctx, httpClient)
|
||||
if errProjects != nil {
|
||||
return fmt.Errorf("fetch project list: %w", errProjects)
|
||||
}
|
||||
if len(projects) == 0 {
|
||||
return fmt.Errorf("no Google Cloud projects available for this account")
|
||||
}
|
||||
trimmedRequest = strings.TrimSpace(projects[0].ProjectID)
|
||||
if trimmedRequest == "" {
|
||||
return fmt.Errorf("resolved project id is empty")
|
||||
}
|
||||
storage.Auto = true
|
||||
} else {
|
||||
storage.Auto = false
|
||||
}
|
||||
|
||||
if err := performGeminiCLISetup(ctx, httpClient, storage, trimmedRequest); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if strings.TrimSpace(storage.ProjectID) == "" {
|
||||
storage.ProjectID = trimmedRequest
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func performGeminiCLISetup(ctx context.Context, httpClient *http.Client, storage *geminiAuth.GeminiTokenStorage, requestedProject string) error {
|
||||
metadata := map[string]string{
|
||||
"ideType": "IDE_UNSPECIFIED",
|
||||
"platform": "PLATFORM_UNSPECIFIED",
|
||||
"pluginType": "GEMINI",
|
||||
}
|
||||
|
||||
trimmedRequest := strings.TrimSpace(requestedProject)
|
||||
explicitProject := trimmedRequest != ""
|
||||
|
||||
loadReqBody := map[string]any{
|
||||
"metadata": metadata,
|
||||
}
|
||||
if explicitProject {
|
||||
loadReqBody["cloudaicompanionProject"] = trimmedRequest
|
||||
}
|
||||
|
||||
var loadResp map[string]any
|
||||
if errLoad := callGeminiCLI(ctx, httpClient, "loadCodeAssist", loadReqBody, &loadResp); errLoad != nil {
|
||||
return fmt.Errorf("load code assist: %w", errLoad)
|
||||
}
|
||||
|
||||
tierID := "legacy-tier"
|
||||
if tiers, okTiers := loadResp["allowedTiers"].([]any); okTiers {
|
||||
for _, rawTier := range tiers {
|
||||
tier, okTier := rawTier.(map[string]any)
|
||||
if !okTier {
|
||||
continue
|
||||
}
|
||||
if isDefault, okDefault := tier["isDefault"].(bool); okDefault && isDefault {
|
||||
if id, okID := tier["id"].(string); okID && strings.TrimSpace(id) != "" {
|
||||
tierID = strings.TrimSpace(id)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
projectID := trimmedRequest
|
||||
if projectID == "" {
|
||||
if id, okProject := loadResp["cloudaicompanionProject"].(string); okProject {
|
||||
projectID = strings.TrimSpace(id)
|
||||
}
|
||||
if projectID == "" {
|
||||
if projectMap, okProject := loadResp["cloudaicompanionProject"].(map[string]any); okProject {
|
||||
if id, okID := projectMap["id"].(string); okID {
|
||||
projectID = strings.TrimSpace(id)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if projectID == "" {
|
||||
return &projectSelectionRequiredError{}
|
||||
}
|
||||
|
||||
onboardReqBody := map[string]any{
|
||||
"tierId": tierID,
|
||||
"metadata": metadata,
|
||||
"cloudaicompanionProject": projectID,
|
||||
}
|
||||
|
||||
storage.ProjectID = projectID
|
||||
|
||||
for {
|
||||
var onboardResp map[string]any
|
||||
if errOnboard := callGeminiCLI(ctx, httpClient, "onboardUser", onboardReqBody, &onboardResp); errOnboard != nil {
|
||||
return fmt.Errorf("onboard user: %w", errOnboard)
|
||||
}
|
||||
|
||||
if done, okDone := onboardResp["done"].(bool); okDone && done {
|
||||
responseProjectID := ""
|
||||
if resp, okResp := onboardResp["response"].(map[string]any); okResp {
|
||||
switch projectValue := resp["cloudaicompanionProject"].(type) {
|
||||
case map[string]any:
|
||||
if id, okID := projectValue["id"].(string); okID {
|
||||
responseProjectID = strings.TrimSpace(id)
|
||||
}
|
||||
case string:
|
||||
responseProjectID = strings.TrimSpace(projectValue)
|
||||
}
|
||||
}
|
||||
|
||||
finalProjectID := projectID
|
||||
if responseProjectID != "" {
|
||||
if explicitProject && !strings.EqualFold(responseProjectID, projectID) {
|
||||
log.Warnf("Gemini onboarding returned project %s instead of requested %s; keeping requested project ID.", responseProjectID, projectID)
|
||||
} else {
|
||||
finalProjectID = responseProjectID
|
||||
}
|
||||
}
|
||||
|
||||
storage.ProjectID = strings.TrimSpace(finalProjectID)
|
||||
if storage.ProjectID == "" {
|
||||
storage.ProjectID = strings.TrimSpace(projectID)
|
||||
}
|
||||
if storage.ProjectID == "" {
|
||||
return fmt.Errorf("onboard user completed without project id")
|
||||
}
|
||||
log.Infof("Onboarding complete. Using Project ID: %s", storage.ProjectID)
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Println("Onboarding in progress, waiting 5 seconds...")
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
func callGeminiCLI(ctx context.Context, httpClient *http.Client, endpoint string, body any, result any) error {
|
||||
endPointURL := fmt.Sprintf("%s/%s:%s", geminiCLIEndpoint, geminiCLIVersion, endpoint)
|
||||
if strings.HasPrefix(endpoint, "operations/") {
|
||||
endPointURL = fmt.Sprintf("%s/%s", geminiCLIEndpoint, endpoint)
|
||||
}
|
||||
|
||||
var reader io.Reader
|
||||
if body != nil {
|
||||
rawBody, errMarshal := json.Marshal(body)
|
||||
if errMarshal != nil {
|
||||
return fmt.Errorf("marshal request body: %w", errMarshal)
|
||||
}
|
||||
reader = bytes.NewReader(rawBody)
|
||||
}
|
||||
|
||||
req, errRequest := http.NewRequestWithContext(ctx, http.MethodPost, endPointURL, reader)
|
||||
if errRequest != nil {
|
||||
return fmt.Errorf("create request: %w", errRequest)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("User-Agent", geminiCLIUserAgent)
|
||||
req.Header.Set("X-Goog-Api-Client", geminiCLIApiClient)
|
||||
req.Header.Set("Client-Metadata", geminiCLIClientMetadata)
|
||||
|
||||
resp, errDo := httpClient.Do(req)
|
||||
if errDo != nil {
|
||||
return fmt.Errorf("execute request: %w", errDo)
|
||||
}
|
||||
defer func() {
|
||||
if errClose := resp.Body.Close(); errClose != nil {
|
||||
log.Errorf("response body close error: %v", errClose)
|
||||
}
|
||||
}()
|
||||
|
||||
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
return fmt.Errorf("api request failed with status %d: %s", resp.StatusCode, strings.TrimSpace(string(bodyBytes)))
|
||||
}
|
||||
|
||||
if result == nil {
|
||||
_, _ = io.Copy(io.Discard, resp.Body)
|
||||
return nil
|
||||
}
|
||||
|
||||
if errDecode := json.NewDecoder(resp.Body).Decode(result); errDecode != nil {
|
||||
return fmt.Errorf("decode response body: %w", errDecode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func fetchGCPProjects(ctx context.Context, httpClient *http.Client) ([]interfaces.GCPProjectProjects, error) {
|
||||
req, errRequest := http.NewRequestWithContext(ctx, http.MethodGet, "https://cloudresourcemanager.googleapis.com/v1/projects", nil)
|
||||
if errRequest != nil {
|
||||
return nil, fmt.Errorf("could not create project list request: %w", errRequest)
|
||||
}
|
||||
|
||||
resp, errDo := httpClient.Do(req)
|
||||
if errDo != nil {
|
||||
return nil, fmt.Errorf("failed to execute project list request: %w", errDo)
|
||||
}
|
||||
defer func() {
|
||||
if errClose := resp.Body.Close(); errClose != nil {
|
||||
log.Errorf("response body close error: %v", errClose)
|
||||
}
|
||||
}()
|
||||
|
||||
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
return nil, fmt.Errorf("project list request failed with status %d: %s", resp.StatusCode, strings.TrimSpace(string(bodyBytes)))
|
||||
}
|
||||
|
||||
var projects interfaces.GCPProject
|
||||
if errDecode := json.NewDecoder(resp.Body).Decode(&projects); errDecode != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal project list: %w", errDecode)
|
||||
}
|
||||
|
||||
return projects.Projects, nil
|
||||
}
|
||||
|
||||
func checkCloudAPIIsEnabled(ctx context.Context, httpClient *http.Client, projectID string) (bool, error) {
|
||||
serviceUsageURL := "https://serviceusage.googleapis.com"
|
||||
requiredServices := []string{
|
||||
"cloudaicompanion.googleapis.com",
|
||||
}
|
||||
for _, service := range requiredServices {
|
||||
checkURL := fmt.Sprintf("%s/v1/projects/%s/services/%s", serviceUsageURL, projectID, service)
|
||||
req, errRequest := http.NewRequestWithContext(ctx, http.MethodGet, checkURL, nil)
|
||||
if errRequest != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", errRequest)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("User-Agent", geminiCLIUserAgent)
|
||||
resp, errDo := httpClient.Do(req)
|
||||
if errDo != nil {
|
||||
return false, fmt.Errorf("failed to execute request: %w", errDo)
|
||||
}
|
||||
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
if gjson.GetBytes(bodyBytes, "state").String() == "ENABLED" {
|
||||
_ = resp.Body.Close()
|
||||
continue
|
||||
}
|
||||
}
|
||||
_ = resp.Body.Close()
|
||||
|
||||
enableURL := fmt.Sprintf("%s/v1/projects/%s/services/%s:enable", serviceUsageURL, projectID, service)
|
||||
req, errRequest = http.NewRequestWithContext(ctx, http.MethodPost, enableURL, strings.NewReader("{}"))
|
||||
if errRequest != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", errRequest)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("User-Agent", geminiCLIUserAgent)
|
||||
resp, errDo = httpClient.Do(req)
|
||||
if errDo != nil {
|
||||
return false, fmt.Errorf("failed to execute request: %w", errDo)
|
||||
}
|
||||
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
errMessage := string(bodyBytes)
|
||||
errMessageResult := gjson.GetBytes(bodyBytes, "error.message")
|
||||
if errMessageResult.Exists() {
|
||||
errMessage = errMessageResult.String()
|
||||
}
|
||||
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusCreated {
|
||||
_ = resp.Body.Close()
|
||||
continue
|
||||
} else if resp.StatusCode == http.StatusBadRequest {
|
||||
_ = resp.Body.Close()
|
||||
if strings.Contains(strings.ToLower(errMessage), "already enabled") {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return false, fmt.Errorf("project activation required: %s", errMessage)
|
||||
}
|
||||
return true, nil
|
||||
}
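
// Illustrative sketch (not part of the diff): the Service Usage calls issued by
// checkCloudAPIIsEnabled. The project ID is a placeholder.
//
// GET  https://serviceusage.googleapis.com/v1/projects/<project-id>/services/cloudaicompanion.googleapis.com
//      -> 200 with {"state": "ENABLED"} means the service is already active.
// POST https://serviceusage.googleapis.com/v1/projects/<project-id>/services/cloudaicompanion.googleapis.com:enable
//      -> 200/201, or 400 whose error message contains "already enabled", counts
//         as success; anything else is reported as "project activation required".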
|
||||
|
||||
func (h *Handler) GetAuthStatus(c *gin.Context) {
|
||||
state := c.Query("state")
|
||||
if err, ok := oauthStatus[state]; ok {
|
||||
|
||||
@@ -3,6 +3,7 @@ package management
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
@@ -106,13 +107,16 @@ func (h *Handler) deleteFromStringList(c *gin.Context, target *[]string, after f
|
||||
// api-keys
|
||||
func (h *Handler) GetAPIKeys(c *gin.Context) { c.JSON(200, gin.H{"api-keys": h.cfg.APIKeys}) }
|
||||
func (h *Handler) PutAPIKeys(c *gin.Context) {
|
||||
h.putStringList(c, func(v []string) { config.SyncInlineAPIKeys(h.cfg, v) }, nil)
|
||||
h.putStringList(c, func(v []string) {
|
||||
h.cfg.APIKeys = append([]string(nil), v...)
|
||||
h.cfg.Access.Providers = nil
|
||||
}, nil)
|
||||
}
|
||||
func (h *Handler) PatchAPIKeys(c *gin.Context) {
|
||||
h.patchStringList(c, &h.cfg.APIKeys, func() { config.SyncInlineAPIKeys(h.cfg, h.cfg.APIKeys) })
|
||||
h.patchStringList(c, &h.cfg.APIKeys, func() { h.cfg.Access.Providers = nil })
|
||||
}
|
||||
func (h *Handler) DeleteAPIKeys(c *gin.Context) {
|
||||
h.deleteFromStringList(c, &h.cfg.APIKeys, func() { config.SyncInlineAPIKeys(h.cfg, h.cfg.APIKeys) })
|
||||
h.deleteFromStringList(c, &h.cfg.APIKeys, func() { h.cfg.Access.Providers = nil })
|
||||
}
|
||||
|
||||
// generative-language-api-key
|
||||
@@ -201,7 +205,7 @@ func (h *Handler) DeleteClaudeKey(c *gin.Context) {
|
||||
|
||||
// openai-compatibility: []OpenAICompatibility
|
||||
func (h *Handler) GetOpenAICompat(c *gin.Context) {
|
||||
c.JSON(200, gin.H{"openai-compatibility": h.cfg.OpenAICompatibility})
|
||||
c.JSON(200, gin.H{"openai-compatibility": normalizedOpenAICompatibilityEntries(h.cfg.OpenAICompatibility)})
|
||||
}
|
||||
func (h *Handler) PutOpenAICompat(c *gin.Context) {
|
||||
data, err := c.GetRawData()
|
||||
@@ -220,6 +224,9 @@ func (h *Handler) PutOpenAICompat(c *gin.Context) {
|
||||
}
|
||||
arr = obj.Items
|
||||
}
|
||||
for i := range arr {
|
||||
normalizeOpenAICompatibilityEntry(&arr[i])
|
||||
}
|
||||
h.cfg.OpenAICompatibility = arr
|
||||
h.persist(c)
|
||||
}
|
||||
@@ -233,6 +240,7 @@ func (h *Handler) PatchOpenAICompat(c *gin.Context) {
|
||||
c.JSON(400, gin.H{"error": "invalid body"})
|
||||
return
|
||||
}
|
||||
normalizeOpenAICompatibilityEntry(body.Value)
|
||||
if body.Index != nil && *body.Index >= 0 && *body.Index < len(h.cfg.OpenAICompatibility) {
|
||||
h.cfg.OpenAICompatibility[*body.Index] = *body.Value
|
||||
h.persist(c)
|
||||
@@ -346,3 +354,51 @@ func (h *Handler) DeleteCodexKey(c *gin.Context) {
|
||||
}
|
||||
c.JSON(400, gin.H{"error": "missing api-key or index"})
|
||||
}
|
||||
|
||||
func normalizeOpenAICompatibilityEntry(entry *config.OpenAICompatibility) {
    if entry == nil {
        return
    }
    existing := make(map[string]struct{}, len(entry.APIKeyEntries))
    for i := range entry.APIKeyEntries {
        trimmed := strings.TrimSpace(entry.APIKeyEntries[i].APIKey)
        entry.APIKeyEntries[i].APIKey = trimmed
        if trimmed != "" {
            existing[trimmed] = struct{}{}
        }
    }
    if len(entry.APIKeys) == 0 {
        return
    }
    for _, legacyKey := range entry.APIKeys {
        trimmed := strings.TrimSpace(legacyKey)
        if trimmed == "" {
            continue
        }
        if _, ok := existing[trimmed]; ok {
            continue
        }
        entry.APIKeyEntries = append(entry.APIKeyEntries, config.OpenAICompatibilityAPIKey{APIKey: trimmed})
        existing[trimmed] = struct{}{}
    }
    entry.APIKeys = nil
}

func normalizedOpenAICompatibilityEntries(entries []config.OpenAICompatibility) []config.OpenAICompatibility {
    if len(entries) == 0 {
        return nil
    }
    out := make([]config.OpenAICompatibility, len(entries))
    for i := range entries {
        copyEntry := entries[i]
        if len(copyEntry.APIKeyEntries) > 0 {
            copyEntry.APIKeyEntries = append([]config.OpenAICompatibilityAPIKey(nil), copyEntry.APIKeyEntries...)
        }
        if len(copyEntry.APIKeys) > 0 {
            copyEntry.APIKeys = append([]string(nil), copyEntry.APIKeys...)
        }
        normalizeOpenAICompatibilityEntry(&copyEntry)
        out[i] = copyEntry
    }
    return out
}
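
// Illustrative sketch (not part of the diff): normalizeOpenAICompatibilityEntry
// migrates the legacy flat api-keys list into api-key-entries, trimming
// whitespace and skipping duplicates. The key values below are made up.
//
// entry := config.OpenAICompatibility{
//     APIKeys:       []string{" sk-legacy ", "sk-existing"},
//     APIKeyEntries: []config.OpenAICompatibilityAPIKey{{APIKey: "sk-existing"}},
// }
// normalizeOpenAICompatibilityEntry(&entry)
// // entry.APIKeyEntries now holds "sk-existing" and "sk-legacy"; entry.APIKeys is nil.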
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"crypto/subtle"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -25,28 +26,33 @@ type attemptInfo struct {
|
||||
|
||||
// Handler aggregates config reference, persistence path and helpers.
|
||||
type Handler struct {
|
||||
cfg *config.Config
|
||||
configFilePath string
|
||||
mu sync.Mutex
|
||||
|
||||
attemptsMu sync.Mutex
|
||||
failedAttempts map[string]*attemptInfo // keyed by client IP
|
||||
authManager *coreauth.Manager
|
||||
usageStats *usage.RequestStatistics
|
||||
tokenStore sdkAuth.TokenStore
|
||||
|
||||
localPassword string
|
||||
cfg *config.Config
|
||||
configFilePath string
|
||||
mu sync.Mutex
|
||||
attemptsMu sync.Mutex
|
||||
failedAttempts map[string]*attemptInfo // keyed by client IP
|
||||
authManager *coreauth.Manager
|
||||
usageStats *usage.RequestStatistics
|
||||
tokenStore coreauth.Store
|
||||
localPassword string
|
||||
allowRemoteOverride bool
|
||||
envSecret string
|
||||
}
|
||||
|
||||
// NewHandler creates a new management handler instance.
|
||||
func NewHandler(cfg *config.Config, configFilePath string, manager *coreauth.Manager) *Handler {
|
||||
envSecret, _ := os.LookupEnv("MANAGEMENT_PASSWORD")
|
||||
envSecret = strings.TrimSpace(envSecret)
|
||||
|
||||
return &Handler{
|
||||
cfg: cfg,
|
||||
configFilePath: configFilePath,
|
||||
failedAttempts: make(map[string]*attemptInfo),
|
||||
authManager: manager,
|
||||
usageStats: usage.GetRequestStatistics(),
|
||||
tokenStore: sdkAuth.GetTokenStore(),
|
||||
cfg: cfg,
|
||||
configFilePath: configFilePath,
|
||||
failedAttempts: make(map[string]*attemptInfo),
|
||||
authManager: manager,
|
||||
usageStats: usage.GetRequestStatistics(),
|
||||
tokenStore: sdkAuth.GetTokenStore(),
|
||||
allowRemoteOverride: envSecret != "",
|
||||
envSecret: envSecret,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,6 +78,19 @@ func (h *Handler) Middleware() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
clientIP := c.ClientIP()
|
||||
localClient := clientIP == "127.0.0.1" || clientIP == "::1"
|
||||
cfg := h.cfg
|
||||
var (
|
||||
allowRemote bool
|
||||
secretHash string
|
||||
)
|
||||
if cfg != nil {
|
||||
allowRemote = cfg.RemoteManagement.AllowRemote
|
||||
secretHash = cfg.RemoteManagement.SecretKey
|
||||
}
|
||||
if h.allowRemoteOverride {
|
||||
allowRemote = true
|
||||
}
|
||||
envSecret := h.envSecret
|
||||
|
||||
fail := func() {}
|
||||
if !localClient {
|
||||
@@ -92,7 +111,7 @@ func (h *Handler) Middleware() gin.HandlerFunc {
|
||||
}
|
||||
h.attemptsMu.Unlock()
|
||||
|
||||
if !h.cfg.RemoteManagement.AllowRemote {
|
||||
if !allowRemote {
|
||||
c.AbortWithStatusJSON(http.StatusForbidden, gin.H{"error": "remote management disabled"})
|
||||
return
|
||||
}
|
||||
@@ -112,8 +131,7 @@ func (h *Handler) Middleware() gin.HandlerFunc {
|
||||
h.attemptsMu.Unlock()
|
||||
}
|
||||
}
|
||||
secret := h.cfg.RemoteManagement.SecretKey
|
||||
if secret == "" {
|
||||
if secretHash == "" && envSecret == "" {
|
||||
c.AbortWithStatusJSON(http.StatusForbidden, gin.H{"error": "remote management key not set"})
|
||||
return
|
||||
}
|
||||
@@ -149,7 +167,20 @@ func (h *Handler) Middleware() gin.HandlerFunc {
|
||||
}
|
||||
}
|
||||
|
||||
if err := bcrypt.CompareHashAndPassword([]byte(secret), []byte(provided)); err != nil {
|
||||
if envSecret != "" && subtle.ConstantTimeCompare([]byte(provided), []byte(envSecret)) == 1 {
|
||||
if !localClient {
|
||||
h.attemptsMu.Lock()
|
||||
if ai := h.failedAttempts[clientIP]; ai != nil {
|
||||
ai.count = 0
|
||||
ai.blockedUntil = time.Time{}
|
||||
}
|
||||
h.attemptsMu.Unlock()
|
||||
}
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
|
||||
if secretHash == "" || bcrypt.CompareHashAndPassword([]byte(secretHash), []byte(provided)) != nil {
|
||||
if !localClient {
|
||||
fail()
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ package middleware
import (
    "bytes"
    "io"
    "strings"

    "github.com/gin-gonic/gin"
    "github.com/router-for-me/CLIProxyAPI/v6/internal/logging"
@@ -17,6 +18,12 @@ import (
// logger, the middleware has minimal overhead.
func RequestLoggingMiddleware(logger logging.RequestLogger) gin.HandlerFunc {
    return func(c *gin.Context) {
        path := c.Request.URL.Path
        if strings.HasPrefix(path, "/v0/management") || path == "/keep-alive" {
            c.Next()
            return
        }

        // Early return if logging is disabled (zero overhead)
        if !logger.IsEnabled() {
            c.Next()

@@ -13,24 +13,29 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers/claude"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers/gemini"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/access"
|
||||
managementHandlers "github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers/management"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers/openai"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/middleware"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/logging"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/managementasset"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/usage"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers/claude"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers/gemini"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers/openai"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const oauthCallbackSuccessHTML = `<html><head><meta charset="utf-8"><title>Authentication successful</title><script>setTimeout(function(){window.close();},5000);</script></head><body><h1>Authentication successful!</h1><p>You can close this window.</p><p>This window will close automatically in 5 seconds.</p></body></html>`
|
||||
|
||||
type serverOptionConfig struct {
|
||||
extraMiddleware []gin.HandlerFunc
|
||||
engineConfigurator func(*gin.Engine)
|
||||
@@ -121,9 +126,20 @@ type Server struct {
|
||||
// configFilePath is the absolute path to the YAML config file for persistence.
|
||||
configFilePath string
|
||||
|
||||
// currentPath is the absolute path to the current working directory.
|
||||
currentPath string
|
||||
|
||||
// management handler
|
||||
mgmt *managementHandlers.Handler
|
||||
|
||||
// managementRoutesRegistered tracks whether the management routes have been attached to the engine.
|
||||
managementRoutesRegistered atomic.Bool
|
||||
// managementRoutesEnabled controls whether management endpoints serve real handlers.
|
||||
managementRoutesEnabled atomic.Bool
|
||||
|
||||
// envManagementSecret indicates whether MANAGEMENT_PASSWORD is configured.
|
||||
envManagementSecret bool
|
||||
|
||||
localPassword string
|
||||
|
||||
keepAliveEnabled bool
|
||||
@@ -183,18 +199,28 @@ func NewServer(cfg *config.Config, authManager *auth.Manager, accessManager *sdk
|
||||
}
|
||||
|
||||
engine.Use(corsMiddleware())
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
wd = configFilePath
|
||||
}
|
||||
|
||||
envAdminPassword, envAdminPasswordSet := os.LookupEnv("MANAGEMENT_PASSWORD")
|
||||
envAdminPassword = strings.TrimSpace(envAdminPassword)
|
||||
envManagementSecret := envAdminPasswordSet && envAdminPassword != ""
|
||||
|
||||
// Create server instance
|
||||
s := &Server{
|
||||
engine: engine,
|
||||
handlers: handlers.NewBaseAPIHandlers(cfg, authManager),
|
||||
cfg: cfg,
|
||||
accessManager: accessManager,
|
||||
requestLogger: requestLogger,
|
||||
loggerToggle: toggle,
|
||||
configFilePath: configFilePath,
|
||||
engine: engine,
|
||||
handlers: handlers.NewBaseAPIHandlers(&cfg.SDKConfig, authManager),
|
||||
cfg: cfg,
|
||||
accessManager: accessManager,
|
||||
requestLogger: requestLogger,
|
||||
loggerToggle: toggle,
|
||||
configFilePath: configFilePath,
|
||||
currentPath: wd,
|
||||
envManagementSecret: envManagementSecret,
|
||||
}
|
||||
s.applyAccessConfig(cfg)
|
||||
s.applyAccessConfig(nil, cfg)
|
||||
// Initialize management handler
|
||||
s.mgmt = managementHandlers.NewHandler(cfg, configFilePath, authManager)
|
||||
if optionState.localPassword != "" {
|
||||
@@ -208,6 +234,13 @@ func NewServer(cfg *config.Config, authManager *auth.Manager, accessManager *sdk
|
||||
optionState.routerConfigurator(engine, s.handlers, cfg)
|
||||
}
|
||||
|
||||
// Register management routes when configuration or environment secrets are available.
|
||||
hasManagementSecret := cfg.RemoteManagement.SecretKey != "" || envManagementSecret
|
||||
s.managementRoutesEnabled.Store(hasManagementSecret)
|
||||
if hasManagementSecret {
|
||||
s.registerManagementRoutes()
|
||||
}
|
||||
|
||||
if optionState.keepAliveEnabled {
|
||||
s.enableKeepAlive(optionState.keepAliveTimeout, optionState.keepAliveOnTimeout)
|
||||
}
|
||||
@@ -224,6 +257,7 @@ func NewServer(cfg *config.Config, authManager *auth.Manager, accessManager *sdk
|
||||
// setupRoutes configures the API routes for the server.
|
||||
// It defines the endpoints and associates them with their respective handlers.
|
||||
func (s *Server) setupRoutes() {
|
||||
s.engine.GET("/management.html", s.serveManagementControlPanel)
|
||||
openaiHandlers := openai.NewOpenAIAPIHandler(s.handlers)
|
||||
geminiHandlers := gemini.NewGeminiAPIHandler(s.handlers)
|
||||
geminiCLIHandlers := gemini.NewGeminiCLIAPIHandler(s.handlers)
|
||||
@@ -255,7 +289,6 @@ func (s *Server) setupRoutes() {
|
||||
s.engine.GET("/", func(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"message": "CLI Proxy API Server",
|
||||
"version": "1.0.0",
|
||||
"endpoints": []string{
|
||||
"POST /v1/chat/completions",
|
||||
"POST /v1/completions",
|
||||
@@ -278,7 +311,7 @@ func (s *Server) setupRoutes() {
|
||||
_ = os.WriteFile(file, []byte(fmt.Sprintf(`{"code":"%s","state":"%s","error":"%s"}`, code, state, errStr)), 0o600)
|
||||
}
|
||||
c.Header("Content-Type", "text/html; charset=utf-8")
|
||||
c.String(http.StatusOK, "<html><body><h1>Authentication successful!</h1><p>You can close this window.</p></body></html>")
|
||||
c.String(http.StatusOK, oauthCallbackSuccessHTML)
|
||||
})
|
||||
|
||||
s.engine.GET("/codex/callback", func(c *gin.Context) {
|
||||
@@ -290,7 +323,7 @@ func (s *Server) setupRoutes() {
|
||||
_ = os.WriteFile(file, []byte(fmt.Sprintf(`{"code":"%s","state":"%s","error":"%s"}`, code, state, errStr)), 0o600)
|
||||
}
|
||||
c.Header("Content-Type", "text/html; charset=utf-8")
|
||||
c.String(http.StatusOK, "<html><body><h1>Authentication successful!</h1><p>You can close this window.</p></body></html>")
|
||||
c.String(http.StatusOK, oauthCallbackSuccessHTML)
|
||||
})
|
||||
|
||||
s.engine.GET("/google/callback", func(c *gin.Context) {
|
||||
@@ -302,89 +335,148 @@ func (s *Server) setupRoutes() {
|
||||
_ = os.WriteFile(file, []byte(fmt.Sprintf(`{"code":"%s","state":"%s","error":"%s"}`, code, state, errStr)), 0o600)
|
||||
}
|
||||
c.Header("Content-Type", "text/html; charset=utf-8")
|
||||
c.String(http.StatusOK, "<html><body><h1>Authentication successful!</h1><p>You can close this window.</p></body></html>")
|
||||
c.String(http.StatusOK, oauthCallbackSuccessHTML)
|
||||
})
|
||||
|
||||
// Management API routes (delegated to management handlers)
// New logic: if remote-management-key is empty, do not expose any management endpoint (404).
if s.cfg.RemoteManagement.SecretKey != "" {
mgmt := s.engine.Group("/v0/management")
mgmt.Use(s.mgmt.Middleware())
{
mgmt.GET("/usage", s.mgmt.GetUsageStatistics)
mgmt.GET("/config", s.mgmt.GetConfig)

mgmt.GET("/debug", s.mgmt.GetDebug)
mgmt.PUT("/debug", s.mgmt.PutDebug)
mgmt.PATCH("/debug", s.mgmt.PutDebug)

mgmt.GET("/logging-to-file", s.mgmt.GetLoggingToFile)
mgmt.PUT("/logging-to-file", s.mgmt.PutLoggingToFile)
mgmt.PATCH("/logging-to-file", s.mgmt.PutLoggingToFile)

mgmt.GET("/usage-statistics-enabled", s.mgmt.GetUsageStatisticsEnabled)
mgmt.PUT("/usage-statistics-enabled", s.mgmt.PutUsageStatisticsEnabled)
mgmt.PATCH("/usage-statistics-enabled", s.mgmt.PutUsageStatisticsEnabled)

mgmt.GET("/proxy-url", s.mgmt.GetProxyURL)
mgmt.PUT("/proxy-url", s.mgmt.PutProxyURL)
mgmt.PATCH("/proxy-url", s.mgmt.PutProxyURL)
mgmt.DELETE("/proxy-url", s.mgmt.DeleteProxyURL)

mgmt.GET("/quota-exceeded/switch-project", s.mgmt.GetSwitchProject)
mgmt.PUT("/quota-exceeded/switch-project", s.mgmt.PutSwitchProject)
mgmt.PATCH("/quota-exceeded/switch-project", s.mgmt.PutSwitchProject)

mgmt.GET("/quota-exceeded/switch-preview-model", s.mgmt.GetSwitchPreviewModel)
mgmt.PUT("/quota-exceeded/switch-preview-model", s.mgmt.PutSwitchPreviewModel)
mgmt.PATCH("/quota-exceeded/switch-preview-model", s.mgmt.PutSwitchPreviewModel)

mgmt.GET("/api-keys", s.mgmt.GetAPIKeys)
mgmt.PUT("/api-keys", s.mgmt.PutAPIKeys)
mgmt.PATCH("/api-keys", s.mgmt.PatchAPIKeys)
mgmt.DELETE("/api-keys", s.mgmt.DeleteAPIKeys)

mgmt.GET("/generative-language-api-key", s.mgmt.GetGlKeys)
mgmt.PUT("/generative-language-api-key", s.mgmt.PutGlKeys)
mgmt.PATCH("/generative-language-api-key", s.mgmt.PatchGlKeys)
mgmt.DELETE("/generative-language-api-key", s.mgmt.DeleteGlKeys)

mgmt.GET("/request-log", s.mgmt.GetRequestLog)
mgmt.PUT("/request-log", s.mgmt.PutRequestLog)
mgmt.PATCH("/request-log", s.mgmt.PutRequestLog)

mgmt.GET("/request-retry", s.mgmt.GetRequestRetry)
mgmt.PUT("/request-retry", s.mgmt.PutRequestRetry)
mgmt.PATCH("/request-retry", s.mgmt.PutRequestRetry)

mgmt.GET("/claude-api-key", s.mgmt.GetClaudeKeys)
mgmt.PUT("/claude-api-key", s.mgmt.PutClaudeKeys)
mgmt.PATCH("/claude-api-key", s.mgmt.PatchClaudeKey)
mgmt.DELETE("/claude-api-key", s.mgmt.DeleteClaudeKey)

mgmt.GET("/codex-api-key", s.mgmt.GetCodexKeys)
mgmt.PUT("/codex-api-key", s.mgmt.PutCodexKeys)
mgmt.PATCH("/codex-api-key", s.mgmt.PatchCodexKey)
mgmt.DELETE("/codex-api-key", s.mgmt.DeleteCodexKey)

mgmt.GET("/openai-compatibility", s.mgmt.GetOpenAICompat)
mgmt.PUT("/openai-compatibility", s.mgmt.PutOpenAICompat)
mgmt.PATCH("/openai-compatibility", s.mgmt.PatchOpenAICompat)
mgmt.DELETE("/openai-compatibility", s.mgmt.DeleteOpenAICompat)

mgmt.GET("/auth-files", s.mgmt.ListAuthFiles)
mgmt.GET("/auth-files/download", s.mgmt.DownloadAuthFile)
mgmt.POST("/auth-files", s.mgmt.UploadAuthFile)
mgmt.DELETE("/auth-files", s.mgmt.DeleteAuthFile)

mgmt.GET("/anthropic-auth-url", s.mgmt.RequestAnthropicToken)
mgmt.GET("/codex-auth-url", s.mgmt.RequestCodexToken)
mgmt.GET("/gemini-cli-auth-url", s.mgmt.RequestGeminiCLIToken)
mgmt.POST("/gemini-web-token", s.mgmt.CreateGeminiWebToken)
mgmt.GET("/qwen-auth-url", s.mgmt.RequestQwenToken)
mgmt.GET("/get-auth-status", s.mgmt.GetAuthStatus)
s.engine.GET("/iflow/callback", func(c *gin.Context) {
code := c.Query("code")
state := c.Query("state")
errStr := c.Query("error")
if state != "" {
file := fmt.Sprintf("%s/.oauth-iflow-%s.oauth", s.cfg.AuthDir, state)
_ = os.WriteFile(file, []byte(fmt.Sprintf(`{"code":"%s","state":"%s","error":"%s"}`, code, state, errStr)), 0o600)
}
c.Header("Content-Type", "text/html; charset=utf-8")
c.String(http.StatusOK, oauthCallbackSuccessHTML)
})

// Management routes are registered lazily by registerManagementRoutes when a secret is configured.
}
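For orientation only: with the handler above, a callback to /iflow/callback carrying state abc123 would drop a one-line state file at <auth-dir>/.oauth-iflow-abc123.oauth; the code and state values below are invented.

{"code":"example-authorization-code","state":"abc123","error":""}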
func (s *Server) registerManagementRoutes() {
if s == nil || s.engine == nil || s.mgmt == nil {
return
}
if !s.managementRoutesRegistered.CompareAndSwap(false, true) {
return
}

log.Info("management routes registered after secret key configuration")

mgmt := s.engine.Group("/v0/management")
mgmt.Use(s.managementAvailabilityMiddleware(), s.mgmt.Middleware())
{
mgmt.GET("/usage", s.mgmt.GetUsageStatistics)
mgmt.GET("/config", s.mgmt.GetConfig)

mgmt.GET("/debug", s.mgmt.GetDebug)
mgmt.PUT("/debug", s.mgmt.PutDebug)
mgmt.PATCH("/debug", s.mgmt.PutDebug)

mgmt.GET("/logging-to-file", s.mgmt.GetLoggingToFile)
mgmt.PUT("/logging-to-file", s.mgmt.PutLoggingToFile)
mgmt.PATCH("/logging-to-file", s.mgmt.PutLoggingToFile)

mgmt.GET("/usage-statistics-enabled", s.mgmt.GetUsageStatisticsEnabled)
mgmt.PUT("/usage-statistics-enabled", s.mgmt.PutUsageStatisticsEnabled)
mgmt.PATCH("/usage-statistics-enabled", s.mgmt.PutUsageStatisticsEnabled)

mgmt.GET("/proxy-url", s.mgmt.GetProxyURL)
mgmt.PUT("/proxy-url", s.mgmt.PutProxyURL)
mgmt.PATCH("/proxy-url", s.mgmt.PutProxyURL)
mgmt.DELETE("/proxy-url", s.mgmt.DeleteProxyURL)

mgmt.GET("/quota-exceeded/switch-project", s.mgmt.GetSwitchProject)
mgmt.PUT("/quota-exceeded/switch-project", s.mgmt.PutSwitchProject)
mgmt.PATCH("/quota-exceeded/switch-project", s.mgmt.PutSwitchProject)

mgmt.GET("/quota-exceeded/switch-preview-model", s.mgmt.GetSwitchPreviewModel)
mgmt.PUT("/quota-exceeded/switch-preview-model", s.mgmt.PutSwitchPreviewModel)
mgmt.PATCH("/quota-exceeded/switch-preview-model", s.mgmt.PutSwitchPreviewModel)

mgmt.GET("/api-keys", s.mgmt.GetAPIKeys)
mgmt.PUT("/api-keys", s.mgmt.PutAPIKeys)
mgmt.PATCH("/api-keys", s.mgmt.PatchAPIKeys)
mgmt.DELETE("/api-keys", s.mgmt.DeleteAPIKeys)

mgmt.GET("/generative-language-api-key", s.mgmt.GetGlKeys)
mgmt.PUT("/generative-language-api-key", s.mgmt.PutGlKeys)
mgmt.PATCH("/generative-language-api-key", s.mgmt.PatchGlKeys)
mgmt.DELETE("/generative-language-api-key", s.mgmt.DeleteGlKeys)

mgmt.GET("/request-log", s.mgmt.GetRequestLog)
mgmt.PUT("/request-log", s.mgmt.PutRequestLog)
mgmt.PATCH("/request-log", s.mgmt.PutRequestLog)

mgmt.GET("/request-retry", s.mgmt.GetRequestRetry)
mgmt.PUT("/request-retry", s.mgmt.PutRequestRetry)
mgmt.PATCH("/request-retry", s.mgmt.PutRequestRetry)

mgmt.GET("/claude-api-key", s.mgmt.GetClaudeKeys)
mgmt.PUT("/claude-api-key", s.mgmt.PutClaudeKeys)
mgmt.PATCH("/claude-api-key", s.mgmt.PatchClaudeKey)
mgmt.DELETE("/claude-api-key", s.mgmt.DeleteClaudeKey)

mgmt.GET("/codex-api-key", s.mgmt.GetCodexKeys)
mgmt.PUT("/codex-api-key", s.mgmt.PutCodexKeys)
mgmt.PATCH("/codex-api-key", s.mgmt.PatchCodexKey)
mgmt.DELETE("/codex-api-key", s.mgmt.DeleteCodexKey)

mgmt.GET("/openai-compatibility", s.mgmt.GetOpenAICompat)
mgmt.PUT("/openai-compatibility", s.mgmt.PutOpenAICompat)
mgmt.PATCH("/openai-compatibility", s.mgmt.PatchOpenAICompat)
mgmt.DELETE("/openai-compatibility", s.mgmt.DeleteOpenAICompat)

mgmt.GET("/auth-files", s.mgmt.ListAuthFiles)
mgmt.GET("/auth-files/download", s.mgmt.DownloadAuthFile)
mgmt.POST("/auth-files", s.mgmt.UploadAuthFile)
mgmt.DELETE("/auth-files", s.mgmt.DeleteAuthFile)

mgmt.GET("/anthropic-auth-url", s.mgmt.RequestAnthropicToken)
mgmt.GET("/codex-auth-url", s.mgmt.RequestCodexToken)
mgmt.GET("/gemini-cli-auth-url", s.mgmt.RequestGeminiCLIToken)
mgmt.GET("/qwen-auth-url", s.mgmt.RequestQwenToken)
mgmt.GET("/iflow-auth-url", s.mgmt.RequestIFlowToken)
mgmt.GET("/get-auth-status", s.mgmt.GetAuthStatus)
}
}

func (s *Server) managementAvailabilityMiddleware() gin.HandlerFunc {
return func(c *gin.Context) {
if !s.managementRoutesEnabled.Load() {
c.AbortWithStatus(http.StatusNotFound)
return
}
c.Next()
}
}
func (s *Server) serveManagementControlPanel(c *gin.Context) {
cfg := s.cfg
if cfg == nil || cfg.RemoteManagement.DisableControlPanel {
c.AbortWithStatus(http.StatusNotFound)
return
}
println(s.currentPath)
filePath := managementasset.FilePath(s.currentPath)
if strings.TrimSpace(filePath) == "" {
c.AbortWithStatus(http.StatusNotFound)
return
}

if _, err := os.Stat(filePath); err != nil {
if os.IsNotExist(err) {
go managementasset.EnsureLatestManagementHTML(context.Background(), managementasset.StaticDir(s.currentPath), cfg.ProxyURL)
c.AbortWithStatus(http.StatusNotFound)
return
}

log.WithError(err).Error("failed to stat management control panel asset")
c.AbortWithStatus(http.StatusInternalServerError)
return
}

c.File(filePath)
}

func (s *Server) enableKeepAlive(timeout time.Duration, onTimeout func()) {
@@ -547,16 +639,13 @@ func corsMiddleware() gin.HandlerFunc {
}
}

func (s *Server) applyAccessConfig(cfg *config.Config) {
if s == nil || s.accessManager == nil {
func (s *Server) applyAccessConfig(oldCfg, newCfg *config.Config) {
if s == nil || s.accessManager == nil || newCfg == nil {
return
}
providers, err := sdkaccess.BuildProviders(cfg)
if err != nil {
log.Errorf("failed to update request auth providers: %v", err)
if _, err := access.ApplyAccessProviders(s.accessManager, oldCfg, newCfg); err != nil {
return
}
s.accessManager.SetProviders(providers)
}

// UpdateClients updates the server's client list and configuration.
@@ -566,44 +655,97 @@ func (s *Server) applyAccessConfig(cfg *config.Config) {
// - clients: The new slice of AI service clients
// - cfg: The new application configuration
func (s *Server) UpdateClients(cfg *config.Config) {
oldCfg := s.cfg

// Update request logger enabled state if it has changed
if s.requestLogger != nil && s.cfg.RequestLog != cfg.RequestLog {
previousRequestLog := false
if oldCfg != nil {
previousRequestLog = oldCfg.RequestLog
}
if s.requestLogger != nil && (oldCfg == nil || previousRequestLog != cfg.RequestLog) {
if s.loggerToggle != nil {
s.loggerToggle(cfg.RequestLog)
} else if toggler, ok := s.requestLogger.(interface{ SetEnabled(bool) }); ok {
toggler.SetEnabled(cfg.RequestLog)
}
log.Debugf("request logging updated from %t to %t", s.cfg.RequestLog, cfg.RequestLog)
}

if s.cfg.LoggingToFile != cfg.LoggingToFile {
if err := logging.ConfigureLogOutput(cfg.LoggingToFile); err != nil {
log.Errorf("failed to reconfigure log output: %v", err)
if oldCfg != nil {
log.Debugf("request logging updated from %t to %t", previousRequestLog, cfg.RequestLog)
} else {
log.Debugf("logging_to_file updated from %t to %t", s.cfg.LoggingToFile, cfg.LoggingToFile)
log.Debugf("request logging toggled to %t", cfg.RequestLog)
}
}

if s.cfg == nil || s.cfg.UsageStatisticsEnabled != cfg.UsageStatisticsEnabled {
if oldCfg != nil && oldCfg.LoggingToFile != cfg.LoggingToFile {
if err := logging.ConfigureLogOutput(cfg.LoggingToFile); err != nil {
log.Errorf("failed to reconfigure log output: %v", err)
} else {
log.Debugf("logging_to_file updated from %t to %t", oldCfg.LoggingToFile, cfg.LoggingToFile)
}
}

if oldCfg == nil || oldCfg.UsageStatisticsEnabled != cfg.UsageStatisticsEnabled {
usage.SetStatisticsEnabled(cfg.UsageStatisticsEnabled)
if s.cfg != nil {
log.Debugf("usage_statistics_enabled updated from %t to %t", s.cfg.UsageStatisticsEnabled, cfg.UsageStatisticsEnabled)
if oldCfg != nil {
log.Debugf("usage_statistics_enabled updated from %t to %t", oldCfg.UsageStatisticsEnabled, cfg.UsageStatisticsEnabled)
} else {
log.Debugf("usage_statistics_enabled toggled to %t", cfg.UsageStatisticsEnabled)
}
}

// Update log level dynamically when debug flag changes
if s.cfg.Debug != cfg.Debug {
if oldCfg == nil || oldCfg.Debug != cfg.Debug {
util.SetLogLevel(cfg)
log.Debugf("debug mode updated from %t to %t", s.cfg.Debug, cfg.Debug)
if oldCfg != nil {
log.Debugf("debug mode updated from %t to %t", oldCfg.Debug, cfg.Debug)
} else {
log.Debugf("debug mode toggled to %t", cfg.Debug)
}
}

prevSecretEmpty := true
if oldCfg != nil {
prevSecretEmpty = oldCfg.RemoteManagement.SecretKey == ""
}
newSecretEmpty := cfg.RemoteManagement.SecretKey == ""
if s.envManagementSecret {
s.registerManagementRoutes()
if s.managementRoutesEnabled.CompareAndSwap(false, true) {
log.Info("management routes enabled via MANAGEMENT_PASSWORD")
} else {
s.managementRoutesEnabled.Store(true)
}
} else {
switch {
case prevSecretEmpty && !newSecretEmpty:
s.registerManagementRoutes()
if s.managementRoutesEnabled.CompareAndSwap(false, true) {
log.Info("management routes enabled after secret key update")
} else {
s.managementRoutesEnabled.Store(true)
}
case !prevSecretEmpty && newSecretEmpty:
if s.managementRoutesEnabled.CompareAndSwap(true, false) {
log.Info("management routes disabled after secret key removal")
} else {
s.managementRoutesEnabled.Store(false)
}
default:
s.managementRoutesEnabled.Store(!newSecretEmpty)
}
}

s.applyAccessConfig(oldCfg, cfg)
s.cfg = cfg
s.handlers.UpdateClients(cfg)
s.handlers.UpdateClients(&cfg.SDKConfig)

if !cfg.RemoteManagement.DisableControlPanel {
staticDir := managementasset.StaticDir(s.currentPath)
go managementasset.EnsureLatestManagementHTML(context.Background(), staticDir, cfg.ProxyURL)
}
if s.mgmt != nil {
s.mgmt.SetConfig(cfg)
s.mgmt.SetAuthManager(s.handlers.AuthManager)
}
s.applyAccessConfig(cfg)

// Count client sources from configuration and auth directory
authFiles := util.CountAuthFiles(cfg.AuthDir)
@@ -612,7 +754,12 @@ func (s *Server) UpdateClients(cfg *config.Config) {
codexAPIKeyCount := len(cfg.CodexKey)
openAICompatCount := 0
for i := range cfg.OpenAICompatibility {
openAICompatCount += len(cfg.OpenAICompatibility[i].APIKeys)
entry := cfg.OpenAICompatibility[i]
if len(entry.APIKeyEntries) > 0 {
openAICompatCount += len(entry.APIKeyEntries)
continue
}
openAICompatCount += len(entry.APIKeys)
}

total := authFiles + glAPIKeyCount + claudeAPIKeyCount + codexAPIKeyCount + openAICompatCount
@@ -59,7 +59,7 @@ type ClaudeAuth struct {
// - *ClaudeAuth: A new Claude authentication service instance
func NewClaudeAuth(cfg *config.Config) *ClaudeAuth {
return &ClaudeAuth{
httpClient: util.SetProxy(cfg, &http.Client{}),
httpClient: util.SetProxy(&cfg.SDKConfig, &http.Client{}),
}
}

@@ -37,7 +37,7 @@ type CodexAuth struct {
// It initializes an HTTP client with proxy settings from the provided configuration.
func NewCodexAuth(cfg *config.Config) *CodexAuth {
return &CodexAuth{
httpClient: util.SetProxy(cfg, &http.Client{}),
httpClient: util.SetProxy(&cfg.SDKConfig, &http.Client{}),
}
}
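The two hunks above switch the constructors from handing the full *config.Config to util.SetProxy to handing only its embedded SDKConfig. A minimal sketch of the same pattern for a hypothetical provider (ExampleAuth is not part of this change; callers still pass *config.Config):

// ExampleAuth is a hypothetical provider following the pattern used by ClaudeAuth and CodexAuth above.
type ExampleAuth struct {
	httpClient *http.Client
}

// NewExampleAuth builds the provider with a proxy-aware HTTP client; only the SDK-level
// configuration is passed to util.SetProxy.
func NewExampleAuth(cfg *config.Config) *ExampleAuth {
	return &ExampleAuth{httpClient: util.SetProxy(&cfg.SDKConfig, &http.Client{})}
}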
@@ -1,64 +0,0 @@
// Package gemini provides authentication and token management functionality
// for Google's Gemini AI services. It handles OAuth2 token storage, serialization,
// and retrieval for maintaining authenticated sessions with the Gemini API.
package gemini

import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"time"

"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
log "github.com/sirupsen/logrus"
)

// GeminiWebTokenStorage stores cookie information for Google Gemini Web authentication.
type GeminiWebTokenStorage struct {
Secure1PSID string `json:"secure_1psid"`
Secure1PSIDTS string `json:"secure_1psidts"`
Type string `json:"type"`
LastRefresh string `json:"last_refresh,omitempty"`
// Label is a stable account identifier used for logging, e.g. "gemini-web-<hash>".
// It is derived from the auth file name when not explicitly set.
Label string `json:"label,omitempty"`
}

// SaveTokenToFile serializes the Gemini Web token storage to a JSON file.
func (ts *GeminiWebTokenStorage) SaveTokenToFile(authFilePath string) error {
misc.LogSavingCredentials(authFilePath)
ts.Type = "gemini-web"
// Auto-derive a stable label from the file name if missing.
if ts.Label == "" {
base := filepath.Base(authFilePath)
if strings.HasSuffix(strings.ToLower(base), ".json") {
base = strings.TrimSuffix(base, filepath.Ext(base))
}
if base != "" {
ts.Label = base
}
}
if ts.LastRefresh == "" {
ts.LastRefresh = time.Now().Format(time.RFC3339)
}
if err := os.MkdirAll(filepath.Dir(authFilePath), 0700); err != nil {
return fmt.Errorf("failed to create directory: %v", err)
}

f, err := os.Create(authFilePath)
if err != nil {
return fmt.Errorf("failed to create token file: %w", err)
}
defer func() {
if errClose := f.Close(); errClose != nil {
log.Errorf("failed to close file: %v", errClose)
}
}()

if err = json.NewEncoder(f).Encode(ts); err != nil {
return fmt.Errorf("failed to write token to file: %w", err)
}
return nil
}
internal/auth/iflow/iflow_auth.go (new file, 275 lines)
@@ -0,0 +1,275 @@
|
||||
package iflow
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
// OAuth endpoints and client metadata are derived from the reference Python implementation.
|
||||
iFlowOAuthTokenEndpoint = "https://iflow.cn/oauth/token"
|
||||
iFlowOAuthAuthorizeEndpoint = "https://iflow.cn/oauth"
|
||||
iFlowUserInfoEndpoint = "https://iflow.cn/api/oauth/getUserInfo"
|
||||
iFlowSuccessRedirectURL = "https://iflow.cn/oauth/success"
|
||||
|
||||
// Client credentials provided by iFlow for the Code Assist integration.
|
||||
iFlowOAuthClientID = "10009311001"
|
||||
iFlowOAuthClientSecret = "4Z3YjXycVsQvyGF1etiNlIBB4RsqSDtW"
|
||||
)
|
||||
|
||||
// DefaultAPIBaseURL is the canonical chat completions endpoint.
|
||||
const DefaultAPIBaseURL = "https://apis.iflow.cn/v1"
|
||||
|
||||
// SuccessRedirectURL is exposed for consumers needing the official success page.
|
||||
const SuccessRedirectURL = iFlowSuccessRedirectURL
|
||||
|
||||
// CallbackPort defines the local port used for OAuth callbacks.
|
||||
const CallbackPort = 11451
|
||||
|
||||
// IFlowAuth encapsulates the HTTP client helpers for the OAuth flow.
|
||||
type IFlowAuth struct {
|
||||
httpClient *http.Client
|
||||
}
|
||||
|
||||
// NewIFlowAuth constructs a new IFlowAuth with proxy-aware transport.
|
||||
func NewIFlowAuth(cfg *config.Config) *IFlowAuth {
|
||||
client := &http.Client{Timeout: 30 * time.Second}
|
||||
return &IFlowAuth{httpClient: util.SetProxy(&cfg.SDKConfig, client)}
|
||||
}
|
||||
|
||||
// AuthorizationURL builds the authorization URL and matching redirect URI.
|
||||
func (ia *IFlowAuth) AuthorizationURL(state string, port int) (authURL, redirectURI string) {
|
||||
redirectURI = fmt.Sprintf("http://localhost:%d/oauth2callback", port)
|
||||
values := url.Values{}
|
||||
values.Set("loginMethod", "phone")
|
||||
values.Set("type", "phone")
|
||||
values.Set("redirect", redirectURI)
|
||||
values.Set("state", state)
|
||||
values.Set("client_id", iFlowOAuthClientID)
|
||||
authURL = fmt.Sprintf("%s?%s", iFlowOAuthAuthorizeEndpoint, values.Encode())
|
||||
return authURL, redirectURI
|
||||
}
|
||||
|
||||
// ExchangeCodeForTokens exchanges an authorization code for access and refresh tokens.
|
||||
func (ia *IFlowAuth) ExchangeCodeForTokens(ctx context.Context, code, redirectURI string) (*IFlowTokenData, error) {
|
||||
form := url.Values{}
|
||||
form.Set("grant_type", "authorization_code")
|
||||
form.Set("code", code)
|
||||
form.Set("redirect_uri", redirectURI)
|
||||
form.Set("client_id", iFlowOAuthClientID)
|
||||
form.Set("client_secret", iFlowOAuthClientSecret)
|
||||
|
||||
req, err := ia.newTokenRequest(ctx, form)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ia.doTokenRequest(ctx, req)
|
||||
}
|
||||
|
||||
// RefreshTokens exchanges a refresh token for a new access token.
|
||||
func (ia *IFlowAuth) RefreshTokens(ctx context.Context, refreshToken string) (*IFlowTokenData, error) {
|
||||
form := url.Values{}
|
||||
form.Set("grant_type", "refresh_token")
|
||||
form.Set("refresh_token", refreshToken)
|
||||
form.Set("client_id", iFlowOAuthClientID)
|
||||
form.Set("client_secret", iFlowOAuthClientSecret)
|
||||
|
||||
req, err := ia.newTokenRequest(ctx, form)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ia.doTokenRequest(ctx, req)
|
||||
}
|
||||
|
||||
func (ia *IFlowAuth) newTokenRequest(ctx context.Context, form url.Values) (*http.Request, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, iFlowOAuthTokenEndpoint, strings.NewReader(form.Encode()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow token: create request failed: %w", err)
|
||||
}
|
||||
|
||||
basic := base64.StdEncoding.EncodeToString([]byte(iFlowOAuthClientID + ":" + iFlowOAuthClientSecret))
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
req.Header.Set("Authorization", "Basic "+basic)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (ia *IFlowAuth) doTokenRequest(ctx context.Context, req *http.Request) (*IFlowTokenData, error) {
|
||||
resp, err := ia.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow token: request failed: %w", err)
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow token: read response failed: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
log.Debugf("iflow token request failed: status=%d body=%s", resp.StatusCode, string(body))
|
||||
return nil, fmt.Errorf("iflow token: %d %s", resp.StatusCode, strings.TrimSpace(string(body)))
|
||||
}
|
||||
|
||||
var tokenResp IFlowTokenResponse
|
||||
if err = json.Unmarshal(body, &tokenResp); err != nil {
|
||||
return nil, fmt.Errorf("iflow token: decode response failed: %w", err)
|
||||
}
|
||||
|
||||
data := &IFlowTokenData{
|
||||
AccessToken: tokenResp.AccessToken,
|
||||
RefreshToken: tokenResp.RefreshToken,
|
||||
TokenType: tokenResp.TokenType,
|
||||
Scope: tokenResp.Scope,
|
||||
Expire: time.Now().Add(time.Duration(tokenResp.ExpiresIn) * time.Second).Format(time.RFC3339),
|
||||
}
|
||||
|
||||
if tokenResp.AccessToken == "" {
|
||||
return nil, fmt.Errorf("iflow token: missing access token in response")
|
||||
}
|
||||
|
||||
info, errAPI := ia.FetchUserInfo(ctx, tokenResp.AccessToken)
|
||||
if errAPI != nil {
|
||||
return nil, fmt.Errorf("iflow token: fetch user info failed: %w", errAPI)
|
||||
}
|
||||
if strings.TrimSpace(info.APIKey) == "" {
|
||||
return nil, fmt.Errorf("iflow token: empty api key returned")
|
||||
}
|
||||
email := strings.TrimSpace(info.Email)
|
||||
if email == "" {
|
||||
email = strings.TrimSpace(info.Phone)
|
||||
}
|
||||
if email == "" {
|
||||
return nil, fmt.Errorf("iflow token: missing account email/phone in user info")
|
||||
}
|
||||
data.APIKey = info.APIKey
|
||||
data.Email = email
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// FetchUserInfo retrieves account metadata (including API key) for the provided access token.
|
||||
func (ia *IFlowAuth) FetchUserInfo(ctx context.Context, accessToken string) (*userInfoData, error) {
|
||||
if strings.TrimSpace(accessToken) == "" {
|
||||
return nil, fmt.Errorf("iflow api key: access token is empty")
|
||||
}
|
||||
|
||||
endpoint := fmt.Sprintf("%s?accessToken=%s", iFlowUserInfoEndpoint, url.QueryEscape(accessToken))
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow api key: create request failed: %w", err)
|
||||
}
|
||||
req.Header.Set("Accept", "application/json")
|
||||
|
||||
resp, err := ia.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow api key: request failed: %w", err)
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow api key: read response failed: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
log.Debugf("iflow api key failed: status=%d body=%s", resp.StatusCode, string(body))
|
||||
return nil, fmt.Errorf("iflow api key: %d %s", resp.StatusCode, strings.TrimSpace(string(body)))
|
||||
}
|
||||
|
||||
var result userInfoResponse
|
||||
if err = json.Unmarshal(body, &result); err != nil {
|
||||
return nil, fmt.Errorf("iflow api key: decode body failed: %w", err)
|
||||
}
|
||||
|
||||
if !result.Success {
|
||||
return nil, fmt.Errorf("iflow api key: request not successful")
|
||||
}
|
||||
|
||||
if result.Data.APIKey == "" {
|
||||
return nil, fmt.Errorf("iflow api key: missing api key in response")
|
||||
}
|
||||
|
||||
return &result.Data, nil
|
||||
}
|
||||
|
||||
// CreateTokenStorage converts token data into persistence storage.
|
||||
func (ia *IFlowAuth) CreateTokenStorage(data *IFlowTokenData) *IFlowTokenStorage {
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
return &IFlowTokenStorage{
|
||||
AccessToken: data.AccessToken,
|
||||
RefreshToken: data.RefreshToken,
|
||||
LastRefresh: time.Now().Format(time.RFC3339),
|
||||
Expire: data.Expire,
|
||||
APIKey: data.APIKey,
|
||||
Email: data.Email,
|
||||
TokenType: data.TokenType,
|
||||
Scope: data.Scope,
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateTokenStorage updates the persisted token storage with latest token data.
|
||||
func (ia *IFlowAuth) UpdateTokenStorage(storage *IFlowTokenStorage, data *IFlowTokenData) {
|
||||
if storage == nil || data == nil {
|
||||
return
|
||||
}
|
||||
storage.AccessToken = data.AccessToken
|
||||
storage.RefreshToken = data.RefreshToken
|
||||
storage.LastRefresh = time.Now().Format(time.RFC3339)
|
||||
storage.Expire = data.Expire
|
||||
if data.APIKey != "" {
|
||||
storage.APIKey = data.APIKey
|
||||
}
|
||||
if data.Email != "" {
|
||||
storage.Email = data.Email
|
||||
}
|
||||
storage.TokenType = data.TokenType
|
||||
storage.Scope = data.Scope
|
||||
}
|
||||
|
||||
// IFlowTokenResponse models the OAuth token endpoint response.
|
||||
type IFlowTokenResponse struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn int `json:"expires_in"`
|
||||
TokenType string `json:"token_type"`
|
||||
Scope string `json:"scope"`
|
||||
}
|
||||
|
||||
// IFlowTokenData captures processed token details.
|
||||
type IFlowTokenData struct {
|
||||
AccessToken string
|
||||
RefreshToken string
|
||||
TokenType string
|
||||
Scope string
|
||||
Expire string
|
||||
APIKey string
|
||||
Email string
|
||||
}
|
||||
|
||||
// userInfoResponse represents the structure returned by the user info endpoint.
|
||||
type userInfoResponse struct {
|
||||
Success bool `json:"success"`
|
||||
Data userInfoData `json:"data"`
|
||||
}
|
||||
|
||||
type userInfoData struct {
|
||||
APIKey string `json:"apiKey"`
|
||||
Email string `json:"email"`
|
||||
Phone string `json:"phone"`
|
||||
}
|
||||
internal/auth/iflow/iflow_token.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package iflow

import (
"encoding/json"
"fmt"
"os"
"path/filepath"

"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
)

// IFlowTokenStorage persists iFlow OAuth credentials alongside the derived API key.
type IFlowTokenStorage struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
LastRefresh string `json:"last_refresh"`
Expire string `json:"expired"`
APIKey string `json:"api_key"`
Email string `json:"email"`
TokenType string `json:"token_type"`
Scope string `json:"scope"`
Type string `json:"type"`
}

// SaveTokenToFile serialises the token storage to disk.
func (ts *IFlowTokenStorage) SaveTokenToFile(authFilePath string) error {
misc.LogSavingCredentials(authFilePath)
ts.Type = "iflow"
if err := os.MkdirAll(filepath.Dir(authFilePath), 0o700); err != nil {
return fmt.Errorf("iflow token: create directory failed: %w", err)
}

f, err := os.Create(authFilePath)
if err != nil {
return fmt.Errorf("iflow token: create file failed: %w", err)
}
defer func() { _ = f.Close() }()

if err = json.NewEncoder(f).Encode(ts); err != nil {
return fmt.Errorf("iflow token: encode token failed: %w", err)
}
return nil
}
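For reference, SaveTokenToFile writes the struct above as a single JSON object whose keys follow the struct tags; a hypothetical auth file (all values invented) would look roughly like:

{"access_token":"at-...","refresh_token":"rt-...","last_refresh":"2025-01-01T00:00:00Z","expired":"2025-01-01T01:00:00Z","api_key":"sk-...","email":"user@example.com","token_type":"Bearer","scope":"","type":"iflow"}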
internal/auth/iflow/oauth_server.go (new file, 143 lines)
@@ -0,0 +1,143 @@
package iflow

import (
"context"
"fmt"
"net"
"net/http"
"strings"
"sync"
"time"

log "github.com/sirupsen/logrus"
)

const errorRedirectURL = "https://iflow.cn/oauth/error"

// OAuthResult captures the outcome of the local OAuth callback.
type OAuthResult struct {
Code string
State string
Error string
}

// OAuthServer provides a minimal HTTP server for handling the iFlow OAuth callback.
type OAuthServer struct {
server *http.Server
port int
result chan *OAuthResult
errChan chan error
mu sync.Mutex
running bool
}

// NewOAuthServer constructs a new OAuthServer bound to the provided port.
func NewOAuthServer(port int) *OAuthServer {
return &OAuthServer{
port: port,
result: make(chan *OAuthResult, 1),
errChan: make(chan error, 1),
}
}

// Start launches the callback listener.
func (s *OAuthServer) Start() error {
s.mu.Lock()
defer s.mu.Unlock()
if s.running {
return fmt.Errorf("iflow oauth server already running")
}
if !s.isPortAvailable() {
return fmt.Errorf("port %d is already in use", s.port)
}

mux := http.NewServeMux()
mux.HandleFunc("/oauth2callback", s.handleCallback)

s.server = &http.Server{
Addr: fmt.Sprintf(":%d", s.port),
Handler: mux,
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
}

s.running = true

go func() {
if err := s.server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
s.errChan <- err
}
}()

time.Sleep(100 * time.Millisecond)
return nil
}

// Stop gracefully terminates the callback listener.
func (s *OAuthServer) Stop(ctx context.Context) error {
s.mu.Lock()
defer s.mu.Unlock()
if !s.running || s.server == nil {
return nil
}
defer func() {
s.running = false
s.server = nil
}()
return s.server.Shutdown(ctx)
}

// WaitForCallback blocks until a callback result, server error, or timeout occurs.
func (s *OAuthServer) WaitForCallback(timeout time.Duration) (*OAuthResult, error) {
select {
case res := <-s.result:
return res, nil
case err := <-s.errChan:
return nil, err
case <-time.After(timeout):
return nil, fmt.Errorf("timeout waiting for OAuth callback")
}
}

func (s *OAuthServer) handleCallback(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
return
}

query := r.URL.Query()
if errParam := strings.TrimSpace(query.Get("error")); errParam != "" {
s.sendResult(&OAuthResult{Error: errParam})
http.Redirect(w, r, errorRedirectURL, http.StatusFound)
return
}

code := strings.TrimSpace(query.Get("code"))
if code == "" {
s.sendResult(&OAuthResult{Error: "missing_code"})
http.Redirect(w, r, errorRedirectURL, http.StatusFound)
return
}

state := query.Get("state")
s.sendResult(&OAuthResult{Code: code, State: state})
http.Redirect(w, r, SuccessRedirectURL, http.StatusFound)
}

func (s *OAuthServer) sendResult(res *OAuthResult) {
select {
case s.result <- res:
default:
log.Debug("iflow oauth result channel full, dropping result")
}
}

func (s *OAuthServer) isPortAvailable() bool {
addr := fmt.Sprintf(":%d", s.port)
listener, err := net.Listen("tcp", addr)
if err != nil {
return false
}
_ = listener.Close()
return true
}
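A minimal sketch of how the helpers in this package fit together for a local login, written as if it lived in package iflow; the state value, timeout, and output file name are assumptions, and the real CLI persists tokens through the shared token store instead:

// loginSketch is illustrative only; error handling is kept deliberately simple.
func loginSketch(ctx context.Context, cfg *config.Config) error {
	auth := NewIFlowAuth(cfg)           // proxy-aware HTTP client from the config
	srv := NewOAuthServer(CallbackPort) // serves http://localhost:11451/oauth2callback
	if err := srv.Start(); err != nil {
		return err
	}
	defer func() { _ = srv.Stop(ctx) }()

	state := "example-state" // assumption: a real caller generates a random value
	authURL, redirectURI := auth.AuthorizationURL(state, CallbackPort)
	fmt.Println("Open this URL in a browser:", authURL)

	res, err := srv.WaitForCallback(5 * time.Minute)
	if err != nil {
		return err
	}
	if res.Error != "" || res.State != state {
		return fmt.Errorf("iflow login: callback rejected")
	}

	tokens, err := auth.ExchangeCodeForTokens(ctx, res.Code, redirectURI)
	if err != nil {
		return err
	}
	storage := auth.CreateTokenStorage(tokens)
	// Illustrative path; the production flow derives the file name elsewhere.
	return storage.SaveTokenToFile(cfg.AuthDir + "/iflow-" + tokens.Email + ".json")
}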
@@ -85,7 +85,7 @@ type QwenAuth struct {
// NewQwenAuth creates a new QwenAuth instance with a proxy-configured HTTP client.
func NewQwenAuth(cfg *config.Config) *QwenAuth {
return &QwenAuth{
httpClient: util.SetProxy(cfg, &http.Client{}),
httpClient: util.SetProxy(&cfg.SDKConfig, &http.Client{}),
}
}

@@ -17,6 +17,7 @@ func newAuthManager() *sdkAuth.Manager {
sdkAuth.NewCodexAuthenticator(),
sdkAuth.NewClaudeAuthenticator(),
sdkAuth.NewQwenAuthenticator(),
sdkAuth.NewIFlowAuthenticator(),
)
return manager
}
@@ -1,177 +0,0 @@
|
||||
// Package cmd provides command-line interface functionality for the CLI Proxy API.
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/gemini"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// DoGeminiWebAuth handles the process of creating a Gemini Web token file.
|
||||
// New flow:
|
||||
// 1. Prompt user to paste the full cookie string.
|
||||
// 2. Extract __Secure-1PSID and __Secure-1PSIDTS from the cookie string.
|
||||
// 3. Call https://accounts.google.com/ListAccounts with the cookie to obtain email.
|
||||
// 4. Save auth file with the same structure, and set Label to the email.
|
||||
func DoGeminiWebAuth(cfg *config.Config) {
|
||||
var secure1psid, secure1psidts, email string
|
||||
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
isMacOS := strings.HasPrefix(runtime.GOOS, "darwin")
|
||||
if !isMacOS {
|
||||
fmt.Print("Paste your full Google Cookie and press Enter: ")
|
||||
rawCookie, _ := reader.ReadString('\n')
|
||||
rawCookie = strings.TrimSpace(rawCookie)
|
||||
if rawCookie == "" {
|
||||
log.Fatal("Cookie cannot be empty")
|
||||
return
|
||||
}
|
||||
|
||||
// Parse K=V cookie pairs separated by ';'
|
||||
cookieMap := make(map[string]string)
|
||||
parts := strings.Split(rawCookie, ";")
|
||||
for _, p := range parts {
|
||||
p = strings.TrimSpace(p)
|
||||
if p == "" {
|
||||
continue
|
||||
}
|
||||
if eq := strings.Index(p, "="); eq > 0 {
|
||||
k := strings.TrimSpace(p[:eq])
|
||||
v := strings.TrimSpace(p[eq+1:])
|
||||
if k != "" {
|
||||
cookieMap[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
secure1psid = strings.TrimSpace(cookieMap["__Secure-1PSID"])
|
||||
secure1psidts = strings.TrimSpace(cookieMap["__Secure-1PSIDTS"])
|
||||
|
||||
// Build HTTP client with proxy settings respected.
|
||||
httpClient := &http.Client{Timeout: 15 * time.Second}
|
||||
httpClient = util.SetProxy(cfg, httpClient)
|
||||
|
||||
// Request ListAccounts to extract email as label (use POST per upstream behavior).
|
||||
req, err := http.NewRequest(http.MethodPost, "https://accounts.google.com/ListAccounts", nil)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to create request: %v\n", err)
|
||||
return
|
||||
}
|
||||
req.Header.Set("Cookie", rawCookie)
|
||||
req.Header.Set("Accept", "application/json, text/plain, */*")
|
||||
req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36")
|
||||
req.Header.Set("Origin", "https://accounts.google.com")
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded;charset=UTF-8")
|
||||
|
||||
resp, err := httpClient.Do(req)
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Request to ListAccounts failed: %v\n", err)
|
||||
} else {
|
||||
defer func() {
|
||||
_ = resp.Body.Close()
|
||||
}()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
fmt.Printf("ListAccounts returned status code: %d\n", resp.StatusCode)
|
||||
} else {
|
||||
var payload []any
|
||||
if err = json.NewDecoder(resp.Body).Decode(&payload); err != nil {
|
||||
fmt.Printf("Failed to parse ListAccounts response: %v\n", err)
|
||||
} else {
|
||||
// Expected structure like: ["gaia.l.a.r", [["gaia.l.a",1,"Name","email@example.com", ... ]]]
|
||||
if len(payload) >= 2 {
|
||||
if accounts, ok := payload[1].([]any); ok && len(accounts) >= 1 {
|
||||
if first, ok1 := accounts[0].([]any); ok1 && len(first) >= 4 {
|
||||
if em, ok2 := first[3].(string); ok2 {
|
||||
email = strings.TrimSpace(em)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if email == "" {
|
||||
fmt.Println("Failed to parse email from ListAccounts response")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: prompt user to input missing values
|
||||
if secure1psid == "" {
|
||||
if !isMacOS {
|
||||
fmt.Print("Cookie missing __Secure-1PSID. ")
|
||||
}
|
||||
fmt.Print("Enter __Secure-1PSID: ")
|
||||
v, _ := reader.ReadString('\n')
|
||||
secure1psid = strings.TrimSpace(v)
|
||||
}
|
||||
if secure1psidts == "" {
|
||||
if !isMacOS {
|
||||
fmt.Print("Cookie missing __Secure-1PSID. ")
|
||||
}
|
||||
fmt.Print("Enter __Secure-1PSIDTS: ")
|
||||
v, _ := reader.ReadString('\n')
|
||||
secure1psidts = strings.TrimSpace(v)
|
||||
}
|
||||
if secure1psid == "" || secure1psidts == "" {
|
||||
log.Fatal("__Secure-1PSID and __Secure-1PSIDTS cannot be empty")
|
||||
return
|
||||
}
|
||||
if isMacOS {
|
||||
fmt.Print("Enter your account email: ")
|
||||
v, _ := reader.ReadString('\n')
|
||||
email = strings.TrimSpace(v)
|
||||
}
|
||||
|
||||
// Generate a filename based on the SHA256 hash of the PSID
|
||||
hasher := sha256.New()
|
||||
hasher.Write([]byte(secure1psid))
|
||||
hash := hex.EncodeToString(hasher.Sum(nil))
|
||||
fileName := fmt.Sprintf("gemini-web-%s.json", hash[:16])
|
||||
|
||||
// Decide label: prefer email; fallback prompt then file name without .json
|
||||
defaultLabel := strings.TrimSuffix(fileName, ".json")
|
||||
label := email
|
||||
if label == "" {
|
||||
fmt.Printf("Enter label for this auth (default: %s): ", defaultLabel)
|
||||
v, _ := reader.ReadString('\n')
|
||||
v = strings.TrimSpace(v)
|
||||
if v != "" {
|
||||
label = v
|
||||
} else {
|
||||
label = defaultLabel
|
||||
}
|
||||
}
|
||||
|
||||
tokenStorage := &gemini.GeminiWebTokenStorage{
|
||||
Secure1PSID: secure1psid,
|
||||
Secure1PSIDTS: secure1psidts,
|
||||
Label: label,
|
||||
}
|
||||
record := &sdkAuth.TokenRecord{
|
||||
Provider: "gemini-web",
|
||||
FileName: fileName,
|
||||
Storage: tokenStorage,
|
||||
}
|
||||
store := sdkAuth.GetTokenStore()
|
||||
savedPath, err := store.Save(context.Background(), cfg, record)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to save Gemini Web token to file: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully saved Gemini Web token to: %s\n", savedPath)
|
||||
}
|
||||
internal/cmd/iflow_login.go (new file, 54 lines)
@@ -0,0 +1,54 @@
package cmd

import (
"context"
"errors"
"fmt"

"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
log "github.com/sirupsen/logrus"
)

// DoIFlowLogin performs the iFlow OAuth login via the shared authentication manager.
func DoIFlowLogin(cfg *config.Config, options *LoginOptions) {
if options == nil {
options = &LoginOptions{}
}

manager := newAuthManager()

promptFn := options.Prompt
if promptFn == nil {
promptFn = func(prompt string) (string, error) {
fmt.Println()
fmt.Println(prompt)
var value string
_, err := fmt.Scanln(&value)
return value, err
}
}

authOpts := &sdkAuth.LoginOptions{
NoBrowser: options.NoBrowser,
Metadata: map[string]string{},
Prompt: promptFn,
}

_, savedPath, err := manager.Login(context.Background(), "iflow", cfg, authOpts)
if err != nil {
var emailErr *sdkAuth.EmailRequiredError
if errors.As(err, &emailErr) {
log.Error(emailErr.Error())
return
}
fmt.Printf("iFlow authentication failed: %v\n", err)
return
}

if savedPath != "" {
fmt.Printf("Authentication saved to %s\n", savedPath)
}

fmt.Println("iFlow authentication successful!")
}
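A hypothetical call site from elsewhere in package cmd (the flag dispatch itself is not part of this diff):

// Run the iFlow OAuth login without automatically opening a browser.
DoIFlowLogin(cfg, &LoginOptions{NoBrowser: true})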
@@ -4,18 +4,45 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/gemini"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
|
||||
sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
|
||||
cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/tidwall/gjson"
|
||||
)
|
||||
|
||||
const (
|
||||
geminiCLIEndpoint = "https://cloudcode-pa.googleapis.com"
|
||||
geminiCLIVersion = "v1internal"
|
||||
geminiCLIUserAgent = "google-api-nodejs-client/9.15.1"
|
||||
geminiCLIApiClient = "gl-node/22.17.0"
|
||||
geminiCLIClientMetadata = "ideType=IDE_UNSPECIFIED,platform=PLATFORM_UNSPECIFIED,pluginType=GEMINI"
|
||||
)
|
||||
|
||||
type projectSelectionRequiredError struct{}
|
||||
|
||||
func (e *projectSelectionRequiredError) Error() string {
|
||||
return "gemini cli: project selection required"
|
||||
}
|
||||
|
||||
// DoLogin handles Google Gemini authentication using the shared authentication manager.
|
||||
// It initiates the OAuth flow for Google Gemini services and saves the authentication
|
||||
// tokens to the configured auth directory.
|
||||
// It initiates the OAuth flow for Google Gemini services, performs the legacy CLI user setup,
|
||||
// and saves the authentication tokens to the configured auth directory.
|
||||
//
|
||||
// Parameters:
|
||||
// - cfg: The application configuration
|
||||
@@ -26,38 +53,90 @@ func DoLogin(cfg *config.Config, projectID string, options *LoginOptions) {
|
||||
options = &LoginOptions{}
|
||||
}
|
||||
|
||||
manager := newAuthManager()
|
||||
ctx := context.Background()
|
||||
|
||||
metadata := map[string]string{}
|
||||
if projectID != "" {
|
||||
metadata["project_id"] = projectID
|
||||
}
|
||||
|
||||
authOpts := &sdkAuth.LoginOptions{
|
||||
loginOpts := &sdkAuth.LoginOptions{
|
||||
NoBrowser: options.NoBrowser,
|
||||
ProjectID: projectID,
|
||||
Metadata: metadata,
|
||||
ProjectID: strings.TrimSpace(projectID),
|
||||
Metadata: map[string]string{},
|
||||
Prompt: options.Prompt,
|
||||
}
|
||||
|
||||
_, savedPath, err := manager.Login(context.Background(), "gemini", cfg, authOpts)
|
||||
if err != nil {
|
||||
var selectionErr *sdkAuth.ProjectSelectionError
|
||||
if errors.As(err, &selectionErr) {
|
||||
fmt.Println(selectionErr.Error())
|
||||
projects := selectionErr.ProjectsDisplay()
|
||||
if len(projects) > 0 {
|
||||
fmt.Println("========================================================================")
|
||||
for _, p := range projects {
|
||||
fmt.Printf("Project ID: %s\n", p.ProjectID)
|
||||
fmt.Printf("Project Name: %s\n", p.Name)
|
||||
fmt.Println("------------------------------------------------------------------------")
|
||||
}
|
||||
fmt.Println("Please rerun the login command with --project_id <project_id>.")
|
||||
}
|
||||
authenticator := sdkAuth.NewGeminiAuthenticator()
|
||||
record, errLogin := authenticator.Login(ctx, cfg, loginOpts)
|
||||
if errLogin != nil {
|
||||
log.Fatalf("Gemini authentication failed: %v", errLogin)
|
||||
return
|
||||
}
|
||||
|
||||
storage, okStorage := record.Storage.(*gemini.GeminiTokenStorage)
|
||||
if !okStorage || storage == nil {
|
||||
log.Fatal("Gemini authentication failed: unsupported token storage")
|
||||
return
|
||||
}
|
||||
|
||||
geminiAuth := gemini.NewGeminiAuth()
|
||||
httpClient, errClient := geminiAuth.GetAuthenticatedClient(ctx, storage, cfg, options.NoBrowser)
|
||||
if errClient != nil {
|
||||
log.Fatalf("Gemini authentication failed: %v", errClient)
|
||||
return
|
||||
}
|
||||
|
||||
log.Info("Authentication successful.")
|
||||
|
||||
projects, errProjects := fetchGCPProjects(ctx, httpClient)
|
||||
if errProjects != nil {
|
||||
log.Fatalf("Failed to get project list: %v", errProjects)
|
||||
return
|
||||
}
|
||||
|
||||
promptFn := options.Prompt
|
||||
if promptFn == nil {
|
||||
promptFn = defaultProjectPrompt()
|
||||
}
|
||||
|
||||
selectedProjectID := promptForProjectSelection(projects, strings.TrimSpace(projectID), promptFn)
|
||||
if strings.TrimSpace(selectedProjectID) == "" {
|
||||
log.Fatal("No project selected; aborting login.")
|
||||
return
|
||||
}
|
||||
|
||||
if errSetup := performGeminiCLISetup(ctx, httpClient, storage, selectedProjectID); errSetup != nil {
|
||||
var projectErr *projectSelectionRequiredError
|
||||
if errors.As(errSetup, &projectErr) {
|
||||
log.Error("Failed to start user onboarding: A project ID is required.")
|
||||
showProjectSelectionHelp(storage.Email, projects)
|
||||
return
|
||||
}
|
||||
log.Fatalf("Gemini authentication failed: %v", err)
|
||||
log.Fatalf("Failed to complete user setup: %v", errSetup)
|
||||
return
|
||||
}
|
||||
|
||||
storage.Auto = false
|
||||
|
||||
if !storage.Auto && !storage.Checked {
|
||||
isChecked, errCheck := checkCloudAPIIsEnabled(ctx, httpClient, storage.ProjectID)
|
||||
if errCheck != nil {
|
||||
log.Fatalf("Failed to check if Cloud AI API is enabled: %v", errCheck)
|
||||
return
|
||||
}
|
||||
storage.Checked = isChecked
|
||||
if !isChecked {
|
||||
log.Fatal("Failed to check if Cloud AI API is enabled. If you encounter an error message, please create an issue.")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
updateAuthRecord(record, storage)
|
||||
|
||||
store := sdkAuth.GetTokenStore()
|
||||
if setter, okSetter := store.(interface{ SetBaseDir(string) }); okSetter && cfg != nil {
|
||||
setter.SetBaseDir(cfg.AuthDir)
|
||||
}
|
||||
|
||||
savedPath, errSave := store.Save(ctx, record)
|
||||
if errSave != nil {
|
||||
log.Fatalf("Failed to save token to file: %v", errSave)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -67,3 +146,366 @@ func DoLogin(cfg *config.Config, projectID string, options *LoginOptions) {
|
||||
|
||||
fmt.Println("Gemini authentication successful!")
|
||||
}
|
||||
|
||||
func performGeminiCLISetup(ctx context.Context, httpClient *http.Client, storage *gemini.GeminiTokenStorage, requestedProject string) error {
|
||||
metadata := map[string]string{
|
||||
"ideType": "IDE_UNSPECIFIED",
|
||||
"platform": "PLATFORM_UNSPECIFIED",
|
||||
"pluginType": "GEMINI",
|
||||
}
|
||||
|
||||
trimmedRequest := strings.TrimSpace(requestedProject)
|
||||
explicitProject := trimmedRequest != ""
|
||||
|
||||
loadReqBody := map[string]any{
|
||||
"metadata": metadata,
|
||||
}
|
||||
if explicitProject {
|
||||
loadReqBody["cloudaicompanionProject"] = trimmedRequest
|
||||
}
|
||||
|
||||
var loadResp map[string]any
|
||||
if errLoad := callGeminiCLI(ctx, httpClient, "loadCodeAssist", loadReqBody, &loadResp); errLoad != nil {
|
||||
return fmt.Errorf("load code assist: %w", errLoad)
|
||||
}
|
||||
|
||||
tierID := "legacy-tier"
|
||||
if tiers, okTiers := loadResp["allowedTiers"].([]any); okTiers {
|
||||
for _, rawTier := range tiers {
|
||||
tier, okTier := rawTier.(map[string]any)
|
||||
if !okTier {
|
||||
continue
|
||||
}
|
||||
if isDefault, okDefault := tier["isDefault"].(bool); okDefault && isDefault {
|
||||
if id, okID := tier["id"].(string); okID && strings.TrimSpace(id) != "" {
|
||||
tierID = strings.TrimSpace(id)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
projectID := trimmedRequest
|
||||
if projectID == "" {
|
||||
if id, okProject := loadResp["cloudaicompanionProject"].(string); okProject {
|
||||
projectID = strings.TrimSpace(id)
|
||||
}
|
||||
if projectID == "" {
|
||||
if projectMap, okProject := loadResp["cloudaicompanionProject"].(map[string]any); okProject {
|
||||
if id, okID := projectMap["id"].(string); okID {
|
||||
projectID = strings.TrimSpace(id)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if projectID == "" {
|
||||
return &projectSelectionRequiredError{}
|
||||
}
|
||||
|
||||
onboardReqBody := map[string]any{
|
||||
"tierId": tierID,
|
||||
"metadata": metadata,
|
||||
"cloudaicompanionProject": projectID,
|
||||
}
|
||||
|
||||
// Store the requested project as a fallback in case the response omits it.
|
||||
storage.ProjectID = projectID
|
||||
|
||||
for {
|
||||
var onboardResp map[string]any
|
||||
if errOnboard := callGeminiCLI(ctx, httpClient, "onboardUser", onboardReqBody, &onboardResp); errOnboard != nil {
|
||||
return fmt.Errorf("onboard user: %w", errOnboard)
|
||||
}
|
||||
|
||||
if done, okDone := onboardResp["done"].(bool); okDone && done {
|
||||
responseProjectID := ""
|
||||
if resp, okResp := onboardResp["response"].(map[string]any); okResp {
|
||||
switch projectValue := resp["cloudaicompanionProject"].(type) {
|
||||
case map[string]any:
|
||||
if id, okID := projectValue["id"].(string); okID {
|
||||
responseProjectID = strings.TrimSpace(id)
|
||||
}
|
||||
case string:
|
||||
responseProjectID = strings.TrimSpace(projectValue)
|
||||
}
|
||||
}
|
||||
|
||||
finalProjectID := projectID
|
||||
if responseProjectID != "" {
|
||||
if explicitProject && !strings.EqualFold(responseProjectID, projectID) {
|
||||
log.Warnf("Gemini onboarding returned project %s instead of requested %s; keeping requested project ID.", responseProjectID, projectID)
|
||||
} else {
|
||||
finalProjectID = responseProjectID
|
||||
}
|
||||
}
|
||||
|
||||
storage.ProjectID = strings.TrimSpace(finalProjectID)
|
||||
if storage.ProjectID == "" {
|
||||
storage.ProjectID = strings.TrimSpace(projectID)
|
||||
}
|
||||
if storage.ProjectID == "" {
|
||||
return fmt.Errorf("onboard user completed without project id")
|
||||
}
|
||||
log.Infof("Onboarding complete. Using Project ID: %s", storage.ProjectID)
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Println("Onboarding in progress, waiting 5 seconds...")
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
func callGeminiCLI(ctx context.Context, httpClient *http.Client, endpoint string, body any, result any) error {
|
||||
url := fmt.Sprintf("%s/%s:%s", geminiCLIEndpoint, geminiCLIVersion, endpoint)
|
||||
if strings.HasPrefix(endpoint, "operations/") {
|
||||
url = fmt.Sprintf("%s/%s", geminiCLIEndpoint, endpoint)
|
||||
}
|
||||
|
||||
var reader io.Reader
|
||||
if body != nil {
|
||||
rawBody, errMarshal := json.Marshal(body)
|
||||
if errMarshal != nil {
|
||||
return fmt.Errorf("marshal request body: %w", errMarshal)
|
||||
}
|
||||
reader = bytes.NewReader(rawBody)
|
||||
}
|
||||
|
||||
req, errRequest := http.NewRequestWithContext(ctx, http.MethodPost, url, reader)
|
||||
if errRequest != nil {
|
||||
return fmt.Errorf("create request: %w", errRequest)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("User-Agent", geminiCLIUserAgent)
|
||||
req.Header.Set("X-Goog-Api-Client", geminiCLIApiClient)
|
||||
req.Header.Set("Client-Metadata", geminiCLIClientMetadata)
|
||||
|
||||
resp, errDo := httpClient.Do(req)
|
||||
if errDo != nil {
|
||||
return fmt.Errorf("execute request: %w", errDo)
|
||||
}
|
||||
defer func() {
|
||||
if errClose := resp.Body.Close(); errClose != nil {
|
||||
log.Errorf("response body close error: %v", errClose)
|
||||
}
|
||||
}()
|
||||
|
||||
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
return fmt.Errorf("api request failed with status %d: %s", resp.StatusCode, strings.TrimSpace(string(bodyBytes)))
|
||||
}
|
||||
|
||||
if result == nil {
|
||||
_, _ = io.Copy(io.Discard, resp.Body)
|
||||
return nil
|
||||
}
|
||||
|
||||
if errDecode := json.NewDecoder(resp.Body).Decode(result); errDecode != nil {
|
||||
return fmt.Errorf("decode response body: %w", errDecode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func fetchGCPProjects(ctx context.Context, httpClient *http.Client) ([]interfaces.GCPProjectProjects, error) {
|
||||
req, errRequest := http.NewRequestWithContext(ctx, http.MethodGet, "https://cloudresourcemanager.googleapis.com/v1/projects", nil)
|
||||
if errRequest != nil {
|
||||
return nil, fmt.Errorf("could not create project list request: %w", errRequest)
|
||||
}
|
||||
|
||||
resp, errDo := httpClient.Do(req)
|
||||
if errDo != nil {
|
||||
return nil, fmt.Errorf("failed to execute project list request: %w", errDo)
|
||||
}
|
||||
defer func() {
|
||||
if errClose := resp.Body.Close(); errClose != nil {
|
||||
log.Errorf("response body close error: %v", errClose)
|
||||
}
|
||||
}()
|
||||
|
||||
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
return nil, fmt.Errorf("project list request failed with status %d: %s", resp.StatusCode, strings.TrimSpace(string(bodyBytes)))
|
||||
}
|
||||
|
||||
var projects interfaces.GCPProject
|
||||
if errDecode := json.NewDecoder(resp.Body).Decode(&projects); errDecode != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal project list: %w", errDecode)
|
||||
}
|
||||
|
||||
return projects.Projects, nil
|
||||
}
|
||||
|
||||
// promptForProjectSelection prints available projects and returns the chosen project ID.
|
||||
func promptForProjectSelection(projects []interfaces.GCPProjectProjects, presetID string, promptFn func(string) (string, error)) string {
|
||||
trimmedPreset := strings.TrimSpace(presetID)
|
||||
if len(projects) == 0 {
|
||||
if trimmedPreset != "" {
|
||||
return trimmedPreset
|
||||
}
|
||||
fmt.Println("No Google Cloud projects are available for selection.")
|
||||
return ""
|
||||
}
|
||||
|
||||
fmt.Println("Available Google Cloud projects:")
|
||||
defaultIndex := 0
|
||||
for idx, project := range projects {
|
||||
fmt.Printf("[%d] %s (%s)\n", idx+1, project.ProjectID, project.Name)
|
||||
if trimmedPreset != "" && project.ProjectID == trimmedPreset {
|
||||
defaultIndex = idx
|
||||
}
|
||||
}
|
||||
|
||||
defaultID := projects[defaultIndex].ProjectID
|
||||
|
||||
if trimmedPreset != "" {
|
||||
for _, project := range projects {
|
||||
if project.ProjectID == trimmedPreset {
|
||||
return trimmedPreset
|
||||
}
|
||||
}
|
||||
log.Warnf("Provided project ID %s not found in available projects; please choose from the list.", trimmedPreset)
|
||||
}
|
||||
|
||||
for {
|
||||
promptMsg := fmt.Sprintf("Enter project ID [%s]: ", defaultID)
|
||||
answer, errPrompt := promptFn(promptMsg)
|
||||
if errPrompt != nil {
|
||||
log.Errorf("Project selection prompt failed: %v", errPrompt)
|
||||
return defaultID
|
||||
}
|
||||
answer = strings.TrimSpace(answer)
|
||||
if answer == "" {
|
||||
return defaultID
|
||||
}
|
||||
|
||||
for _, project := range projects {
|
||||
if project.ProjectID == answer {
|
||||
return project.ProjectID
|
||||
}
|
||||
}
|
||||
|
||||
if idx, errAtoi := strconv.Atoi(answer); errAtoi == nil {
|
||||
if idx >= 1 && idx <= len(projects) {
|
||||
return projects[idx-1].ProjectID
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("Invalid selection, enter a project ID or a number from the list.")
|
||||
}
|
||||
}
|
||||
|
||||
func defaultProjectPrompt() func(string) (string, error) {
    reader := bufio.NewReader(os.Stdin)
    return func(prompt string) (string, error) {
        fmt.Print(prompt)
        line, errRead := reader.ReadString('\n')
        if errRead != nil {
            if errors.Is(errRead, io.EOF) {
                return strings.TrimSpace(line), nil
            }
            return "", errRead
        }
        return strings.TrimSpace(line), nil
    }
}

func showProjectSelectionHelp(email string, projects []interfaces.GCPProjectProjects) {
    if email != "" {
        log.Infof("Your account %s needs to specify a project ID.", email)
    } else {
        log.Info("You need to specify a project ID.")
    }

    if len(projects) > 0 {
        fmt.Println("========================================================================")
        for _, p := range projects {
            fmt.Printf("Project ID: %s\n", p.ProjectID)
            fmt.Printf("Project Name: %s\n", p.Name)
            fmt.Println("------------------------------------------------------------------------")
        }
    } else {
        fmt.Println("No active projects were returned for this account.")
    }

    fmt.Printf("Please run this command to login again with a specific project:\n\n%s --login --project_id <project_id>\n", os.Args[0])
}

func checkCloudAPIIsEnabled(ctx context.Context, httpClient *http.Client, projectID string) (bool, error) {
    serviceUsageURL := "https://serviceusage.googleapis.com"
    requiredServices := []string{
        // "geminicloudassist.googleapis.com", // Gemini Cloud Assist API
        "cloudaicompanion.googleapis.com", // Gemini for Google Cloud API
    }
    for _, service := range requiredServices {
        checkUrl := fmt.Sprintf("%s/v1/projects/%s/services/%s", serviceUsageURL, projectID, service)
        req, errRequest := http.NewRequestWithContext(ctx, http.MethodGet, checkUrl, nil)
        if errRequest != nil {
            return false, fmt.Errorf("failed to create request: %w", errRequest)
        }
        req.Header.Set("Content-Type", "application/json")
        req.Header.Set("User-Agent", geminiCLIUserAgent)
        resp, errDo := httpClient.Do(req)
        if errDo != nil {
            return false, fmt.Errorf("failed to execute request: %w", errDo)
        }

        if resp.StatusCode == http.StatusOK {
            bodyBytes, _ := io.ReadAll(resp.Body)
            if gjson.GetBytes(bodyBytes, "state").String() == "ENABLED" {
                _ = resp.Body.Close()
                continue
            }
        }
        _ = resp.Body.Close()

        enableUrl := fmt.Sprintf("%s/v1/projects/%s/services/%s:enable", serviceUsageURL, projectID, service)
        req, errRequest = http.NewRequestWithContext(ctx, http.MethodPost, enableUrl, strings.NewReader("{}"))
        if errRequest != nil {
            return false, fmt.Errorf("failed to create request: %w", errRequest)
        }
        req.Header.Set("Content-Type", "application/json")
        req.Header.Set("User-Agent", geminiCLIUserAgent)
        resp, errDo = httpClient.Do(req)
        if errDo != nil {
            return false, fmt.Errorf("failed to execute request: %w", errDo)
        }

        bodyBytes, _ := io.ReadAll(resp.Body)
        errMessage := string(bodyBytes)
        errMessageResult := gjson.GetBytes(bodyBytes, "error.message")
        if errMessageResult.Exists() {
            errMessage = errMessageResult.String()
        }
        if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusCreated {
            _ = resp.Body.Close()
            continue
        } else if resp.StatusCode == http.StatusBadRequest {
            _ = resp.Body.Close()
            if strings.Contains(strings.ToLower(errMessage), "already enabled") {
                continue
            }
        }
        return false, fmt.Errorf("project activation required: %s", errMessage)
    }
    return true, nil
}

func updateAuthRecord(record *cliproxyauth.Auth, storage *gemini.GeminiTokenStorage) {
    if record == nil || storage == nil {
        return
    }

    finalName := fmt.Sprintf("%s-%s.json", storage.Email, storage.ProjectID)

    if record.Metadata == nil {
        record.Metadata = make(map[string]any)
    }
    record.Metadata["email"] = storage.Email
    record.Metadata["project_id"] = storage.ProjectID
    record.Metadata["auto"] = storage.Auto
    record.Metadata["checked"] = storage.Checked

    record.ID = finalName
    record.FileName = finalName
    record.Storage = storage
}

@@ -53,3 +53,17 @@ func StartService(cfg *config.Config, configPath string, localPassword string) {
        log.Fatalf("proxy service exited with error: %v", err)
    }
}

// WaitForCloudDeploy waits indefinitely for shutdown signals in cloud deploy mode
// when no configuration file is available.
func WaitForCloudDeploy() {
    // Clarify that we are intentionally idle for configuration and not running the API server.
    log.Info("Cloud deploy mode: No config found; standing by for configuration. API server is not started. Press Ctrl+C to exit.")

    ctxSignal, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
    defer cancel()

    // Block until shutdown signal is received
    <-ctxSignal.Done()
    log.Info("Cloud deploy mode: Shutdown signal received; exiting")
}

@@ -5,15 +5,19 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// Config represents the application's configuration, loaded from a YAML file.
|
||||
type Config struct {
|
||||
config.SDKConfig `yaml:",inline"`
|
||||
// Port is the network port on which the API server will listen.
|
||||
Port int `yaml:"port" json:"-"`
|
||||
|
||||
@@ -29,24 +33,12 @@ type Config struct {
|
||||
// UsageStatisticsEnabled toggles in-memory usage aggregation; when false, usage data is discarded.
|
||||
UsageStatisticsEnabled bool `yaml:"usage-statistics-enabled" json:"usage-statistics-enabled"`
|
||||
|
||||
// ProxyURL is the URL of an optional proxy server to use for outbound requests.
|
||||
ProxyURL string `yaml:"proxy-url" json:"proxy-url"`
|
||||
|
||||
// APIKeys is a list of keys for authenticating clients to this proxy server.
|
||||
APIKeys []string `yaml:"api-keys" json:"api-keys"`
|
||||
|
||||
// Access holds request authentication provider configuration.
|
||||
Access AccessConfig `yaml:"auth" json:"auth"`
|
||||
|
||||
// QuotaExceeded defines the behavior when a quota is exceeded.
|
||||
QuotaExceeded QuotaExceeded `yaml:"quota-exceeded" json:"quota-exceeded"`
|
||||
|
||||
// GlAPIKey is the API key for the generative language API.
|
||||
GlAPIKey []string `yaml:"generative-language-api-key" json:"generative-language-api-key"`
|
||||
|
||||
// RequestLog enables or disables detailed request logging functionality.
|
||||
RequestLog bool `yaml:"request-log" json:"request-log"`
|
||||
|
||||
// RequestRetry defines the retry times when the request failed.
|
||||
RequestRetry int `yaml:"request-retry" json:"request-retry"`
|
||||
|
||||
@@ -61,65 +53,6 @@ type Config struct {
|
||||
|
||||
// RemoteManagement nests management-related options under 'remote-management'.
|
||||
RemoteManagement RemoteManagement `yaml:"remote-management" json:"-"`
|
||||
|
||||
// GeminiWeb groups configuration for Gemini Web client
|
||||
GeminiWeb GeminiWebConfig `yaml:"gemini-web" json:"gemini-web"`
|
||||
}
|
||||
|
||||
// AccessConfig groups request authentication providers.
|
||||
type AccessConfig struct {
|
||||
// Providers lists configured authentication providers.
|
||||
Providers []AccessProvider `yaml:"providers" json:"providers"`
|
||||
}
|
||||
|
||||
// AccessProvider describes a request authentication provider entry.
|
||||
type AccessProvider struct {
|
||||
// Name is the instance identifier for the provider.
|
||||
Name string `yaml:"name" json:"name"`
|
||||
|
||||
// Type selects the provider implementation registered via the SDK.
|
||||
Type string `yaml:"type" json:"type"`
|
||||
|
||||
// SDK optionally names a third-party SDK module providing this provider.
|
||||
SDK string `yaml:"sdk,omitempty" json:"sdk,omitempty"`
|
||||
|
||||
// APIKeys lists inline keys for providers that require them.
|
||||
APIKeys []string `yaml:"api-keys,omitempty" json:"api-keys,omitempty"`
|
||||
|
||||
// Config passes provider-specific options to the implementation.
|
||||
Config map[string]any `yaml:"config,omitempty" json:"config,omitempty"`
|
||||
}
|
||||
|
||||
const (
|
||||
// AccessProviderTypeConfigAPIKey is the built-in provider validating inline API keys.
|
||||
AccessProviderTypeConfigAPIKey = "config-api-key"
|
||||
|
||||
// DefaultAccessProviderName is applied when no provider name is supplied.
|
||||
DefaultAccessProviderName = "config-inline"
|
||||
)
|
||||
|
||||
// GeminiWebConfig nests Gemini Web related options under 'gemini-web'.
|
||||
type GeminiWebConfig struct {
|
||||
// Context enables JSON-based conversation reuse.
|
||||
// Defaults to true if not set in YAML (see LoadConfig).
|
||||
Context bool `yaml:"context" json:"context"`
|
||||
|
||||
// CodeMode, when true, enables coding mode behaviors for Gemini Web:
|
||||
// - Attach the predefined "Coding partner" Gem
|
||||
// - Enable XML wrapping hint for tool markup
|
||||
// - Merge <think> content into visible content for tool-friendly output
|
||||
CodeMode bool `yaml:"code-mode" json:"code-mode"`
|
||||
|
||||
// MaxCharsPerRequest caps the number of characters (runes) sent to
|
||||
// Gemini Web in a single request. Long prompts will be split into
|
||||
// multiple requests with a continuation hint, and only the final
|
||||
// request will carry any files. When unset or <=0, a conservative
|
||||
// default of 1,000,000 will be used.
|
||||
MaxCharsPerRequest int `yaml:"max-chars-per-request" json:"max-chars-per-request"`
|
||||
|
||||
// DisableContinuationHint, when true, disables the continuation hint for split prompts.
|
||||
// The hint is enabled by default.
|
||||
DisableContinuationHint bool `yaml:"disable-continuation-hint,omitempty" json:"disable-continuation-hint,omitempty"`
|
||||
}
|
||||
|
||||
// RemoteManagement holds management API configuration under 'remote-management'.
|
||||
@@ -128,6 +61,8 @@ type RemoteManagement struct {
|
||||
AllowRemote bool `yaml:"allow-remote"`
|
||||
// SecretKey is the management key (plaintext or bcrypt hashed). YAML key intentionally 'secret-key'.
|
||||
SecretKey string `yaml:"secret-key"`
|
||||
// DisableControlPanel skips serving and syncing the bundled management UI when true.
|
||||
DisableControlPanel bool `yaml:"disable-control-panel"`
|
||||
}
|
||||
|
||||
// QuotaExceeded defines the behavior when API quota limits are exceeded.
|
||||
@@ -149,6 +84,9 @@ type ClaudeKey struct {
|
||||
// BaseURL is the base URL for the Claude API endpoint.
|
||||
// If empty, the default Claude API URL will be used.
|
||||
BaseURL string `yaml:"base-url" json:"base-url"`
|
||||
|
||||
// ProxyURL overrides the global proxy setting for this API key if provided.
|
||||
ProxyURL string `yaml:"proxy-url" json:"proxy-url"`
|
||||
}
|
||||
|
||||
// CodexKey represents the configuration for a Codex API key,
|
||||
@@ -160,6 +98,9 @@ type CodexKey struct {
|
||||
// BaseURL is the base URL for the Codex API endpoint.
|
||||
// If empty, the default Codex API URL will be used.
|
||||
BaseURL string `yaml:"base-url" json:"base-url"`
|
||||
|
||||
// ProxyURL overrides the global proxy setting for this API key if provided.
|
||||
ProxyURL string `yaml:"proxy-url" json:"proxy-url"`
|
||||
}
|
||||
|
||||
// OpenAICompatibility represents the configuration for OpenAI API compatibility
|
||||
@@ -172,12 +113,25 @@ type OpenAICompatibility struct {
|
||||
BaseURL string `yaml:"base-url" json:"base-url"`
|
||||
|
||||
// APIKeys are the authentication keys for accessing the external API services.
|
||||
APIKeys []string `yaml:"api-keys" json:"api-keys"`
|
||||
// Deprecated: Use APIKeyEntries instead to support per-key proxy configuration.
|
||||
APIKeys []string `yaml:"api-keys,omitempty" json:"api-keys,omitempty"`
|
||||
|
||||
// APIKeyEntries defines API keys with optional per-key proxy configuration.
|
||||
APIKeyEntries []OpenAICompatibilityAPIKey `yaml:"api-key-entries,omitempty" json:"api-key-entries,omitempty"`
|
||||
|
||||
// Models defines the model configurations including aliases for routing.
|
||||
Models []OpenAICompatibilityModel `yaml:"models" json:"models"`
|
||||
}
|
||||
|
||||
// OpenAICompatibilityAPIKey represents an API key configuration with optional proxy setting.
|
||||
type OpenAICompatibilityAPIKey struct {
|
||||
// APIKey is the authentication key for accessing the external API services.
|
||||
APIKey string `yaml:"api-key" json:"api-key"`
|
||||
|
||||
// ProxyURL overrides the global proxy setting for this API key if provided.
|
||||
ProxyURL string `yaml:"proxy-url,omitempty" json:"proxy-url,omitempty"`
|
||||
}
|
||||
|
||||
// OpenAICompatibilityModel represents a model configuration for OpenAI compatibility,
|
||||
// including the actual model name and its alias for API routing.
|
||||
type OpenAICompatibilityModel struct {
|
||||
@@ -199,30 +153,51 @@ type OpenAICompatibilityModel struct {
|
||||
// - *Config: The loaded configuration
|
||||
// - error: An error if the configuration could not be loaded
|
||||
func LoadConfig(configFile string) (*Config, error) {
|
||||
return LoadConfigOptional(configFile, false)
|
||||
}
|
||||
|
||||
// LoadConfigOptional reads YAML from configFile.
|
||||
// If optional is true and the file is missing, it returns an empty Config.
|
||||
// If optional is true and the file is empty or invalid, it returns an empty Config.
|
||||
func LoadConfigOptional(configFile string, optional bool) (*Config, error) {
|
||||
// Read the entire configuration file into memory.
|
||||
data, err := os.ReadFile(configFile)
|
||||
if err != nil {
|
||||
if optional {
|
||||
if os.IsNotExist(err) || errors.Is(err, syscall.EISDIR) {
|
||||
// Missing and optional: return empty config (cloud deploy standby).
|
||||
return &Config{}, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("failed to read config file: %w", err)
|
||||
}
|
||||
|
||||
// In cloud deploy mode (optional=true), if file is empty or contains only whitespace, return empty config.
|
||||
if optional && len(data) == 0 {
|
||||
return &Config{}, nil
|
||||
}
|
||||
|
||||
// Unmarshal the YAML data into the Config struct.
|
||||
var config Config
|
||||
var cfg Config
|
||||
// Set defaults before unmarshal so that absent keys keep defaults.
|
||||
config.LoggingToFile = true
|
||||
config.UsageStatisticsEnabled = true
|
||||
config.GeminiWeb.Context = true
|
||||
if err = yaml.Unmarshal(data, &config); err != nil {
|
||||
cfg.LoggingToFile = false
|
||||
cfg.UsageStatisticsEnabled = false
|
||||
if err = yaml.Unmarshal(data, &cfg); err != nil {
|
||||
if optional {
|
||||
// In cloud deploy mode, if YAML parsing fails, return empty config instead of error.
|
||||
return &Config{}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("failed to parse config file: %w", err)
|
||||
}
|
||||
|
||||
// Hash remote management key if plaintext is detected (nested)
|
||||
// We consider a value to be already hashed if it looks like a bcrypt hash ($2a$, $2b$, or $2y$ prefix).
|
||||
if config.RemoteManagement.SecretKey != "" && !looksLikeBcrypt(config.RemoteManagement.SecretKey) {
|
||||
hashed, errHash := hashSecret(config.RemoteManagement.SecretKey)
|
||||
if cfg.RemoteManagement.SecretKey != "" && !looksLikeBcrypt(cfg.RemoteManagement.SecretKey) {
|
||||
hashed, errHash := hashSecret(cfg.RemoteManagement.SecretKey)
|
||||
if errHash != nil {
|
||||
return nil, fmt.Errorf("failed to hash remote management key: %w", errHash)
|
||||
}
|
||||
config.RemoteManagement.SecretKey = hashed
|
||||
cfg.RemoteManagement.SecretKey = hashed
|
||||
|
||||
// Persist the hashed value back to the config file to avoid re-hashing on next startup.
|
||||
// Preserve YAML comments and ordering; update only the nested key.
|
||||
@@ -230,80 +205,22 @@ func LoadConfig(configFile string) (*Config, error) {
|
||||
}
|
||||
|
||||
// Sync request authentication providers with inline API keys for backwards compatibility.
|
||||
syncInlineAccessProvider(&config)
|
||||
syncInlineAccessProvider(&cfg)
|
||||
|
||||
// Return the populated configuration struct.
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
// SyncInlineAPIKeys updates the inline API key provider and top-level APIKeys field.
|
||||
func SyncInlineAPIKeys(cfg *Config, keys []string) {
|
||||
if cfg == nil {
|
||||
return
|
||||
}
|
||||
cloned := append([]string(nil), keys...)
|
||||
cfg.APIKeys = cloned
|
||||
if provider := cfg.ConfigAPIKeyProvider(); provider != nil {
|
||||
if provider.Name == "" {
|
||||
provider.Name = DefaultAccessProviderName
|
||||
}
|
||||
provider.APIKeys = cloned
|
||||
return
|
||||
}
|
||||
cfg.Access.Providers = append(cfg.Access.Providers, AccessProvider{
|
||||
Name: DefaultAccessProviderName,
|
||||
Type: AccessProviderTypeConfigAPIKey,
|
||||
APIKeys: cloned,
|
||||
})
|
||||
}
|
||||
|
||||
// ConfigAPIKeyProvider returns the first inline API key provider if present.
|
||||
func (c *Config) ConfigAPIKeyProvider() *AccessProvider {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
for i := range c.Access.Providers {
|
||||
if c.Access.Providers[i].Type == AccessProviderTypeConfigAPIKey {
|
||||
if c.Access.Providers[i].Name == "" {
|
||||
c.Access.Providers[i].Name = DefaultAccessProviderName
|
||||
}
|
||||
return &c.Access.Providers[i]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func syncInlineAccessProvider(cfg *Config) {
|
||||
if cfg == nil {
|
||||
return
|
||||
}
|
||||
if len(cfg.Access.Providers) == 0 {
|
||||
if len(cfg.APIKeys) == 0 {
|
||||
return
|
||||
if len(cfg.APIKeys) == 0 {
|
||||
if provider := cfg.ConfigAPIKeyProvider(); provider != nil && len(provider.APIKeys) > 0 {
|
||||
cfg.APIKeys = append([]string(nil), provider.APIKeys...)
|
||||
}
|
||||
cfg.Access.Providers = append(cfg.Access.Providers, AccessProvider{
|
||||
Name: DefaultAccessProviderName,
|
||||
Type: AccessProviderTypeConfigAPIKey,
|
||||
APIKeys: append([]string(nil), cfg.APIKeys...),
|
||||
})
|
||||
return
|
||||
}
|
||||
provider := cfg.ConfigAPIKeyProvider()
|
||||
if provider == nil {
|
||||
if len(cfg.APIKeys) == 0 {
|
||||
return
|
||||
}
|
||||
cfg.Access.Providers = append(cfg.Access.Providers, AccessProvider{
|
||||
Name: DefaultAccessProviderName,
|
||||
Type: AccessProviderTypeConfigAPIKey,
|
||||
APIKeys: append([]string(nil), cfg.APIKeys...),
|
||||
})
|
||||
return
|
||||
}
|
||||
if len(provider.APIKeys) == 0 && len(cfg.APIKeys) > 0 {
|
||||
provider.APIKeys = append([]string(nil), cfg.APIKeys...)
|
||||
}
|
||||
cfg.APIKeys = append([]string(nil), provider.APIKeys...)
|
||||
cfg.Access.Providers = nil
|
||||
}
|
||||
|
||||
// looksLikeBcrypt returns true if the provided string appears to be a bcrypt hash.
|
||||
@@ -324,6 +241,7 @@ func hashSecret(secret string) (string, error) {
|
||||
// SaveConfigPreserveComments writes the config back to YAML while preserving existing comments
|
||||
// and key ordering by loading the original file into a yaml.Node tree and updating values in-place.
|
||||
func SaveConfigPreserveComments(configFile string, cfg *Config) error {
|
||||
persistCfg := sanitizeConfigForPersist(cfg)
|
||||
// Load original YAML as a node tree to preserve comments and ordering.
|
||||
data, err := os.ReadFile(configFile)
|
||||
if err != nil {
|
||||
@@ -342,7 +260,7 @@ func SaveConfigPreserveComments(configFile string, cfg *Config) error {
|
||||
}
|
||||
|
||||
// Marshal the current cfg to YAML, then unmarshal to a yaml.Node we can merge from.
|
||||
rendered, err := yaml.Marshal(cfg)
|
||||
rendered, err := yaml.Marshal(persistCfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -357,6 +275,9 @@ func SaveConfigPreserveComments(configFile string, cfg *Config) error {
|
||||
return fmt.Errorf("expected generated root mapping node")
|
||||
}
|
||||
|
||||
// Remove deprecated auth block before merging to avoid persisting it again.
|
||||
removeMapKey(original.Content[0], "auth")
|
||||
|
||||
// Merge generated into original in-place, preserving comments/order of existing nodes.
|
||||
mergeMappingPreserve(original.Content[0], generated.Content[0])
|
||||
|
||||
@@ -375,6 +296,16 @@ func SaveConfigPreserveComments(configFile string, cfg *Config) error {
|
||||
return enc.Close()
|
||||
}
|
||||
|
||||
func sanitizeConfigForPersist(cfg *Config) *Config {
|
||||
if cfg == nil {
|
||||
return nil
|
||||
}
|
||||
clone := *cfg
|
||||
clone.SDKConfig = cfg.SDKConfig
|
||||
clone.SDKConfig.Access = config.AccessConfig{}
|
||||
return &clone
|
||||
}
|
||||
|
||||
// SaveConfigPreserveCommentsUpdateNestedScalar updates a nested scalar key path like ["a","b"]
|
||||
// while preserving comments and positions.
|
||||
func SaveConfigPreserveCommentsUpdateNestedScalar(configFile string, path []string, value string) error {
|
||||
@@ -577,3 +508,15 @@ func copyNodeShallow(dst, src *yaml.Node) {
|
||||
dst.Content = nil
|
||||
}
|
||||
}
|
||||
|
||||
func removeMapKey(mapNode *yaml.Node, key string) {
|
||||
if mapNode == nil || mapNode.Kind != yaml.MappingNode || key == "" {
|
||||
return
|
||||
}
|
||||
for i := 0; i+1 < len(mapNode.Content); i += 2 {
|
||||
if mapNode.Content[i] != nil && mapNode.Content[i].Value == key {
|
||||
mapNode.Content = append(mapNode.Content[:i], mapNode.Content[i+2:]...)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
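// Illustrative sketch, not part of the commit: condenses the LoadConfigOptional
// behaviour shown in the config.go hunks above. The miniConfig type, file names,
// and loadOptional helper are hypothetical; the real function returns *Config
// and additionally hashes the remote management secret key and syncs inline
// API key providers.
package main

import (
    "fmt"
    "os"

    "gopkg.in/yaml.v3"
)

type miniConfig struct {
    Port int `yaml:"port"`
}

// loadOptional mirrors the cloud-deploy rule: when optional is true, a missing,
// empty, or unparsable file degrades to an empty config instead of an error.
func loadOptional(path string, optional bool) (*miniConfig, error) {
    data, err := os.ReadFile(path)
    if err != nil {
        if optional && os.IsNotExist(err) {
            return &miniConfig{}, nil
        }
        return nil, fmt.Errorf("failed to read config file: %w", err)
    }
    if optional && len(data) == 0 {
        return &miniConfig{}, nil
    }
    var cfg miniConfig
    if err = yaml.Unmarshal(data, &cfg); err != nil {
        if optional {
            return &miniConfig{}, nil
        }
        return nil, fmt.Errorf("failed to parse config file: %w", err)
    }
    return &cfg, nil
}

func main() {
    cfg, err := loadOptional("does-not-exist.yaml", true)
    fmt.Println(cfg, err) // &{0} <nil>: standby config in cloud deploy mode
}
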
@@ -10,9 +10,6 @@ const (
    // GeminiCLI represents the Google Gemini CLI provider identifier.
    GeminiCLI = "gemini-cli"

    // GeminiWeb represents the Google Gemini Web provider identifier.
    GeminiWeb = "gemini-web"

    // Codex represents the OpenAI Codex provider identifier.
    Codex = "codex"

internal/managementasset/updater.go (new file, 284 lines)
@@ -0,0 +1,284 @@
package managementasset

import (
    "context"
    "crypto/sha256"
    "encoding/hex"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net/http"
    "os"
    "path/filepath"
    "strings"
    "sync"
    "time"

    "github.com/router-for-me/CLIProxyAPI/v6/internal/util"
    sdkconfig "github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
    log "github.com/sirupsen/logrus"
)

const (
    managementReleaseURL = "https://api.github.com/repos/router-for-me/Cli-Proxy-API-Management-Center/releases/latest"
    managementAssetName  = "management.html"
    httpUserAgent        = "CLIProxyAPI-management-updater"
    updateCheckInterval  = 3 * time.Hour
)

// ManagementFileName exposes the control panel asset filename.
const ManagementFileName = managementAssetName

var (
    lastUpdateCheckMu   sync.Mutex
    lastUpdateCheckTime time.Time
)

func newHTTPClient(proxyURL string) *http.Client {
    client := &http.Client{Timeout: 15 * time.Second}

    sdkCfg := &sdkconfig.SDKConfig{ProxyURL: strings.TrimSpace(proxyURL)}
    util.SetProxy(sdkCfg, client)

    return client
}

type releaseAsset struct {
    Name               string `json:"name"`
    BrowserDownloadURL string `json:"browser_download_url"`
    Digest             string `json:"digest"`
}

type releaseResponse struct {
    Assets []releaseAsset `json:"assets"`
}

// StaticDir resolves the directory that stores the management control panel asset.
func StaticDir(configFilePath string) string {
    configFilePath = strings.TrimSpace(configFilePath)
    if configFilePath == "" {
        return ""
    }

    base := filepath.Dir(configFilePath)
    fileInfo, err := os.Stat(configFilePath)
    if err == nil {
        if fileInfo.IsDir() {
            base = configFilePath
        }
    }

    return filepath.Join(base, "static")
}

// FilePath resolves the absolute path to the management control panel asset.
func FilePath(configFilePath string) string {
    dir := StaticDir(configFilePath)
    if dir == "" {
        return ""
    }
    return filepath.Join(dir, ManagementFileName)
}

// EnsureLatestManagementHTML checks the latest management.html asset and updates the local copy when needed.
// The function is designed to run in a background goroutine and will never panic.
// It enforces a 3-hour rate limit to avoid frequent checks on config/auth file changes.
func EnsureLatestManagementHTML(ctx context.Context, staticDir string, proxyURL string) {
    if ctx == nil {
        ctx = context.Background()
    }

    staticDir = strings.TrimSpace(staticDir)
    if staticDir == "" {
        log.Debug("management asset sync skipped: empty static directory")
        return
    }

    // Rate limiting: check only once every 3 hours
    lastUpdateCheckMu.Lock()
    now := time.Now()
    timeSinceLastCheck := now.Sub(lastUpdateCheckTime)
    if timeSinceLastCheck < updateCheckInterval {
        lastUpdateCheckMu.Unlock()
        log.Debugf("management asset update check skipped: last check was %v ago (interval: %v)", timeSinceLastCheck.Round(time.Second), updateCheckInterval)
        return
    }
    lastUpdateCheckTime = now
    lastUpdateCheckMu.Unlock()

    if err := os.MkdirAll(staticDir, 0o755); err != nil {
        log.WithError(err).Warn("failed to prepare static directory for management asset")
        return
    }

    client := newHTTPClient(proxyURL)

    localPath := filepath.Join(staticDir, managementAssetName)
    localHash, err := fileSHA256(localPath)
    if err != nil {
        if !errors.Is(err, os.ErrNotExist) {
            log.WithError(err).Debug("failed to read local management asset hash")
        }
        localHash = ""
    }

    asset, remoteHash, err := fetchLatestAsset(ctx, client)
    if err != nil {
        log.WithError(err).Warn("failed to fetch latest management release information")
        return
    }

    if remoteHash != "" && localHash != "" && strings.EqualFold(remoteHash, localHash) {
        log.Debug("management asset is already up to date")
        return
    }

    data, downloadedHash, err := downloadAsset(ctx, client, asset.BrowserDownloadURL)
    if err != nil {
        log.WithError(err).Warn("failed to download management asset")
        return
    }

    if remoteHash != "" && !strings.EqualFold(remoteHash, downloadedHash) {
        log.Warnf("remote digest mismatch for management asset: expected %s got %s", remoteHash, downloadedHash)
    }

    if err = atomicWriteFile(localPath, data); err != nil {
        log.WithError(err).Warn("failed to update management asset on disk")
        return
    }

    log.Infof("management asset updated successfully (hash=%s)", downloadedHash)
}

func fetchLatestAsset(ctx context.Context, client *http.Client) (*releaseAsset, string, error) {
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, managementReleaseURL, nil)
    if err != nil {
        return nil, "", fmt.Errorf("create release request: %w", err)
    }
    req.Header.Set("Accept", "application/vnd.github+json")
    req.Header.Set("User-Agent", httpUserAgent)

    resp, err := client.Do(req)
    if err != nil {
        return nil, "", fmt.Errorf("execute release request: %w", err)
    }
    defer func() {
        _ = resp.Body.Close()
    }()

    if resp.StatusCode != http.StatusOK {
        body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
        return nil, "", fmt.Errorf("unexpected release status %d: %s", resp.StatusCode, strings.TrimSpace(string(body)))
    }

    var release releaseResponse
    if err = json.NewDecoder(resp.Body).Decode(&release); err != nil {
        return nil, "", fmt.Errorf("decode release response: %w", err)
    }

    for i := range release.Assets {
        asset := &release.Assets[i]
        if strings.EqualFold(asset.Name, managementAssetName) {
            remoteHash := parseDigest(asset.Digest)
            return asset, remoteHash, nil
        }
    }

    return nil, "", fmt.Errorf("management asset %s not found in latest release", managementAssetName)
}

func downloadAsset(ctx context.Context, client *http.Client, downloadURL string) ([]byte, string, error) {
    if strings.TrimSpace(downloadURL) == "" {
        return nil, "", fmt.Errorf("empty download url")
    }

    req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadURL, nil)
    if err != nil {
        return nil, "", fmt.Errorf("create download request: %w", err)
    }
    req.Header.Set("User-Agent", httpUserAgent)

    resp, err := client.Do(req)
    if err != nil {
        return nil, "", fmt.Errorf("execute download request: %w", err)
    }
    defer func() {
        _ = resp.Body.Close()
    }()

    if resp.StatusCode != http.StatusOK {
        body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
        return nil, "", fmt.Errorf("unexpected download status %d: %s", resp.StatusCode, strings.TrimSpace(string(body)))
    }

    data, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, "", fmt.Errorf("read download body: %w", err)
    }

    sum := sha256.Sum256(data)
    return data, hex.EncodeToString(sum[:]), nil
}

func fileSHA256(path string) (string, error) {
    file, err := os.Open(path)
    if err != nil {
        return "", err
    }
    defer func() {
        _ = file.Close()
    }()

    h := sha256.New()
    if _, err = io.Copy(h, file); err != nil {
        return "", err
    }

    return hex.EncodeToString(h.Sum(nil)), nil
}

func atomicWriteFile(path string, data []byte) error {
    tmpFile, err := os.CreateTemp(filepath.Dir(path), "management-*.html")
    if err != nil {
        return err
    }

    tmpName := tmpFile.Name()
    defer func() {
        _ = tmpFile.Close()
        _ = os.Remove(tmpName)
    }()

    if _, err = tmpFile.Write(data); err != nil {
        return err
    }

    if err = tmpFile.Chmod(0o644); err != nil {
        return err
    }

    if err = tmpFile.Close(); err != nil {
        return err
    }

    if err = os.Rename(tmpName, path); err != nil {
        return err
    }

    return nil
}

func parseDigest(digest string) string {
    digest = strings.TrimSpace(digest)
    if digest == "" {
        return ""
    }

    if idx := strings.Index(digest, ":"); idx >= 0 {
        digest = digest[idx+1:]
    }

    return strings.ToLower(strings.TrimSpace(digest))
}

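// Illustrative sketch, not part of the commit: parseDigest above strips the
// algorithm prefix that release assets report (for example "sha256:<hex>") so
// it can be compared against a locally computed SHA-256. normalizeDigest below
// is a hypothetical local helper that mirrors that normalisation.
package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "strings"
)

func normalizeDigest(digest string) string {
    digest = strings.TrimSpace(digest)
    if idx := strings.Index(digest, ":"); idx >= 0 {
        digest = digest[idx+1:]
    }
    return strings.ToLower(digest)
}

func main() {
    data := []byte("<html>management</html>")
    sum := sha256.Sum256(data)
    local := hex.EncodeToString(sum[:])

    remote := "sha256:" + strings.ToUpper(local) // as a release digest might appear
    fmt.Println(strings.EqualFold(normalizeDigest(remote), local)) // true
}
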
File diff suppressed because one or more lines are too long

internal/misc/copy-example-config.go (new file, 40 lines)
@@ -0,0 +1,40 @@
package misc

import (
    "io"
    "os"
    "path/filepath"

    log "github.com/sirupsen/logrus"
)

func CopyConfigTemplate(src, dst string) error {
    in, err := os.Open(src)
    if err != nil {
        return err
    }
    defer func() {
        if errClose := in.Close(); errClose != nil {
            log.WithError(errClose).Warn("failed to close source config file")
        }
    }()

    if err = os.MkdirAll(filepath.Dir(dst), 0o700); err != nil {
        return err
    }

    out, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600)
    if err != nil {
        return err
    }
    defer func() {
        if errClose := out.Close(); errClose != nil {
            log.WithError(errClose).Warn("failed to close destination config file")
        }
    }()

    if _, err = io.Copy(out, in); err != nil {
        return err
    }
    return out.Sync()
}

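// Illustrative sketch, not part of the commit: a typical first-run flow for a
// helper like misc.CopyConfigTemplate, seeding a config file from a bundled
// template only when the destination does not exist yet. The seedConfig
// helper and both paths are hypothetical.
package main

import (
    "io"
    "os"
    "path/filepath"
)

func seedConfig(example, target string) error {
    if _, err := os.Stat(target); err == nil {
        return nil // already present, leave the user's config alone
    }
    in, err := os.Open(example)
    if err != nil {
        return err
    }
    defer in.Close()

    if err = os.MkdirAll(filepath.Dir(target), 0o700); err != nil {
        return err
    }
    out, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600)
    if err != nil {
        return err
    }
    defer out.Close()

    _, err = io.Copy(out, in)
    return err
}

func main() {
    _ = seedConfig("config.example.yaml", "config.yaml")
}
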
@@ -8,7 +8,8 @@ import (
    log "github.com/sirupsen/logrus"
)

var credentialSeparator = strings.Repeat("-", 70)
// Separator used to visually group related log lines.
var credentialSeparator = strings.Repeat("-", 67)

// LogSavingCredentials emits a consistent log message when persisting auth material.
func LogSavingCredentials(path string) {
@@ -21,5 +22,5 @@ func LogSavingCredentials(path string) {

// LogCredentialSeparator adds a visual separator to group auth/key processing logs.
func LogCredentialSeparator() {
    log.Info(credentialSeparator)
    log.Debug(credentialSeparator)
}

@@ -1,882 +0,0 @@
|
||||
package geminiwebapi
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/cookiejar"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// GeminiClient is the async http client interface (Go port)
|
||||
type GeminiClient struct {
|
||||
Cookies map[string]string
|
||||
Proxy string
|
||||
Running bool
|
||||
httpClient *http.Client
|
||||
AccessToken string
|
||||
Timeout time.Duration
|
||||
insecure bool
|
||||
}
|
||||
|
||||
// HTTP bootstrap utilities -------------------------------------------------
|
||||
type httpOptions struct {
|
||||
ProxyURL string
|
||||
Insecure bool
|
||||
FollowRedirects bool
|
||||
}
|
||||
|
||||
func newHTTPClient(opts httpOptions) *http.Client {
|
||||
transport := &http.Transport{}
|
||||
if opts.ProxyURL != "" {
|
||||
if pu, err := url.Parse(opts.ProxyURL); err == nil {
|
||||
transport.Proxy = http.ProxyURL(pu)
|
||||
}
|
||||
}
|
||||
if opts.Insecure {
|
||||
transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
|
||||
}
|
||||
jar, _ := cookiejar.New(nil)
|
||||
client := &http.Client{Transport: transport, Timeout: 60 * time.Second, Jar: jar}
|
||||
if !opts.FollowRedirects {
|
||||
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
}
|
||||
return client
|
||||
}
|
||||
|
||||
func applyHeaders(req *http.Request, headers http.Header) {
|
||||
for k, v := range headers {
|
||||
for _, vv := range v {
|
||||
req.Header.Add(k, vv)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func applyCookies(req *http.Request, cookies map[string]string) {
|
||||
for k, v := range cookies {
|
||||
req.AddCookie(&http.Cookie{Name: k, Value: v})
|
||||
}
|
||||
}
|
||||
|
||||
func sendInitRequest(cookies map[string]string, proxy string, insecure bool) (*http.Response, map[string]string, error) {
|
||||
client := newHTTPClient(httpOptions{ProxyURL: proxy, Insecure: insecure, FollowRedirects: true})
|
||||
req, _ := http.NewRequest(http.MethodGet, EndpointInit, nil)
|
||||
applyHeaders(req, HeadersGemini)
|
||||
applyCookies(req, cookies)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
return resp, nil, &AuthError{Msg: resp.Status}
|
||||
}
|
||||
outCookies := map[string]string{}
|
||||
for _, c := range resp.Cookies() {
|
||||
outCookies[c.Name] = c.Value
|
||||
}
|
||||
for k, v := range cookies {
|
||||
outCookies[k] = v
|
||||
}
|
||||
return resp, outCookies, nil
|
||||
}
|
||||
|
||||
func getAccessToken(baseCookies map[string]string, proxy string, verbose bool, insecure bool) (string, map[string]string, error) {
|
||||
extraCookies := map[string]string{}
|
||||
{
|
||||
client := newHTTPClient(httpOptions{ProxyURL: proxy, Insecure: insecure, FollowRedirects: true})
|
||||
req, _ := http.NewRequest(http.MethodGet, EndpointGoogle, nil)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
if verbose {
|
||||
log.Debugf("priming google cookies failed: %v", err)
|
||||
}
|
||||
} else if resp != nil {
|
||||
if u, err := url.Parse(EndpointGoogle); err == nil {
|
||||
for _, c := range client.Jar.Cookies(u) {
|
||||
extraCookies[c.Name] = c.Value
|
||||
}
|
||||
}
|
||||
_ = resp.Body.Close()
|
||||
}
|
||||
}
|
||||
|
||||
trySets := make([]map[string]string, 0, 8)
|
||||
|
||||
if v1, ok1 := baseCookies["__Secure-1PSID"]; ok1 {
|
||||
if v2, ok2 := baseCookies["__Secure-1PSIDTS"]; ok2 {
|
||||
merged := map[string]string{"__Secure-1PSID": v1, "__Secure-1PSIDTS": v2}
|
||||
if nid, ok := baseCookies["NID"]; ok {
|
||||
merged["NID"] = nid
|
||||
}
|
||||
trySets = append(trySets, merged)
|
||||
} else if verbose {
|
||||
log.Debug("Skipping base cookies: __Secure-1PSIDTS missing")
|
||||
}
|
||||
}
|
||||
|
||||
if len(extraCookies) > 0 {
|
||||
trySets = append(trySets, extraCookies)
|
||||
}
|
||||
|
||||
reToken := regexp.MustCompile(`"SNlM0e":"([^"]+)"`)
|
||||
|
||||
for _, cookies := range trySets {
|
||||
resp, mergedCookies, err := sendInitRequest(cookies, proxy, insecure)
|
||||
if err != nil {
|
||||
if verbose {
|
||||
log.Warnf("Failed init request: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
_ = resp.Body.Close()
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
matches := reToken.FindStringSubmatch(string(body))
|
||||
if len(matches) >= 2 {
|
||||
token := matches[1]
|
||||
if verbose {
|
||||
fmt.Println("Gemini access token acquired.")
|
||||
}
|
||||
return token, mergedCookies, nil
|
||||
}
|
||||
}
|
||||
return "", nil, &AuthError{Msg: "Failed to retrieve token."}
|
||||
}
|
||||
|
||||
func rotate1PSIDTS(cookies map[string]string, proxy string, insecure bool) (string, error) {
|
||||
_, ok := cookies["__Secure-1PSID"]
|
||||
if !ok {
|
||||
return "", &AuthError{Msg: "__Secure-1PSID missing"}
|
||||
}
|
||||
|
||||
// Reuse shared HTTP client helper for consistency.
|
||||
client := newHTTPClient(httpOptions{ProxyURL: proxy, Insecure: insecure, FollowRedirects: true})
|
||||
|
||||
req, _ := http.NewRequest(http.MethodPost, EndpointRotateCookies, strings.NewReader("[000,\"-0000000000000000000\"]"))
|
||||
applyHeaders(req, HeadersRotateCookies)
|
||||
applyCookies(req, cookies)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer func() {
|
||||
_ = resp.Body.Close()
|
||||
}()
|
||||
|
||||
if resp.StatusCode == http.StatusUnauthorized {
|
||||
return "", &AuthError{Msg: "unauthorized"}
|
||||
}
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
return "", errors.New(resp.Status)
|
||||
}
|
||||
|
||||
for _, c := range resp.Cookies() {
|
||||
if c.Name == "__Secure-1PSIDTS" {
|
||||
return c.Value, nil
|
||||
}
|
||||
}
|
||||
// Fallback: check cookie jar in case the Set-Cookie was on a redirect hop
|
||||
if u, err := url.Parse(EndpointRotateCookies); err == nil && client.Jar != nil {
|
||||
for _, c := range client.Jar.Cookies(u) {
|
||||
if c.Name == "__Secure-1PSIDTS" && c.Value != "" {
|
||||
return c.Value, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// MaskToken28 masks a sensitive token for safe logging. Keep middle partially visible.
|
||||
func MaskToken28(s string) string {
|
||||
n := len(s)
|
||||
if n == 0 {
|
||||
return ""
|
||||
}
|
||||
if n < 20 {
|
||||
return strings.Repeat("*", n)
|
||||
}
|
||||
midStart := n/2 - 2
|
||||
if midStart < 8 {
|
||||
midStart = 8
|
||||
}
|
||||
if midStart+4 > n-8 {
|
||||
midStart = n - 8 - 4
|
||||
if midStart < 8 {
|
||||
midStart = 8
|
||||
}
|
||||
}
|
||||
prefixByte := s[:8]
|
||||
middle := s[midStart : midStart+4]
|
||||
suffix := s[n-8:]
|
||||
return prefixByte + strings.Repeat("*", 4) + middle + strings.Repeat("*", 4) + suffix
|
||||
}
|
||||
|
||||
var NanoBananaModel = map[string]struct{}{
|
||||
"gemini-2.5-flash-image-preview": {},
|
||||
}
|
||||
|
||||
// NewGeminiClient creates a client. Pass empty strings to auto-detect via browser cookies (not implemented in Go port).
|
||||
func NewGeminiClient(secure1psid string, secure1psidts string, proxy string, opts ...func(*GeminiClient)) *GeminiClient {
|
||||
c := &GeminiClient{
|
||||
Cookies: map[string]string{},
|
||||
Proxy: proxy,
|
||||
Running: false,
|
||||
Timeout: 300 * time.Second,
|
||||
insecure: false,
|
||||
}
|
||||
if secure1psid != "" {
|
||||
c.Cookies["__Secure-1PSID"] = secure1psid
|
||||
if secure1psidts != "" {
|
||||
c.Cookies["__Secure-1PSIDTS"] = secure1psidts
|
||||
}
|
||||
}
|
||||
for _, f := range opts {
|
||||
f(c)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// WithInsecureTLS sets skipping TLS verification (to mirror httpx verify=False)
|
||||
func WithInsecureTLS(insecure bool) func(*GeminiClient) {
|
||||
return func(c *GeminiClient) { c.insecure = insecure }
|
||||
}
|
||||
|
||||
// Init initializes the access token and http client.
|
||||
func (c *GeminiClient) Init(timeoutSec float64, verbose bool) error {
|
||||
// get access token
|
||||
token, validCookies, err := getAccessToken(c.Cookies, c.Proxy, verbose, c.insecure)
|
||||
if err != nil {
|
||||
c.Close(0)
|
||||
return err
|
||||
}
|
||||
c.AccessToken = token
|
||||
c.Cookies = validCookies
|
||||
|
||||
tr := &http.Transport{}
|
||||
if c.Proxy != "" {
|
||||
if pu, errParse := url.Parse(c.Proxy); errParse == nil {
|
||||
tr.Proxy = http.ProxyURL(pu)
|
||||
}
|
||||
}
|
||||
if c.insecure {
|
||||
// set via roundtripper in utils_get_access_token for token; here we reuse via default Transport
|
||||
// intentionally not adding here, as requests rely on endpoints with normal TLS
|
||||
}
|
||||
c.httpClient = &http.Client{Transport: tr, Timeout: time.Duration(timeoutSec * float64(time.Second))}
|
||||
c.Running = true
|
||||
|
||||
c.Timeout = time.Duration(timeoutSec * float64(time.Second))
|
||||
if verbose {
|
||||
fmt.Println("Gemini client initialized successfully.")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *GeminiClient) Close(delaySec float64) {
|
||||
if delaySec > 0 {
|
||||
time.Sleep(time.Duration(delaySec * float64(time.Second)))
|
||||
}
|
||||
c.Running = false
|
||||
}
|
||||
|
||||
// ensureRunning mirrors the decorator behavior and retries on APIError.
|
||||
func (c *GeminiClient) ensureRunning() error {
|
||||
if c.Running {
|
||||
return nil
|
||||
}
|
||||
return c.Init(float64(c.Timeout/time.Second), false)
|
||||
}
|
||||
|
||||
// RotateTS performs a RotateCookies request and returns the new __Secure-1PSIDTS value (if any).
|
||||
func (c *GeminiClient) RotateTS() (string, error) {
|
||||
if c == nil {
|
||||
return "", fmt.Errorf("gemini web client is nil")
|
||||
}
|
||||
return rotate1PSIDTS(c.Cookies, c.Proxy, c.insecure)
|
||||
}
|
||||
|
||||
// GenerateContent sends a prompt (with optional files) and parses the response into ModelOutput.
|
||||
func (c *GeminiClient) GenerateContent(prompt string, files []string, model Model, gem *Gem, chat *ChatSession) (ModelOutput, error) {
|
||||
var empty ModelOutput
|
||||
if prompt == "" {
|
||||
return empty, &ValueError{Msg: "Prompt cannot be empty."}
|
||||
}
|
||||
if err := c.ensureRunning(); err != nil {
|
||||
return empty, err
|
||||
}
|
||||
|
||||
// Retry wrapper similar to decorator (retry=2)
|
||||
retries := 2
|
||||
for {
|
||||
out, err := c.generateOnce(prompt, files, model, gem, chat)
|
||||
if err == nil {
|
||||
return out, nil
|
||||
}
|
||||
var apiErr *APIError
|
||||
var imgErr *ImageGenerationError
|
||||
shouldRetry := false
|
||||
if errors.As(err, &imgErr) {
|
||||
if retries > 1 {
|
||||
retries = 1
|
||||
} // only once for image generation
|
||||
shouldRetry = true
|
||||
} else if errors.As(err, &apiErr) {
|
||||
shouldRetry = true
|
||||
}
|
||||
if shouldRetry && retries > 0 {
|
||||
time.Sleep(time.Second)
|
||||
retries--
|
||||
continue
|
||||
}
|
||||
return empty, err
|
||||
}
|
||||
}
|
||||
|
||||
func ensureAnyLen(slice []any, index int) []any {
|
||||
if index < len(slice) {
|
||||
return slice
|
||||
}
|
||||
gap := index + 1 - len(slice)
|
||||
return append(slice, make([]any, gap)...)
|
||||
}
|
||||
|
||||
func (c *GeminiClient) generateOnce(prompt string, files []string, model Model, gem *Gem, chat *ChatSession) (ModelOutput, error) {
|
||||
var empty ModelOutput
|
||||
// Build f.req
|
||||
var uploaded [][]any
|
||||
for _, fp := range files {
|
||||
id, err := uploadFile(fp, c.Proxy, c.insecure)
|
||||
if err != nil {
|
||||
return empty, err
|
||||
}
|
||||
name, err := parseFileName(fp)
|
||||
if err != nil {
|
||||
return empty, err
|
||||
}
|
||||
uploaded = append(uploaded, []any{[]any{id}, name})
|
||||
}
|
||||
var item0 any
|
||||
if len(uploaded) > 0 {
|
||||
item0 = []any{prompt, 0, nil, uploaded}
|
||||
} else {
|
||||
item0 = []any{prompt}
|
||||
}
|
||||
var item2 any = nil
|
||||
if chat != nil {
|
||||
item2 = chat.Metadata()
|
||||
}
|
||||
|
||||
inner := []any{item0, nil, item2}
|
||||
requestedModel := strings.ToLower(model.Name)
|
||||
if chat != nil && chat.RequestedModel() != "" {
|
||||
requestedModel = chat.RequestedModel()
|
||||
}
|
||||
if _, ok := NanoBananaModel[requestedModel]; ok {
|
||||
inner = ensureAnyLen(inner, 49)
|
||||
inner[49] = 14
|
||||
}
|
||||
if gem != nil {
|
||||
// pad with 16 nils then gem ID
|
||||
for i := 0; i < 16; i++ {
|
||||
inner = append(inner, nil)
|
||||
}
|
||||
inner = append(inner, gem.ID)
|
||||
}
|
||||
innerJSON, _ := json.Marshal(inner)
|
||||
outer := []any{nil, string(innerJSON)}
|
||||
outerJSON, _ := json.Marshal(outer)
|
||||
|
||||
// form
|
||||
form := url.Values{}
|
||||
form.Set("at", c.AccessToken)
|
||||
form.Set("f.req", string(outerJSON))
|
||||
|
||||
req, _ := http.NewRequest(http.MethodPost, EndpointGenerate, strings.NewReader(form.Encode()))
|
||||
applyHeaders(req, HeadersGemini)
|
||||
applyHeaders(req, model.ModelHeader)
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded;charset=utf-8")
|
||||
applyCookies(req, c.Cookies)
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return empty, &TimeoutError{GeminiError{Msg: "Generate content request timed out."}}
|
||||
}
|
||||
defer func() {
|
||||
_ = resp.Body.Close()
|
||||
}()
|
||||
|
||||
if resp.StatusCode == 429 {
|
||||
// Surface 429 as TemporarilyBlocked to match reference behavior
|
||||
c.Close(0)
|
||||
return empty, &TemporarilyBlocked{GeminiError{Msg: "Too many requests. IP temporarily blocked."}}
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
c.Close(0)
|
||||
return empty, &APIError{Msg: fmt.Sprintf("Failed to generate contents. Status %d", resp.StatusCode)}
|
||||
}
|
||||
|
||||
// Read body and split lines; take the 3rd line (index 2)
|
||||
b, _ := io.ReadAll(resp.Body)
|
||||
parts := strings.Split(string(b), "\n")
|
||||
if len(parts) < 3 {
|
||||
c.Close(0)
|
||||
return empty, &APIError{Msg: "Invalid response data received."}
|
||||
}
|
||||
var responseJSON []any
|
||||
if err = json.Unmarshal([]byte(parts[2]), &responseJSON); err != nil {
|
||||
c.Close(0)
|
||||
return empty, &APIError{Msg: "Invalid response data received."}
|
||||
}
|
||||
|
||||
// find body where main_part[4] exists
|
||||
var (
|
||||
body any
|
||||
bodyIndex int
|
||||
)
|
||||
for i, p := range responseJSON {
|
||||
arr, ok := p.([]any)
|
||||
if !ok || len(arr) < 3 {
|
||||
continue
|
||||
}
|
||||
s, ok := arr[2].(string)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
var mainPart []any
|
||||
if err = json.Unmarshal([]byte(s), &mainPart); err != nil {
|
||||
continue
|
||||
}
|
||||
if len(mainPart) > 4 && mainPart[4] != nil {
|
||||
body = mainPart
|
||||
bodyIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if body == nil {
|
||||
// Fallback: scan subsequent lines to locate a data frame with a non-empty body (mainPart[4]).
|
||||
var lastTop []any
|
||||
for li := 3; li < len(parts) && body == nil; li++ {
|
||||
line := strings.TrimSpace(parts[li])
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
var top []any
|
||||
if err = json.Unmarshal([]byte(line), &top); err != nil {
|
||||
continue
|
||||
}
|
||||
lastTop = top
|
||||
for i, p := range top {
|
||||
arr, ok := p.([]any)
|
||||
if !ok || len(arr) < 3 {
|
||||
continue
|
||||
}
|
||||
s, ok := arr[2].(string)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
var mainPart []any
|
||||
if err = json.Unmarshal([]byte(s), &mainPart); err != nil {
|
||||
continue
|
||||
}
|
||||
if len(mainPart) > 4 && mainPart[4] != nil {
|
||||
body = mainPart
|
||||
bodyIndex = i
|
||||
responseJSON = top
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
// Parse nested error code to align with error mapping
|
||||
var top []any
|
||||
// Prefer lastTop from fallback scan; otherwise try parts[2]
|
||||
if len(lastTop) > 0 {
|
||||
top = lastTop
|
||||
} else {
|
||||
_ = json.Unmarshal([]byte(parts[2]), &top)
|
||||
}
|
||||
if len(top) > 0 {
|
||||
if code, ok := extractErrorCode(top); ok {
|
||||
switch code {
|
||||
case ErrorUsageLimitExceeded:
|
||||
return empty, &UsageLimitExceeded{GeminiError{Msg: fmt.Sprintf("Failed to generate contents. Usage limit of %s has exceeded. Please try switching to another model.", model.Name)}}
|
||||
case ErrorModelInconsistent:
|
||||
return empty, &ModelInvalid{GeminiError{Msg: "Selected model is inconsistent or unavailable."}}
|
||||
case ErrorModelHeaderInvalid:
|
||||
return empty, &APIError{Msg: "Invalid model header string. Please update the selected model header."}
|
||||
case ErrorIPTemporarilyBlocked:
|
||||
return empty, &TemporarilyBlocked{GeminiError{Msg: "Too many requests. IP temporarily blocked."}}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Debug("Invalid response: control frames only; no body found")
|
||||
// Close the client to force re-initialization on next request (parity with reference client behavior)
|
||||
c.Close(0)
|
||||
return empty, &APIError{Msg: "Failed to generate contents. Invalid response data received."}
|
||||
}
|
||||
|
||||
bodyArr := body.([]any)
|
||||
// metadata
|
||||
var metadata []string
|
||||
if len(bodyArr) > 1 {
|
||||
if metaArr, ok := bodyArr[1].([]any); ok {
|
||||
for _, v := range metaArr {
|
||||
if s, isOk := v.(string); isOk {
|
||||
metadata = append(metadata, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// candidates parsing
|
||||
candContainer, ok := bodyArr[4].([]any)
|
||||
if !ok {
|
||||
return empty, &APIError{Msg: "Failed to parse response body."}
|
||||
}
|
||||
candidates := make([]Candidate, 0, len(candContainer))
|
||||
reCard := regexp.MustCompile(`^http://googleusercontent\.com/card_content/\d+`)
|
||||
reGen := regexp.MustCompile(`http://googleusercontent\.com/image_generation_content/\d+`)
|
||||
|
||||
for ci, candAny := range candContainer {
|
||||
cArr, isOk := candAny.([]any)
|
||||
if !isOk {
|
||||
continue
|
||||
}
|
||||
// text: cArr[1][0]
|
||||
var text string
|
||||
if len(cArr) > 1 {
|
||||
if sArr, isOk1 := cArr[1].([]any); isOk1 && len(sArr) > 0 {
|
||||
text, _ = sArr[0].(string)
|
||||
}
|
||||
}
|
||||
if reCard.MatchString(text) {
|
||||
// candidate[22] and candidate[22][0] or text
|
||||
if len(cArr) > 22 {
|
||||
if arr, isOk1 := cArr[22].([]any); isOk1 && len(arr) > 0 {
|
||||
if s, isOk2 := arr[0].(string); isOk2 {
|
||||
text = s
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// thoughts: candidate[37][0][0]
|
||||
var thoughts *string
|
||||
if len(cArr) > 37 {
|
||||
if a, ok1 := cArr[37].([]any); ok1 && len(a) > 0 {
|
||||
if b1, ok2 := a[0].([]any); ok2 && len(b1) > 0 {
|
||||
if s, ok3 := b1[0].(string); ok3 {
|
||||
ss := decodeHTML(s)
|
||||
thoughts = &ss
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// web images: candidate[12][1]
|
||||
var webImages []WebImage
|
||||
var imgSection any
|
||||
if len(cArr) > 12 {
|
||||
imgSection = cArr[12]
|
||||
}
|
||||
if arr, ok1 := imgSection.([]any); ok1 && len(arr) > 1 {
|
||||
if imagesArr, ok2 := arr[1].([]any); ok2 {
|
||||
for _, wiAny := range imagesArr {
|
||||
wiArr, ok3 := wiAny.([]any)
|
||||
if !ok3 {
|
||||
continue
|
||||
}
|
||||
// url: wiArr[0][0][0], title: wiArr[7][0], alt: wiArr[0][4]
|
||||
var urlStr, title, alt string
|
||||
if len(wiArr) > 0 {
|
||||
if a, ok5 := wiArr[0].([]any); ok5 && len(a) > 0 {
|
||||
if b1, ok6 := a[0].([]any); ok6 && len(b1) > 0 {
|
||||
urlStr, _ = b1[0].(string)
|
||||
}
|
||||
if len(a) > 4 {
|
||||
if s, ok6 := a[4].(string); ok6 {
|
||||
alt = s
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(wiArr) > 7 {
|
||||
if a, ok4 := wiArr[7].([]any); ok4 && len(a) > 0 {
|
||||
title, _ = a[0].(string)
|
||||
}
|
||||
}
|
||||
webImages = append(webImages, WebImage{Image: Image{URL: urlStr, Title: title, Alt: alt, Proxy: c.Proxy}})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// generated images
|
||||
var genImages []GeneratedImage
|
||||
hasGen := false
|
||||
if arr, ok1 := imgSection.([]any); ok1 && len(arr) > 7 {
|
||||
if a, ok2 := arr[7].([]any); ok2 && len(a) > 0 && a[0] != nil {
|
||||
hasGen = true
|
||||
}
|
||||
}
|
||||
if hasGen {
|
||||
// find img part
|
||||
var imgBody []any
|
||||
for pi := bodyIndex; pi < len(responseJSON); pi++ {
|
||||
part := responseJSON[pi]
|
||||
arr, ok1 := part.([]any)
|
||||
if !ok1 || len(arr) < 3 {
|
||||
continue
|
||||
}
|
||||
s, ok1 := arr[2].(string)
|
||||
if !ok1 {
|
||||
continue
|
||||
}
|
||||
var mp []any
|
||||
if err = json.Unmarshal([]byte(s), &mp); err != nil {
|
||||
continue
|
||||
}
|
||||
if len(mp) > 4 {
|
||||
if tt, ok2 := mp[4].([]any); ok2 && len(tt) > ci {
|
||||
if sec, ok3 := tt[ci].([]any); ok3 && len(sec) > 12 {
|
||||
if ss, ok4 := sec[12].([]any); ok4 && len(ss) > 7 {
|
||||
if first, ok5 := ss[7].([]any); ok5 && len(first) > 0 && first[0] != nil {
|
||||
imgBody = mp
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if imgBody == nil {
|
||||
return empty, &ImageGenerationError{APIError{Msg: "Failed to parse generated images."}}
|
||||
}
|
||||
imgCand := imgBody[4].([]any)[ci].([]any)
|
||||
if len(imgCand) > 1 {
|
||||
if a, ok1 := imgCand[1].([]any); ok1 && len(a) > 0 {
|
||||
if s, ok2 := a[0].(string); ok2 {
|
||||
text = strings.TrimSpace(reGen.ReplaceAllString(s, ""))
|
||||
}
|
||||
}
|
||||
}
|
||||
// images list at imgCand[12][7][0]
|
||||
if len(imgCand) > 12 {
|
||||
if s1, ok1 := imgCand[12].([]any); ok1 && len(s1) > 7 {
|
||||
if s2, ok2 := s1[7].([]any); ok2 && len(s2) > 0 {
|
||||
if s3, ok3 := s2[0].([]any); ok3 {
|
||||
for ii, giAny := range s3 {
|
||||
ga, ok4 := giAny.([]any)
|
||||
if !ok4 || len(ga) < 4 {
|
||||
continue
|
||||
}
|
||||
// url: ga[0][3][3]
|
||||
var urlStr, title, alt string
|
||||
if a, ok5 := ga[0].([]any); ok5 && len(a) > 3 {
|
||||
if b1, ok6 := a[3].([]any); ok6 && len(b1) > 3 {
|
||||
urlStr, _ = b1[3].(string)
|
||||
}
|
||||
}
|
||||
// title from ga[3][6]
|
||||
if len(ga) > 3 {
|
||||
if a, ok5 := ga[3].([]any); ok5 {
|
||||
if len(a) > 6 {
|
||||
if v, ok6 := a[6].(float64); ok6 && v != 0 {
|
||||
title = fmt.Sprintf("[Generated Image %.0f]", v)
|
||||
} else {
|
||||
title = "[Generated Image]"
|
||||
}
|
||||
} else {
|
||||
title = "[Generated Image]"
|
||||
}
|
||||
// alt from ga[3][5][ii] fallback
|
||||
if len(a) > 5 {
|
||||
if tt, ok6 := a[5].([]any); ok6 {
|
||||
if ii < len(tt) {
|
||||
if s, ok7 := tt[ii].(string); ok7 {
|
||||
alt = s
|
||||
}
|
||||
} else if len(tt) > 0 {
|
||||
if s, ok7 := tt[0].(string); ok7 {
|
||||
alt = s
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
genImages = append(genImages, GeneratedImage{Image: Image{URL: urlStr, Title: title, Alt: alt, Proxy: c.Proxy}, Cookies: c.Cookies})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cand := Candidate{
|
||||
RCID: fmt.Sprintf("%v", cArr[0]),
|
||||
Text: decodeHTML(text),
|
||||
Thoughts: thoughts,
|
||||
WebImages: webImages,
|
||||
GeneratedImages: genImages,
|
||||
}
|
||||
candidates = append(candidates, cand)
|
||||
}
|
||||
|
||||
if len(candidates) == 0 {
|
||||
return empty, &GeminiError{Msg: "Failed to generate contents. No output data found in response."}
|
||||
}
|
||||
output := ModelOutput{Metadata: metadata, Candidates: candidates, Chosen: 0}
|
||||
if chat != nil {
|
||||
chat.lastOutput = &output
|
||||
}
|
||||
return output, nil
|
||||
}
|
||||

// extractErrorCode attempts to navigate the known nested error structure and fetch the integer code.
// Mirrors reference path: response_json[0][5][2][0][1][0]
func extractErrorCode(top []any) (int, bool) {
	if len(top) == 0 {
		return 0, false
	}
	a, ok := top[0].([]any)
	if !ok || len(a) <= 5 {
		return 0, false
	}
	b, ok := a[5].([]any)
	if !ok || len(b) <= 2 {
		return 0, false
	}
	c, ok := b[2].([]any)
	if !ok || len(c) == 0 {
		return 0, false
	}
	d, ok := c[0].([]any)
	if !ok || len(d) <= 1 {
		return 0, false
	}
	e, ok := d[1].([]any)
	if !ok || len(e) == 0 {
		return 0, false
	}
	f, ok := e[0].(float64)
	if !ok {
		return 0, false
	}
	return int(f), true
}
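
// Illustrative sketch (not part of the original file): the nested shape that
// extractErrorCode walks. The literal values below are assumptions chosen only
// to demonstrate the path response_json[0][5][2][0][1][0].
//
//	top := []any{
//		[]any{nil, nil, nil, nil, nil, // index 5 holds the error envelope
//			[]any{nil, nil, []any{
//				[]any{nil, []any{float64(1037)}}, // -> code 1037 (usage limit exceeded)
//			}},
//		},
//	}
//	code, ok := extractErrorCode(top) // code == 1037, ok == true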

// StartChat returns a ChatSession attached to the client
func (c *GeminiClient) StartChat(model Model, gem *Gem, metadata []string) *ChatSession {
	return &ChatSession{client: c, metadata: normalizeMeta(metadata), model: model, gem: gem, requestedModel: strings.ToLower(model.Name)}
}

// ChatSession holds conversation metadata
type ChatSession struct {
	client         *GeminiClient
	metadata       []string // cid, rid, rcid
	lastOutput     *ModelOutput
	model          Model
	gem            *Gem
	requestedModel string
}
|
||||
|
||||
func (cs *ChatSession) String() string {
|
||||
var cid, rid, rcid string
|
||||
if len(cs.metadata) > 0 {
|
||||
cid = cs.metadata[0]
|
||||
}
|
||||
if len(cs.metadata) > 1 {
|
||||
rid = cs.metadata[1]
|
||||
}
|
||||
if len(cs.metadata) > 2 {
|
||||
rcid = cs.metadata[2]
|
||||
}
|
||||
return fmt.Sprintf("ChatSession(cid='%s', rid='%s', rcid='%s')", cid, rid, rcid)
|
||||
}
|
||||
|
||||
func normalizeMeta(v []string) []string {
|
||||
out := []string{"", "", ""}
|
||||
for i := 0; i < len(v) && i < 3; i++ {
|
||||
out[i] = v[i]
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (cs *ChatSession) Metadata() []string { return cs.metadata }
|
||||
func (cs *ChatSession) SetMetadata(v []string) { cs.metadata = normalizeMeta(v) }
|
||||
func (cs *ChatSession) RequestedModel() string { return cs.requestedModel }
|
||||
func (cs *ChatSession) SetRequestedModel(name string) {
|
||||
cs.requestedModel = strings.ToLower(name)
|
||||
}
|
||||
func (cs *ChatSession) CID() string {
|
||||
if len(cs.metadata) > 0 {
|
||||
return cs.metadata[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
func (cs *ChatSession) RID() string {
|
||||
if len(cs.metadata) > 1 {
|
||||
return cs.metadata[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
func (cs *ChatSession) RCID() string {
|
||||
if len(cs.metadata) > 2 {
|
||||
return cs.metadata[2]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
func (cs *ChatSession) setCID(v string) {
|
||||
if len(cs.metadata) < 1 {
|
||||
cs.metadata = normalizeMeta(cs.metadata)
|
||||
}
|
||||
cs.metadata[0] = v
|
||||
}
|
||||
func (cs *ChatSession) setRID(v string) {
|
||||
if len(cs.metadata) < 2 {
|
||||
cs.metadata = normalizeMeta(cs.metadata)
|
||||
}
|
||||
cs.metadata[1] = v
|
||||
}
|
||||
func (cs *ChatSession) setRCID(v string) {
|
||||
if len(cs.metadata) < 3 {
|
||||
cs.metadata = normalizeMeta(cs.metadata)
|
||||
}
|
||||
cs.metadata[2] = v
|
||||
}
|
||||
|
||||
// SendMessage shortcut to client's GenerateContent
|
||||
func (cs *ChatSession) SendMessage(prompt string, files []string) (ModelOutput, error) {
|
||||
out, err := cs.client.GenerateContent(prompt, files, cs.model, cs.gem, cs)
|
||||
if err == nil {
|
||||
cs.lastOutput = &out
|
||||
cs.SetMetadata(out.Metadata)
|
||||
cs.setRCID(out.RCID())
|
||||
}
|
||||
return out, err
|
||||
}
|
||||
|
||||
// ChooseCandidate selects a candidate from last output and updates rcid
|
||||
func (cs *ChatSession) ChooseCandidate(index int) (ModelOutput, error) {
|
||||
if cs.lastOutput == nil {
|
||||
return ModelOutput{}, &ValueError{Msg: "No previous output data found in this chat session."}
|
||||
}
|
||||
if index >= len(cs.lastOutput.Candidates) {
|
||||
return ModelOutput{}, &ValueError{Msg: fmt.Sprintf("Index %d exceeds candidates", index)}
|
||||
}
|
||||
cs.lastOutput.Chosen = index
|
||||
cs.setRCID(cs.lastOutput.RCID())
|
||||
return *cs.lastOutput, nil
|
||||
}
|
||||
@@ -1,542 +0,0 @@
|
||||
package geminiwebapi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/tidwall/gjson"
|
||||
)
|
||||
|
||||
// Image helpers ------------------------------------------------------------
|
||||
|
||||
type Image struct {
|
||||
URL string
|
||||
Title string
|
||||
Alt string
|
||||
Proxy string
|
||||
}
|
||||
|
||||
func (i Image) String() string {
|
||||
short := i.URL
|
||||
if len(short) > 20 {
|
||||
short = short[:8] + "..." + short[len(short)-12:]
|
||||
}
|
||||
return fmt.Sprintf("Image(title='%s', alt='%s', url='%s')", i.Title, i.Alt, short)
|
||||
}
|
||||
|
||||
func (i Image) Save(path string, filename string, cookies map[string]string, verbose bool, skipInvalidFilename bool, insecure bool) (string, error) {
|
||||
if filename == "" {
|
||||
// Try to parse filename from URL.
|
||||
u := i.URL
|
||||
if p := strings.Split(u, "/"); len(p) > 0 {
|
||||
filename = p[len(p)-1]
|
||||
}
|
||||
if q := strings.Split(filename, "?"); len(q) > 0 {
|
||||
filename = q[0]
|
||||
}
|
||||
}
|
||||
// Regex validation (pattern: ^(.*\.\w+)) to extract name with extension.
|
||||
if filename != "" {
|
||||
re := regexp.MustCompile(`^(.*\.\w+)`)
|
||||
if m := re.FindStringSubmatch(filename); len(m) >= 2 {
|
||||
filename = m[1]
|
||||
} else {
|
||||
if verbose {
|
||||
log.Warnf("Invalid filename: %s", filename)
|
||||
}
|
||||
if skipInvalidFilename {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
}
|
||||
// Build client using shared helper to keep proxy/TLS behavior consistent.
|
||||
client := newHTTPClient(httpOptions{ProxyURL: i.Proxy, Insecure: insecure, FollowRedirects: true})
|
||||
client.Timeout = 120 * time.Second
|
||||
|
||||
// Helper to set raw Cookie header using provided cookies (parity with the reference client behavior).
|
||||
buildCookieHeader := func(m map[string]string) string {
|
||||
if len(m) == 0 {
|
||||
return ""
|
||||
}
|
||||
keys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
parts := make([]string, 0, len(keys))
|
||||
for _, k := range keys {
|
||||
parts = append(parts, fmt.Sprintf("%s=%s", k, m[k]))
|
||||
}
|
||||
return strings.Join(parts, "; ")
|
||||
}
|
||||
rawCookie := buildCookieHeader(cookies)
|
||||
|
||||
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
// Ensure provided cookies are always sent across redirects (domain-agnostic).
|
||||
if rawCookie != "" {
|
||||
req.Header.Set("Cookie", rawCookie)
|
||||
}
|
||||
if len(via) >= 10 {
|
||||
return errors.New("stopped after 10 redirects")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
req, _ := http.NewRequest(http.MethodGet, i.URL, nil)
|
||||
if rawCookie != "" {
|
||||
req.Header.Set("Cookie", rawCookie)
|
||||
}
|
||||
// Add browser-like headers to improve compatibility.
|
||||
req.Header.Set("Accept", "image/avif,image/webp,image/apng,image/*,*/*;q=0.8")
|
||||
req.Header.Set("Connection", "keep-alive")
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer func() {
|
||||
_ = resp.Body.Close()
|
||||
}()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("error downloading image: %d %s", resp.StatusCode, resp.Status)
|
||||
}
|
||||
if ct := resp.Header.Get("Content-Type"); ct != "" && !strings.Contains(strings.ToLower(ct), "image") {
|
||||
log.Warnf("Content type of %s is not image, but %s.", filename, ct)
|
||||
}
|
||||
if path == "" {
|
||||
path = "temp"
|
||||
}
|
||||
if err = os.MkdirAll(path, 0o755); err != nil {
|
||||
return "", err
|
||||
}
|
||||
dest := filepath.Join(path, filename)
|
||||
f, err := os.Create(dest)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
_, err = io.Copy(f, resp.Body)
|
||||
_ = f.Close()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if verbose {
|
||||
fmt.Printf("Image saved as %s\n", dest)
|
||||
}
|
||||
abspath, _ := filepath.Abs(dest)
|
||||
return abspath, nil
|
||||
}
|
||||
|
||||
type WebImage struct{ Image }
|
||||
|
||||
type GeneratedImage struct {
|
||||
Image
|
||||
Cookies map[string]string
|
||||
}
|
||||
|
||||
func (g GeneratedImage) Save(path string, filename string, fullSize bool, verbose bool, skipInvalidFilename bool, insecure bool) (string, error) {
|
||||
if len(g.Cookies) == 0 {
|
||||
return "", &ValueError{Msg: "GeneratedImage requires cookies."}
|
||||
}
|
||||
strURL := g.URL
|
||||
if fullSize {
|
||||
strURL = strURL + "=s2048"
|
||||
}
|
||||
if filename == "" {
|
||||
name := time.Now().Format("20060102150405")
|
||||
if len(strURL) >= 10 {
|
||||
name = fmt.Sprintf("%s_%s.png", name, strURL[len(strURL)-10:])
|
||||
} else {
|
||||
name += ".png"
|
||||
}
|
||||
filename = name
|
||||
}
|
||||
tmp := g.Image
|
||||
tmp.URL = strURL
|
||||
return tmp.Save(path, filename, g.Cookies, verbose, skipInvalidFilename, insecure)
|
||||
}
|
||||
|
||||
// Request parsing & file helpers -------------------------------------------
|
||||
|
||||
func ParseMessagesAndFiles(rawJSON []byte) ([]RoleText, [][]byte, []string, [][]int, error) {
|
||||
var messages []RoleText
|
||||
var files [][]byte
|
||||
var mimes []string
|
||||
var perMsgFileIdx [][]int
|
||||
|
||||
contents := gjson.GetBytes(rawJSON, "contents")
|
||||
if contents.Exists() {
|
||||
contents.ForEach(func(_, content gjson.Result) bool {
|
||||
role := NormalizeRole(content.Get("role").String())
|
||||
var b strings.Builder
|
||||
startFile := len(files)
|
||||
content.Get("parts").ForEach(func(_, part gjson.Result) bool {
|
||||
if text := part.Get("text"); text.Exists() {
|
||||
if b.Len() > 0 {
|
||||
b.WriteString("\n")
|
||||
}
|
||||
b.WriteString(text.String())
|
||||
}
|
||||
if inlineData := part.Get("inlineData"); inlineData.Exists() {
|
||||
data := inlineData.Get("data").String()
|
||||
if data != "" {
|
||||
if dec, err := base64.StdEncoding.DecodeString(data); err == nil {
|
||||
files = append(files, dec)
|
||||
m := inlineData.Get("mimeType").String()
|
||||
if m == "" {
|
||||
m = inlineData.Get("mime_type").String()
|
||||
}
|
||||
mimes = append(mimes, m)
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
messages = append(messages, RoleText{Role: role, Text: b.String()})
|
||||
endFile := len(files)
|
||||
if endFile > startFile {
|
||||
idxs := make([]int, 0, endFile-startFile)
|
||||
for i := startFile; i < endFile; i++ {
|
||||
idxs = append(idxs, i)
|
||||
}
|
||||
perMsgFileIdx = append(perMsgFileIdx, idxs)
|
||||
} else {
|
||||
perMsgFileIdx = append(perMsgFileIdx, nil)
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
return messages, files, mimes, perMsgFileIdx, nil
|
||||
}
|
||||
|
||||
func MaterializeInlineFiles(files [][]byte, mimes []string) ([]string, *interfaces.ErrorMessage) {
|
||||
if len(files) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
paths := make([]string, 0, len(files))
|
||||
for i, data := range files {
|
||||
ext := MimeToExt(mimes, i)
|
||||
f, err := os.CreateTemp("", "gemini-upload-*"+ext)
|
||||
if err != nil {
|
||||
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusInternalServerError, Error: fmt.Errorf("failed to create temp file: %w", err)}
|
||||
}
|
||||
if _, err = f.Write(data); err != nil {
|
||||
_ = f.Close()
|
||||
_ = os.Remove(f.Name())
|
||||
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusInternalServerError, Error: fmt.Errorf("failed to write temp file: %w", err)}
|
||||
}
|
||||
if err = f.Close(); err != nil {
|
||||
_ = os.Remove(f.Name())
|
||||
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusInternalServerError, Error: fmt.Errorf("failed to close temp file: %w", err)}
|
||||
}
|
||||
paths = append(paths, f.Name())
|
||||
}
|
||||
return paths, nil
|
||||
}
|
||||
|
||||
func CleanupFiles(paths []string) {
|
||||
for _, p := range paths {
|
||||
if p != "" {
|
||||
_ = os.Remove(p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func FetchGeneratedImageData(gi GeneratedImage) (string, string, error) {
|
||||
path, err := gi.Save("", "", true, false, true, false)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
defer func() { _ = os.Remove(path) }()
|
||||
b, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
mime := http.DetectContentType(b)
|
||||
if !strings.HasPrefix(mime, "image/") {
|
||||
if guessed := mimeFromExtension(filepath.Ext(path)); guessed != "" {
|
||||
mime = guessed
|
||||
} else {
|
||||
mime = "image/png"
|
||||
}
|
||||
}
|
||||
return mime, base64.StdEncoding.EncodeToString(b), nil
|
||||
}
|
||||
|
||||
func MimeToExt(mimes []string, i int) string {
|
||||
if i < len(mimes) {
|
||||
return MimeToPreferredExt(strings.ToLower(mimes[i]))
|
||||
}
|
||||
return ".png"
|
||||
}
|
||||
|
||||
var preferredExtByMIME = map[string]string{
|
||||
"image/png": ".png",
|
||||
"image/jpeg": ".jpg",
|
||||
"image/jpg": ".jpg",
|
||||
"image/webp": ".webp",
|
||||
"image/gif": ".gif",
|
||||
"image/bmp": ".bmp",
|
||||
"image/heic": ".heic",
|
||||
"application/pdf": ".pdf",
|
||||
}
|
||||
|
||||
func MimeToPreferredExt(mime string) string {
|
||||
normalized := strings.ToLower(strings.TrimSpace(mime))
|
||||
if normalized == "" {
|
||||
return ".png"
|
||||
}
|
||||
if ext, ok := preferredExtByMIME[normalized]; ok {
|
||||
return ext
|
||||
}
|
||||
return ".png"
|
||||
}
|
||||
|
||||
func mimeFromExtension(ext string) string {
|
||||
cleaned := strings.TrimPrefix(strings.ToLower(ext), ".")
|
||||
if cleaned == "" {
|
||||
return ""
|
||||
}
|
||||
if mt, ok := misc.MimeTypes[cleaned]; ok && mt != "" {
|
||||
return mt
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// File upload helpers ------------------------------------------------------
|
||||
|
||||
func uploadFile(path string, proxy string, insecure bool) (string, error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer func() {
|
||||
_ = f.Close()
|
||||
}()
|
||||
|
||||
var buf bytes.Buffer
|
||||
mw := multipart.NewWriter(&buf)
|
||||
fw, err := mw.CreateFormFile("file", filepath.Base(path))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if _, err = io.Copy(fw, f); err != nil {
|
||||
return "", err
|
||||
}
|
||||
_ = mw.Close()
|
||||
|
||||
client := newHTTPClient(httpOptions{ProxyURL: proxy, Insecure: insecure, FollowRedirects: true})
|
||||
client.Timeout = 300 * time.Second
|
||||
|
||||
req, _ := http.NewRequest(http.MethodPost, EndpointUpload, &buf)
|
||||
applyHeaders(req, HeadersUpload)
|
||||
req.Header.Set("Content-Type", mw.FormDataContentType())
|
||||
req.Header.Set("Accept", "*/*")
|
||||
req.Header.Set("Connection", "keep-alive")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer func() {
|
||||
_ = resp.Body.Close()
|
||||
}()
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
return "", &APIError{Msg: resp.Status}
|
||||
}
|
||||
b, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
func parseFileName(path string) (string, error) {
|
||||
if st, err := os.Stat(path); err != nil || st.IsDir() {
|
||||
return "", &ValueError{Msg: path + " is not a valid file."}
|
||||
}
|
||||
return filepath.Base(path), nil
|
||||
}
|
||||
|
||||
// Response formatting helpers ----------------------------------------------

var (
	reGoogle   = regexp.MustCompile("(\\()?\\[`([^`]+?)`\\]\\(https://www\\.google\\.com/search\\?q=[^)]*\\)(\\))?")
	reColonNum = regexp.MustCompile(`([^:]+:\d+)`)
	reInline   = regexp.MustCompile("`(\\[[^\\]]+\\]\\([^\\)]+\\))`")
)

// unescapeGeminiText reverses the HTML entity and backslash escaping applied to model text.
func unescapeGeminiText(s string) string {
	if s == "" {
		return s
	}
	s = strings.ReplaceAll(s, "&lt;", "<")
	s = strings.ReplaceAll(s, "\\<", "<")
	s = strings.ReplaceAll(s, "\\_", "_")
	s = strings.ReplaceAll(s, "\\>", ">")
	return s
}

func postProcessModelText(text string) string {
	text = reGoogle.ReplaceAllStringFunc(text, func(m string) string {
		subs := reGoogle.FindStringSubmatch(m)
		if len(subs) < 4 {
			return m
		}
		outerOpen := subs[1]
		display := subs[2]
		target := display
		if loc := reColonNum.FindString(display); loc != "" {
			target = loc
		}
		newSeg := "[`" + display + "`](" + target + ")"
		if outerOpen != "" {
			return "(" + newSeg + ")"
		}
		return newSeg
	})
	text = reInline.ReplaceAllString(text, "$1")
	return text
}

func estimateTokens(s string) int {
	if s == "" {
		return 0
	}
	rc := float64(utf8.RuneCountInString(s))
	if rc <= 0 {
		return 0
	}
	est := int(math.Ceil(rc / 4.0))
	if est < 0 {
		return 0
	}
	return est
}
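
// Illustrative arithmetic (not part of the original file): with the 4-runes-per-token
// heuristic above, a 100-rune prompt is estimated at ceil(100/4) = 25 tokens and a
// 3-rune string still counts as ceil(3/4) = 1 token.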
|
||||
|
||||
// ConvertOutputToGemini converts simplified ModelOutput to Gemini API-like JSON.
|
||||
// promptText is used only to estimate usage tokens to populate usage fields.
|
||||
func ConvertOutputToGemini(output *ModelOutput, modelName string, promptText string) ([]byte, error) {
|
||||
if output == nil || len(output.Candidates) == 0 {
|
||||
return nil, fmt.Errorf("empty output")
|
||||
}
|
||||
|
||||
parts := make([]map[string]any, 0, 2)
|
||||
|
||||
var thoughtsText string
|
||||
if output.Candidates[0].Thoughts != nil {
|
||||
if t := strings.TrimSpace(*output.Candidates[0].Thoughts); t != "" {
|
||||
thoughtsText = unescapeGeminiText(t)
|
||||
parts = append(parts, map[string]any{
|
||||
"text": thoughtsText,
|
||||
"thought": true,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
visible := unescapeGeminiText(output.Candidates[0].Text)
|
||||
finalText := postProcessModelText(visible)
|
||||
if finalText != "" {
|
||||
parts = append(parts, map[string]any{"text": finalText})
|
||||
}
|
||||
|
||||
if imgs := output.Candidates[0].GeneratedImages; len(imgs) > 0 {
|
||||
for _, gi := range imgs {
|
||||
if mime, data, err := FetchGeneratedImageData(gi); err == nil && data != "" {
|
||||
parts = append(parts, map[string]any{
|
||||
"inlineData": map[string]any{
|
||||
"mimeType": mime,
|
||||
"data": data,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
promptTokens := estimateTokens(promptText)
|
||||
completionTokens := estimateTokens(finalText)
|
||||
thoughtsTokens := 0
|
||||
if thoughtsText != "" {
|
||||
thoughtsTokens = estimateTokens(thoughtsText)
|
||||
}
|
||||
totalTokens := promptTokens + completionTokens
|
||||
|
||||
now := time.Now()
|
||||
resp := map[string]any{
|
||||
"candidates": []any{
|
||||
map[string]any{
|
||||
"content": map[string]any{
|
||||
"parts": parts,
|
||||
"role": "model",
|
||||
},
|
||||
"finishReason": "stop",
|
||||
"index": 0,
|
||||
},
|
||||
},
|
||||
"createTime": now.Format(time.RFC3339Nano),
|
||||
"responseId": fmt.Sprintf("gemini-web-%d", now.UnixNano()),
|
||||
"modelVersion": modelName,
|
||||
"usageMetadata": map[string]any{
|
||||
"promptTokenCount": promptTokens,
|
||||
"candidatesTokenCount": completionTokens,
|
||||
"thoughtsTokenCount": thoughtsTokens,
|
||||
"totalTokenCount": totalTokens,
|
||||
},
|
||||
}
|
||||
b, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal gemini response: %w", err)
|
||||
}
|
||||
return ensureColonSpacing(b), nil
|
||||
}
|
||||
|
||||
// ensureColonSpacing inserts a single space after JSON key-value colons while
// leaving string content untouched. This matches the relaxed formatting used by
// Gemini responses and keeps downstream text-processing tools compatible with
// the proxy output.
func ensureColonSpacing(b []byte) []byte {
	if len(b) == 0 {
		return b
	}
	var out bytes.Buffer
	out.Grow(len(b) + len(b)/8)
	inString := false
	escaped := false
	for i := 0; i < len(b); i++ {
		ch := b[i]
		out.WriteByte(ch)
		if escaped {
			escaped = false
			continue
		}
		switch ch {
		case '\\':
			escaped = true
		case '"':
			inString = !inString
		case ':':
			if !inString && i+1 < len(b) {
				next := b[i+1]
				if next != ' ' && next != '\n' && next != '\r' && next != '\t' {
					out.WriteByte(' ')
				}
			}
		}
	}
	return out.Bytes()
}
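
// Illustrative sketch (not part of the original file): expected behavior of
// ensureColonSpacing on a compact JSON payload. The literals are assumptions
// for demonstration only; colons inside string values are left untouched.
//
//	in := []byte(`{"a":1,"b":"x:y"}`)
//	out := ensureColonSpacing(in)
//	// string(out) == `{"a": 1,"b": "x:y"}`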
@@ -1,310 +0,0 @@
package geminiwebapi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"html"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
|
||||
)
|
||||
|
||||
// Gemini web endpoints and default headers ----------------------------------
|
||||
const (
|
||||
EndpointGoogle = "https://www.google.com"
|
||||
EndpointInit = "https://gemini.google.com/app"
|
||||
EndpointGenerate = "https://gemini.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate"
|
||||
EndpointRotateCookies = "https://accounts.google.com/RotateCookies"
|
||||
EndpointUpload = "https://content-push.googleapis.com/upload"
|
||||
)
|
||||
|
||||
var (
|
||||
HeadersGemini = http.Header{
|
||||
"Content-Type": []string{"application/x-www-form-urlencoded;charset=utf-8"},
|
||||
"Host": []string{"gemini.google.com"},
|
||||
"Origin": []string{"https://gemini.google.com"},
|
||||
"Referer": []string{"https://gemini.google.com/"},
|
||||
"User-Agent": []string{"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"},
|
||||
"X-Same-Domain": []string{"1"},
|
||||
}
|
||||
HeadersRotateCookies = http.Header{
|
||||
"Content-Type": []string{"application/json"},
|
||||
}
|
||||
HeadersUpload = http.Header{
|
||||
"Push-ID": []string{"feeds/mcudyrk2a4khkz"},
|
||||
}
|
||||
)
|
||||
|
||||
// Model metadata -------------------------------------------------------------
|
||||
type Model struct {
|
||||
Name string
|
||||
ModelHeader http.Header
|
||||
AdvancedOnly bool
|
||||
}
|
||||
|
||||
var (
|
||||
ModelUnspecified = Model{
|
||||
Name: "unspecified",
|
||||
ModelHeader: http.Header{},
|
||||
AdvancedOnly: false,
|
||||
}
|
||||
ModelG25Flash = Model{
|
||||
Name: "gemini-2.5-flash",
|
||||
ModelHeader: http.Header{
|
||||
"x-goog-ext-525001261-jspb": []string{"[1,null,null,null,\"71c2d248d3b102ff\",null,null,0,[4]]"},
|
||||
},
|
||||
AdvancedOnly: false,
|
||||
}
|
||||
ModelG25Pro = Model{
|
||||
Name: "gemini-2.5-pro",
|
||||
ModelHeader: http.Header{
|
||||
"x-goog-ext-525001261-jspb": []string{"[1,null,null,null,\"4af6c7f5da75d65d\",null,null,0,[4]]"},
|
||||
},
|
||||
AdvancedOnly: false,
|
||||
}
|
||||
ModelG20Flash = Model{
|
||||
Name: "gemini-2.0-flash",
|
||||
ModelHeader: http.Header{
|
||||
"x-goog-ext-525001261-jspb": []string{"[1,null,null,null,\"f299729663a2343f\"]"},
|
||||
},
|
||||
AdvancedOnly: false,
|
||||
}
|
||||
ModelG20FlashThinking = Model{
|
||||
Name: "gemini-2.0-flash-thinking",
|
||||
ModelHeader: http.Header{
|
||||
"x-goog-ext-525001261-jspb": []string{"[null,null,null,null,\"7ca48d02d802f20a\"]"},
|
||||
},
|
||||
AdvancedOnly: false,
|
||||
}
|
||||
)
|
||||
|
||||
func ModelFromName(name string) (Model, error) {
|
||||
switch name {
|
||||
case ModelUnspecified.Name:
|
||||
return ModelUnspecified, nil
|
||||
case ModelG25Flash.Name:
|
||||
return ModelG25Flash, nil
|
||||
case ModelG25Pro.Name:
|
||||
return ModelG25Pro, nil
|
||||
case ModelG20Flash.Name:
|
||||
return ModelG20Flash, nil
|
||||
case ModelG20FlashThinking.Name:
|
||||
return ModelG20FlashThinking, nil
|
||||
default:
|
||||
return Model{}, &ValueError{Msg: "Unknown model name: " + name}
|
||||
}
|
||||
}
|
||||
|
||||
// Known error codes returned from the server.
|
||||
const (
|
||||
ErrorUsageLimitExceeded = 1037
|
||||
ErrorModelInconsistent = 1050
|
||||
ErrorModelHeaderInvalid = 1052
|
||||
ErrorIPTemporarilyBlocked = 1060
|
||||
)
|
||||
|
||||
var (
|
||||
GeminiWebAliasOnce sync.Once
|
||||
GeminiWebAliasMap map[string]string
|
||||
)
|
||||
|
||||
func EnsureGeminiWebAliasMap() {
|
||||
GeminiWebAliasOnce.Do(func() {
|
||||
GeminiWebAliasMap = make(map[string]string)
|
||||
for _, m := range registry.GetGeminiModels() {
|
||||
if m.ID == "gemini-2.5-flash-lite" {
|
||||
continue
|
||||
} else if m.ID == "gemini-2.5-flash" {
|
||||
GeminiWebAliasMap["gemini-2.5-flash-image-preview"] = "gemini-2.5-flash"
|
||||
}
|
||||
alias := AliasFromModelID(m.ID)
|
||||
GeminiWebAliasMap[strings.ToLower(alias)] = strings.ToLower(m.ID)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func GetGeminiWebAliasedModels() []*registry.ModelInfo {
|
||||
EnsureGeminiWebAliasMap()
|
||||
aliased := make([]*registry.ModelInfo, 0)
|
||||
for _, m := range registry.GetGeminiModels() {
|
||||
if m.ID == "gemini-2.5-flash-lite" {
|
||||
continue
|
||||
} else if m.ID == "gemini-2.5-flash" {
|
||||
cpy := *m
|
||||
cpy.ID = "gemini-2.5-flash-image-preview"
|
||||
cpy.Name = "gemini-2.5-flash-image-preview"
|
||||
cpy.DisplayName = "Nano Banana"
|
||||
cpy.Description = "Gemini 2.5 Flash Preview Image"
|
||||
aliased = append(aliased, &cpy)
|
||||
}
|
||||
cpy := *m
|
||||
cpy.ID = AliasFromModelID(m.ID)
|
||||
cpy.Name = cpy.ID
|
||||
aliased = append(aliased, &cpy)
|
||||
}
|
||||
return aliased
|
||||
}
|
||||
|
||||
func MapAliasToUnderlying(name string) string {
|
||||
EnsureGeminiWebAliasMap()
|
||||
n := strings.ToLower(name)
|
||||
if u, ok := GeminiWebAliasMap[n]; ok {
|
||||
return u
|
||||
}
|
||||
const suffix = "-web"
|
||||
if strings.HasSuffix(n, suffix) {
|
||||
return strings.TrimSuffix(n, suffix)
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func AliasFromModelID(modelID string) string {
|
||||
return modelID + "-web"
|
||||
}
|
||||
|
||||
// Conversation domain structures -------------------------------------------
|
||||
type RoleText struct {
|
||||
Role string
|
||||
Text string
|
||||
}
|
||||
|
||||
type StoredMessage struct {
|
||||
Role string `json:"role"`
|
||||
Content string `json:"content"`
|
||||
Name string `json:"name,omitempty"`
|
||||
}
|
||||
|
||||
type ConversationRecord struct {
|
||||
Model string `json:"model"`
|
||||
ClientID string `json:"client_id"`
|
||||
Metadata []string `json:"metadata,omitempty"`
|
||||
Messages []StoredMessage `json:"messages"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
}
|
||||
|
||||
type Candidate struct {
|
||||
RCID string
|
||||
Text string
|
||||
Thoughts *string
|
||||
WebImages []WebImage
|
||||
GeneratedImages []GeneratedImage
|
||||
}
|
||||
|
||||
func (c Candidate) String() string {
|
||||
t := c.Text
|
||||
if len(t) > 20 {
|
||||
t = t[:20] + "..."
|
||||
}
|
||||
return fmt.Sprintf("Candidate(rcid='%s', text='%s', images=%d)", c.RCID, t, len(c.WebImages)+len(c.GeneratedImages))
|
||||
}
|
||||
|
||||
func (c Candidate) Images() []Image {
|
||||
images := make([]Image, 0, len(c.WebImages)+len(c.GeneratedImages))
|
||||
for _, wi := range c.WebImages {
|
||||
images = append(images, wi.Image)
|
||||
}
|
||||
for _, gi := range c.GeneratedImages {
|
||||
images = append(images, gi.Image)
|
||||
}
|
||||
return images
|
||||
}
|
||||
|
||||
type ModelOutput struct {
|
||||
Metadata []string
|
||||
Candidates []Candidate
|
||||
Chosen int
|
||||
}
|
||||
|
||||
func (m ModelOutput) String() string { return m.Text() }
|
||||
|
||||
func (m ModelOutput) Text() string {
|
||||
if len(m.Candidates) == 0 {
|
||||
return ""
|
||||
}
|
||||
return m.Candidates[m.Chosen].Text
|
||||
}
|
||||
|
||||
func (m ModelOutput) Thoughts() *string {
|
||||
if len(m.Candidates) == 0 {
|
||||
return nil
|
||||
}
|
||||
return m.Candidates[m.Chosen].Thoughts
|
||||
}
|
||||
|
||||
func (m ModelOutput) Images() []Image {
|
||||
if len(m.Candidates) == 0 {
|
||||
return nil
|
||||
}
|
||||
return m.Candidates[m.Chosen].Images()
|
||||
}
|
||||
|
||||
func (m ModelOutput) RCID() string {
|
||||
if len(m.Candidates) == 0 {
|
||||
return ""
|
||||
}
|
||||
return m.Candidates[m.Chosen].RCID
|
||||
}
|
||||
|
||||
type Gem struct {
|
||||
ID string
|
||||
Name string
|
||||
Description *string
|
||||
Prompt *string
|
||||
Predefined bool
|
||||
}
|
||||
|
||||
func (g Gem) String() string {
|
||||
return fmt.Sprintf("Gem(id='%s', name='%s', description='%v', prompt='%v', predefined=%v)", g.ID, g.Name, g.Description, g.Prompt, g.Predefined)
|
||||
}
|
||||
|
||||
func decodeHTML(s string) string { return html.UnescapeString(s) }
|
||||
|
||||
// Error hierarchy -----------------------------------------------------------
|
||||
type AuthError struct{ Msg string }
|
||||
|
||||
func (e *AuthError) Error() string {
|
||||
if e.Msg == "" {
|
||||
return "authentication error"
|
||||
}
|
||||
return e.Msg
|
||||
}
|
||||
|
||||
type APIError struct{ Msg string }
|
||||
|
||||
func (e *APIError) Error() string {
|
||||
if e.Msg == "" {
|
||||
return "api error"
|
||||
}
|
||||
return e.Msg
|
||||
}
|
||||
|
||||
type ImageGenerationError struct{ APIError }
|
||||
|
||||
type GeminiError struct{ Msg string }
|
||||
|
||||
func (e *GeminiError) Error() string {
|
||||
if e.Msg == "" {
|
||||
return "gemini error"
|
||||
}
|
||||
return e.Msg
|
||||
}
|
||||
|
||||
type TimeoutError struct{ GeminiError }
|
||||
|
||||
type UsageLimitExceeded struct{ GeminiError }
|
||||
|
||||
type ModelInvalid struct{ GeminiError }
|
||||
|
||||
type TemporarilyBlocked struct{ GeminiError }
|
||||
|
||||
type ValueError struct{ Msg string }
|
||||
|
||||
func (e *ValueError) Error() string {
|
||||
if e.Msg == "" {
|
||||
return "value error"
|
||||
}
|
||||
return e.Msg
|
||||
}
@@ -1,227 +0,0 @@
|
||||
package geminiwebapi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"regexp"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/tidwall/gjson"
|
||||
)
|
||||
|
||||
var (
|
||||
reThink = regexp.MustCompile(`(?s)^\s*<think>.*?</think>\s*`)
|
||||
reXMLAnyTag = regexp.MustCompile(`(?s)<\s*[^>]+>`)
|
||||
)
|
||||
|
||||
// NormalizeRole converts a role to a standard format (lowercase, 'model' -> 'assistant').
func NormalizeRole(role string) string {
	r := strings.ToLower(role)
	if r == "model" {
		return "assistant"
	}
	return r
}

// NeedRoleTags checks if a list of messages requires role tags.
func NeedRoleTags(msgs []RoleText) bool {
	for _, m := range msgs {
		if strings.ToLower(m.Role) != "user" {
			return true
		}
	}
	return false
}

// AddRoleTag wraps content with a role tag.
func AddRoleTag(role, content string, unclose bool) string {
	if role == "" {
		role = "user"
	}
	if unclose {
		return "<|im_start|>" + role + "\n" + content
	}
	return "<|im_start|>" + role + "\n" + content + "\n<|im_end|>"
}

// BuildPrompt constructs the final prompt from a list of messages.
func BuildPrompt(msgs []RoleText, tagged bool, appendAssistant bool) string {
	if len(msgs) == 0 {
		if tagged && appendAssistant {
			return AddRoleTag("assistant", "", true)
		}
		return ""
	}
	if !tagged {
		var sb strings.Builder
		for i, m := range msgs {
			if i > 0 {
				sb.WriteString("\n")
			}
			sb.WriteString(m.Text)
		}
		return sb.String()
	}
	var sb strings.Builder
	for _, m := range msgs {
		sb.WriteString(AddRoleTag(m.Role, m.Text, false))
		sb.WriteString("\n")
	}
	if appendAssistant {
		sb.WriteString(AddRoleTag("assistant", "", true))
	}
	return strings.TrimSpace(sb.String())
}
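
// Illustrative sketch (not part of the original file): the tagged prompt format
// produced by BuildPrompt for a short exchange. The message contents are
// assumptions for demonstration only.
//
//	msgs := []RoleText{
//		{Role: "system", Text: "Be brief."},
//		{Role: "user", Text: "Hi"},
//	}
//	// NeedRoleTags(msgs) == true because of the non-user role, so:
//	p := BuildPrompt(msgs, true, true)
//	// p ==
//	// <|im_start|>system
//	// Be brief.
//	// <|im_end|>
//	// <|im_start|>user
//	// Hi
//	// <|im_end|>
//	// <|im_start|>assistant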
|
||||
|
||||
// RemoveThinkTags strips <think>...</think> blocks from a string.
|
||||
func RemoveThinkTags(s string) string {
|
||||
return strings.TrimSpace(reThink.ReplaceAllString(s, ""))
|
||||
}
|
||||
|
||||
// SanitizeAssistantMessages removes think tags from assistant messages.
|
||||
func SanitizeAssistantMessages(msgs []RoleText) []RoleText {
|
||||
out := make([]RoleText, 0, len(msgs))
|
||||
for _, m := range msgs {
|
||||
if strings.ToLower(m.Role) == "assistant" {
|
||||
out = append(out, RoleText{Role: m.Role, Text: RemoveThinkTags(m.Text)})
|
||||
} else {
|
||||
out = append(out, m)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// AppendXMLWrapHintIfNeeded appends an XML wrap hint to messages containing XML-like blocks.
|
||||
func AppendXMLWrapHintIfNeeded(msgs []RoleText, disable bool) []RoleText {
|
||||
if disable {
|
||||
return msgs
|
||||
}
|
||||
const xmlWrapHint = "\nFor any xml block, e.g. tool call, always wrap it with: \n`````xml\n...\n`````\n"
|
||||
out := make([]RoleText, 0, len(msgs))
|
||||
for _, m := range msgs {
|
||||
t := m.Text
|
||||
if reXMLAnyTag.MatchString(t) {
|
||||
t = t + xmlWrapHint
|
||||
}
|
||||
out = append(out, RoleText{Role: m.Role, Text: t})
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// EstimateTotalTokensFromRawJSON estimates token count by summing text parts.
|
||||
func EstimateTotalTokensFromRawJSON(rawJSON []byte) int {
|
||||
totalChars := 0
|
||||
contents := gjson.GetBytes(rawJSON, "contents")
|
||||
if contents.Exists() {
|
||||
contents.ForEach(func(_, content gjson.Result) bool {
|
||||
content.Get("parts").ForEach(func(_, part gjson.Result) bool {
|
||||
if t := part.Get("text"); t.Exists() {
|
||||
totalChars += utf8.RuneCountInString(t.String())
|
||||
}
|
||||
return true
|
||||
})
|
||||
return true
|
||||
})
|
||||
}
|
||||
if totalChars <= 0 {
|
||||
return 0
|
||||
}
|
||||
return int(math.Ceil(float64(totalChars) / 4.0))
|
||||
}
|
||||
|
||||
// Request chunking helpers ------------------------------------------------
|
||||
|
||||
const continuationHint = "\n(More messages to come, please reply with just 'ok.')"
|
||||
|
||||
func ChunkByRunes(s string, size int) []string {
	if size <= 0 {
		return []string{s}
	}
	chunks := make([]string, 0, (len(s)/size)+1)
	var buf strings.Builder
	count := 0
	for _, r := range s {
		buf.WriteRune(r)
		count++
		if count >= size {
			chunks = append(chunks, buf.String())
			buf.Reset()
			count = 0
		}
	}
	if buf.Len() > 0 {
		chunks = append(chunks, buf.String())
	}
	if len(chunks) == 0 {
		return []string{""}
	}
	return chunks
}
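
// Illustrative sketch (not part of the original file): ChunkByRunes splits on rune
// boundaries, so multi-byte characters are never cut in half. The inputs below are
// assumptions for demonstration only.
//
//	ChunkByRunes("abcdef", 4) // -> []string{"abcd", "ef"}
//	ChunkByRunes("héllo", 2)  // -> []string{"hé", "ll", "o"} (counted in runes, not bytes)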

func MaxCharsPerRequest(cfg *config.Config) int {
	// Read max characters per request from config with a conservative default.
	if cfg != nil {
		if v := cfg.GeminiWeb.MaxCharsPerRequest; v > 0 {
			return v
		}
	}
	return 1_000_000
}
|
||||
|
||||
func SendWithSplit(chat *ChatSession, text string, files []string, cfg *config.Config) (ModelOutput, error) {
|
||||
// Validate chat session
|
||||
if chat == nil {
|
||||
return ModelOutput{}, fmt.Errorf("nil chat session")
|
||||
}
|
||||
|
||||
// Resolve the maximum number of characters allowed per request
|
||||
maxChars := MaxCharsPerRequest(cfg)
|
||||
if maxChars <= 0 {
|
||||
maxChars = 1_000_000
|
||||
}
|
||||
|
||||
// If within limit, send directly
|
||||
if utf8.RuneCountInString(text) <= maxChars {
|
||||
return chat.SendMessage(text, files)
|
||||
}
|
||||
|
||||
// Decide whether to use continuation hint (enabled by default)
|
||||
useHint := true
|
||||
if cfg != nil && cfg.GeminiWeb.DisableContinuationHint {
|
||||
useHint = false
|
||||
}
|
||||
|
||||
// Compute chunk size in runes. If the hint does not fit, disable it for this request.
|
||||
hintLen := 0
|
||||
if useHint {
|
||||
hintLen = utf8.RuneCountInString(continuationHint)
|
||||
}
|
||||
chunkSize := maxChars - hintLen
|
||||
if chunkSize <= 0 {
|
||||
// maxChars is too small to accommodate the hint; fall back to no-hint splitting
|
||||
useHint = false
|
||||
chunkSize = maxChars
|
||||
}
|
||||
|
||||
// Split into rune-safe chunks
|
||||
chunks := ChunkByRunes(text, chunkSize)
|
||||
if len(chunks) == 0 {
|
||||
chunks = []string{""}
|
||||
}
|
||||
|
||||
// Send all but the last chunk without files, optionally appending hint
|
||||
for i := 0; i < len(chunks)-1; i++ {
|
||||
part := chunks[i]
|
||||
if useHint {
|
||||
part += continuationHint
|
||||
}
|
||||
if _, err := chat.SendMessage(part, nil); err != nil {
|
||||
return ModelOutput{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// Send final chunk with files and return the actual output
|
||||
return chat.SendMessage(chunks[len(chunks)-1], files)
|
||||
}
@@ -1,851 +0,0 @@
|
||||
package geminiwebapi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/gemini"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/translator/translator"
|
||||
cliproxyexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/tidwall/gjson"
|
||||
"github.com/tidwall/sjson"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
const (
|
||||
geminiWebDefaultTimeoutSec = 300
|
||||
)
|
||||
|
||||
type GeminiWebState struct {
|
||||
cfg *config.Config
|
||||
token *gemini.GeminiWebTokenStorage
|
||||
storagePath string
|
||||
|
||||
stableClientID string
|
||||
accountID string
|
||||
|
||||
reqMu sync.Mutex
|
||||
client *GeminiClient
|
||||
|
||||
tokenMu sync.Mutex
|
||||
tokenDirty bool
|
||||
|
||||
convMu sync.RWMutex
|
||||
convStore map[string][]string
|
||||
convData map[string]ConversationRecord
|
||||
convIndex map[string]string
|
||||
|
||||
lastRefresh time.Time
|
||||
}
|
||||
|
||||
func NewGeminiWebState(cfg *config.Config, token *gemini.GeminiWebTokenStorage, storagePath string) *GeminiWebState {
|
||||
state := &GeminiWebState{
|
||||
cfg: cfg,
|
||||
token: token,
|
||||
storagePath: storagePath,
|
||||
convStore: make(map[string][]string),
|
||||
convData: make(map[string]ConversationRecord),
|
||||
convIndex: make(map[string]string),
|
||||
}
|
||||
suffix := Sha256Hex(token.Secure1PSID)
|
||||
if len(suffix) > 16 {
|
||||
suffix = suffix[:16]
|
||||
}
|
||||
state.stableClientID = "gemini-web-" + suffix
|
||||
if storagePath != "" {
|
||||
base := strings.TrimSuffix(filepath.Base(storagePath), filepath.Ext(storagePath))
|
||||
if base != "" {
|
||||
state.accountID = base
|
||||
} else {
|
||||
state.accountID = suffix
|
||||
}
|
||||
} else {
|
||||
state.accountID = suffix
|
||||
}
|
||||
state.loadConversationCaches()
|
||||
return state
|
||||
}
|
||||
|
||||
// Label returns a stable account label for logging and persistence.
|
||||
// If a storage file path is known, it uses the file base name (without extension).
|
||||
// Otherwise, it falls back to the stable client ID (e.g., "gemini-web-<hash>").
|
||||
func (s *GeminiWebState) Label() string {
|
||||
if s == nil {
|
||||
return ""
|
||||
}
|
||||
if s.storagePath != "" {
|
||||
base := strings.TrimSuffix(filepath.Base(s.storagePath), filepath.Ext(s.storagePath))
|
||||
if base != "" {
|
||||
return base
|
||||
}
|
||||
}
|
||||
return s.stableClientID
|
||||
}
|
||||
|
||||
func (s *GeminiWebState) loadConversationCaches() {
|
||||
path := s.convPath()
|
||||
if path == "" {
|
||||
return
|
||||
}
|
||||
if store, err := LoadConvStore(path); err == nil {
|
||||
s.convStore = store
|
||||
}
|
||||
if items, index, err := LoadConvData(path); err == nil {
|
||||
s.convData = items
|
||||
s.convIndex = index
|
||||
}
|
||||
}
|
||||
|
||||
// convPath returns the BoltDB file path used for both account metadata and conversation data.
|
||||
func (s *GeminiWebState) convPath() string {
|
||||
base := s.storagePath
|
||||
if base == "" {
|
||||
// Use accountID directly as base name; ConvBoltPath will append .bolt.
|
||||
base = s.accountID
|
||||
}
|
||||
return ConvBoltPath(base)
|
||||
}
|
||||
|
||||
func (s *GeminiWebState) GetRequestMutex() *sync.Mutex { return &s.reqMu }
|
||||
|
||||
func (s *GeminiWebState) EnsureClient() error {
|
||||
if s.client != nil && s.client.Running {
|
||||
return nil
|
||||
}
|
||||
proxyURL := ""
|
||||
if s.cfg != nil {
|
||||
proxyURL = s.cfg.ProxyURL
|
||||
}
|
||||
s.client = NewGeminiClient(
|
||||
s.token.Secure1PSID,
|
||||
s.token.Secure1PSIDTS,
|
||||
proxyURL,
|
||||
)
|
||||
timeout := geminiWebDefaultTimeoutSec
|
||||
if err := s.client.Init(float64(timeout), false); err != nil {
|
||||
s.client = nil
|
||||
return err
|
||||
}
|
||||
s.lastRefresh = time.Now()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *GeminiWebState) Refresh(ctx context.Context) error {
|
||||
_ = ctx
|
||||
proxyURL := ""
|
||||
if s.cfg != nil {
|
||||
proxyURL = s.cfg.ProxyURL
|
||||
}
|
||||
s.client = NewGeminiClient(
|
||||
s.token.Secure1PSID,
|
||||
s.token.Secure1PSIDTS,
|
||||
proxyURL,
|
||||
)
|
||||
timeout := geminiWebDefaultTimeoutSec
|
||||
if err := s.client.Init(float64(timeout), false); err != nil {
|
||||
return err
|
||||
}
|
||||
// Attempt rotation proactively to persist new TS sooner
|
||||
if newTS, err := s.client.RotateTS(); err == nil && newTS != "" && newTS != s.token.Secure1PSIDTS {
|
||||
s.tokenMu.Lock()
|
||||
s.token.Secure1PSIDTS = newTS
|
||||
s.tokenDirty = true
|
||||
if s.client != nil && s.client.Cookies != nil {
|
||||
s.client.Cookies["__Secure-1PSIDTS"] = newTS
|
||||
}
|
||||
s.tokenMu.Unlock()
|
||||
// Detailed debug log: provider and account.
|
||||
log.Debugf("gemini web account %s rotated 1PSIDTS: %s", s.accountID, MaskToken28(newTS))
|
||||
}
|
||||
s.lastRefresh = time.Now()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *GeminiWebState) TokenSnapshot() *gemini.GeminiWebTokenStorage {
|
||||
s.tokenMu.Lock()
|
||||
defer s.tokenMu.Unlock()
|
||||
c := *s.token
|
||||
return &c
|
||||
}
|
||||
|
||||
type geminiWebPrepared struct {
|
||||
handlerType string
|
||||
translatedRaw []byte
|
||||
prompt string
|
||||
uploaded []string
|
||||
chat *ChatSession
|
||||
cleaned []RoleText
|
||||
underlying string
|
||||
reuse bool
|
||||
tagged bool
|
||||
originalRaw []byte
|
||||
}
|
||||
|
||||
func (s *GeminiWebState) prepare(ctx context.Context, modelName string, rawJSON []byte, stream bool, original []byte) (*geminiWebPrepared, *interfaces.ErrorMessage) {
|
||||
res := &geminiWebPrepared{originalRaw: original}
|
||||
res.translatedRaw = bytes.Clone(rawJSON)
|
||||
if handler, ok := ctx.Value("handler").(interfaces.APIHandler); ok && handler != nil {
|
||||
res.handlerType = handler.HandlerType()
|
||||
res.translatedRaw = translator.Request(res.handlerType, constant.GeminiWeb, modelName, res.translatedRaw, stream)
|
||||
}
|
||||
recordAPIRequest(ctx, s.cfg, res.translatedRaw)
|
||||
|
||||
messages, files, mimes, msgFileIdx, err := ParseMessagesAndFiles(res.translatedRaw)
|
||||
if err != nil {
|
||||
return nil, &interfaces.ErrorMessage{StatusCode: 400, Error: fmt.Errorf("bad request: %w", err)}
|
||||
}
|
||||
cleaned := SanitizeAssistantMessages(messages)
|
||||
res.cleaned = cleaned
|
||||
res.underlying = MapAliasToUnderlying(modelName)
|
||||
model, err := ModelFromName(res.underlying)
|
||||
if err != nil {
|
||||
return nil, &interfaces.ErrorMessage{StatusCode: 400, Error: err}
|
||||
}
|
||||
|
||||
var meta []string
|
||||
useMsgs := cleaned
|
||||
filesSubset := files
|
||||
mimesSubset := mimes
|
||||
|
||||
if s.useReusableContext() {
|
||||
reuseMeta, remaining := s.findReusableSession(res.underlying, cleaned)
|
||||
if len(reuseMeta) > 0 {
|
||||
res.reuse = true
|
||||
meta = reuseMeta
|
||||
if len(remaining) == 1 {
|
||||
useMsgs = []RoleText{remaining[0]}
|
||||
} else if len(remaining) > 1 {
|
||||
useMsgs = remaining
|
||||
} else if len(cleaned) > 0 {
|
||||
useMsgs = []RoleText{cleaned[len(cleaned)-1]}
|
||||
}
|
||||
if len(useMsgs) == 1 && len(messages) > 0 && len(msgFileIdx) == len(messages) {
|
||||
lastIdx := len(msgFileIdx) - 1
|
||||
idxs := msgFileIdx[lastIdx]
|
||||
if len(idxs) > 0 {
|
||||
filesSubset = make([][]byte, 0, len(idxs))
|
||||
mimesSubset = make([]string, 0, len(idxs))
|
||||
for _, fi := range idxs {
|
||||
if fi >= 0 && fi < len(files) {
|
||||
filesSubset = append(filesSubset, files[fi])
|
||||
if fi < len(mimes) {
|
||||
mimesSubset = append(mimesSubset, mimes[fi])
|
||||
} else {
|
||||
mimesSubset = append(mimesSubset, "")
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
filesSubset = nil
|
||||
mimesSubset = nil
|
||||
}
|
||||
} else {
|
||||
filesSubset = nil
|
||||
mimesSubset = nil
|
||||
}
|
||||
} else {
|
||||
if len(cleaned) >= 2 && strings.EqualFold(cleaned[len(cleaned)-2].Role, "assistant") {
|
||||
keyUnderlying := AccountMetaKey(s.accountID, res.underlying)
|
||||
keyAlias := AccountMetaKey(s.accountID, modelName)
|
||||
s.convMu.RLock()
|
||||
fallbackMeta := s.convStore[keyUnderlying]
|
||||
if len(fallbackMeta) == 0 {
|
||||
fallbackMeta = s.convStore[keyAlias]
|
||||
}
|
||||
s.convMu.RUnlock()
|
||||
if len(fallbackMeta) > 0 {
|
||||
meta = fallbackMeta
|
||||
useMsgs = []RoleText{cleaned[len(cleaned)-1]}
|
||||
res.reuse = true
|
||||
filesSubset = nil
|
||||
mimesSubset = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
keyUnderlying := AccountMetaKey(s.accountID, res.underlying)
|
||||
keyAlias := AccountMetaKey(s.accountID, modelName)
|
||||
s.convMu.RLock()
|
||||
if v, ok := s.convStore[keyUnderlying]; ok && len(v) > 0 {
|
||||
meta = v
|
||||
} else {
|
||||
meta = s.convStore[keyAlias]
|
||||
}
|
||||
s.convMu.RUnlock()
|
||||
}
|
||||
|
||||
res.tagged = NeedRoleTags(useMsgs)
|
||||
if res.reuse && len(useMsgs) == 1 {
|
||||
res.tagged = false
|
||||
}
|
||||
|
||||
enableXML := s.cfg != nil && s.cfg.GeminiWeb.CodeMode
|
||||
useMsgs = AppendXMLWrapHintIfNeeded(useMsgs, !enableXML)
|
||||
|
||||
res.prompt = BuildPrompt(useMsgs, res.tagged, res.tagged)
|
||||
if strings.TrimSpace(res.prompt) == "" {
|
||||
return nil, &interfaces.ErrorMessage{StatusCode: 400, Error: errors.New("bad request: empty prompt after filtering system/thought content")}
|
||||
}
|
||||
|
||||
uploaded, upErr := MaterializeInlineFiles(filesSubset, mimesSubset)
|
||||
if upErr != nil {
|
||||
return nil, upErr
|
||||
}
|
||||
res.uploaded = uploaded
|
||||
|
||||
if err = s.EnsureClient(); err != nil {
|
||||
return nil, &interfaces.ErrorMessage{StatusCode: 500, Error: err}
|
||||
}
|
||||
chat := s.client.StartChat(model, s.getConfiguredGem(), meta)
|
||||
chat.SetRequestedModel(modelName)
|
||||
res.chat = chat
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (s *GeminiWebState) Send(ctx context.Context, modelName string, reqPayload []byte, opts cliproxyexecutor.Options) ([]byte, *interfaces.ErrorMessage, *geminiWebPrepared) {
|
||||
prep, errMsg := s.prepare(ctx, modelName, reqPayload, opts.Stream, opts.OriginalRequest)
|
||||
if errMsg != nil {
|
||||
return nil, errMsg, nil
|
||||
}
|
||||
defer CleanupFiles(prep.uploaded)
|
||||
|
||||
output, err := SendWithSplit(prep.chat, prep.prompt, prep.uploaded, s.cfg)
|
||||
if err != nil {
|
||||
return nil, s.wrapSendError(err), nil
|
||||
}
|
||||
|
||||
// Hook: For gemini-2.5-flash-image-preview, if the API returns only images without any text,
|
||||
// inject a small textual summary so that conversation persistence has non-empty assistant text.
|
||||
// This helps conversation recovery (conv store) to match sessions reliably.
|
||||
if strings.EqualFold(modelName, "gemini-2.5-flash-image-preview") {
|
||||
if len(output.Candidates) > 0 {
|
||||
c := output.Candidates[output.Chosen]
|
||||
hasNoText := strings.TrimSpace(c.Text) == ""
|
||||
hasImages := len(c.GeneratedImages) > 0 || len(c.WebImages) > 0
|
||||
if hasNoText && hasImages {
|
||||
// Build a stable, concise fallback text. Avoid dynamic details to keep hashes stable.
|
||||
// Prefer a short, deterministic phrase so users get feedback while hashes stay consistent.
|
||||
fallback := "Done"
|
||||
// Mutate the chosen candidate's text so both response conversion and
|
||||
// conversation persistence observe the same fallback.
|
||||
output.Candidates[output.Chosen].Text = fallback
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
gemBytes, err := ConvertOutputToGemini(&output, modelName, prep.prompt)
|
||||
if err != nil {
|
||||
return nil, &interfaces.ErrorMessage{StatusCode: 500, Error: err}, nil
|
||||
}
|
||||
|
||||
s.addAPIResponseData(ctx, gemBytes)
|
||||
s.persistConversation(modelName, prep, &output)
|
||||
return gemBytes, nil, prep
|
||||
}
|
||||
|
||||
func (s *GeminiWebState) wrapSendError(genErr error) *interfaces.ErrorMessage {
|
||||
status := 500
|
||||
var usage *UsageLimitExceeded
|
||||
var blocked *TemporarilyBlocked
|
||||
var invalid *ModelInvalid
|
||||
var valueErr *ValueError
|
||||
var timeout *TimeoutError
|
||||
switch {
|
||||
case errors.As(genErr, &usage):
|
||||
status = 429
|
||||
case errors.As(genErr, &blocked):
|
||||
status = 429
|
||||
case errors.As(genErr, &invalid):
|
||||
status = 400
|
||||
case errors.As(genErr, &valueErr):
|
||||
status = 400
|
||||
case errors.As(genErr, &timeout):
|
||||
status = 504
|
||||
}
|
||||
return &interfaces.ErrorMessage{StatusCode: status, Error: genErr}
|
||||
}
|
||||
|
||||
func (s *GeminiWebState) persistConversation(modelName string, prep *geminiWebPrepared, output *ModelOutput) {
|
||||
if output == nil || prep == nil || prep.chat == nil {
|
||||
return
|
||||
}
|
||||
metadata := prep.chat.Metadata()
|
||||
if len(metadata) > 0 {
|
||||
keyUnderlying := AccountMetaKey(s.accountID, prep.underlying)
|
||||
keyAlias := AccountMetaKey(s.accountID, modelName)
|
||||
s.convMu.Lock()
|
||||
s.convStore[keyUnderlying] = metadata
|
||||
s.convStore[keyAlias] = metadata
|
||||
storeSnapshot := make(map[string][]string, len(s.convStore))
|
||||
for k, v := range s.convStore {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
cp := make([]string, len(v))
|
||||
copy(cp, v)
|
||||
storeSnapshot[k] = cp
|
||||
}
|
||||
s.convMu.Unlock()
|
||||
_ = SaveConvStore(s.convPath(), storeSnapshot)
|
||||
}
|
||||
|
||||
if !s.useReusableContext() {
|
||||
return
|
||||
}
|
||||
rec, ok := BuildConversationRecord(prep.underlying, s.stableClientID, prep.cleaned, output, metadata)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
stableHash := HashConversation(rec.ClientID, prep.underlying, rec.Messages)
|
||||
accountHash := HashConversation(s.accountID, prep.underlying, rec.Messages)
|
||||
|
||||
s.convMu.Lock()
|
||||
s.convData[stableHash] = rec
|
||||
s.convIndex["hash:"+stableHash] = stableHash
|
||||
if accountHash != stableHash {
|
||||
s.convIndex["hash:"+accountHash] = stableHash
|
||||
}
|
||||
dataSnapshot := make(map[string]ConversationRecord, len(s.convData))
|
||||
for k, v := range s.convData {
|
||||
dataSnapshot[k] = v
|
||||
}
|
||||
indexSnapshot := make(map[string]string, len(s.convIndex))
|
||||
for k, v := range s.convIndex {
|
||||
indexSnapshot[k] = v
|
||||
}
|
||||
s.convMu.Unlock()
|
||||
_ = SaveConvData(s.convPath(), dataSnapshot, indexSnapshot)
|
||||
}
|
||||
|
||||
func (s *GeminiWebState) addAPIResponseData(ctx context.Context, line []byte) {
|
||||
appendAPIResponseChunk(ctx, s.cfg, line)
|
||||
}
|
||||
|
||||
func (s *GeminiWebState) ConvertToTarget(ctx context.Context, modelName string, prep *geminiWebPrepared, gemBytes []byte) []byte {
|
||||
if prep == nil || prep.handlerType == "" {
|
||||
return gemBytes
|
||||
}
|
||||
if !translator.NeedConvert(prep.handlerType, constant.GeminiWeb) {
|
||||
return gemBytes
|
||||
}
|
||||
var param any
|
||||
out := translator.ResponseNonStream(prep.handlerType, constant.GeminiWeb, ctx, modelName, prep.originalRaw, prep.translatedRaw, gemBytes, ¶m)
|
||||
if prep.handlerType == constant.OpenAI && out != "" {
|
||||
newID := fmt.Sprintf("chatcmpl-%x", time.Now().UnixNano())
|
||||
if v := gjson.Parse(out).Get("id"); v.Exists() {
|
||||
out, _ = sjson.Set(out, "id", newID)
|
||||
}
|
||||
}
|
||||
return []byte(out)
|
||||
}
|
||||
|
||||
func (s *GeminiWebState) ConvertStream(ctx context.Context, modelName string, prep *geminiWebPrepared, gemBytes []byte) []string {
|
||||
if prep == nil || prep.handlerType == "" {
|
||||
return []string{string(gemBytes)}
|
||||
}
|
||||
if !translator.NeedConvert(prep.handlerType, constant.GeminiWeb) {
|
||||
return []string{string(gemBytes)}
|
||||
}
|
||||
var param any
|
||||
return translator.Response(prep.handlerType, constant.GeminiWeb, ctx, modelName, prep.originalRaw, prep.translatedRaw, gemBytes, ¶m)
|
||||
}
|
||||
|
||||
func (s *GeminiWebState) DoneStream(ctx context.Context, modelName string, prep *geminiWebPrepared) []string {
|
||||
if prep == nil || prep.handlerType == "" {
|
||||
return nil
|
||||
}
|
||||
if !translator.NeedConvert(prep.handlerType, constant.GeminiWeb) {
|
||||
return nil
|
||||
}
|
||||
var param any
|
||||
return translator.Response(prep.handlerType, constant.GeminiWeb, ctx, modelName, prep.originalRaw, prep.translatedRaw, []byte("[DONE]"), ¶m)
|
||||
}
|
||||
|
||||
func (s *GeminiWebState) useReusableContext() bool {
|
||||
if s.cfg == nil {
|
||||
return true
|
||||
}
|
||||
return s.cfg.GeminiWeb.Context
|
||||
}
|
||||
|
||||
func (s *GeminiWebState) findReusableSession(modelName string, msgs []RoleText) ([]string, []RoleText) {
|
||||
s.convMu.RLock()
|
||||
items := s.convData
|
||||
index := s.convIndex
|
||||
s.convMu.RUnlock()
|
||||
return FindReusableSessionIn(items, index, s.stableClientID, s.accountID, modelName, msgs)
|
||||
}
|
||||
|
||||
func (s *GeminiWebState) getConfiguredGem() *Gem {
|
||||
if s.cfg != nil && s.cfg.GeminiWeb.CodeMode {
|
||||
return &Gem{ID: "coding-partner", Name: "Coding partner", Predefined: true}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// recordAPIRequest stores the upstream request payload in Gin context for request logging.
|
||||
func recordAPIRequest(ctx context.Context, cfg *config.Config, payload []byte) {
|
||||
if cfg == nil || !cfg.RequestLog || len(payload) == 0 {
|
||||
return
|
||||
}
|
||||
if ginCtx, ok := ctx.Value("gin").(*gin.Context); ok && ginCtx != nil {
|
||||
ginCtx.Set("API_REQUEST", bytes.Clone(payload))
|
||||
}
|
||||
}
|
||||
|
||||
// appendAPIResponseChunk appends an upstream response chunk to Gin context for request logging.
|
||||
func appendAPIResponseChunk(ctx context.Context, cfg *config.Config, chunk []byte) {
|
||||
if cfg == nil || !cfg.RequestLog {
|
||||
return
|
||||
}
|
||||
data := bytes.TrimSpace(bytes.Clone(chunk))
|
||||
if len(data) == 0 {
|
||||
return
|
||||
}
|
||||
if ginCtx, ok := ctx.Value("gin").(*gin.Context); ok && ginCtx != nil {
|
||||
if existing, exists := ginCtx.Get("API_RESPONSE"); exists {
|
||||
if prev, okBytes := existing.([]byte); okBytes {
|
||||
prev = append(prev, data...)
|
||||
prev = append(prev, []byte("\n\n")...)
|
||||
ginCtx.Set("API_RESPONSE", prev)
|
||||
return
|
||||
}
|
||||
}
|
||||
ginCtx.Set("API_RESPONSE", data)
|
||||
}
|
||||
}
|
||||
|
||||
// Persistence helpers --------------------------------------------------
|
||||
|
||||
// Sha256Hex computes the SHA256 hash of a string and returns its hex representation.
|
||||
func Sha256Hex(s string) string {
|
||||
sum := sha256.Sum256([]byte(s))
|
||||
return hex.EncodeToString(sum[:])
|
||||
}
|
||||
|
||||
func ToStoredMessages(msgs []RoleText) []StoredMessage {
|
||||
out := make([]StoredMessage, 0, len(msgs))
|
||||
for _, m := range msgs {
|
||||
out = append(out, StoredMessage{
|
||||
Role: m.Role,
|
||||
Content: m.Text,
|
||||
})
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func HashMessage(m StoredMessage) string {
	s := fmt.Sprintf(`{"content":%q,"role":%q}`, m.Content, strings.ToLower(m.Role))
	return Sha256Hex(s)
}

func HashConversation(clientID, model string, msgs []StoredMessage) string {
	var b strings.Builder
	b.WriteString(clientID)
	b.WriteString("|")
	b.WriteString(model)
	for _, m := range msgs {
		b.WriteString("|")
		b.WriteString(HashMessage(m))
	}
	return Sha256Hex(b.String())
}
|
||||
|
||||
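// exampleConversationKey is an illustrative sketch showing how the hashing
// helpers above combine into the index key used for conversation lookups; the
// client ID, model, and messages below are hypothetical values.
func exampleConversationKey() string {
    msgs := ToStoredMessages([]RoleText{
        {Role: "user", Text: "hello"},
        {Role: "assistant", Text: "hi there"},
    })
    // FindByMessageListIn performs the same derivation before consulting the index.
    return "hash:" + HashConversation("client-123", "gemini-2.5-flash", msgs)
}
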
// ConvBoltPath returns the BoltDB file path used for both account metadata and conversation data.
// Different logical datasets are kept in separate buckets within this single DB file.
func ConvBoltPath(tokenFilePath string) string {
    wd, err := os.Getwd()
    if err != nil || wd == "" {
        wd = "."
    }
    convDir := filepath.Join(wd, "conv")
    base := strings.TrimSuffix(filepath.Base(tokenFilePath), filepath.Ext(tokenFilePath))
    return filepath.Join(convDir, base+".bolt")
}

// LoadConvStore reads the account-level metadata store from disk.
func LoadConvStore(path string) (map[string][]string, error) {
    if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
        return nil, err
    }
    db, err := bolt.Open(path, 0o600, &bolt.Options{Timeout: time.Second})
    if err != nil {
        return nil, err
    }
    defer func() {
        _ = db.Close()
    }()
    out := map[string][]string{}
    err = db.View(func(tx *bolt.Tx) error {
        b := tx.Bucket([]byte("account_meta"))
        if b == nil {
            return nil
        }
        return b.ForEach(func(k, v []byte) error {
            var arr []string
            if len(v) > 0 {
                if e := json.Unmarshal(v, &arr); e != nil {
                    // Skip malformed entries instead of failing the whole load
                    return nil
                }
            }
            out[string(k)] = arr
            return nil
        })
    })
    if err != nil {
        return nil, err
    }
    return out, nil
}

// SaveConvStore writes the account-level metadata store to disk atomically.
func SaveConvStore(path string, data map[string][]string) error {
    if data == nil {
        data = map[string][]string{}
    }
    if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
        return err
    }
    db, err := bolt.Open(path, 0o600, &bolt.Options{Timeout: 2 * time.Second})
    if err != nil {
        return err
    }
    defer func() {
        _ = db.Close()
    }()
    return db.Update(func(tx *bolt.Tx) error {
        // Recreate bucket to reflect the given snapshot exactly.
        if b := tx.Bucket([]byte("account_meta")); b != nil {
            if err = tx.DeleteBucket([]byte("account_meta")); err != nil {
                return err
            }
        }
        b, errCreateBucket := tx.CreateBucket([]byte("account_meta"))
        if errCreateBucket != nil {
            return errCreateBucket
        }
        for k, v := range data {
            enc, e := json.Marshal(v)
            if e != nil {
                return e
            }
            if e = b.Put([]byte(k), enc); e != nil {
                return e
            }
        }
        return nil
    })
}

// AccountMetaKey builds the key for account-level metadata map.
func AccountMetaKey(email, modelName string) string {
    return fmt.Sprintf("account-meta|%s|%s", email, modelName)
}

// LoadConvData reads the full conversation data and index from disk.
func LoadConvData(path string) (map[string]ConversationRecord, map[string]string, error) {
    if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
        return nil, nil, err
    }
    db, err := bolt.Open(path, 0o600, &bolt.Options{Timeout: time.Second})
    if err != nil {
        return nil, nil, err
    }
    defer func() {
        _ = db.Close()
    }()
    items := map[string]ConversationRecord{}
    index := map[string]string{}
    err = db.View(func(tx *bolt.Tx) error {
        // Load conv_items
        if b := tx.Bucket([]byte("conv_items")); b != nil {
            if e := b.ForEach(func(k, v []byte) error {
                var rec ConversationRecord
                if len(v) > 0 {
                    if e2 := json.Unmarshal(v, &rec); e2 != nil {
                        // Skip malformed
                        return nil
                    }
                    items[string(k)] = rec
                }
                return nil
            }); e != nil {
                return e
            }
        }
        // Load conv_index
        if b := tx.Bucket([]byte("conv_index")); b != nil {
            if e := b.ForEach(func(k, v []byte) error {
                index[string(k)] = string(v)
                return nil
            }); e != nil {
                return e
            }
        }
        return nil
    })
    if err != nil {
        return nil, nil, err
    }
    return items, index, nil
}

// SaveConvData writes the full conversation data and index to disk atomically.
func SaveConvData(path string, items map[string]ConversationRecord, index map[string]string) error {
    if items == nil {
        items = map[string]ConversationRecord{}
    }
    if index == nil {
        index = map[string]string{}
    }
    if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
        return err
    }
    db, err := bolt.Open(path, 0o600, &bolt.Options{Timeout: 2 * time.Second})
    if err != nil {
        return err
    }
    defer func() {
        _ = db.Close()
    }()
    return db.Update(func(tx *bolt.Tx) error {
        // Recreate items bucket
        if b := tx.Bucket([]byte("conv_items")); b != nil {
            if err = tx.DeleteBucket([]byte("conv_items")); err != nil {
                return err
            }
        }
        bi, errCreateBucket := tx.CreateBucket([]byte("conv_items"))
        if errCreateBucket != nil {
            return errCreateBucket
        }
        for k, rec := range items {
            enc, e := json.Marshal(rec)
            if e != nil {
                return e
            }
            if e = bi.Put([]byte(k), enc); e != nil {
                return e
            }
        }

        // Recreate index bucket
        if b := tx.Bucket([]byte("conv_index")); b != nil {
            if err = tx.DeleteBucket([]byte("conv_index")); err != nil {
                return err
            }
        }
        bx, errCreateBucket := tx.CreateBucket([]byte("conv_index"))
        if errCreateBucket != nil {
            return errCreateBucket
        }
        for k, v := range index {
            if e := bx.Put([]byte(k), []byte(v)); e != nil {
                return e
            }
        }
        return nil
    })
}

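// exampleConvRoundTrip is an illustrative sketch of persisting and re-reading a
// conversation record with the helpers above; the path, key, and record values
// are hypothetical.
func exampleConvRoundTrip() error {
    path := ConvBoltPath("auths/demo-account.json") // resolves to ./conv/demo-account.bolt
    rec := ConversationRecord{Model: "gemini-2.5-flash", ClientID: "client-123"}
    key := "demo-conversation-hash"
    items := map[string]ConversationRecord{key: rec}
    index := map[string]string{"hash:" + key: key}
    if err := SaveConvData(path, items, index); err != nil {
        return err
    }
    loadedItems, loadedIndex, err := LoadConvData(path)
    if err != nil {
        return err
    }
    _ = loadedItems[loadedIndex["hash:"+key]] // the record written above
    return nil
}
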
// BuildConversationRecord constructs a ConversationRecord from history and the latest output.
// Returns false when output is empty or has no candidates.
func BuildConversationRecord(model, clientID string, history []RoleText, output *ModelOutput, metadata []string) (ConversationRecord, bool) {
    if output == nil || len(output.Candidates) == 0 {
        return ConversationRecord{}, false
    }
    text := ""
    if t := output.Candidates[0].Text; t != "" {
        text = RemoveThinkTags(t)
    }
    final := append([]RoleText{}, history...)
    final = append(final, RoleText{Role: "assistant", Text: text})
    rec := ConversationRecord{
        Model: model,
        ClientID: clientID,
        Metadata: metadata,
        Messages: ToStoredMessages(final),
        CreatedAt: time.Now(),
        UpdatedAt: time.Now(),
    }
    return rec, true
}

// FindByMessageListIn looks up a conversation record by hashed message list.
// It attempts both the stable client ID and a legacy email-based ID.
func FindByMessageListIn(items map[string]ConversationRecord, index map[string]string, stableClientID, email, model string, msgs []RoleText) (ConversationRecord, bool) {
    stored := ToStoredMessages(msgs)
    stableHash := HashConversation(stableClientID, model, stored)
    fallbackHash := HashConversation(email, model, stored)

    // Try stable hash via index indirection first
    if key, ok := index["hash:"+stableHash]; ok {
        if rec, ok2 := items[key]; ok2 {
            return rec, true
        }
    }
    if rec, ok := items[stableHash]; ok {
        return rec, true
    }
    // Fallback to legacy hash (email-based)
    if key, ok := index["hash:"+fallbackHash]; ok {
        if rec, ok2 := items[key]; ok2 {
            return rec, true
        }
    }
    if rec, ok := items[fallbackHash]; ok {
        return rec, true
    }
    return ConversationRecord{}, false
}

// FindConversationIn tries exact then sanitized assistant messages.
func FindConversationIn(items map[string]ConversationRecord, index map[string]string, stableClientID, email, model string, msgs []RoleText) (ConversationRecord, bool) {
    if len(msgs) == 0 {
        return ConversationRecord{}, false
    }
    if rec, ok := FindByMessageListIn(items, index, stableClientID, email, model, msgs); ok {
        return rec, true
    }
    if rec, ok := FindByMessageListIn(items, index, stableClientID, email, model, SanitizeAssistantMessages(msgs)); ok {
        return rec, true
    }
    return ConversationRecord{}, false
}

// FindReusableSessionIn returns reusable metadata and the remaining message suffix.
func FindReusableSessionIn(items map[string]ConversationRecord, index map[string]string, stableClientID, email, model string, msgs []RoleText) ([]string, []RoleText) {
    if len(msgs) < 2 {
        return nil, nil
    }
    searchEnd := len(msgs)
    for searchEnd >= 2 {
        sub := msgs[:searchEnd]
        tail := sub[len(sub)-1]
        if strings.EqualFold(tail.Role, "assistant") || strings.EqualFold(tail.Role, "system") {
            if rec, ok := FindConversationIn(items, index, stableClientID, email, model, sub); ok {
                remain := msgs[searchEnd:]
                return rec.Metadata, remain
            }
        }
        searchEnd--
    }
    return nil, nil
}

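// exampleSessionReuse is an illustrative sketch of the prefix matching above:
// when the leading user/assistant turns match a stored conversation, its
// metadata is reused and only the trailing messages remain to be sent. The
// identifiers and messages are hypothetical.
func exampleSessionReuse(items map[string]ConversationRecord, index map[string]string) {
    history := []RoleText{
        {Role: "user", Text: "hello"},
        {Role: "assistant", Text: "hi there"},
        {Role: "user", Text: "and one more question"},
    }
    meta, remaining := FindReusableSessionIn(items, index, "client-123", "user@example.com", "gemini-2.5-flash", history)
    _ = meta      // stored Gemini metadata when a prefix matched, otherwise nil
    _ = remaining // the unmatched suffix, here the final user turn
}
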
@@ -8,6 +8,14 @@ import "time"
// GetClaudeModels returns the standard Claude model definitions
func GetClaudeModels() []*ModelInfo {
    return []*ModelInfo{
        {
            ID: "claude-sonnet-4-5-20250929",
            Object: "model",
            Created: 1759104000, // 2025-09-29
            OwnedBy: "anthropic",
            Type: "claude",
            DisplayName: "Claude 4.5 Sonnet",
        },
        {
            ID: "claude-opus-4-1-20250805",
            Object: "model",
@@ -96,6 +104,34 @@ func GetGeminiModels() []*ModelInfo {
            OutputTokenLimit: 65536,
            SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
        },
        {
            ID: "gemini-2.5-flash-image-preview",
            Object: "model",
            Created: time.Now().Unix(),
            OwnedBy: "google",
            Type: "gemini",
            Name: "models/gemini-2.5-flash-image-preview",
            Version: "2.5",
            DisplayName: "Gemini 2.5 Flash Image Preview",
            Description: "State-of-the-art image generation and editing model.",
            InputTokenLimit: 1048576,
            OutputTokenLimit: 8192,
            SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
        },
        {
            ID: "gemini-2.5-flash-image",
            Object: "model",
            Created: time.Now().Unix(),
            OwnedBy: "google",
            Type: "gemini",
            Name: "models/gemini-2.5-flash-image",
            Version: "2.5",
            DisplayName: "Gemini 2.5 Flash Image",
            Description: "State-of-the-art image generation and editing model.",
            InputTokenLimit: 1048576,
            OutputTokenLimit: 8192,
            SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
        },
    }
}

@@ -144,6 +180,34 @@ func GetGeminiCLIModels() []*ModelInfo {
            OutputTokenLimit: 65536,
            SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
        },
        {
            ID: "gemini-2.5-flash-image-preview",
            Object: "model",
            Created: time.Now().Unix(),
            OwnedBy: "google",
            Type: "gemini",
            Name: "models/gemini-2.5-flash-image-preview",
            Version: "2.5",
            DisplayName: "Gemini 2.5 Flash Image Preview",
            Description: "State-of-the-art image generation and editing model.",
            InputTokenLimit: 1048576,
            OutputTokenLimit: 8192,
            SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
        },
        {
            ID: "gemini-2.5-flash-image",
            Object: "model",
            Created: time.Now().Unix(),
            OwnedBy: "google",
            Type: "gemini",
            Name: "models/gemini-2.5-flash-image",
            Version: "2.5",
            DisplayName: "Gemini 2.5 Flash Image",
            Description: "State-of-the-art image generation and editing model.",
            InputTokenLimit: 1048576,
            OutputTokenLimit: 8192,
            SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
        },
    }
}

@@ -314,3 +378,46 @@ func GetQwenModels() []*ModelInfo {
        },
    }
}

// GetIFlowModels returns supported models for iFlow OAuth accounts.
func GetIFlowModels() []*ModelInfo {
    created := time.Now().Unix()
    entries := []struct {
        ID string
        DisplayName string
        Description string
    }{
        {ID: "tstars2.0", DisplayName: "TStars-2.0", Description: "iFlow TStars-2.0 multimodal assistant"},
        {ID: "qwen3-coder-plus", DisplayName: "Qwen3-Coder-Plus", Description: "Qwen3 Coder Plus code generation"},
        {ID: "qwen3-coder", DisplayName: "Qwen3-Coder-480B-A35B", Description: "Qwen3 Coder 480B A35B"},
        {ID: "qwen3-max", DisplayName: "Qwen3-Max", Description: "Qwen3 flagship model"},
        {ID: "qwen3-vl-plus", DisplayName: "Qwen3-VL-Plus", Description: "Qwen3 multimodal vision-language"},
        {ID: "qwen3-max-preview", DisplayName: "Qwen3-Max-Preview", Description: "Qwen3 Max preview build"},
        {ID: "kimi-k2-0905", DisplayName: "Kimi-K2-Instruct-0905", Description: "Moonshot Kimi K2 instruct 0905"},
        {ID: "glm-4.5", DisplayName: "GLM-4.5", Description: "Zhipu GLM 4.5 general model"},
        {ID: "glm-4.6", DisplayName: "GLM-4.6", Description: "Zhipu GLM 4.6 general model"},
        {ID: "kimi-k2", DisplayName: "Kimi-K2", Description: "Moonshot Kimi K2 general model"},
        {ID: "deepseek-v3.2", DisplayName: "DeepSeek-V3.2-Exp", Description: "DeepSeek V3.2 experimental"},
        {ID: "deepseek-v3.1", DisplayName: "DeepSeek-V3.1-Terminus", Description: "DeepSeek V3.1 Terminus"},
        {ID: "deepseek-r1", DisplayName: "DeepSeek-R1", Description: "DeepSeek reasoning model R1"},
        {ID: "deepseek-v3", DisplayName: "DeepSeek-V3-671B", Description: "DeepSeek V3 671B"},
        {ID: "qwen3-32b", DisplayName: "Qwen3-32B", Description: "Qwen3 32B"},
        {ID: "qwen3-235b-a22b-thinking-2507", DisplayName: "Qwen3-235B-A22B-Thinking", Description: "Qwen3 235B A22B Thinking (2507)"},
        {ID: "qwen3-235b-a22b-instruct", DisplayName: "Qwen3-235B-A22B-Instruct", Description: "Qwen3 235B A22B Instruct"},
        {ID: "qwen3-235b", DisplayName: "Qwen3-235B-A22B", Description: "Qwen3 235B A22B"},
    }
    models := make([]*ModelInfo, 0, len(entries))
    for _, entry := range entries {
        models = append(models, &ModelInfo{
            ID: entry.ID,
            Object: "model",
            Created: created,
            OwnedBy: "iflow",
            Type: "iflow",
            DisplayName: entry.DisplayName,
            Description: entry.Description,
        })
    }
    return models
}

@@ -9,6 +9,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
misc "github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -100,54 +101,265 @@ func (r *ModelRegistry) RegisterClient(clientID, clientProvider string, models [
|
||||
r.mutex.Lock()
|
||||
defer r.mutex.Unlock()
|
||||
|
||||
// Remove any existing registration for this client
|
||||
r.unregisterClientInternal(clientID)
|
||||
|
||||
provider := strings.ToLower(clientProvider)
|
||||
modelIDs := make([]string, 0, len(models))
|
||||
uniqueModelIDs := make([]string, 0, len(models))
|
||||
rawModelIDs := make([]string, 0, len(models))
|
||||
newModels := make(map[string]*ModelInfo, len(models))
|
||||
newCounts := make(map[string]int, len(models))
|
||||
for _, model := range models {
|
||||
if model == nil || model.ID == "" {
|
||||
continue
|
||||
}
|
||||
rawModelIDs = append(rawModelIDs, model.ID)
|
||||
newCounts[model.ID]++
|
||||
if _, exists := newModels[model.ID]; exists {
|
||||
continue
|
||||
}
|
||||
newModels[model.ID] = model
|
||||
uniqueModelIDs = append(uniqueModelIDs, model.ID)
|
||||
}
|
||||
|
||||
if len(uniqueModelIDs) == 0 {
|
||||
// No models supplied; unregister existing client state if present.
|
||||
r.unregisterClientInternal(clientID)
|
||||
delete(r.clientModels, clientID)
|
||||
delete(r.clientProviders, clientID)
|
||||
misc.LogCredentialSeparator()
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
|
||||
for _, model := range models {
|
||||
modelIDs = append(modelIDs, model.ID)
|
||||
|
||||
if existing, exists := r.models[model.ID]; exists {
|
||||
// Model already exists, increment count
|
||||
existing.Count++
|
||||
existing.LastUpdated = now
|
||||
if existing.SuspendedClients == nil {
|
||||
existing.SuspendedClients = make(map[string]string)
|
||||
}
|
||||
if provider != "" {
|
||||
if existing.Providers == nil {
|
||||
existing.Providers = make(map[string]int)
|
||||
}
|
||||
existing.Providers[provider]++
|
||||
}
|
||||
log.Debugf("Incremented count for model %s, now %d clients", model.ID, existing.Count)
|
||||
oldModels, hadExisting := r.clientModels[clientID]
|
||||
oldProvider, _ := r.clientProviders[clientID]
|
||||
providerChanged := oldProvider != provider
|
||||
if !hadExisting {
|
||||
// Pure addition path.
|
||||
for _, modelID := range rawModelIDs {
|
||||
model := newModels[modelID]
|
||||
r.addModelRegistration(modelID, provider, model, now)
|
||||
}
|
||||
r.clientModels[clientID] = append([]string(nil), rawModelIDs...)
|
||||
if provider != "" {
|
||||
r.clientProviders[clientID] = provider
|
||||
} else {
|
||||
// New model, create registration
|
||||
registration := &ModelRegistration{
|
||||
Info: model,
|
||||
Count: 1,
|
||||
LastUpdated: now,
|
||||
QuotaExceededClients: make(map[string]*time.Time),
|
||||
SuspendedClients: make(map[string]string),
|
||||
}
|
||||
if provider != "" {
|
||||
registration.Providers = map[string]int{provider: 1}
|
||||
}
|
||||
r.models[model.ID] = registration
|
||||
log.Debugf("Registered new model %s from provider %s", model.ID, clientProvider)
|
||||
delete(r.clientProviders, clientID)
|
||||
}
|
||||
log.Debugf("Registered client %s from provider %s with %d models", clientID, clientProvider, len(rawModelIDs))
|
||||
misc.LogCredentialSeparator()
|
||||
return
|
||||
}
|
||||
|
||||
oldCounts := make(map[string]int, len(oldModels))
|
||||
for _, id := range oldModels {
|
||||
oldCounts[id]++
|
||||
}
|
||||
|
||||
added := make([]string, 0)
|
||||
for _, id := range uniqueModelIDs {
|
||||
if oldCounts[id] == 0 {
|
||||
added = append(added, id)
|
||||
}
|
||||
}
|
||||
|
||||
r.clientModels[clientID] = modelIDs
|
||||
removed := make([]string, 0)
|
||||
for id := range oldCounts {
|
||||
if newCounts[id] == 0 {
|
||||
removed = append(removed, id)
|
||||
}
|
||||
}
|
||||
|
||||
// Handle provider change for overlapping models before modifications.
|
||||
if providerChanged && oldProvider != "" {
|
||||
for id, newCount := range newCounts {
|
||||
if newCount == 0 {
|
||||
continue
|
||||
}
|
||||
oldCount := oldCounts[id]
|
||||
if oldCount == 0 {
|
||||
continue
|
||||
}
|
||||
toRemove := newCount
|
||||
if oldCount < toRemove {
|
||||
toRemove = oldCount
|
||||
}
|
||||
if reg, ok := r.models[id]; ok && reg.Providers != nil {
|
||||
if count, okProv := reg.Providers[oldProvider]; okProv {
|
||||
if count <= toRemove {
|
||||
delete(reg.Providers, oldProvider)
|
||||
} else {
|
||||
reg.Providers[oldProvider] = count - toRemove
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Apply removals first to keep counters accurate.
|
||||
for _, id := range removed {
|
||||
oldCount := oldCounts[id]
|
||||
for i := 0; i < oldCount; i++ {
|
||||
r.removeModelRegistration(clientID, id, oldProvider, now)
|
||||
}
|
||||
}
|
||||
|
||||
for id, oldCount := range oldCounts {
|
||||
newCount := newCounts[id]
|
||||
if newCount == 0 || oldCount <= newCount {
|
||||
continue
|
||||
}
|
||||
overage := oldCount - newCount
|
||||
for i := 0; i < overage; i++ {
|
||||
r.removeModelRegistration(clientID, id, oldProvider, now)
|
||||
}
|
||||
}
|
||||
|
||||
// Apply additions.
|
||||
for id, newCount := range newCounts {
|
||||
oldCount := oldCounts[id]
|
||||
if newCount <= oldCount {
|
||||
continue
|
||||
}
|
||||
model := newModels[id]
|
||||
diff := newCount - oldCount
|
||||
for i := 0; i < diff; i++ {
|
||||
r.addModelRegistration(id, provider, model, now)
|
||||
}
|
||||
}
|
||||
|
||||
// Update metadata for models that remain associated with the client.
|
||||
addedSet := make(map[string]struct{}, len(added))
|
||||
for _, id := range added {
|
||||
addedSet[id] = struct{}{}
|
||||
}
|
||||
for _, id := range uniqueModelIDs {
|
||||
model := newModels[id]
|
||||
if reg, ok := r.models[id]; ok {
|
||||
reg.Info = cloneModelInfo(model)
|
||||
reg.LastUpdated = now
|
||||
if reg.QuotaExceededClients != nil {
|
||||
delete(reg.QuotaExceededClients, clientID)
|
||||
}
|
||||
if reg.SuspendedClients != nil {
|
||||
delete(reg.SuspendedClients, clientID)
|
||||
}
|
||||
if providerChanged && provider != "" {
|
||||
if _, newlyAdded := addedSet[id]; newlyAdded {
|
||||
continue
|
||||
}
|
||||
overlapCount := newCounts[id]
|
||||
if oldCount := oldCounts[id]; oldCount < overlapCount {
|
||||
overlapCount = oldCount
|
||||
}
|
||||
if overlapCount <= 0 {
|
||||
continue
|
||||
}
|
||||
if reg.Providers == nil {
|
||||
reg.Providers = make(map[string]int)
|
||||
}
|
||||
reg.Providers[provider] += overlapCount
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update client bookkeeping.
|
||||
if len(rawModelIDs) > 0 {
|
||||
r.clientModels[clientID] = append([]string(nil), rawModelIDs...)
|
||||
}
|
||||
if provider != "" {
|
||||
r.clientProviders[clientID] = provider
|
||||
} else {
|
||||
delete(r.clientProviders, clientID)
|
||||
}
|
||||
log.Debugf("Registered client %s from provider %s with %d models", clientID, clientProvider, len(models))
|
||||
|
||||
if len(added) == 0 && len(removed) == 0 && !providerChanged {
|
||||
// Only metadata (e.g., display name) changed; skip separator when no log output.
|
||||
return
|
||||
}
|
||||
|
||||
log.Debugf("Reconciled client %s (provider %s) models: +%d, -%d", clientID, provider, len(added), len(removed))
|
||||
misc.LogCredentialSeparator()
|
||||
}
|
||||
|
||||
func (r *ModelRegistry) addModelRegistration(modelID, provider string, model *ModelInfo, now time.Time) {
|
||||
if model == nil || modelID == "" {
|
||||
return
|
||||
}
|
||||
if existing, exists := r.models[modelID]; exists {
|
||||
existing.Count++
|
||||
existing.LastUpdated = now
|
||||
existing.Info = cloneModelInfo(model)
|
||||
if existing.SuspendedClients == nil {
|
||||
existing.SuspendedClients = make(map[string]string)
|
||||
}
|
||||
if provider != "" {
|
||||
if existing.Providers == nil {
|
||||
existing.Providers = make(map[string]int)
|
||||
}
|
||||
existing.Providers[provider]++
|
||||
}
|
||||
log.Debugf("Incremented count for model %s, now %d clients", modelID, existing.Count)
|
||||
return
|
||||
}
|
||||
|
||||
registration := &ModelRegistration{
|
||||
Info: cloneModelInfo(model),
|
||||
Count: 1,
|
||||
LastUpdated: now,
|
||||
QuotaExceededClients: make(map[string]*time.Time),
|
||||
SuspendedClients: make(map[string]string),
|
||||
}
|
||||
if provider != "" {
|
||||
registration.Providers = map[string]int{provider: 1}
|
||||
}
|
||||
r.models[modelID] = registration
|
||||
log.Debugf("Registered new model %s from provider %s", modelID, provider)
|
||||
}
|
||||
|
||||
func (r *ModelRegistry) removeModelRegistration(clientID, modelID, provider string, now time.Time) {
|
||||
registration, exists := r.models[modelID]
|
||||
if !exists {
|
||||
return
|
||||
}
|
||||
registration.Count--
|
||||
registration.LastUpdated = now
|
||||
if registration.QuotaExceededClients != nil {
|
||||
delete(registration.QuotaExceededClients, clientID)
|
||||
}
|
||||
if registration.SuspendedClients != nil {
|
||||
delete(registration.SuspendedClients, clientID)
|
||||
}
|
||||
if registration.Count < 0 {
|
||||
registration.Count = 0
|
||||
}
|
||||
if provider != "" && registration.Providers != nil {
|
||||
if count, ok := registration.Providers[provider]; ok {
|
||||
if count <= 1 {
|
||||
delete(registration.Providers, provider)
|
||||
} else {
|
||||
registration.Providers[provider] = count - 1
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Debugf("Decremented count for model %s, now %d clients", modelID, registration.Count)
|
||||
if registration.Count <= 0 {
|
||||
delete(r.models, modelID)
|
||||
log.Debugf("Removed model %s as no clients remain", modelID)
|
||||
}
|
||||
}
|
||||
|
||||
func cloneModelInfo(model *ModelInfo) *ModelInfo {
    if model == nil {
        return nil
    }
    copy := *model
    if len(model.SupportedGenerationMethods) > 0 {
        copy.SupportedGenerationMethods = append([]string(nil), model.SupportedGenerationMethods...)
    }
    if len(model.SupportedParameters) > 0 {
        copy.SupportedParameters = append([]string(nil), model.SupportedParameters...)
    }
    return &copy
}
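
// exampleCloneIsolation is an illustrative sketch of why cloneModelInfo copies
// slice fields: mutating the clone must not leak into the registry's entry.
// The model value is hypothetical.
func exampleCloneIsolation() {
    src := &ModelInfo{ID: "demo-model", SupportedGenerationMethods: []string{"generateContent"}}
    dup := cloneModelInfo(src)
    dup.SupportedGenerationMethods[0] = "countTokens"
    // src.SupportedGenerationMethods[0] is still "generateContent" because the
    // backing array was copied rather than shared.
    _ = src
}
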
|
||||
|
||||
// UnregisterClient removes a client and decrements counts for its models
|
||||
@@ -207,6 +419,8 @@ func (r *ModelRegistry) unregisterClientInternal(clientID string) {
|
||||
delete(r.clientProviders, clientID)
|
||||
}
|
||||
log.Debugf("Unregistered client %s", clientID)
|
||||
// Separator line after completing client unregistration (after the summary line)
|
||||
misc.LogCredentialSeparator()
|
||||
}
|
||||
|
||||
// SetModelQuotaExceeded marks a model as quota exceeded for a specific client
|
||||
|
||||
@@ -61,10 +61,7 @@ func (e *ClaudeExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, r
|
||||
}
|
||||
applyClaudeHeaders(httpReq, apiKey, false)
|
||||
|
||||
httpClient := &http.Client{}
|
||||
if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
|
||||
httpClient.Transport = rt
|
||||
}
|
||||
httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
|
||||
resp, err := httpClient.Do(httpReq)
|
||||
if err != nil {
|
||||
return cliproxyexecutor.Response{}, err
|
||||
@@ -130,10 +127,7 @@ func (e *ClaudeExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.A
|
||||
}
|
||||
applyClaudeHeaders(httpReq, apiKey, true)
|
||||
|
||||
httpClient := &http.Client{Timeout: 0}
|
||||
if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
|
||||
httpClient.Transport = rt
|
||||
}
|
||||
httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
|
||||
resp, err := httpClient.Do(httpReq)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -150,8 +144,8 @@ func (e *ClaudeExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.A
|
||||
defer close(out)
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
buf := make([]byte, 1024*1024)
|
||||
scanner.Buffer(buf, 1024*1024)
|
||||
buf := make([]byte, 20_971_520)
|
||||
scanner.Buffer(buf, 20_971_520)
|
||||
var param any
|
||||
for scanner.Scan() {
|
||||
line := scanner.Bytes()
|
||||
@@ -196,10 +190,7 @@ func (e *ClaudeExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.Aut
|
||||
}
|
||||
applyClaudeHeaders(httpReq, apiKey, false)
|
||||
|
||||
httpClient := &http.Client{}
|
||||
if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
|
||||
httpClient.Transport = rt
|
||||
}
|
||||
httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
|
||||
resp, err := httpClient.Do(httpReq)
|
||||
if err != nil {
|
||||
return cliproxyexecutor.Response{}, err
|
||||
@@ -284,6 +275,7 @@ func hasZSTDEcoding(contentEncoding string) bool {
|
||||
func applyClaudeHeaders(r *http.Request, apiKey string, stream bool) {
|
||||
r.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
r.Header.Set("Content-Type", "application/json")
|
||||
r.Header.Set("Anthropic-Beta", "claude-code-20250219,oauth-2025-04-20,interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14")
|
||||
|
||||
var ginHeaders http.Header
|
||||
if ginCtx, ok := r.Context().Value("gin").(*gin.Context); ok && ginCtx != nil && ginCtx.Request != nil {
|
||||
@@ -292,7 +284,6 @@ func applyClaudeHeaders(r *http.Request, apiKey string, stream bool) {
|
||||
|
||||
misc.EnsureHeader(r.Header, ginHeaders, "Anthropic-Version", "2023-06-01")
|
||||
misc.EnsureHeader(r.Header, ginHeaders, "Anthropic-Dangerous-Direct-Browser-Access", "true")
|
||||
misc.EnsureHeader(r.Header, ginHeaders, "Anthropic-Beta", "claude-code-20250219,oauth-2025-04-20,interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14")
|
||||
misc.EnsureHeader(r.Header, ginHeaders, "X-App", "cli")
|
||||
misc.EnsureHeader(r.Header, ginHeaders, "X-Stainless-Helper-Method", "stream")
|
||||
misc.EnsureHeader(r.Header, ginHeaders, "X-Stainless-Retry-Count", "0")
|
||||
|
||||
@@ -54,8 +54,6 @@ func (e *CodexExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, re
|
||||
if util.InArray([]string{"gpt-5", "gpt-5-minimal", "gpt-5-low", "gpt-5-medium", "gpt-5-high"}, req.Model) {
|
||||
body, _ = sjson.SetBytes(body, "model", "gpt-5")
|
||||
switch req.Model {
|
||||
case "gpt-5":
|
||||
body, _ = sjson.DeleteBytes(body, "reasoning.effort")
|
||||
case "gpt-5-minimal":
|
||||
body, _ = sjson.SetBytes(body, "reasoning.effort", "minimal")
|
||||
case "gpt-5-low":
|
||||
@@ -68,8 +66,6 @@ func (e *CodexExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, re
|
||||
} else if util.InArray([]string{"gpt-5-codex", "gpt-5-codex-low", "gpt-5-codex-medium", "gpt-5-codex-high"}, req.Model) {
|
||||
body, _ = sjson.SetBytes(body, "model", "gpt-5-codex")
|
||||
switch req.Model {
|
||||
case "gpt-5-codex":
|
||||
body, _ = sjson.DeleteBytes(body, "reasoning.effort")
|
||||
case "gpt-5-codex-low":
|
||||
body, _ = sjson.SetBytes(body, "reasoning.effort", "low")
|
||||
case "gpt-5-codex-medium":
|
||||
@@ -80,6 +76,7 @@ func (e *CodexExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, re
|
||||
}
|
||||
|
||||
body, _ = sjson.SetBytes(body, "stream", true)
|
||||
body, _ = sjson.DeleteBytes(body, "previous_response_id")
|
||||
|
||||
url := strings.TrimSuffix(baseURL, "/") + "/responses"
|
||||
recordAPIRequest(ctx, e.cfg, body)
|
||||
@@ -89,10 +86,7 @@ func (e *CodexExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, re
|
||||
}
|
||||
applyCodexHeaders(httpReq, auth, apiKey)
|
||||
|
||||
httpClient := &http.Client{}
|
||||
if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
|
||||
httpClient.Transport = rt
|
||||
}
|
||||
httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
|
||||
resp, err := httpClient.Do(httpReq)
|
||||
if err != nil {
|
||||
return cliproxyexecutor.Response{}, err
|
||||
@@ -147,8 +141,6 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
|
||||
if util.InArray([]string{"gpt-5", "gpt-5-minimal", "gpt-5-low", "gpt-5-medium", "gpt-5-high"}, req.Model) {
|
||||
body, _ = sjson.SetBytes(body, "model", "gpt-5")
|
||||
switch req.Model {
|
||||
case "gpt-5":
|
||||
body, _ = sjson.DeleteBytes(body, "reasoning.effort")
|
||||
case "gpt-5-minimal":
|
||||
body, _ = sjson.SetBytes(body, "reasoning.effort", "minimal")
|
||||
case "gpt-5-low":
|
||||
@@ -161,8 +153,6 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
|
||||
} else if util.InArray([]string{"gpt-5-codex", "gpt-5-codex-low", "gpt-5-codex-medium", "gpt-5-codex-high"}, req.Model) {
|
||||
body, _ = sjson.SetBytes(body, "model", "gpt-5-codex")
|
||||
switch req.Model {
|
||||
case "gpt-5-codex":
|
||||
body, _ = sjson.DeleteBytes(body, "reasoning.effort")
|
||||
case "gpt-5-codex-low":
|
||||
body, _ = sjson.SetBytes(body, "reasoning.effort", "low")
|
||||
case "gpt-5-codex-medium":
|
||||
@@ -172,6 +162,8 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
|
||||
}
|
||||
}
|
||||
|
||||
body, _ = sjson.DeleteBytes(body, "previous_response_id")
|
||||
|
||||
url := strings.TrimSuffix(baseURL, "/") + "/responses"
|
||||
recordAPIRequest(ctx, e.cfg, body)
|
||||
httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body))
|
||||
@@ -180,10 +172,7 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
|
||||
}
|
||||
applyCodexHeaders(httpReq, auth, apiKey)
|
||||
|
||||
httpClient := &http.Client{Timeout: 0}
|
||||
if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
|
||||
httpClient.Transport = rt
|
||||
}
|
||||
httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
|
||||
resp, err := httpClient.Do(httpReq)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -200,8 +189,8 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
|
||||
defer close(out)
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
buf := make([]byte, 1024*1024)
|
||||
scanner.Buffer(buf, 1024*1024)
|
||||
buf := make([]byte, 20_971_520)
|
||||
scanner.Buffer(buf, 20_971_520)
|
||||
var param any
|
||||
for scanner.Scan() {
|
||||
line := scanner.Bytes()
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
cliproxyexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
|
||||
sdktranslator "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
|
||||
@@ -51,7 +52,7 @@ func (e *GeminiCLIExecutor) Identifier() string { return "gemini-cli" }
|
||||
func (e *GeminiCLIExecutor) PrepareRequest(_ *http.Request, _ *cliproxyauth.Auth) error { return nil }
|
||||
|
||||
func (e *GeminiCLIExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (cliproxyexecutor.Response, error) {
|
||||
tokenSource, baseTokenData, err := prepareGeminiCLITokenSource(ctx, auth)
|
||||
tokenSource, baseTokenData, err := prepareGeminiCLITokenSource(ctx, e.cfg, auth)
|
||||
if err != nil {
|
||||
return cliproxyexecutor.Response{}, err
|
||||
}
|
||||
@@ -60,6 +61,7 @@ func (e *GeminiCLIExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth
|
||||
from := opts.SourceFormat
|
||||
to := sdktranslator.FromString("gemini-cli")
|
||||
basePayload := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
|
||||
basePayload = fixGeminiCLIImageAspectRatio(req.Model, basePayload)
|
||||
|
||||
action := "generateContent"
|
||||
if req.Metadata != nil {
|
||||
@@ -74,7 +76,7 @@ func (e *GeminiCLIExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth
|
||||
models = append([]string{req.Model}, models...)
|
||||
}
|
||||
|
||||
httpClient := newHTTPClient(ctx, 0)
|
||||
httpClient := newHTTPClient(ctx, e.cfg, auth, 0)
|
||||
respCtx := context.WithValue(ctx, "alt", opts.Alt)
|
||||
|
||||
var lastStatus int
|
||||
@@ -89,6 +91,7 @@ func (e *GeminiCLIExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth
|
||||
payload = setJSONField(payload, "project", projectID)
|
||||
payload = setJSONField(payload, "model", attemptModel)
|
||||
}
|
||||
payload = disableGeminiThinkingConfig(payload, attemptModel)
|
||||
|
||||
tok, errTok := tokenSource.Token()
|
||||
if errTok != nil {
|
||||
@@ -138,7 +141,7 @@ func (e *GeminiCLIExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth
|
||||
}
|
||||
|
||||
func (e *GeminiCLIExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (<-chan cliproxyexecutor.StreamChunk, error) {
|
||||
tokenSource, baseTokenData, err := prepareGeminiCLITokenSource(ctx, auth)
|
||||
tokenSource, baseTokenData, err := prepareGeminiCLITokenSource(ctx, e.cfg, auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -147,6 +150,7 @@ func (e *GeminiCLIExecutor) ExecuteStream(ctx context.Context, auth *cliproxyaut
|
||||
from := opts.SourceFormat
|
||||
to := sdktranslator.FromString("gemini-cli")
|
||||
basePayload := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), true)
|
||||
basePayload = fixGeminiCLIImageAspectRatio(req.Model, basePayload)
|
||||
|
||||
projectID := strings.TrimSpace(stringValue(auth.Metadata, "project_id"))
|
||||
|
||||
@@ -155,7 +159,7 @@ func (e *GeminiCLIExecutor) ExecuteStream(ctx context.Context, auth *cliproxyaut
|
||||
models = append([]string{req.Model}, models...)
|
||||
}
|
||||
|
||||
httpClient := newHTTPClient(ctx, 0)
|
||||
httpClient := newHTTPClient(ctx, e.cfg, auth, 0)
|
||||
respCtx := context.WithValue(ctx, "alt", opts.Alt)
|
||||
|
||||
var lastStatus int
|
||||
@@ -165,6 +169,7 @@ func (e *GeminiCLIExecutor) ExecuteStream(ctx context.Context, auth *cliproxyaut
|
||||
payload := append([]byte(nil), basePayload...)
|
||||
payload = setJSONField(payload, "project", projectID)
|
||||
payload = setJSONField(payload, "model", attemptModel)
|
||||
payload = disableGeminiThinkingConfig(payload, attemptModel)
|
||||
|
||||
tok, errTok := tokenSource.Token()
|
||||
if errTok != nil {
|
||||
@@ -212,8 +217,8 @@ func (e *GeminiCLIExecutor) ExecuteStream(ctx context.Context, auth *cliproxyaut
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
if opts.Alt == "" {
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
buf := make([]byte, 1024*1024)
|
||||
scanner.Buffer(buf, 1024*1024)
|
||||
buf := make([]byte, 20_971_520)
|
||||
scanner.Buffer(buf, 20_971_520)
|
||||
var param any
|
||||
for scanner.Scan() {
|
||||
line := scanner.Bytes()
|
||||
@@ -268,7 +273,7 @@ func (e *GeminiCLIExecutor) ExecuteStream(ctx context.Context, auth *cliproxyaut
|
||||
}
|
||||
|
||||
func (e *GeminiCLIExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (cliproxyexecutor.Response, error) {
|
||||
tokenSource, baseTokenData, err := prepareGeminiCLITokenSource(ctx, auth)
|
||||
tokenSource, baseTokenData, err := prepareGeminiCLITokenSource(ctx, e.cfg, auth)
|
||||
if err != nil {
|
||||
return cliproxyexecutor.Response{}, err
|
||||
}
|
||||
@@ -281,7 +286,7 @@ func (e *GeminiCLIExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.
|
||||
models = append([]string{req.Model}, models...)
|
||||
}
|
||||
|
||||
httpClient := newHTTPClient(ctx, 0)
|
||||
httpClient := newHTTPClient(ctx, e.cfg, auth, 0)
|
||||
respCtx := context.WithValue(ctx, "alt", opts.Alt)
|
||||
|
||||
var lastStatus int
|
||||
@@ -291,6 +296,8 @@ func (e *GeminiCLIExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.
|
||||
payload := sdktranslator.TranslateRequest(from, to, attemptModel, bytes.Clone(req.Payload), false)
|
||||
payload = deleteJSONField(payload, "project")
|
||||
payload = deleteJSONField(payload, "model")
|
||||
payload = disableGeminiThinkingConfig(payload, attemptModel)
|
||||
payload = fixGeminiCLIImageAspectRatio(attemptModel, payload)
|
||||
|
||||
tok, errTok := tokenSource.Token()
|
||||
if errTok != nil {
|
||||
@@ -348,7 +355,7 @@ func (e *GeminiCLIExecutor) Refresh(ctx context.Context, auth *cliproxyauth.Auth
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
func prepareGeminiCLITokenSource(ctx context.Context, auth *cliproxyauth.Auth) (oauth2.TokenSource, map[string]any, error) {
|
||||
func prepareGeminiCLITokenSource(ctx context.Context, cfg *config.Config, auth *cliproxyauth.Auth) (oauth2.TokenSource, map[string]any, error) {
|
||||
if auth == nil || auth.Metadata == nil {
|
||||
return nil, nil, fmt.Errorf("gemini-cli auth metadata missing")
|
||||
}
|
||||
@@ -392,8 +399,8 @@ func prepareGeminiCLITokenSource(ctx context.Context, auth *cliproxyauth.Auth) (
|
||||
}
|
||||
|
||||
ctxToken := ctx
|
||||
if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
|
||||
ctxToken = context.WithValue(ctxToken, oauth2.HTTPClient, &http.Client{Transport: rt})
|
||||
if httpClient := newProxyAwareHTTPClient(ctx, cfg, auth, 0); httpClient != nil {
|
||||
ctxToken = context.WithValue(ctxToken, oauth2.HTTPClient, httpClient)
|
||||
}
|
||||
|
||||
src := conf.TokenSource(ctxToken, &token)
|
||||
@@ -438,15 +445,8 @@ func updateGeminiCLITokenMetadata(auth *cliproxyauth.Auth, base map[string]any,
|
||||
auth.Metadata["token"] = merged
|
||||
}
|
||||
|
||||
func newHTTPClient(ctx context.Context, timeout time.Duration) *http.Client {
|
||||
client := &http.Client{}
|
||||
if timeout > 0 {
|
||||
client.Timeout = timeout
|
||||
}
|
||||
if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
|
||||
client.Transport = rt
|
||||
}
|
||||
return client
|
||||
func newHTTPClient(ctx context.Context, cfg *config.Config, auth *cliproxyauth.Auth, timeout time.Duration) *http.Client {
|
||||
return newProxyAwareHTTPClient(ctx, cfg, auth, timeout)
|
||||
}
|
||||
|
||||
func cloneMap(in map[string]any) map[string]any {
|
||||
@@ -507,6 +507,29 @@ func cliPreviewFallbackOrder(model string) []string {
|
||||
}
|
||||
}
|
||||
|
||||
func disableGeminiThinkingConfig(body []byte, model string) []byte {
|
||||
if !geminiModelDisallowsThinking(model) {
|
||||
return body
|
||||
}
|
||||
|
||||
updated := deleteJSONField(body, "request.generationConfig.thinkingConfig")
|
||||
updated = deleteJSONField(updated, "generationConfig.thinkingConfig")
|
||||
return updated
|
||||
}
|
||||
|
||||
func geminiModelDisallowsThinking(model string) bool {
|
||||
if model == "" {
|
||||
return false
|
||||
}
|
||||
lower := strings.ToLower(model)
|
||||
for _, marker := range []string{"gemini-2.5-flash-image-preview", "gemini-2.5-flash-image"} {
|
||||
if strings.Contains(lower, marker) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
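
// exampleStripThinkingConfig is an illustrative sketch: for the image models
// matched above, disableGeminiThinkingConfig removes any thinkingConfig block
// before the request goes upstream. The payload literal is hypothetical.
func exampleStripThinkingConfig() []byte {
    payload := []byte(`{"generationConfig":{"temperature":1,"thinkingConfig":{"thinkingBudget":0}}}`)
    // After this call the payload keeps temperature but no longer contains
    // generationConfig.thinkingConfig.
    return disableGeminiThinkingConfig(payload, "gemini-2.5-flash-image")
}
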
|
||||
|
||||
// setJSONField sets a top-level JSON field on a byte slice payload via sjson.
|
||||
func setJSONField(body []byte, key, value string) []byte {
|
||||
if key == "" {
|
||||
@@ -530,3 +553,45 @@ func deleteJSONField(body []byte, key string) []byte {
|
||||
}
|
||||
return updated
|
||||
}
|
||||
|
||||
func fixGeminiCLIImageAspectRatio(modelName string, rawJSON []byte) []byte {
|
||||
if modelName == "gemini-2.5-flash-image-preview" {
|
||||
aspectRatioResult := gjson.GetBytes(rawJSON, "request.generationConfig.imageConfig.aspectRatio")
|
||||
if aspectRatioResult.Exists() {
|
||||
contents := gjson.GetBytes(rawJSON, "request.contents")
|
||||
contentArray := contents.Array()
|
||||
if len(contentArray) > 0 {
|
||||
hasInlineData := false
|
||||
loopContent:
|
||||
for i := 0; i < len(contentArray); i++ {
|
||||
parts := contentArray[i].Get("parts").Array()
|
||||
for j := 0; j < len(parts); j++ {
|
||||
if parts[j].Get("inlineData").Exists() {
|
||||
hasInlineData = true
|
||||
break loopContent
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !hasInlineData {
|
||||
emptyImageBase64ed, _ := util.CreateWhiteImageBase64(aspectRatioResult.String())
|
||||
emptyImagePart := `{"inlineData":{"mime_type":"image/png","data":""}}`
|
||||
emptyImagePart, _ = sjson.Set(emptyImagePart, "inlineData.data", emptyImageBase64ed)
|
||||
newPartsJson := `[]`
|
||||
newPartsJson, _ = sjson.SetRaw(newPartsJson, "-1", `{"text": "Based on the following requirements, create an image within the uploaded picture. The new content *MUST* completely cover the entire area of the original picture, maintaining its exact proportions, and *NO* blank areas should appear."}`)
|
||||
newPartsJson, _ = sjson.SetRaw(newPartsJson, "-1", emptyImagePart)
|
||||
|
||||
parts := contentArray[0].Get("parts").Array()
|
||||
for j := 0; j < len(parts); j++ {
|
||||
newPartsJson, _ = sjson.SetRaw(newPartsJson, "-1", parts[j].Raw)
|
||||
}
|
||||
|
||||
rawJSON, _ = sjson.SetRawBytes(rawJSON, "request.contents.0.parts", []byte(newPartsJson))
|
||||
rawJSON, _ = sjson.SetRawBytes(rawJSON, "request.generationConfig.responseModalities", []byte(`["Image", "Text"]`))
|
||||
}
|
||||
}
|
||||
rawJSON, _ = sjson.DeleteBytes(rawJSON, "request.generationConfig.imageConfig")
|
||||
}
|
||||
}
|
||||
return rawJSON
|
||||
}
|
||||
|
||||
@@ -77,6 +77,8 @@ func (e *GeminiExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, r
|
||||
from := opts.SourceFormat
|
||||
to := sdktranslator.FromString("gemini")
|
||||
body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
|
||||
body = disableGeminiThinkingConfig(body, req.Model)
|
||||
body = fixGeminiImageAspectRatio(req.Model, body)
|
||||
|
||||
action := "generateContent"
|
||||
if req.Metadata != nil {
|
||||
@@ -103,10 +105,7 @@ func (e *GeminiExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, r
|
||||
httpReq.Header.Set("Authorization", "Bearer "+bearer)
|
||||
}
|
||||
|
||||
httpClient := &http.Client{}
|
||||
if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
|
||||
httpClient.Transport = rt
|
||||
}
|
||||
httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
|
||||
resp, err := httpClient.Do(httpReq)
|
||||
if err != nil {
|
||||
return cliproxyexecutor.Response{}, err
|
||||
@@ -137,6 +136,8 @@ func (e *GeminiExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.A
|
||||
from := opts.SourceFormat
|
||||
to := sdktranslator.FromString("gemini")
|
||||
body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), true)
|
||||
body = disableGeminiThinkingConfig(body, req.Model)
|
||||
body = fixGeminiImageAspectRatio(req.Model, body)
|
||||
|
||||
url := fmt.Sprintf("%s/%s/models/%s:%s", glEndpoint, glAPIVersion, req.Model, "streamGenerateContent")
|
||||
if opts.Alt == "" {
|
||||
@@ -159,10 +160,7 @@ func (e *GeminiExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.A
|
||||
httpReq.Header.Set("Authorization", "Bearer "+bearer)
|
||||
}
|
||||
|
||||
httpClient := &http.Client{Timeout: 0}
|
||||
if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
|
||||
httpClient.Transport = rt
|
||||
}
|
||||
httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
|
||||
resp, err := httpClient.Do(httpReq)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -179,8 +177,8 @@ func (e *GeminiExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.A
|
||||
defer close(out)
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
buf := make([]byte, 1024*1024)
|
||||
scanner.Buffer(buf, 1024*1024)
|
||||
buf := make([]byte, 20_971_520)
|
||||
scanner.Buffer(buf, 20_971_520)
|
||||
var param any
|
||||
for scanner.Scan() {
|
||||
line := scanner.Bytes()
|
||||
@@ -210,6 +208,8 @@ func (e *GeminiExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.Aut
|
||||
from := opts.SourceFormat
|
||||
to := sdktranslator.FromString("gemini")
|
||||
translatedReq := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
|
||||
translatedReq = disableGeminiThinkingConfig(translatedReq, req.Model)
|
||||
translatedReq = fixGeminiImageAspectRatio(req.Model, translatedReq)
|
||||
respCtx := context.WithValue(ctx, "alt", opts.Alt)
|
||||
translatedReq, _ = sjson.DeleteBytes(translatedReq, "tools")
|
||||
translatedReq, _ = sjson.DeleteBytes(translatedReq, "generationConfig")
|
||||
@@ -230,10 +230,7 @@ func (e *GeminiExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.Aut
|
||||
httpReq.Header.Set("Authorization", "Bearer "+bearer)
|
||||
}
|
||||
|
||||
httpClient := &http.Client{}
|
||||
if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
|
||||
httpClient.Transport = rt
|
||||
}
|
||||
httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
|
||||
resp, err := httpClient.Do(httpReq)
|
||||
if err != nil {
|
||||
return cliproxyexecutor.Response{}, err
|
||||
@@ -320,7 +317,7 @@ func (e *GeminiExecutor) Refresh(ctx context.Context, auth *cliproxyauth.Auth) (
|
||||
conf := &oauth2.Config{ClientID: clientID, ClientSecret: clientSecret, Endpoint: endpoint}
|
||||
|
||||
// Ensure proxy-aware HTTP client for token refresh
|
||||
httpClient := util.SetProxy(e.cfg, &http.Client{})
|
||||
httpClient := util.SetProxy(&e.cfg.SDKConfig, &http.Client{})
|
||||
ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient)
|
||||
|
||||
// Build base token
|
||||
@@ -380,3 +377,45 @@ func geminiCreds(a *cliproxyauth.Auth) (apiKey, bearer string) {
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func fixGeminiImageAspectRatio(modelName string, rawJSON []byte) []byte {
|
||||
if modelName == "gemini-2.5-flash-image-preview" {
|
||||
aspectRatioResult := gjson.GetBytes(rawJSON, "generationConfig.imageConfig.aspectRatio")
|
||||
if aspectRatioResult.Exists() {
|
||||
contents := gjson.GetBytes(rawJSON, "contents")
|
||||
contentArray := contents.Array()
|
||||
if len(contentArray) > 0 {
|
||||
hasInlineData := false
|
||||
loopContent:
|
||||
for i := 0; i < len(contentArray); i++ {
|
||||
parts := contentArray[i].Get("parts").Array()
|
||||
for j := 0; j < len(parts); j++ {
|
||||
if parts[j].Get("inlineData").Exists() {
|
||||
hasInlineData = true
|
||||
break loopContent
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !hasInlineData {
|
||||
emptyImageBase64ed, _ := util.CreateWhiteImageBase64(aspectRatioResult.String())
|
||||
emptyImagePart := `{"inlineData":{"mime_type":"image/png","data":""}}`
|
||||
emptyImagePart, _ = sjson.Set(emptyImagePart, "inlineData.data", emptyImageBase64ed)
|
||||
newPartsJson := `[]`
|
||||
newPartsJson, _ = sjson.SetRaw(newPartsJson, "-1", `{"text": "Based on the following requirements, create an image within the uploaded picture. The new content *MUST* completely cover the entire area of the original picture, maintaining its exact proportions, and *NO* blank areas should appear."}`)
|
||||
newPartsJson, _ = sjson.SetRaw(newPartsJson, "-1", emptyImagePart)
|
||||
|
||||
parts := contentArray[0].Get("parts").Array()
|
||||
for j := 0; j < len(parts); j++ {
|
||||
newPartsJson, _ = sjson.SetRaw(newPartsJson, "-1", parts[j].Raw)
|
||||
}
|
||||
|
||||
rawJSON, _ = sjson.SetRawBytes(rawJSON, "contents.0.parts", []byte(newPartsJson))
|
||||
rawJSON, _ = sjson.SetRawBytes(rawJSON, "generationConfig.responseModalities", []byte(`["Image", "Text"]`))
|
||||
}
|
||||
}
|
||||
rawJSON, _ = sjson.DeleteBytes(rawJSON, "generationConfig.imageConfig")
|
||||
}
|
||||
}
|
||||
return rawJSON
|
||||
}
|
||||
|
||||
@@ -1,243 +0,0 @@
|
||||
package executor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/gemini"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
|
||||
geminiwebapi "github.com/router-for-me/CLIProxyAPI/v6/internal/provider/gemini-web"
|
||||
cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
cliproxyexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
|
||||
sdktranslator "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type GeminiWebExecutor struct {
|
||||
cfg *config.Config
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func NewGeminiWebExecutor(cfg *config.Config) *GeminiWebExecutor {
|
||||
return &GeminiWebExecutor{cfg: cfg}
|
||||
}
|
||||
|
||||
func (e *GeminiWebExecutor) Identifier() string { return "gemini-web" }
|
||||
|
||||
func (e *GeminiWebExecutor) PrepareRequest(_ *http.Request, _ *cliproxyauth.Auth) error { return nil }
|
||||
|
||||
func (e *GeminiWebExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (cliproxyexecutor.Response, error) {
|
||||
state, err := e.stateFor(auth)
|
||||
if err != nil {
|
||||
return cliproxyexecutor.Response{}, err
|
||||
}
|
||||
if err = state.EnsureClient(); err != nil {
|
||||
return cliproxyexecutor.Response{}, err
|
||||
}
|
||||
reporter := newUsageReporter(ctx, e.Identifier(), req.Model, auth)
|
||||
|
||||
mutex := state.GetRequestMutex()
|
||||
if mutex != nil {
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
}
|
||||
|
||||
payload := bytes.Clone(req.Payload)
|
||||
resp, errMsg, prep := state.Send(ctx, req.Model, payload, opts)
|
||||
if errMsg != nil {
|
||||
return cliproxyexecutor.Response{}, geminiWebErrorFromMessage(errMsg)
|
||||
}
|
||||
resp = state.ConvertToTarget(ctx, req.Model, prep, resp)
|
||||
reporter.publish(ctx, parseGeminiUsage(resp))
|
||||
|
||||
from := opts.SourceFormat
|
||||
to := sdktranslator.FromString("gemini-web")
|
||||
    var param any
    out := sdktranslator.TranslateNonStream(ctx, to, from, req.Model, bytes.Clone(opts.OriginalRequest), payload, bytes.Clone(resp), &param)
|
||||
|
||||
return cliproxyexecutor.Response{Payload: []byte(out)}, nil
|
||||
}
|
||||
|
||||
func (e *GeminiWebExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (<-chan cliproxyexecutor.StreamChunk, error) {
|
||||
state, err := e.stateFor(auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = state.EnsureClient(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reporter := newUsageReporter(ctx, e.Identifier(), req.Model, auth)
|
||||
|
||||
mutex := state.GetRequestMutex()
|
||||
if mutex != nil {
|
||||
mutex.Lock()
|
||||
}
|
||||
|
||||
gemBytes, errMsg, prep := state.Send(ctx, req.Model, bytes.Clone(req.Payload), opts)
|
||||
if errMsg != nil {
|
||||
if mutex != nil {
|
||||
mutex.Unlock()
|
||||
}
|
||||
return nil, geminiWebErrorFromMessage(errMsg)
|
||||
}
|
||||
reporter.publish(ctx, parseGeminiUsage(gemBytes))
|
||||
|
||||
from := opts.SourceFormat
|
||||
to := sdktranslator.FromString("gemini-web")
|
||||
var param any
|
||||
|
||||
lines := state.ConvertStream(ctx, req.Model, prep, gemBytes)
|
||||
done := state.DoneStream(ctx, req.Model, prep)
|
||||
out := make(chan cliproxyexecutor.StreamChunk)
|
||||
go func() {
|
||||
defer close(out)
|
||||
if mutex != nil {
|
||||
defer mutex.Unlock()
|
||||
}
|
||||
        for _, line := range lines {
            lines = sdktranslator.TranslateStream(ctx, to, from, req.Model, bytes.Clone(opts.OriginalRequest), req.Payload, bytes.Clone([]byte(line)), &param)
            for _, l := range lines {
                out <- cliproxyexecutor.StreamChunk{Payload: []byte(l)}
            }
        }
        for _, line := range done {
            lines = sdktranslator.TranslateStream(ctx, to, from, req.Model, bytes.Clone(opts.OriginalRequest), req.Payload, bytes.Clone([]byte(line)), &param)
            for _, l := range lines {
                out <- cliproxyexecutor.StreamChunk{Payload: []byte(l)}
            }
        }
|
||||
}()
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (e *GeminiWebExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (cliproxyexecutor.Response, error) {
|
||||
return cliproxyexecutor.Response{Payload: []byte{}}, fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
func (e *GeminiWebExecutor) Refresh(ctx context.Context, auth *cliproxyauth.Auth) (*cliproxyauth.Auth, error) {
|
||||
log.Debugf("gemini web executor: refresh called")
|
||||
state, err := e.stateFor(auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = state.Refresh(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ts := state.TokenSnapshot()
|
||||
if auth.Metadata == nil {
|
||||
auth.Metadata = make(map[string]any)
|
||||
}
|
||||
auth.Metadata["secure_1psid"] = ts.Secure1PSID
|
||||
auth.Metadata["secure_1psidts"] = ts.Secure1PSIDTS
|
||||
auth.Metadata["type"] = "gemini-web"
|
||||
auth.Metadata["last_refresh"] = time.Now().Format(time.RFC3339)
|
||||
if v, ok := auth.Metadata["label"].(string); !ok || strings.TrimSpace(v) == "" {
|
||||
if lbl := state.Label(); strings.TrimSpace(lbl) != "" {
|
||||
auth.Metadata["label"] = strings.TrimSpace(lbl)
|
||||
}
|
||||
}
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
type geminiWebRuntime struct {
|
||||
state *geminiwebapi.GeminiWebState
|
||||
}
|
||||
|
||||
func (e *GeminiWebExecutor) stateFor(auth *cliproxyauth.Auth) (*geminiwebapi.GeminiWebState, error) {
|
||||
if auth == nil {
|
||||
return nil, fmt.Errorf("gemini-web executor: auth is nil")
|
||||
}
|
||||
if runtime, ok := auth.Runtime.(*geminiWebRuntime); ok && runtime != nil && runtime.state != nil {
|
||||
return runtime.state, nil
|
||||
}
|
||||
|
||||
e.mu.Lock()
|
||||
defer e.mu.Unlock()
|
||||
|
||||
if runtime, ok := auth.Runtime.(*geminiWebRuntime); ok && runtime != nil && runtime.state != nil {
|
||||
return runtime.state, nil
|
||||
}
|
||||
|
||||
ts, err := parseGeminiWebToken(auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg := e.cfg
|
||||
if auth.ProxyURL != "" && cfg != nil {
|
||||
copyCfg := *cfg
|
||||
copyCfg.ProxyURL = auth.ProxyURL
|
||||
cfg = ©Cfg
|
||||
}
|
||||
|
||||
storagePath := ""
|
||||
if auth.Attributes != nil {
|
||||
if p, ok := auth.Attributes["path"]; ok {
|
||||
storagePath = p
|
||||
}
|
||||
}
|
||||
state := geminiwebapi.NewGeminiWebState(cfg, ts, storagePath)
|
||||
runtime := &geminiWebRuntime{state: state}
|
||||
auth.Runtime = runtime
|
||||
return state, nil
|
||||
}
|
||||
|
||||
func parseGeminiWebToken(auth *cliproxyauth.Auth) (*gemini.GeminiWebTokenStorage, error) {
|
||||
if auth == nil {
|
||||
return nil, fmt.Errorf("gemini-web executor: auth is nil")
|
||||
}
|
||||
if auth.Metadata == nil {
|
||||
return nil, fmt.Errorf("gemini-web executor: missing metadata")
|
||||
}
|
||||
psid := stringFromMetadata(auth.Metadata, "secure_1psid", "secure_1psid", "__Secure-1PSID")
|
||||
psidts := stringFromMetadata(auth.Metadata, "secure_1psidts", "secure_1psidts", "__Secure-1PSIDTS")
|
||||
if psid == "" || psidts == "" {
|
||||
return nil, fmt.Errorf("gemini-web executor: incomplete cookie metadata")
|
||||
}
|
||||
return &gemini.GeminiWebTokenStorage{Secure1PSID: psid, Secure1PSIDTS: psidts}, nil
|
||||
}
|
||||
|
||||
func stringFromMetadata(meta map[string]any, keys ...string) string {
|
||||
for _, key := range keys {
|
||||
if val, ok := meta[key]; ok {
|
||||
if s, okStr := val.(string); okStr && s != "" {
|
||||
return s
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func geminiWebErrorFromMessage(msg *interfaces.ErrorMessage) error {
|
||||
if msg == nil {
|
||||
return nil
|
||||
}
|
||||
return geminiWebError{message: msg}
|
||||
}
|
||||
|
||||
type geminiWebError struct {
|
||||
message *interfaces.ErrorMessage
|
||||
}
|
||||
|
||||
func (e geminiWebError) Error() string {
|
||||
if e.message == nil {
|
||||
return "gemini-web error"
|
||||
}
|
||||
if e.message.Error != nil {
|
||||
return e.message.Error.Error()
|
||||
}
|
||||
return fmt.Sprintf("gemini-web error: status %d", e.message.StatusCode)
|
||||
}
|
||||
|
||||
func (e geminiWebError) StatusCode() int {
|
||||
if e.message == nil {
|
||||
return 0
|
||||
}
|
||||
return e.message.StatusCode
|
||||
}
|
||||
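geminiWebError keeps the upstream status code available through StatusCode(). A minimal sketch of how a caller could map the returned error back to an HTTP status; the statusCoder interface name is an assumption for this illustration, not taken from the repository.

package main

import (
    "errors"
    "net/http"
)

// statusCoder is an illustrative interface; geminiWebError satisfies it via StatusCode().
type statusCoder interface{ StatusCode() int }

// httpStatusFor unwraps the error chain and falls back to 500 when no status is carried.
func httpStatusFor(err error) int {
    var sc statusCoder
    if errors.As(err, &sc) && sc.StatusCode() > 0 {
        return sc.StatusCode()
    }
    return http.StatusInternalServerError
}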
internal/runtime/executor/iflow_executor.go (new file, 261 lines)
@@ -0,0 +1,261 @@
package executor

import (
    "bufio"
    "bytes"
    "context"
    "fmt"
    "io"
    "net/http"
    "strings"
    "time"

    iflowauth "github.com/router-for-me/CLIProxyAPI/v6/internal/auth/iflow"
    "github.com/router-for-me/CLIProxyAPI/v6/internal/config"
    cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
    cliproxyexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
    sdktranslator "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
    log "github.com/sirupsen/logrus"
    "github.com/tidwall/gjson"
    "github.com/tidwall/sjson"
)

const (
    iflowDefaultEndpoint = "/chat/completions"
    iflowUserAgent       = "iFlow-Cli"
)

// IFlowExecutor executes OpenAI-compatible chat completions against the iFlow API using API keys derived from OAuth.
type IFlowExecutor struct {
    cfg *config.Config
}

// NewIFlowExecutor constructs a new executor instance.
func NewIFlowExecutor(cfg *config.Config) *IFlowExecutor { return &IFlowExecutor{cfg: cfg} }

// Identifier returns the provider key.
func (e *IFlowExecutor) Identifier() string { return "iflow" }

// PrepareRequest implements ProviderExecutor but requires no preprocessing.
func (e *IFlowExecutor) PrepareRequest(_ *http.Request, _ *cliproxyauth.Auth) error { return nil }

// Execute performs a non-streaming chat completion request.
func (e *IFlowExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (cliproxyexecutor.Response, error) {
    apiKey, baseURL := iflowCreds(auth)
    if strings.TrimSpace(apiKey) == "" {
        return cliproxyexecutor.Response{}, fmt.Errorf("iflow executor: missing api key")
    }
    if baseURL == "" {
        baseURL = iflowauth.DefaultAPIBaseURL
    }

    reporter := newUsageReporter(ctx, e.Identifier(), req.Model, auth)

    from := opts.SourceFormat
    to := sdktranslator.FromString("openai")
    body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)

    endpoint := strings.TrimSuffix(baseURL, "/") + iflowDefaultEndpoint
    recordAPIRequest(ctx, e.cfg, body)

    httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body))
    if err != nil {
        return cliproxyexecutor.Response{}, err
    }
    applyIFlowHeaders(httpReq, apiKey, false)

    httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
    resp, err := httpClient.Do(httpReq)
    if err != nil {
        return cliproxyexecutor.Response{}, err
    }
    defer func() { _ = resp.Body.Close() }()

    if resp.StatusCode < 200 || resp.StatusCode >= 300 {
        b, _ := io.ReadAll(resp.Body)
        appendAPIResponseChunk(ctx, e.cfg, b)
        log.Debugf("iflow request error: status %d body %s", resp.StatusCode, string(b))
        return cliproxyexecutor.Response{}, statusErr{code: resp.StatusCode, msg: string(b)}
    }

    data, err := io.ReadAll(resp.Body)
    if err != nil {
        return cliproxyexecutor.Response{}, err
    }
    appendAPIResponseChunk(ctx, e.cfg, data)
    reporter.publish(ctx, parseOpenAIUsage(data))

    var param any
    out := sdktranslator.TranslateNonStream(ctx, to, from, req.Model, bytes.Clone(opts.OriginalRequest), body, data, &param)
    return cliproxyexecutor.Response{Payload: []byte(out)}, nil
}

// ExecuteStream performs a streaming chat completion request.
func (e *IFlowExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (<-chan cliproxyexecutor.StreamChunk, error) {
    apiKey, baseURL := iflowCreds(auth)
    if strings.TrimSpace(apiKey) == "" {
        return nil, fmt.Errorf("iflow executor: missing api key")
    }
    if baseURL == "" {
        baseURL = iflowauth.DefaultAPIBaseURL
    }

    reporter := newUsageReporter(ctx, e.Identifier(), req.Model, auth)

    from := opts.SourceFormat
    to := sdktranslator.FromString("openai")
    body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), true)

    // Ensure tools array exists to avoid provider quirks similar to Qwen's behaviour.
    toolsResult := gjson.GetBytes(body, "tools")
    if toolsResult.Exists() && toolsResult.IsArray() && len(toolsResult.Array()) == 0 {
        body = ensureToolsArray(body)
    }

    endpoint := strings.TrimSuffix(baseURL, "/") + iflowDefaultEndpoint
    recordAPIRequest(ctx, e.cfg, body)

    httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body))
    if err != nil {
        return nil, err
    }
    applyIFlowHeaders(httpReq, apiKey, true)

    httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
    resp, err := httpClient.Do(httpReq)
    if err != nil {
        return nil, err
    }

    if resp.StatusCode < 200 || resp.StatusCode >= 300 {
        defer func() { _ = resp.Body.Close() }()
        b, _ := io.ReadAll(resp.Body)
        appendAPIResponseChunk(ctx, e.cfg, b)
        log.Debugf("iflow streaming error: status %d body %s", resp.StatusCode, string(b))
        return nil, statusErr{code: resp.StatusCode, msg: string(b)}
    }

    out := make(chan cliproxyexecutor.StreamChunk)
    go func() {
        defer close(out)
        defer func() { _ = resp.Body.Close() }()

        scanner := bufio.NewScanner(resp.Body)
        buf := make([]byte, 20_971_520)
        scanner.Buffer(buf, 20_971_520)
        var param any
        for scanner.Scan() {
            line := scanner.Bytes()
            appendAPIResponseChunk(ctx, e.cfg, line)
            if detail, ok := parseOpenAIStreamUsage(line); ok {
                reporter.publish(ctx, detail)
            }
            chunks := sdktranslator.TranslateStream(ctx, to, from, req.Model, bytes.Clone(opts.OriginalRequest), body, bytes.Clone(line), &param)
            for i := range chunks {
                out <- cliproxyexecutor.StreamChunk{Payload: []byte(chunks[i])}
            }
        }
        if err := scanner.Err(); err != nil {
            out <- cliproxyexecutor.StreamChunk{Err: err}
        }
    }()

    return out, nil
}

// CountTokens is not implemented for iFlow.
func (e *IFlowExecutor) CountTokens(context.Context, *cliproxyauth.Auth, cliproxyexecutor.Request, cliproxyexecutor.Options) (cliproxyexecutor.Response, error) {
    return cliproxyexecutor.Response{Payload: nil}, fmt.Errorf("not implemented")
}

// Refresh refreshes OAuth tokens and updates the stored API key.
func (e *IFlowExecutor) Refresh(ctx context.Context, auth *cliproxyauth.Auth) (*cliproxyauth.Auth, error) {
    log.Debugf("iflow executor: refresh called")
    if auth == nil {
        return nil, fmt.Errorf("iflow executor: auth is nil")
    }

    refreshToken := ""
    if auth.Metadata != nil {
        if v, ok := auth.Metadata["refresh_token"].(string); ok {
            refreshToken = strings.TrimSpace(v)
        }
    }
    if refreshToken == "" {
        return auth, nil
    }

    svc := iflowauth.NewIFlowAuth(e.cfg)
    tokenData, err := svc.RefreshTokens(ctx, refreshToken)
    if err != nil {
        return nil, err
    }

    if auth.Metadata == nil {
        auth.Metadata = make(map[string]any)
    }
    auth.Metadata["access_token"] = tokenData.AccessToken
    if tokenData.RefreshToken != "" {
        auth.Metadata["refresh_token"] = tokenData.RefreshToken
    }
    if tokenData.APIKey != "" {
        auth.Metadata["api_key"] = tokenData.APIKey
    }
    auth.Metadata["expired"] = tokenData.Expire
    auth.Metadata["type"] = "iflow"
    auth.Metadata["last_refresh"] = time.Now().Format(time.RFC3339)

    if auth.Attributes == nil {
        auth.Attributes = make(map[string]string)
    }
    if tokenData.APIKey != "" {
        auth.Attributes["api_key"] = tokenData.APIKey
    }

    return auth, nil
}

func applyIFlowHeaders(r *http.Request, apiKey string, stream bool) {
    r.Header.Set("Content-Type", "application/json")
    r.Header.Set("Authorization", "Bearer "+apiKey)
    r.Header.Set("User-Agent", iflowUserAgent)
    if stream {
        r.Header.Set("Accept", "text/event-stream")
    } else {
        r.Header.Set("Accept", "application/json")
    }
}

func iflowCreds(a *cliproxyauth.Auth) (apiKey, baseURL string) {
    if a == nil {
        return "", ""
    }
    if a.Attributes != nil {
        if v := strings.TrimSpace(a.Attributes["api_key"]); v != "" {
            apiKey = v
        }
        if v := strings.TrimSpace(a.Attributes["base_url"]); v != "" {
            baseURL = v
        }
    }
    if apiKey == "" && a.Metadata != nil {
        if v, ok := a.Metadata["api_key"].(string); ok {
            apiKey = strings.TrimSpace(v)
        }
    }
    if baseURL == "" && a.Metadata != nil {
        if v, ok := a.Metadata["base_url"].(string); ok {
            baseURL = strings.TrimSpace(v)
        }
    }
    return apiKey, baseURL
}

func ensureToolsArray(body []byte) []byte {
    placeholder := `[{"type":"function","function":{"name":"noop","description":"Placeholder tool to stabilise streaming","parameters":{"type":"object"}}}]`
    updated, err := sjson.SetRawBytes(body, "tools", []byte(placeholder))
    if err != nil {
        return body
    }
    return updated
}
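A quick illustration of what the tools placeholder does to an outgoing streaming request body; the request JSON and model name here are invented for the example, the substitution itself matches ensureToolsArray above.

package main

import (
    "fmt"

    "github.com/tidwall/sjson"
)

func main() {
    // A request whose empty tools array tends to trip some providers during streaming.
    body := []byte(`{"model":"example-model","messages":[{"role":"user","content":"hi"}],"tools":[]}`)

    // Same substitution ensureToolsArray performs: swap in a single no-op function tool.
    placeholder := `[{"type":"function","function":{"name":"noop","description":"Placeholder tool to stabilise streaming","parameters":{"type":"object"}}}]`
    body, _ = sjson.SetRawBytes(body, "tools", []byte(placeholder))

    fmt.Println(string(body))
}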
@@ -40,8 +40,8 @@ func (e *OpenAICompatExecutor) PrepareRequest(_ *http.Request, _ *cliproxyauth.A

func (e *OpenAICompatExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (cliproxyexecutor.Response, error) {
    baseURL, apiKey := e.resolveCredentials(auth)
    if baseURL == "" || apiKey == "" {
        return cliproxyexecutor.Response{}, statusErr{code: http.StatusUnauthorized, msg: "missing provider baseURL or apiKey"}
    if baseURL == "" {
        return cliproxyexecutor.Response{}, statusErr{code: http.StatusUnauthorized, msg: "missing provider baseURL"}
    }
    reporter := newUsageReporter(ctx, e.Identifier(), req.Model, auth)

@@ -60,13 +60,12 @@ func (e *OpenAICompatExecutor) Execute(ctx context.Context, auth *cliproxyauth.A
        return cliproxyexecutor.Response{}, err
    }
    httpReq.Header.Set("Content-Type", "application/json")
    httpReq.Header.Set("Authorization", "Bearer "+apiKey)
    if apiKey != "" {
        httpReq.Header.Set("Authorization", "Bearer "+apiKey)
    }
    httpReq.Header.Set("User-Agent", "cli-proxy-openai-compat")

    httpClient := &http.Client{}
    if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
        httpClient.Transport = rt
    }
    httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
    resp, err := httpClient.Do(httpReq)
    if err != nil {
        return cliproxyexecutor.Response{}, err
@@ -92,8 +91,8 @@ func (e *OpenAICompatExecutor) Execute(ctx context.Context, auth *cliproxyauth.A

func (e *OpenAICompatExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (<-chan cliproxyexecutor.StreamChunk, error) {
    baseURL, apiKey := e.resolveCredentials(auth)
    if baseURL == "" || apiKey == "" {
        return nil, statusErr{code: http.StatusUnauthorized, msg: "missing provider baseURL or apiKey"}
    if baseURL == "" {
        return nil, statusErr{code: http.StatusUnauthorized, msg: "missing provider baseURL"}
    }
    reporter := newUsageReporter(ctx, e.Identifier(), req.Model, auth)
    from := opts.SourceFormat
@@ -110,15 +109,14 @@ func (e *OpenAICompatExecutor) ExecuteStream(ctx context.Context, auth *cliproxy
        return nil, err
    }
    httpReq.Header.Set("Content-Type", "application/json")
    httpReq.Header.Set("Authorization", "Bearer "+apiKey)
    if apiKey != "" {
        httpReq.Header.Set("Authorization", "Bearer "+apiKey)
    }
    httpReq.Header.Set("User-Agent", "cli-proxy-openai-compat")
    httpReq.Header.Set("Accept", "text/event-stream")
    httpReq.Header.Set("Cache-Control", "no-cache")

    httpClient := &http.Client{Timeout: 0}
    if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
        httpClient.Transport = rt
    }
    httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
    resp, err := httpClient.Do(httpReq)
    if err != nil {
        return nil, err
@@ -135,8 +133,8 @@ func (e *OpenAICompatExecutor) ExecuteStream(ctx context.Context, auth *cliproxy
        defer close(out)
        defer func() { _ = resp.Body.Close() }()
        scanner := bufio.NewScanner(resp.Body)
        buf := make([]byte, 1024*1024)
        scanner.Buffer(buf, 1024*1024)
        buf := make([]byte, 20_971_520)
        scanner.Buffer(buf, 20_971_520)
        var param any
        for scanner.Scan() {
            line := scanner.Bytes()
@@ -177,8 +175,8 @@ func (e *OpenAICompatExecutor) resolveCredentials(auth *cliproxyauth.Auth) (base
        return "", ""
    }
    if auth.Attributes != nil {
        baseURL = auth.Attributes["base_url"]
        apiKey = auth.Attributes["api_key"]
        baseURL = strings.TrimSpace(auth.Attributes["base_url"])
        apiKey = strings.TrimSpace(auth.Attributes["api_key"])
    }
    return
}
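The hunks above relax the credential check so an OpenAI-compatible upstream that needs no key (for example a local inference server) still works; only base_url is mandatory and Authorization is sent only when a key exists. A rough sketch of the resulting header behaviour, with invented URL and values:

package main

import (
    "fmt"
    "net/http"
    "strings"
)

// applyCompatAuth mirrors the edited logic: always set the common headers,
// attach Authorization only when an API key is actually present.
func applyCompatAuth(r *http.Request, apiKey string) {
    r.Header.Set("Content-Type", "application/json")
    if strings.TrimSpace(apiKey) != "" {
        r.Header.Set("Authorization", "Bearer "+apiKey)
    }
    r.Header.Set("User-Agent", "cli-proxy-openai-compat")
}

func main() {
    req, _ := http.NewRequest(http.MethodPost, "http://127.0.0.1:8000/v1/chat/completions", nil)
    applyCompatAuth(req, "") // keyless local upstream
    fmt.Println(req.Header.Get("Authorization") == "") // true: no bearer header sent
}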
internal/runtime/executor/proxy_helpers.go (new file, 116 lines)
@@ -0,0 +1,116 @@
package executor

import (
    "context"
    "net"
    "net/http"
    "net/url"
    "strings"
    "time"

    "github.com/router-for-me/CLIProxyAPI/v6/internal/config"
    cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
    log "github.com/sirupsen/logrus"
    "golang.org/x/net/proxy"
)

// newProxyAwareHTTPClient creates an HTTP client with proper proxy configuration priority:
// 1. Use auth.ProxyURL if configured (highest priority)
// 2. Use cfg.ProxyURL if auth proxy is not configured
// 3. Use RoundTripper from context if neither are configured
//
// Parameters:
//   - ctx: The context containing optional RoundTripper
//   - cfg: The application configuration
//   - auth: The authentication information
//   - timeout: The client timeout (0 means no timeout)
//
// Returns:
//   - *http.Client: An HTTP client with configured proxy or transport
func newProxyAwareHTTPClient(ctx context.Context, cfg *config.Config, auth *cliproxyauth.Auth, timeout time.Duration) *http.Client {
    httpClient := &http.Client{}
    if timeout > 0 {
        httpClient.Timeout = timeout
    }

    // Priority 1: Use auth.ProxyURL if configured
    var proxyURL string
    if auth != nil {
        proxyURL = strings.TrimSpace(auth.ProxyURL)
    }

    // Priority 2: Use cfg.ProxyURL if auth proxy is not configured
    if proxyURL == "" && cfg != nil {
        proxyURL = strings.TrimSpace(cfg.ProxyURL)
    }

    // If we have a proxy URL configured, set up the transport
    if proxyURL != "" {
        transport := buildProxyTransport(proxyURL)
        if transport != nil {
            httpClient.Transport = transport
            return httpClient
        }
        // If proxy setup failed, log and fall through to context RoundTripper
        log.Debugf("failed to setup proxy from URL: %s, falling back to context transport", proxyURL)
    }

    // Priority 3: Use RoundTripper from context (typically from RoundTripperFor)
    if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
        httpClient.Transport = rt
    }

    return httpClient
}

// buildProxyTransport creates an HTTP transport configured for the given proxy URL.
// It supports SOCKS5, HTTP, and HTTPS proxy protocols.
//
// Parameters:
//   - proxyURL: The proxy URL string (e.g., "socks5://user:pass@host:port", "http://host:port")
//
// Returns:
//   - *http.Transport: A configured transport, or nil if the proxy URL is invalid
func buildProxyTransport(proxyURL string) *http.Transport {
    if proxyURL == "" {
        return nil
    }

    parsedURL, errParse := url.Parse(proxyURL)
    if errParse != nil {
        log.Errorf("parse proxy URL failed: %v", errParse)
        return nil
    }

    var transport *http.Transport

    // Handle different proxy schemes
    if parsedURL.Scheme == "socks5" {
        // Configure SOCKS5 proxy with optional authentication
        var proxyAuth *proxy.Auth
        if parsedURL.User != nil {
            username := parsedURL.User.Username()
            password, _ := parsedURL.User.Password()
            proxyAuth = &proxy.Auth{User: username, Password: password}
        }
        dialer, errSOCKS5 := proxy.SOCKS5("tcp", parsedURL.Host, proxyAuth, proxy.Direct)
        if errSOCKS5 != nil {
            log.Errorf("create SOCKS5 dialer failed: %v", errSOCKS5)
            return nil
        }
        // Set up a custom transport using the SOCKS5 dialer
        transport = &http.Transport{
            DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
                return dialer.Dial(network, addr)
            },
        }
    } else if parsedURL.Scheme == "http" || parsedURL.Scheme == "https" {
        // Configure HTTP or HTTPS proxy
        transport = &http.Transport{Proxy: http.ProxyURL(parsedURL)}
    } else {
        log.Errorf("unsupported proxy scheme: %s", parsedURL.Scheme)
        return nil
    }

    return transport
}
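A minimal sketch of how an executor in the same package would pick up the proxy priority described above; the proxy URLs and config values are illustrative only and assume config.Config and cliproxyauth.Auth expose the ProxyURL fields referenced in this file.

package executor

import (
    "context"
    "log"

    "github.com/router-for-me/CLIProxyAPI/v6/internal/config"
    cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)

// exampleProxySelection shows the precedence: the per-auth SOCKS5 proxy wins over
// the config-level proxy, which in turn wins over any RoundTripper on the context.
func exampleProxySelection() {
    cfg := &config.Config{ProxyURL: "http://proxy.internal:3128"}              // priority 2
    auth := &cliproxyauth.Auth{ProxyURL: "socks5://user:pass@127.0.0.1:1080"}  // priority 1

    client := newProxyAwareHTTPClient(context.Background(), cfg, auth, 0)
    log.Printf("transport: %T", client.Transport)
}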
@@ -58,10 +58,7 @@ func (e *QwenExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, req
    }
    applyQwenHeaders(httpReq, token, false)

    httpClient := &http.Client{}
    if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
        httpClient.Transport = rt
    }
    httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
    resp, err := httpClient.Do(httpReq)
    if err != nil {
        return cliproxyexecutor.Response{}, err
@@ -112,10 +109,7 @@ func (e *QwenExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Aut
    }
    applyQwenHeaders(httpReq, token, true)

    httpClient := &http.Client{Timeout: 0}
    if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
        httpClient.Transport = rt
    }
    httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
    resp, err := httpClient.Do(httpReq)
    if err != nil {
        return nil, err
@@ -132,8 +126,8 @@ func (e *QwenExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Aut
        defer close(out)
        defer func() { _ = resp.Body.Close() }()
        scanner := bufio.NewScanner(resp.Body)
        buf := make([]byte, 1024*1024)
        scanner.Buffer(buf, 1024*1024)
        buf := make([]byte, 20_971_520)
        scanner.Buffer(buf, 20_971_520)
        var param any
        for scanner.Scan() {
            line := scanner.Bytes()
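Several of these hunks raise the streaming scanner limit from 1 MiB to roughly 20 MiB. The reason is that bufio.Scanner caps a single token at 64 KiB by default and fails with bufio.ErrTooLong on longer SSE data lines; a self-contained sketch of the same configuration:

package main

import (
    "bufio"
    "fmt"
    "strings"
)

func main() {
    // A single 1 MiB SSE line would exceed the default 64 KiB token limit.
    r := strings.NewReader("data: " + strings.Repeat("x", 1<<20) + "\n")
    scanner := bufio.NewScanner(r)

    // Same cap the executors now use: 20_971_520 bytes (~20 MiB).
    buf := make([]byte, 20_971_520)
    scanner.Buffer(buf, 20_971_520)

    for scanner.Scan() {
        fmt.Println("line bytes:", len(scanner.Bytes()))
    }
    if err := scanner.Err(); err != nil {
        fmt.Println("scan error:", err)
    }
}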
internal/store/gitstore.go (new file, 749 lines)
@@ -0,0 +1,749 @@
package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-git/go-git/v6"
|
||||
"github.com/go-git/go-git/v6/config"
|
||||
"github.com/go-git/go-git/v6/plumbing"
|
||||
"github.com/go-git/go-git/v6/plumbing/object"
|
||||
"github.com/go-git/go-git/v6/plumbing/transport"
|
||||
"github.com/go-git/go-git/v6/plumbing/transport/http"
|
||||
cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
)
|
||||
|
||||
// GitTokenStore persists token records and auth metadata using git as the backing storage.
|
||||
type GitTokenStore struct {
|
||||
mu sync.Mutex
|
||||
dirLock sync.RWMutex
|
||||
baseDir string
|
||||
repoDir string
|
||||
configDir string
|
||||
remote string
|
||||
username string
|
||||
password string
|
||||
}
|
||||
|
||||
// NewGitTokenStore creates a token store that saves credentials to disk through the
|
||||
// TokenStorage implementation embedded in the token record.
|
||||
func NewGitTokenStore(remote, username, password string) *GitTokenStore {
|
||||
return &GitTokenStore{
|
||||
remote: remote,
|
||||
username: username,
|
||||
password: password,
|
||||
}
|
||||
}
|
||||
|
||||
// SetBaseDir updates the default directory used for auth JSON persistence when no explicit path is provided.
|
||||
func (s *GitTokenStore) SetBaseDir(dir string) {
|
||||
clean := strings.TrimSpace(dir)
|
||||
if clean == "" {
|
||||
s.dirLock.Lock()
|
||||
s.baseDir = ""
|
||||
s.repoDir = ""
|
||||
s.configDir = ""
|
||||
s.dirLock.Unlock()
|
||||
return
|
||||
}
|
||||
if abs, err := filepath.Abs(clean); err == nil {
|
||||
clean = abs
|
||||
}
|
||||
repoDir := filepath.Dir(clean)
|
||||
if repoDir == "" || repoDir == "." {
|
||||
repoDir = clean
|
||||
}
|
||||
configDir := filepath.Join(repoDir, "config")
|
||||
s.dirLock.Lock()
|
||||
s.baseDir = clean
|
||||
s.repoDir = repoDir
|
||||
s.configDir = configDir
|
||||
s.dirLock.Unlock()
|
||||
}
|
||||
|
||||
// AuthDir returns the directory used for auth persistence.
|
||||
func (s *GitTokenStore) AuthDir() string {
|
||||
return s.baseDirSnapshot()
|
||||
}
|
||||
|
||||
// ConfigPath returns the managed config file path.
|
||||
func (s *GitTokenStore) ConfigPath() string {
|
||||
s.dirLock.RLock()
|
||||
defer s.dirLock.RUnlock()
|
||||
if s.configDir == "" {
|
||||
return ""
|
||||
}
|
||||
return filepath.Join(s.configDir, "config.yaml")
|
||||
}
|
||||
|
||||
// EnsureRepository prepares the local git working tree by cloning or opening the repository.
|
||||
func (s *GitTokenStore) EnsureRepository() error {
|
||||
s.dirLock.Lock()
|
||||
if s.remote == "" {
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: remote not configured")
|
||||
}
|
||||
if s.baseDir == "" {
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: base directory not configured")
|
||||
}
|
||||
repoDir := s.repoDir
|
||||
if repoDir == "" {
|
||||
repoDir = filepath.Dir(s.baseDir)
|
||||
if repoDir == "" || repoDir == "." {
|
||||
repoDir = s.baseDir
|
||||
}
|
||||
s.repoDir = repoDir
|
||||
}
|
||||
if s.configDir == "" {
|
||||
s.configDir = filepath.Join(repoDir, "config")
|
||||
}
|
||||
authDir := filepath.Join(repoDir, "auths")
|
||||
configDir := filepath.Join(repoDir, "config")
|
||||
gitDir := filepath.Join(repoDir, ".git")
|
||||
authMethod := s.gitAuth()
|
||||
var initPaths []string
|
||||
if _, err := os.Stat(gitDir); errors.Is(err, fs.ErrNotExist) {
|
||||
if errMk := os.MkdirAll(repoDir, 0o700); errMk != nil {
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: create repo dir: %w", errMk)
|
||||
}
|
||||
if _, errClone := git.PlainClone(repoDir, &git.CloneOptions{Auth: authMethod, URL: s.remote}); errClone != nil {
|
||||
if errors.Is(errClone, transport.ErrEmptyRemoteRepository) {
|
||||
_ = os.RemoveAll(gitDir)
|
||||
repo, errInit := git.PlainInit(repoDir, false)
|
||||
if errInit != nil {
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: init empty repo: %w", errInit)
|
||||
}
|
||||
if _, errRemote := repo.Remote("origin"); errRemote != nil {
|
||||
if _, errCreate := repo.CreateRemote(&config.RemoteConfig{
|
||||
Name: "origin",
|
||||
URLs: []string{s.remote},
|
||||
}); errCreate != nil && !errors.Is(errCreate, git.ErrRemoteExists) {
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: configure remote: %w", errCreate)
|
||||
}
|
||||
}
|
||||
if err := os.MkdirAll(authDir, 0o700); err != nil {
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: create auth dir: %w", err)
|
||||
}
|
||||
if err := os.MkdirAll(configDir, 0o700); err != nil {
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: create config dir: %w", err)
|
||||
}
|
||||
if err := ensureEmptyFile(filepath.Join(authDir, ".gitkeep")); err != nil {
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: create auth placeholder: %w", err)
|
||||
}
|
||||
if err := ensureEmptyFile(filepath.Join(configDir, ".gitkeep")); err != nil {
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: create config placeholder: %w", err)
|
||||
}
|
||||
initPaths = []string{
|
||||
filepath.Join("auths", ".gitkeep"),
|
||||
filepath.Join("config", ".gitkeep"),
|
||||
}
|
||||
} else {
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: clone remote: %w", errClone)
|
||||
}
|
||||
}
|
||||
} else if err != nil {
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: stat repo: %w", err)
|
||||
} else {
|
||||
repo, errOpen := git.PlainOpen(repoDir)
|
||||
if errOpen != nil {
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: open repo: %w", errOpen)
|
||||
}
|
||||
worktree, errWorktree := repo.Worktree()
|
||||
if errWorktree != nil {
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: worktree: %w", errWorktree)
|
||||
}
|
||||
if errPull := worktree.Pull(&git.PullOptions{Auth: authMethod, RemoteName: "origin"}); errPull != nil {
|
||||
switch {
|
||||
case errors.Is(errPull, git.NoErrAlreadyUpToDate),
|
||||
errors.Is(errPull, git.ErrUnstagedChanges),
|
||||
errors.Is(errPull, git.ErrNonFastForwardUpdate):
|
||||
// Ignore clean syncs, local edits, and remote divergence—local changes win.
|
||||
case errors.Is(errPull, transport.ErrAuthenticationRequired),
|
||||
errors.Is(errPull, plumbing.ErrReferenceNotFound),
|
||||
errors.Is(errPull, transport.ErrEmptyRemoteRepository):
|
||||
// Ignore authentication prompts and empty remote references on initial sync.
|
||||
default:
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: pull: %w", errPull)
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := os.MkdirAll(s.baseDir, 0o700); err != nil {
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: create auth dir: %w", err)
|
||||
}
|
||||
if err := os.MkdirAll(s.configDir, 0o700); err != nil {
|
||||
s.dirLock.Unlock()
|
||||
return fmt.Errorf("git token store: create config dir: %w", err)
|
||||
}
|
||||
s.dirLock.Unlock()
|
||||
if len(initPaths) > 0 {
|
||||
s.mu.Lock()
|
||||
err := s.commitAndPushLocked("Initialize git token store", initPaths...)
|
||||
s.mu.Unlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Save persists token storage and metadata to the resolved auth file path.
|
||||
func (s *GitTokenStore) Save(_ context.Context, auth *cliproxyauth.Auth) (string, error) {
|
||||
if auth == nil {
|
||||
return "", fmt.Errorf("auth filestore: auth is nil")
|
||||
}
|
||||
|
||||
path, err := s.resolveAuthPath(auth)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if path == "" {
|
||||
return "", fmt.Errorf("auth filestore: missing file path attribute for %s", auth.ID)
|
||||
}
|
||||
|
||||
if auth.Disabled {
|
||||
if _, statErr := os.Stat(path); os.IsNotExist(statErr) {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
|
||||
if err = s.EnsureRepository(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if err = os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
|
||||
return "", fmt.Errorf("auth filestore: create dir failed: %w", err)
|
||||
}
|
||||
|
||||
switch {
|
||||
case auth.Storage != nil:
|
||||
if err = auth.Storage.SaveTokenToFile(path); err != nil {
|
||||
return "", err
|
||||
}
|
||||
case auth.Metadata != nil:
|
||||
raw, errMarshal := json.Marshal(auth.Metadata)
|
||||
if errMarshal != nil {
|
||||
return "", fmt.Errorf("auth filestore: marshal metadata failed: %w", errMarshal)
|
||||
}
|
||||
if existing, errRead := os.ReadFile(path); errRead == nil {
|
||||
if jsonEqual(existing, raw) {
|
||||
return path, nil
|
||||
}
|
||||
} else if !os.IsNotExist(errRead) {
|
||||
return "", fmt.Errorf("auth filestore: read existing failed: %w", errRead)
|
||||
}
|
||||
tmp := path + ".tmp"
|
||||
if errWrite := os.WriteFile(tmp, raw, 0o600); errWrite != nil {
|
||||
return "", fmt.Errorf("auth filestore: write temp failed: %w", errWrite)
|
||||
}
|
||||
if errRename := os.Rename(tmp, path); errRename != nil {
|
||||
return "", fmt.Errorf("auth filestore: rename failed: %w", errRename)
|
||||
}
|
||||
default:
|
||||
return "", fmt.Errorf("auth filestore: nothing to persist for %s", auth.ID)
|
||||
}
|
||||
|
||||
if auth.Attributes == nil {
|
||||
auth.Attributes = make(map[string]string)
|
||||
}
|
||||
auth.Attributes["path"] = path
|
||||
|
||||
if strings.TrimSpace(auth.FileName) == "" {
|
||||
auth.FileName = auth.ID
|
||||
}
|
||||
|
||||
relPath, errRel := s.relativeToRepo(path)
|
||||
if errRel != nil {
|
||||
return "", errRel
|
||||
}
|
||||
messageID := auth.ID
|
||||
if strings.TrimSpace(messageID) == "" {
|
||||
messageID = filepath.Base(path)
|
||||
}
|
||||
if errCommit := s.commitAndPushLocked(fmt.Sprintf("Update auth %s", strings.TrimSpace(messageID)), relPath); errCommit != nil {
|
||||
return "", errCommit
|
||||
}
|
||||
|
||||
return path, nil
|
||||
}
|
||||
|
||||
// List enumerates all auth JSON files under the configured directory.
|
||||
func (s *GitTokenStore) List(_ context.Context) ([]*cliproxyauth.Auth, error) {
|
||||
if err := s.EnsureRepository(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dir := s.baseDirSnapshot()
|
||||
if dir == "" {
|
||||
return nil, fmt.Errorf("auth filestore: directory not configured")
|
||||
}
|
||||
entries := make([]*cliproxyauth.Auth, 0)
|
||||
err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, walkErr error) error {
|
||||
if walkErr != nil {
|
||||
return walkErr
|
||||
}
|
||||
if d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
if !strings.HasSuffix(strings.ToLower(d.Name()), ".json") {
|
||||
return nil
|
||||
}
|
||||
auth, err := s.readAuthFile(path, dir)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if auth != nil {
|
||||
entries = append(entries, auth)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Delete removes the auth file.
|
||||
func (s *GitTokenStore) Delete(_ context.Context, id string) error {
|
||||
id = strings.TrimSpace(id)
|
||||
if id == "" {
|
||||
return fmt.Errorf("auth filestore: id is empty")
|
||||
}
|
||||
path, err := s.resolveDeletePath(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = s.EnsureRepository(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if err = os.Remove(path); err != nil && !os.IsNotExist(err) {
|
||||
return fmt.Errorf("auth filestore: delete failed: %w", err)
|
||||
}
|
||||
if err == nil {
|
||||
rel, errRel := s.relativeToRepo(path)
|
||||
if errRel != nil {
|
||||
return errRel
|
||||
}
|
||||
messageID := id
|
||||
if errCommit := s.commitAndPushLocked(fmt.Sprintf("Delete auth %s", messageID), rel); errCommit != nil {
|
||||
return errCommit
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CommitPaths commits and pushes the provided paths to the remote repository.
|
||||
// It no-ops when the store is not fully configured or when there are no paths.
|
||||
func (s *GitTokenStore) CommitPaths(_ context.Context, message string, paths ...string) error {
|
||||
if len(paths) == 0 {
|
||||
return nil
|
||||
}
|
||||
if err := s.EnsureRepository(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
filtered := make([]string, 0, len(paths))
|
||||
for _, p := range paths {
|
||||
trimmed := strings.TrimSpace(p)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
rel, err := s.relativeToRepo(trimmed)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filtered = append(filtered, rel)
|
||||
}
|
||||
if len(filtered) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if strings.TrimSpace(message) == "" {
|
||||
message = "Sync watcher updates"
|
||||
}
|
||||
return s.commitAndPushLocked(message, filtered...)
|
||||
}
|
||||
|
||||
func (s *GitTokenStore) resolveDeletePath(id string) (string, error) {
|
||||
if strings.ContainsRune(id, os.PathSeparator) || filepath.IsAbs(id) {
|
||||
return id, nil
|
||||
}
|
||||
dir := s.baseDirSnapshot()
|
||||
if dir == "" {
|
||||
return "", fmt.Errorf("auth filestore: directory not configured")
|
||||
}
|
||||
return filepath.Join(dir, id), nil
|
||||
}
|
||||
|
||||
func (s *GitTokenStore) readAuthFile(path, baseDir string) (*cliproxyauth.Auth, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read file: %w", err)
|
||||
}
|
||||
if len(data) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
metadata := make(map[string]any)
|
||||
if err = json.Unmarshal(data, &metadata); err != nil {
|
||||
return nil, fmt.Errorf("unmarshal auth json: %w", err)
|
||||
}
|
||||
provider, _ := metadata["type"].(string)
|
||||
if provider == "" {
|
||||
provider = "unknown"
|
||||
}
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stat file: %w", err)
|
||||
}
|
||||
id := s.idFor(path, baseDir)
|
||||
auth := &cliproxyauth.Auth{
|
||||
ID: id,
|
||||
Provider: provider,
|
||||
FileName: id,
|
||||
Label: s.labelFor(metadata),
|
||||
Status: cliproxyauth.StatusActive,
|
||||
Attributes: map[string]string{"path": path},
|
||||
Metadata: metadata,
|
||||
CreatedAt: info.ModTime(),
|
||||
UpdatedAt: info.ModTime(),
|
||||
LastRefreshedAt: time.Time{},
|
||||
NextRefreshAfter: time.Time{},
|
||||
}
|
||||
if email, ok := metadata["email"].(string); ok && email != "" {
|
||||
auth.Attributes["email"] = email
|
||||
}
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
func (s *GitTokenStore) idFor(path, baseDir string) string {
|
||||
if baseDir == "" {
|
||||
return path
|
||||
}
|
||||
rel, err := filepath.Rel(baseDir, path)
|
||||
if err != nil {
|
||||
return path
|
||||
}
|
||||
return rel
|
||||
}
|
||||
|
||||
func (s *GitTokenStore) resolveAuthPath(auth *cliproxyauth.Auth) (string, error) {
|
||||
if auth == nil {
|
||||
return "", fmt.Errorf("auth filestore: auth is nil")
|
||||
}
|
||||
if auth.Attributes != nil {
|
||||
if p := strings.TrimSpace(auth.Attributes["path"]); p != "" {
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
if fileName := strings.TrimSpace(auth.FileName); fileName != "" {
|
||||
if filepath.IsAbs(fileName) {
|
||||
return fileName, nil
|
||||
}
|
||||
if dir := s.baseDirSnapshot(); dir != "" {
|
||||
return filepath.Join(dir, fileName), nil
|
||||
}
|
||||
return fileName, nil
|
||||
}
|
||||
if auth.ID == "" {
|
||||
return "", fmt.Errorf("auth filestore: missing id")
|
||||
}
|
||||
if filepath.IsAbs(auth.ID) {
|
||||
return auth.ID, nil
|
||||
}
|
||||
dir := s.baseDirSnapshot()
|
||||
if dir == "" {
|
||||
return "", fmt.Errorf("auth filestore: directory not configured")
|
||||
}
|
||||
return filepath.Join(dir, auth.ID), nil
|
||||
}
|
||||
|
||||
func (s *GitTokenStore) labelFor(metadata map[string]any) string {
|
||||
if metadata == nil {
|
||||
return ""
|
||||
}
|
||||
if v, ok := metadata["label"].(string); ok && v != "" {
|
||||
return v
|
||||
}
|
||||
if v, ok := metadata["email"].(string); ok && v != "" {
|
||||
return v
|
||||
}
|
||||
if project, ok := metadata["project_id"].(string); ok && project != "" {
|
||||
return project
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (s *GitTokenStore) baseDirSnapshot() string {
|
||||
s.dirLock.RLock()
|
||||
defer s.dirLock.RUnlock()
|
||||
return s.baseDir
|
||||
}
|
||||
|
||||
func (s *GitTokenStore) repoDirSnapshot() string {
|
||||
s.dirLock.RLock()
|
||||
defer s.dirLock.RUnlock()
|
||||
return s.repoDir
|
||||
}
|
||||
|
||||
func (s *GitTokenStore) gitAuth() transport.AuthMethod {
|
||||
if s.username == "" && s.password == "" {
|
||||
return nil
|
||||
}
|
||||
user := s.username
|
||||
if user == "" {
|
||||
user = "git"
|
||||
}
|
||||
return &http.BasicAuth{Username: user, Password: s.password}
|
||||
}
|
||||
|
||||
func (s *GitTokenStore) relativeToRepo(path string) (string, error) {
|
||||
repoDir := s.repoDirSnapshot()
|
||||
if repoDir == "" {
|
||||
return "", fmt.Errorf("git token store: repository path not configured")
|
||||
}
|
||||
absRepo := repoDir
|
||||
if abs, err := filepath.Abs(repoDir); err == nil {
|
||||
absRepo = abs
|
||||
}
|
||||
cleanPath := path
|
||||
if abs, err := filepath.Abs(path); err == nil {
|
||||
cleanPath = abs
|
||||
}
|
||||
rel, err := filepath.Rel(absRepo, cleanPath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("git token store: relative path: %w", err)
|
||||
}
|
||||
if rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
||||
return "", fmt.Errorf("git token store: path outside repository")
|
||||
}
|
||||
return rel, nil
|
||||
}
|
||||
|
||||
func (s *GitTokenStore) commitAndPushLocked(message string, relPaths ...string) error {
|
||||
repoDir := s.repoDirSnapshot()
|
||||
if repoDir == "" {
|
||||
return fmt.Errorf("git token store: repository path not configured")
|
||||
}
|
||||
repo, err := git.PlainOpen(repoDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("git token store: open repo: %w", err)
|
||||
}
|
||||
worktree, err := repo.Worktree()
|
||||
if err != nil {
|
||||
return fmt.Errorf("git token store: worktree: %w", err)
|
||||
}
|
||||
added := false
|
||||
for _, rel := range relPaths {
|
||||
if strings.TrimSpace(rel) == "" {
|
||||
continue
|
||||
}
|
||||
if _, err = worktree.Add(rel); err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
if _, errRemove := worktree.Remove(rel); errRemove != nil && !errors.Is(errRemove, os.ErrNotExist) {
|
||||
return fmt.Errorf("git token store: remove %s: %w", rel, errRemove)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("git token store: add %s: %w", rel, err)
|
||||
}
|
||||
}
|
||||
added = true
|
||||
}
|
||||
if !added {
|
||||
return nil
|
||||
}
|
||||
status, err := worktree.Status()
|
||||
if err != nil {
|
||||
return fmt.Errorf("git token store: status: %w", err)
|
||||
}
|
||||
if status.IsClean() {
|
||||
return nil
|
||||
}
|
||||
if strings.TrimSpace(message) == "" {
|
||||
message = "Update auth store"
|
||||
}
|
||||
signature := &object.Signature{
|
||||
Name: "CLIProxyAPI",
|
||||
Email: "cliproxy@local",
|
||||
When: time.Now(),
|
||||
}
|
||||
commitHash, err := worktree.Commit(message, &git.CommitOptions{
|
||||
Author: signature,
|
||||
})
|
||||
if err != nil {
|
||||
if errors.Is(err, git.ErrEmptyCommit) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("git token store: commit: %w", err)
|
||||
}
|
||||
headRef, errHead := repo.Head()
|
||||
if errHead != nil {
|
||||
if !errors.Is(errHead, plumbing.ErrReferenceNotFound) {
|
||||
return fmt.Errorf("git token store: get head: %w", errHead)
|
||||
}
|
||||
} else if errRewrite := s.rewriteHeadAsSingleCommit(repo, headRef.Name(), commitHash, message, signature); errRewrite != nil {
|
||||
return errRewrite
|
||||
}
|
||||
if err = repo.Push(&git.PushOptions{Auth: s.gitAuth(), Force: true}); err != nil {
|
||||
if errors.Is(err, git.NoErrAlreadyUpToDate) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("git token store: push: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// rewriteHeadAsSingleCommit rewrites the current branch tip to a single-parentless commit and leaves history squashed.
|
||||
func (s *GitTokenStore) rewriteHeadAsSingleCommit(repo *git.Repository, branch plumbing.ReferenceName, commitHash plumbing.Hash, message string, signature *object.Signature) error {
|
||||
commitObj, err := repo.CommitObject(commitHash)
|
||||
if err != nil {
|
||||
return fmt.Errorf("git token store: inspect head commit: %w", err)
|
||||
}
|
||||
squashed := &object.Commit{
|
||||
Author: *signature,
|
||||
Committer: *signature,
|
||||
Message: message,
|
||||
TreeHash: commitObj.TreeHash,
|
||||
ParentHashes: nil,
|
||||
Encoding: commitObj.Encoding,
|
||||
ExtraHeaders: commitObj.ExtraHeaders,
|
||||
}
|
||||
mem := &plumbing.MemoryObject{}
|
||||
mem.SetType(plumbing.CommitObject)
|
||||
if err := squashed.Encode(mem); err != nil {
|
||||
return fmt.Errorf("git token store: encode squashed commit: %w", err)
|
||||
}
|
||||
newHash, err := repo.Storer.SetEncodedObject(mem)
|
||||
if err != nil {
|
||||
return fmt.Errorf("git token store: write squashed commit: %w", err)
|
||||
}
|
||||
if err := repo.Storer.SetReference(plumbing.NewHashReference(branch, newHash)); err != nil {
|
||||
return fmt.Errorf("git token store: update branch reference: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CommitConfig commits and pushes configuration changes to git.
|
||||
func (s *GitTokenStore) CommitConfig(_ context.Context) error {
|
||||
if err := s.EnsureRepository(); err != nil {
|
||||
return err
|
||||
}
|
||||
configPath := s.ConfigPath()
|
||||
if configPath == "" {
|
||||
return fmt.Errorf("git token store: config path not configured")
|
||||
}
|
||||
if _, err := os.Stat(configPath); err != nil {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("git token store: stat config: %w", err)
|
||||
}
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
rel, err := s.relativeToRepo(configPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.commitAndPushLocked("Update config", rel)
|
||||
}
|
||||
|
||||
func ensureEmptyFile(path string) error {
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return os.WriteFile(path, []byte{}, 0o600)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func jsonEqual(a, b []byte) bool {
|
||||
var objA any
|
||||
var objB any
|
||||
if err := json.Unmarshal(a, &objA); err != nil {
|
||||
return false
|
||||
}
|
||||
if err := json.Unmarshal(b, &objB); err != nil {
|
||||
return false
|
||||
}
|
||||
return deepEqualJSON(objA, objB)
|
||||
}
|
||||
|
||||
func deepEqualJSON(a, b any) bool {
|
||||
switch valA := a.(type) {
|
||||
case map[string]any:
|
||||
valB, ok := b.(map[string]any)
|
||||
if !ok || len(valA) != len(valB) {
|
||||
return false
|
||||
}
|
||||
for key, subA := range valA {
|
||||
subB, ok1 := valB[key]
|
||||
if !ok1 || !deepEqualJSON(subA, subB) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case []any:
|
||||
sliceB, ok := b.([]any)
|
||||
if !ok || len(valA) != len(sliceB) {
|
||||
return false
|
||||
}
|
||||
for i := range valA {
|
||||
if !deepEqualJSON(valA[i], sliceB[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case float64:
|
||||
valB, ok := b.(float64)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return valA == valB
|
||||
case string:
|
||||
valB, ok := b.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return valA == valB
|
||||
case bool:
|
||||
valB, ok := b.(bool)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return valA == valB
|
||||
case nil:
|
||||
return b == nil
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
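A rough sketch of how the git-backed token store above could be wired up; the remote URL, credentials, and paths are invented for the example, and it assumes the repository layout EnsureRepository creates (<repo>/auths and <repo>/config).

package main

import (
    "context"
    "log"

    "github.com/router-for-me/CLIProxyAPI/v6/internal/store"
)

func main() {
    // Remote, username, and token are placeholders.
    s := store.NewGitTokenStore("https://git.example.com/me/cliproxy-state.git", "git", "personal-access-token")
    s.SetBaseDir("/data/cliproxy/auths") // the repo root becomes /data/cliproxy

    if err := s.EnsureRepository(); err != nil {
        log.Fatalf("prepare repo: %v", err)
    }
    log.Println("managed config lives at:", s.ConfigPath())

    auths, err := s.List(context.Background())
    if err != nil {
        log.Fatalf("list auths: %v", err)
    }
    log.Printf("loaded %d auth records", len(auths))
}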
@@ -8,15 +8,24 @@ package gemini
import (
    "bytes"
    "crypto/rand"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "math/big"
    "strings"

    "github.com/google/uuid"
    "github.com/router-for-me/CLIProxyAPI/v6/internal/util"
    "github.com/tidwall/gjson"
    "github.com/tidwall/sjson"
)

var (
    user    = ""
    account = ""
    session = ""
)

// ConvertGeminiRequestToClaude parses and transforms a Gemini API request into Claude Code API format.
// It extracts the model name, system instruction, message contents, and tool declarations
// from the raw JSON request and returns them in the format expected by the Claude Code API.
@@ -37,8 +46,23 @@
// - []byte: The transformed request data in Claude Code API format
func ConvertGeminiRequestToClaude(modelName string, inputRawJSON []byte, stream bool) []byte {
    rawJSON := bytes.Clone(inputRawJSON)
    // Base Claude Code API template with default max_tokens value
    out := `{"model":"","max_tokens":32000,"messages":[]}`

    if account == "" {
        u, _ := uuid.NewRandom()
        account = u.String()
    }
    if session == "" {
        u, _ := uuid.NewRandom()
        session = u.String()
    }
    if user == "" {
        sum := sha256.Sum256([]byte(account + session))
        user = hex.EncodeToString(sum[:])
    }
    userID := fmt.Sprintf("user_%s_account_%s_session_%s", user, account, session)

    // Base Claude message payload
    out := fmt.Sprintf(`{"model":"","max_tokens":32000,"messages":[],"metadata":{"user_id":"%s"}}`, userID)

    root := gjson.ParseBytes(rawJSON)
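The injected metadata.user_id is stable for the lifetime of the process: account and session are random UUIDs generated once, and user is the hex SHA-256 of their concatenation. A standalone sketch of the same derivation:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"

    "github.com/google/uuid"
)

func main() {
    account := uuid.NewString()
    session := uuid.NewString()
    sum := sha256.Sum256([]byte(account + session))
    user := hex.EncodeToString(sum[:])

    // Matches the metadata.user_id format injected into the Claude payload above.
    fmt.Printf("user_%s_account_%s_session_%s\n", user, account, session)
}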
@@ -331,8 +331,8 @@ func ConvertClaudeResponseToGeminiNonStream(_ context.Context, modelName string,
    streamingEvents := make([][]byte, 0)

    scanner := bufio.NewScanner(bytes.NewReader(rawJSON))
    buffer := make([]byte, 10240*1024)
    scanner.Buffer(buffer, 10240*1024)
    buffer := make([]byte, 20_971_520)
    scanner.Buffer(buffer, 20_971_520)
    for scanner.Scan() {
        line := scanner.Bytes()
        // log.Debug(string(line))
@@ -8,14 +8,24 @@ package chat_completions
import (
    "bytes"
    "crypto/rand"
    "crypto/sha256"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "math/big"
    "strings"

    "github.com/google/uuid"
    "github.com/tidwall/gjson"
    "github.com/tidwall/sjson"
)

var (
    user    = ""
    account = ""
    session = ""
)

// ConvertOpenAIRequestToClaude parses and transforms an OpenAI Chat Completions API request into Claude Code API format.
// It extracts the model name, system instruction, message contents, and tool declarations
// from the raw JSON request and returns them in the format expected by the Claude Code API.
@@ -36,8 +46,22 @@
func ConvertOpenAIRequestToClaude(modelName string, inputRawJSON []byte, stream bool) []byte {
    rawJSON := bytes.Clone(inputRawJSON)

    if account == "" {
        u, _ := uuid.NewRandom()
        account = u.String()
    }
    if session == "" {
        u, _ := uuid.NewRandom()
        session = u.String()
    }
    if user == "" {
        sum := sha256.Sum256([]byte(account + session))
        user = hex.EncodeToString(sum[:])
    }
    userID := fmt.Sprintf("user_%s_account_%s_session_%s", user, account, session)

    // Base Claude Code API template with default max_tokens value
    out := `{"model":"","max_tokens":32000,"messages":[]}`
    out := fmt.Sprintf(`{"model":"","max_tokens":32000,"messages":[],"metadata":{"user_id":"%s"}}`, userID)

    root := gjson.ParseBytes(rawJSON)
@@ -3,13 +3,23 @@ package responses
import (
    "bytes"
    "crypto/rand"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "math/big"
    "strings"

    "github.com/google/uuid"
    "github.com/tidwall/gjson"
    "github.com/tidwall/sjson"
)

var (
    user    = ""
    account = ""
    session = ""
)

// ConvertOpenAIResponsesRequestToClaude transforms an OpenAI Responses API request
// into a Claude Messages API request using only gjson/sjson for JSON handling.
// It supports:
@@ -23,8 +33,22 @@
func ConvertOpenAIResponsesRequestToClaude(modelName string, inputRawJSON []byte, stream bool) []byte {
    rawJSON := bytes.Clone(inputRawJSON)

    if account == "" {
        u, _ := uuid.NewRandom()
        account = u.String()
    }
    if session == "" {
        u, _ := uuid.NewRandom()
        session = u.String()
    }
    if user == "" {
        sum := sha256.Sum256([]byte(account + session))
        user = hex.EncodeToString(sum[:])
    }
    userID := fmt.Sprintf("user_%s_account_%s_session_%s", user, account, session)

    // Base Claude message payload
    out := `{"model":"","max_tokens":32000,"messages":[]}`
    out := fmt.Sprintf(`{"model":"","max_tokens":32000,"messages":[],"metadata":{"user_id":"%s"}}`, userID)

    root := gjson.ParseBytes(rawJSON)
@@ -32,6 +32,10 @@ type claudeToResponsesState struct {
    ReasoningBuf       strings.Builder
    ReasoningPartAdded bool
    ReasoningIndex     int
    // usage aggregation
    InputTokens  int64
    OutputTokens int64
    UsageSeen    bool
}

var dataTag = []byte("data:")
@@ -77,6 +81,19 @@ func ConvertClaudeResponseToOpenAIResponses(ctx context.Context, modelName strin
        st.FuncArgsBuf = make(map[int]*strings.Builder)
        st.FuncNames = make(map[int]string)
        st.FuncCallIDs = make(map[int]string)
        st.InputTokens = 0
        st.OutputTokens = 0
        st.UsageSeen = false
        if usage := msg.Get("usage"); usage.Exists() {
            if v := usage.Get("input_tokens"); v.Exists() {
                st.InputTokens = v.Int()
                st.UsageSeen = true
            }
            if v := usage.Get("output_tokens"); v.Exists() {
                st.OutputTokens = v.Int()
                st.UsageSeen = true
            }
        }
        // response.created
        created := `{"type":"response.created","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"in_progress","background":false,"error":null,"instructions":""}}`
        created, _ = sjson.Set(created, "sequence_number", nextSeq())
@@ -227,7 +244,6 @@ func ConvertClaudeResponseToOpenAIResponses(ctx context.Context, modelName strin
            out = append(out, emitEvent("response.output_item.done", itemDone))
            st.InFuncBlock = false
        } else if st.ReasoningActive {
            // close reasoning
            full := st.ReasoningBuf.String()
            textDone := `{"type":"response.reasoning_summary_text.done","sequence_number":0,"item_id":"","output_index":0,"summary_index":0,"text":""}`
            textDone, _ = sjson.Set(textDone, "sequence_number", nextSeq())
@@ -244,7 +260,19 @@ func ConvertClaudeResponseToOpenAIResponses(ctx context.Context, modelName strin
            st.ReasoningActive = false
            st.ReasoningPartAdded = false
        }
    case "message_delta":
        if usage := root.Get("usage"); usage.Exists() {
            if v := usage.Get("output_tokens"); v.Exists() {
                st.OutputTokens = v.Int()
                st.UsageSeen = true
            }
            if v := usage.Get("input_tokens"); v.Exists() {
                st.InputTokens = v.Int()
                st.UsageSeen = true
            }
        }
    case "message_stop":

        completed := `{"type":"response.completed","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"completed","background":false,"error":null}}`
        completed, _ = sjson.Set(completed, "sequence_number", nextSeq())
        completed, _ = sjson.Set(completed, "response.id", st.ResponseID)
@@ -381,6 +409,24 @@ func ConvertClaudeResponseToOpenAIResponses(ctx context.Context, modelName strin
        if len(outputs) > 0 {
            completed, _ = sjson.Set(completed, "response.output", outputs)
        }

        reasoningTokens := int64(0)
        if st.ReasoningBuf.Len() > 0 {
            reasoningTokens = int64(st.ReasoningBuf.Len() / 4)
        }
        usagePresent := st.UsageSeen || reasoningTokens > 0
        if usagePresent {
            completed, _ = sjson.Set(completed, "response.usage.input_tokens", st.InputTokens)
            completed, _ = sjson.Set(completed, "response.usage.input_tokens_details.cached_tokens", 0)
            completed, _ = sjson.Set(completed, "response.usage.output_tokens", st.OutputTokens)
            if reasoningTokens > 0 {
                completed, _ = sjson.Set(completed, "response.usage.output_tokens_details.reasoning_tokens", reasoningTokens)
            }
            total := st.InputTokens + st.OutputTokens
            if total > 0 || st.UsageSeen {
                completed, _ = sjson.Set(completed, "response.usage.total_tokens", total)
            }
        }
        out = append(out, emitEvent("response.completed", completed))
    }
@@ -399,8 +445,8 @@ func ConvertClaudeResponseToOpenAIResponsesNonStream(_ context.Context, _ string
|
||||
// Use a simple scanner to iterate through raw bytes
|
||||
// Note: extremely large responses may require increasing the buffer
|
||||
scanner := bufio.NewScanner(bytes.NewReader(rawJSON))
|
||||
buf := make([]byte, 10240*1024)
|
||||
scanner.Buffer(buf, 10240*1024)
|
||||
buf := make([]byte, 20_971_520)
|
||||
scanner.Buffer(buf, 20_971_520)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Bytes()
|
||||
if !bytes.HasPrefix(line, dataTag) {
|
||||
|
||||
@@ -181,8 +181,8 @@ func ConvertCodexResponseToClaude(_ context.Context, _ string, originalRequestRa
|
||||
// - string: A Claude Code-compatible JSON response containing all message content and metadata
|
||||
func ConvertCodexResponseToClaudeNonStream(_ context.Context, _ string, originalRequestRawJSON, _ []byte, rawJSON []byte, _ *any) string {
|
||||
scanner := bufio.NewScanner(bytes.NewReader(rawJSON))
|
||||
buffer := make([]byte, 10240*1024)
|
||||
scanner.Buffer(buffer, 10240*1024)
|
||||
buffer := make([]byte, 20_971_520)
|
||||
scanner.Buffer(buffer, 20_971_520)
|
||||
revNames := buildReverseMapFromClaudeOriginalShortToOriginal(originalRequestRawJSON)
|
||||
|
||||
for scanner.Scan() {
|
||||
|
||||
@@ -153,8 +153,8 @@ func ConvertCodexResponseToGemini(_ context.Context, modelName string, originalR
|
||||
// - string: A Gemini-compatible JSON response containing all message content and metadata
|
||||
func ConvertCodexResponseToGeminiNonStream(_ context.Context, modelName string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, _ *any) string {
|
||||
scanner := bufio.NewScanner(bytes.NewReader(rawJSON))
|
||||
buffer := make([]byte, 10240*1024)
|
||||
scanner.Buffer(buffer, 10240*1024)
|
||||
buffer := make([]byte, 20_971_520)
|
||||
scanner.Buffer(buffer, 20_971_520)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Bytes()
|
||||
// log.Debug(string(line))
|
||||
|
||||
@@ -17,6 +17,9 @@ func ConvertOpenAIResponsesRequestToCodex(modelName string, inputRawJSON []byte,
|
||||
rawJSON, _ = sjson.SetBytes(rawJSON, "store", false)
|
||||
rawJSON, _ = sjson.SetBytes(rawJSON, "parallel_tool_calls", true)
|
||||
rawJSON, _ = sjson.SetBytes(rawJSON, "include", []string{"reasoning.encrypted_content"})
|
||||
// Codex Responses rejects token limit fields, so strip them out before forwarding.
|
||||
rawJSON, _ = sjson.DeleteBytes(rawJSON, "max_output_tokens")
|
||||
rawJSON, _ = sjson.DeleteBytes(rawJSON, "max_completion_tokens")
|
||||
rawJSON, _ = sjson.DeleteBytes(rawJSON, "temperature")
|
||||
rawJSON, _ = sjson.DeleteBytes(rawJSON, "top_p")
|
||||
|
||||
@@ -31,9 +34,17 @@ func ConvertOpenAIResponsesRequestToCodex(modelName string, inputRawJSON []byte,
}

inputResult := gjson.GetBytes(rawJSON, "input")
inputResults := []gjson.Result{}
if inputResult.Exists() && inputResult.IsArray() {
inputResults = inputResult.Array()
var inputResults []gjson.Result
if inputResult.Exists() {
if inputResult.IsArray() {
inputResults = inputResult.Array()
} else if inputResult.Type == gjson.String {
newInput := `[{"type":"message","role":"user","content":[{"type":"input_text","text":""}]}]`
newInput, _ = sjson.Set(newInput, "0.content.0.text", inputResult.String())
inputResults = gjson.Parse(newInput).Array()
}
} else {
inputResults = []gjson.Result{}
}

extractedSystemInstructions := false

@@ -30,8 +30,8 @@ func ConvertCodexResponseToOpenAIResponses(ctx context.Context, modelName string
|
||||
// from a non-streaming OpenAI Chat Completions response.
|
||||
func ConvertCodexResponseToOpenAIResponsesNonStream(_ context.Context, modelName string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, _ *any) string {
|
||||
scanner := bufio.NewScanner(bytes.NewReader(rawJSON))
|
||||
buffer := make([]byte, 10240*1024)
|
||||
scanner.Buffer(buffer, 10240*1024)
|
||||
buffer := make([]byte, 20_971_520)
|
||||
scanner.Buffer(buffer, 20_971_520)
|
||||
dataTag := []byte("data:")
|
||||
for scanner.Scan() {
|
||||
line := scanner.Bytes()
|
||||
|
||||
@@ -25,8 +25,22 @@ import (
|
||||
// Returns:
|
||||
// - []byte: The transformed request data in Gemini CLI API format
|
||||
func ConvertOpenAIRequestToGeminiCLI(modelName string, inputRawJSON []byte, _ bool) []byte {
|
||||
log.Debug("ConvertOpenAIRequestToGeminiCLI")
|
||||
rawJSON := bytes.Clone(inputRawJSON)
|
||||
var pathsToDelete []string
|
||||
root := gjson.ParseBytes(rawJSON)
|
||||
util.Walk(root, "", "additionalProperties", &pathsToDelete)
|
||||
util.Walk(root, "", "$schema", &pathsToDelete)
|
||||
util.Walk(root, "", "ref", &pathsToDelete)
|
||||
util.Walk(root, "", "strict", &pathsToDelete)
|
||||
|
||||
var err error
|
||||
for _, p := range pathsToDelete {
|
||||
rawJSON, err = sjson.DeleteBytes(rawJSON, p)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Base envelope
|
||||
out := []byte(`{"project":"","request":{"contents":[],"generationConfig":{"thinkingConfig":{"include_thoughts":true}}},"model":"gemini-2.5-pro"}`)
|
||||
|
||||
@@ -66,6 +80,31 @@ func ConvertOpenAIRequestToGeminiCLI(modelName string, inputRawJSON []byte, _ bo
|
||||
out, _ = sjson.SetBytes(out, "request.generationConfig.topK", tkr.Num)
|
||||
}
|
||||
|
||||
// Map OpenAI modalities -> Gemini CLI request.generationConfig.responseModalities
|
||||
// e.g. "modalities": ["image", "text"] -> ["Image", "Text"]
|
||||
if mods := gjson.GetBytes(rawJSON, "modalities"); mods.Exists() && mods.IsArray() {
|
||||
var responseMods []string
|
||||
for _, m := range mods.Array() {
|
||||
switch strings.ToLower(m.String()) {
|
||||
case "text":
|
||||
responseMods = append(responseMods, "Text")
|
||||
case "image":
|
||||
responseMods = append(responseMods, "Image")
|
||||
}
|
||||
}
|
||||
if len(responseMods) > 0 {
|
||||
out, _ = sjson.SetBytes(out, "request.generationConfig.responseModalities", responseMods)
|
||||
}
|
||||
}
|
||||
|
||||
// OpenRouter-style image_config support
|
||||
// If the input uses top-level image_config.aspect_ratio, map it into request.generationConfig.imageConfig.aspectRatio.
|
||||
if imgCfg := gjson.GetBytes(rawJSON, "image_config"); imgCfg.Exists() && imgCfg.IsObject() {
|
||||
if ar := imgCfg.Get("aspect_ratio"); ar.Exists() && ar.Type == gjson.String {
|
||||
out, _ = sjson.SetBytes(out, "request.generationConfig.imageConfig.aspectRatio", ar.Str)
|
||||
}
|
||||
}
|
||||
|
||||
// messages -> systemInstruction + contents
|
||||
messages := gjson.GetBytes(rawJSON, "messages")
|
||||
if messages.IsArray() {
|
||||
@@ -233,7 +272,7 @@ func ConvertOpenAIRequestToGeminiCLI(modelName string, inputRawJSON []byte, _ bo
|
||||
}
|
||||
|
||||
var pathsToType []string
|
||||
root := gjson.ParseBytes(out)
|
||||
root = gjson.ParseBytes(out)
|
||||
util.Walk(root, "", "type", &pathsToType)
|
||||
for _, p := range pathsToType {
|
||||
typeResult := gjson.GetBytes(out, p)
|
||||
|
||||
@@ -8,6 +8,7 @@ package chat_completions
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
@@ -19,6 +20,7 @@ import (
|
||||
// convertCliResponseToOpenAIChatParams holds parameters for response conversion.
|
||||
type convertCliResponseToOpenAIChatParams struct {
|
||||
UnixTimestamp int64
|
||||
FunctionIndex int
|
||||
}
|
||||
|
||||
// ConvertCliResponseToOpenAI translates a single chunk of a streaming response from the
|
||||
@@ -39,6 +41,7 @@ func ConvertCliResponseToOpenAI(_ context.Context, _ string, originalRequestRawJ
|
||||
if *param == nil {
|
||||
*param = &convertCliResponseToOpenAIChatParams{
|
||||
UnixTimestamp: 0,
|
||||
FunctionIndex: 0,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -100,6 +103,10 @@ func ConvertCliResponseToOpenAI(_ context.Context, _ string, originalRequestRawJ
|
||||
partResult := partResults[i]
|
||||
partTextResult := partResult.Get("text")
|
||||
functionCallResult := partResult.Get("functionCall")
|
||||
inlineDataResult := partResult.Get("inlineData")
|
||||
if !inlineDataResult.Exists() {
|
||||
inlineDataResult = partResult.Get("inline_data")
|
||||
}
|
||||
|
||||
if partTextResult.Exists() {
|
||||
// Handle text content, distinguishing between regular content and reasoning/thoughts.
|
||||
@@ -112,19 +119,52 @@ func ConvertCliResponseToOpenAI(_ context.Context, _ string, originalRequestRawJ
|
||||
} else if functionCallResult.Exists() {
|
||||
// Handle function call content.
|
||||
toolCallsResult := gjson.Get(template, "choices.0.delta.tool_calls")
|
||||
if !toolCallsResult.Exists() || !toolCallsResult.IsArray() {
|
||||
functionCallIndex := (*param).(*convertCliResponseToOpenAIChatParams).FunctionIndex
|
||||
(*param).(*convertCliResponseToOpenAIChatParams).FunctionIndex++
|
||||
if toolCallsResult.Exists() && toolCallsResult.IsArray() {
|
||||
functionCallIndex = len(toolCallsResult.Array())
|
||||
} else {
|
||||
template, _ = sjson.SetRaw(template, "choices.0.delta.tool_calls", `[]`)
|
||||
}
|
||||
|
||||
functionCallTemplate := `{"id": "","type": "function","function": {"name": "","arguments": ""}}`
|
||||
functionCallTemplate := `{"id": "","index": 0,"type": "function","function": {"name": "","arguments": ""}}`
|
||||
fcName := functionCallResult.Get("name").String()
|
||||
functionCallTemplate, _ = sjson.Set(functionCallTemplate, "id", fmt.Sprintf("%s-%d", fcName, time.Now().UnixNano()))
|
||||
functionCallTemplate, _ = sjson.Set(functionCallTemplate, "index", functionCallIndex)
|
||||
functionCallTemplate, _ = sjson.Set(functionCallTemplate, "function.name", fcName)
|
||||
if fcArgsResult := functionCallResult.Get("args"); fcArgsResult.Exists() {
|
||||
functionCallTemplate, _ = sjson.Set(functionCallTemplate, "function.arguments", fcArgsResult.Raw)
|
||||
}
|
||||
template, _ = sjson.Set(template, "choices.0.delta.role", "assistant")
|
||||
template, _ = sjson.SetRaw(template, "choices.0.delta.tool_calls.-1", functionCallTemplate)
|
||||
} else if inlineDataResult.Exists() {
|
||||
data := inlineDataResult.Get("data").String()
|
||||
if data == "" {
|
||||
continue
|
||||
}
|
||||
mimeType := inlineDataResult.Get("mimeType").String()
|
||||
if mimeType == "" {
|
||||
mimeType = inlineDataResult.Get("mime_type").String()
|
||||
}
|
||||
if mimeType == "" {
|
||||
mimeType = "image/png"
|
||||
}
|
||||
imageURL := fmt.Sprintf("data:%s;base64,%s", mimeType, data)
|
||||
imagePayload, err := json.Marshal(map[string]any{
|
||||
"type": "image_url",
|
||||
"image_url": map[string]string{
|
||||
"url": imageURL,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
imagesResult := gjson.Get(template, "choices.0.delta.images")
|
||||
if !imagesResult.Exists() || !imagesResult.IsArray() {
|
||||
template, _ = sjson.SetRaw(template, "choices.0.delta.images", `[]`)
|
||||
}
|
||||
template, _ = sjson.Set(template, "choices.0.delta.role", "assistant")
|
||||
template, _ = sjson.SetRaw(template, "choices.0.delta.images.-1", string(imagePayload))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
package chat_completions
|
||||
|
||||
import (
|
||||
. "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
|
||||
geminiChat "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/openai/chat-completions"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/translator/translator"
|
||||
)
|
||||
|
||||
func init() {
|
||||
translator.Register(
|
||||
OpenAI,
|
||||
GeminiWeb,
|
||||
geminiChat.ConvertOpenAIRequestToGemini,
|
||||
interfaces.TranslateResponse{
|
||||
Stream: geminiChat.ConvertGeminiResponseToOpenAI,
|
||||
NonStream: geminiChat.ConvertGeminiResponseToOpenAINonStream,
|
||||
},
|
||||
)
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
package responses
|
||||
|
||||
import (
|
||||
. "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
|
||||
geminiResponses "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/openai/responses"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/translator/translator"
|
||||
)
|
||||
|
||||
func init() {
|
||||
translator.Register(
|
||||
OpenaiResponse,
|
||||
GeminiWeb,
|
||||
geminiResponses.ConvertOpenAIResponsesRequestToGemini,
|
||||
interfaces.TranslateResponse{
|
||||
Stream: geminiResponses.ConvertGeminiResponseToOpenAIResponses,
|
||||
NonStream: geminiResponses.ConvertGeminiResponseToOpenAIResponsesNonStream,
|
||||
},
|
||||
)
|
||||
}
|
||||
@@ -26,6 +26,21 @@ import (
|
||||
// - []byte: The transformed request data in Gemini API format
|
||||
func ConvertOpenAIRequestToGemini(modelName string, inputRawJSON []byte, _ bool) []byte {
|
||||
rawJSON := bytes.Clone(inputRawJSON)
|
||||
var pathsToDelete []string
|
||||
root := gjson.ParseBytes(rawJSON)
|
||||
util.Walk(root, "", "additionalProperties", &pathsToDelete)
|
||||
util.Walk(root, "", "$schema", &pathsToDelete)
|
||||
util.Walk(root, "", "ref", &pathsToDelete)
|
||||
util.Walk(root, "", "strict", &pathsToDelete)
|
||||
|
||||
var err error
|
||||
for _, p := range pathsToDelete {
|
||||
rawJSON, err = sjson.DeleteBytes(rawJSON, p)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Base envelope
|
||||
out := []byte(`{"contents":[],"generationConfig":{"thinkingConfig":{"include_thoughts":true}}}`)
|
||||
|
||||
@@ -65,6 +80,31 @@ func ConvertOpenAIRequestToGemini(modelName string, inputRawJSON []byte, _ bool)
|
||||
out, _ = sjson.SetBytes(out, "generationConfig.topK", tkr.Num)
|
||||
}
|
||||
|
||||
// Map OpenAI modalities -> Gemini generationConfig.responseModalities
|
||||
// e.g. "modalities": ["image", "text"] -> ["Image", "Text"]
|
||||
if mods := gjson.GetBytes(rawJSON, "modalities"); mods.Exists() && mods.IsArray() {
|
||||
var responseMods []string
|
||||
for _, m := range mods.Array() {
|
||||
switch strings.ToLower(m.String()) {
|
||||
case "text":
|
||||
responseMods = append(responseMods, "Text")
|
||||
case "image":
|
||||
responseMods = append(responseMods, "Image")
|
||||
}
|
||||
}
|
||||
if len(responseMods) > 0 {
|
||||
out, _ = sjson.SetBytes(out, "generationConfig.responseModalities", responseMods)
|
||||
}
|
||||
}
|
||||
|
||||
// OpenRouter-style image_config support
|
||||
// If the input uses top-level image_config.aspect_ratio, map it into generationConfig.imageConfig.aspectRatio.
|
||||
if imgCfg := gjson.GetBytes(rawJSON, "image_config"); imgCfg.Exists() && imgCfg.IsObject() {
|
||||
if ar := imgCfg.Get("aspect_ratio"); ar.Exists() && ar.Type == gjson.String {
|
||||
out, _ = sjson.SetBytes(out, "generationConfig.imageConfig.aspectRatio", ar.Str)
|
||||
}
|
||||
}
|
||||
|
||||
// messages -> systemInstruction + contents
|
||||
messages := gjson.GetBytes(rawJSON, "messages")
|
||||
if messages.IsArray() {
|
||||
@@ -257,7 +297,7 @@ func ConvertOpenAIRequestToGemini(modelName string, inputRawJSON []byte, _ bool)
|
||||
}
|
||||
|
||||
var pathsToType []string
|
||||
root := gjson.ParseBytes(out)
|
||||
root = gjson.ParseBytes(out)
|
||||
util.Walk(root, "", "type", &pathsToType)
|
||||
for _, p := range pathsToType {
|
||||
typeResult := gjson.GetBytes(out, p)
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
// convertGeminiResponseToOpenAIChatParams holds parameters for response conversion.
|
||||
type convertGeminiResponseToOpenAIChatParams struct {
|
||||
UnixTimestamp int64
|
||||
FunctionIndex int
|
||||
}
|
||||
|
||||
// ConvertGeminiResponseToOpenAI translates a single chunk of a streaming response from the
|
||||
@@ -39,6 +40,7 @@ func ConvertGeminiResponseToOpenAI(_ context.Context, _ string, originalRequestR
|
||||
if *param == nil {
|
||||
*param = &convertGeminiResponseToOpenAIChatParams{
|
||||
UnixTimestamp: 0,
|
||||
FunctionIndex: 0,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -120,13 +122,18 @@ func ConvertGeminiResponseToOpenAI(_ context.Context, _ string, originalRequestR
|
||||
} else if functionCallResult.Exists() {
|
||||
// Handle function call content.
|
||||
toolCallsResult := gjson.Get(template, "choices.0.delta.tool_calls")
|
||||
if !toolCallsResult.Exists() || !toolCallsResult.IsArray() {
|
||||
functionCallIndex := (*param).(*convertGeminiResponseToOpenAIChatParams).FunctionIndex
|
||||
(*param).(*convertGeminiResponseToOpenAIChatParams).FunctionIndex++
|
||||
if toolCallsResult.Exists() && toolCallsResult.IsArray() {
|
||||
functionCallIndex = len(toolCallsResult.Array())
|
||||
} else {
|
||||
template, _ = sjson.SetRaw(template, "choices.0.delta.tool_calls", `[]`)
|
||||
}
|
||||
|
||||
functionCallTemplate := `{"id": "","type": "function","function": {"name": "","arguments": ""}}`
|
||||
functionCallTemplate := `{"id": "","index": 0,"type": "function","function": {"name": "","arguments": ""}}`
|
||||
fcName := functionCallResult.Get("name").String()
|
||||
functionCallTemplate, _ = sjson.Set(functionCallTemplate, "id", fmt.Sprintf("%s-%d", fcName, time.Now().UnixNano()))
|
||||
functionCallTemplate, _ = sjson.Set(functionCallTemplate, "index", functionCallIndex)
|
||||
functionCallTemplate, _ = sjson.Set(functionCallTemplate, "function.name", fcName)
|
||||
if fcArgsResult := functionCallResult.Get("args"); fcArgsResult.Exists() {
|
||||
functionCallTemplate, _ = sjson.Set(functionCallTemplate, "function.arguments", fcArgsResult.Raw)
|
||||
|
||||
@@ -78,12 +78,21 @@ func ConvertGeminiResponseToOpenAIResponses(_ context.Context, modelName string,
|
||||
textDone, _ = sjson.Set(textDone, "output_index", st.ReasoningIndex)
|
||||
textDone, _ = sjson.Set(textDone, "text", full)
|
||||
out = append(out, emitEvent("response.reasoning_summary_text.done", textDone))
|
||||
|
||||
partDone := `{"type":"response.reasoning_summary_part.done","sequence_number":0,"item_id":"","output_index":0,"summary_index":0,"part":{"type":"summary_text","text":""}}`
|
||||
partDone, _ = sjson.Set(partDone, "sequence_number", nextSeq())
|
||||
partDone, _ = sjson.Set(partDone, "item_id", st.ReasoningItemID)
|
||||
partDone, _ = sjson.Set(partDone, "output_index", st.ReasoningIndex)
|
||||
partDone, _ = sjson.Set(partDone, "part.text", full)
|
||||
out = append(out, emitEvent("response.reasoning_summary_part.done", partDone))
|
||||
|
||||
itemDone := `{"type":"response.output_item.done","sequence_number":0,"output_index":0,"item":{"id":"","type":"reasoning","encrypted_content":"","summary":[{"type":"summary_text","text":""}]}}`
|
||||
itemDone, _ = sjson.Set(itemDone, "sequence_number", nextSeq())
|
||||
itemDone, _ = sjson.Set(itemDone, "item.id", st.ReasoningItemID)
|
||||
itemDone, _ = sjson.Set(itemDone, "output_index", st.ReasoningIndex)
|
||||
itemDone, _ = sjson.Set(itemDone, "item.summary.0.text", full)
|
||||
out = append(out, emitEvent("response.output_item.done", itemDone))
|
||||
|
||||
st.ReasoningClosed = true
|
||||
}
|
||||
|
||||
@@ -414,6 +423,25 @@ func ConvertGeminiResponseToOpenAIResponses(_ context.Context, modelName string,
|
||||
completed, _ = sjson.Set(completed, "response.output", outputs)
|
||||
}
|
||||
|
||||
// usage mapping
|
||||
if um := root.Get("usageMetadata"); um.Exists() {
|
||||
// input tokens = prompt + thoughts
|
||||
input := um.Get("promptTokenCount").Int() + um.Get("thoughtsTokenCount").Int()
|
||||
completed, _ = sjson.Set(completed, "response.usage.input_tokens", input)
|
||||
// cached_tokens not provided by Gemini; default to 0 for structure compatibility
|
||||
completed, _ = sjson.Set(completed, "response.usage.input_tokens_details.cached_tokens", 0)
|
||||
// output tokens
|
||||
if v := um.Get("candidatesTokenCount"); v.Exists() {
|
||||
completed, _ = sjson.Set(completed, "response.usage.output_tokens", v.Int())
|
||||
}
|
||||
if v := um.Get("thoughtsTokenCount"); v.Exists() {
|
||||
completed, _ = sjson.Set(completed, "response.usage.output_tokens_details.reasoning_tokens", v.Int())
|
||||
}
|
||||
if v := um.Get("totalTokenCount"); v.Exists() {
|
||||
completed, _ = sjson.Set(completed, "response.usage.total_tokens", v.Int())
|
||||
}
|
||||
}
|
||||
|
||||
out = append(out, emitEvent("response.completed", completed))
|
||||
}
|
||||
|
||||
|
||||
@@ -23,9 +23,6 @@ import (
|
||||
_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/openai/chat-completions"
|
||||
_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini/openai/responses"
|
||||
|
||||
_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini-web/openai/chat-completions"
|
||||
_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/gemini-web/openai/responses"
|
||||
|
||||
_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/openai/claude"
|
||||
_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/openai/gemini"
|
||||
_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator/openai/gemini-cli"
|
||||
|
||||
@@ -32,6 +32,13 @@ type oaiToResponsesState struct {
|
||||
// function item done state
|
||||
FuncArgsDone map[int]bool
|
||||
FuncItemDone map[int]bool
|
||||
// usage aggregation
|
||||
PromptTokens int64
|
||||
CachedTokens int64
|
||||
CompletionTokens int64
|
||||
TotalTokens int64
|
||||
ReasoningTokens int64
|
||||
UsageSeen bool
|
||||
}
|
||||
|
||||
func emitRespEvent(event string, payload string) string {
|
||||
@@ -66,6 +73,35 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context,
|
||||
return []string{}
|
||||
}
|
||||
|
||||
if usage := root.Get("usage"); usage.Exists() {
|
||||
if v := usage.Get("prompt_tokens"); v.Exists() {
|
||||
st.PromptTokens = v.Int()
|
||||
st.UsageSeen = true
|
||||
}
|
||||
if v := usage.Get("prompt_tokens_details.cached_tokens"); v.Exists() {
|
||||
st.CachedTokens = v.Int()
|
||||
st.UsageSeen = true
|
||||
}
|
||||
if v := usage.Get("completion_tokens"); v.Exists() {
|
||||
st.CompletionTokens = v.Int()
|
||||
st.UsageSeen = true
|
||||
} else if v := usage.Get("output_tokens"); v.Exists() {
|
||||
st.CompletionTokens = v.Int()
|
||||
st.UsageSeen = true
|
||||
}
|
||||
if v := usage.Get("output_tokens_details.reasoning_tokens"); v.Exists() {
|
||||
st.ReasoningTokens = v.Int()
|
||||
st.UsageSeen = true
|
||||
} else if v := usage.Get("completion_tokens_details.reasoning_tokens"); v.Exists() {
|
||||
st.ReasoningTokens = v.Int()
|
||||
st.UsageSeen = true
|
||||
}
|
||||
if v := usage.Get("total_tokens"); v.Exists() {
|
||||
st.TotalTokens = v.Int()
|
||||
st.UsageSeen = true
|
||||
}
|
||||
}
|
||||
|
||||
nextSeq := func() int { st.Seq++; return st.Seq }
|
||||
var out []string
|
||||
|
||||
@@ -85,6 +121,12 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context,
|
||||
st.MsgItemDone = make(map[int]bool)
|
||||
st.FuncArgsDone = make(map[int]bool)
|
||||
st.FuncItemDone = make(map[int]bool)
|
||||
st.PromptTokens = 0
|
||||
st.CachedTokens = 0
|
||||
st.CompletionTokens = 0
|
||||
st.TotalTokens = 0
|
||||
st.ReasoningTokens = 0
|
||||
st.UsageSeen = false
|
||||
// response.created
|
||||
created := `{"type":"response.created","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"in_progress","background":false,"error":null}}`
|
||||
created, _ = sjson.Set(created, "sequence_number", nextSeq())
|
||||
@@ -503,6 +545,19 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context,
|
||||
if len(outputs) > 0 {
|
||||
completed, _ = sjson.Set(completed, "response.output", outputs)
|
||||
}
|
||||
if st.UsageSeen {
|
||||
completed, _ = sjson.Set(completed, "response.usage.input_tokens", st.PromptTokens)
|
||||
completed, _ = sjson.Set(completed, "response.usage.input_tokens_details.cached_tokens", st.CachedTokens)
|
||||
completed, _ = sjson.Set(completed, "response.usage.output_tokens", st.CompletionTokens)
|
||||
if st.ReasoningTokens > 0 {
|
||||
completed, _ = sjson.Set(completed, "response.usage.output_tokens_details.reasoning_tokens", st.ReasoningTokens)
|
||||
}
|
||||
total := st.TotalTokens
|
||||
if total == 0 {
|
||||
total = st.PromptTokens + st.CompletionTokens
|
||||
}
|
||||
completed, _ = sjson.Set(completed, "response.usage.total_tokens", total)
|
||||
}
|
||||
out = append(out, emitRespEvent("response.completed", completed))
|
||||
}
|
||||
|
||||
|
||||
59  internal/util/image.go  Normal file
@@ -0,0 +1,59 @@
package util

import (
	"bytes"
	"encoding/base64"
	"image"
	"image/draw"
	"image/png"
)

func CreateWhiteImageBase64(aspectRatio string) (string, error) {
	width := 1024
	height := 1024

	switch aspectRatio {
	case "1:1":
		width = 1024
		height = 1024
	case "2:3":
		width = 832
		height = 1248
	case "3:2":
		width = 1248
		height = 832
	case "3:4":
		width = 864
		height = 1184
	case "4:3":
		width = 1184
		height = 864
	case "4:5":
		width = 896
		height = 1152
	case "5:4":
		width = 1152
		height = 896
	case "9:16":
		width = 768
		height = 1344
	case "16:9":
		width = 1344
		height = 768
	case "21:9":
		width = 1536
		height = 672
	}

	img := image.NewRGBA(image.Rect(0, 0, width, height))
	draw.Draw(img, img.Bounds(), image.White, image.Point{}, draw.Src)

	var buf bytes.Buffer

	if err := png.Encode(&buf, img); err != nil {
		return "", err
	}

	base64String := base64.StdEncoding.EncodeToString(buf.Bytes())
	return base64String, nil
}
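The new helper only returns raw base64-encoded PNG bytes; wrapping them into a data URL (or an inlineData part) is left to the caller. A minimal usage sketch, assuming a caller importing this util package; the data-URL wrapping and error handling below are illustrative, not taken from the diff:

    // Build a white 16:9 placeholder and wrap it as a data URL (hypothetical caller).
    b64, err := util.CreateWhiteImageBase64("16:9") // unknown ratios fall back to 1024x1024
    if err != nil {
        return fmt.Errorf("placeholder image generation failed: %w", err)
    }
    dataURL := "data:image/png;base64," + b64
    _ = dataURL // e.g. attach as an image_url / inlineData part in a request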
@@ -26,7 +26,7 @@ import (
|
||||
//
|
||||
// Returns:
|
||||
// - []string: All provider identifiers capable of serving the model, ordered by preference.
|
||||
func GetProviderName(modelName string, cfg *config.Config) []string {
|
||||
func GetProviderName(modelName string) []string {
|
||||
if modelName == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/proxy"
|
||||
)
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
// SetProxy configures the provided HTTP client with proxy settings from the configuration.
|
||||
// It supports SOCKS5, HTTP, and HTTPS proxies. The function modifies the client's transport
|
||||
// to route requests through the configured proxy server.
|
||||
func SetProxy(cfg *config.Config, httpClient *http.Client) *http.Client {
|
||||
func SetProxy(cfg *config.SDKConfig, httpClient *http.Client) *http.Client {
|
||||
var transport *http.Transport
|
||||
// Attempt to parse the proxy URL from the configuration.
|
||||
proxyURL, errParse := url.Parse(cfg.ProxyURL)
|
||||
@@ -25,9 +25,12 @@ func SetProxy(cfg *config.Config, httpClient *http.Client) *http.Client {
// Handle different proxy schemes.
if proxyURL.Scheme == "socks5" {
// Configure SOCKS5 proxy with optional authentication.
username := proxyURL.User.Username()
password, _ := proxyURL.User.Password()
proxyAuth := &proxy.Auth{User: username, Password: password}
var proxyAuth *proxy.Auth
if proxyURL.User != nil {
username := proxyURL.User.Username()
password, _ := proxyURL.User.Password()
proxyAuth = &proxy.Auth{User: username, Password: password}
}
dialer, errSOCKS5 := proxy.SOCKS5("tcp", proxyURL.Host, proxyAuth, proxy.Direct)
if errSOCKS5 != nil {
log.Errorf("create SOCKS5 dialer failed: %v", errSOCKS5)

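The fix above only matters when the proxy URL carries no user info: golang.org/x/net/proxy accepts a nil *proxy.Auth for an unauthenticated SOCKS5 connection, whereas the old code always dereferenced proxyURL.User. A rough illustration, assuming SetProxy stays in this util package and that SDKConfig exposes ProxyURL as shown in the hunk (addresses and credentials are made up):

    // Anonymous SOCKS5: no user info, so proxyAuth stays nil.
    anon := &config.SDKConfig{ProxyURL: "socks5://127.0.0.1:1080"}
    client := util.SetProxy(anon, &http.Client{})

    // Authenticated SOCKS5: user info present, so an Auth value is passed to proxy.SOCKS5.
    authd := &config.SDKConfig{ProxyURL: "socks5://user:pass@127.0.0.1:1080"}
    client = util.SetProxy(authd, &http.Client{})
    _ = client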
@@ -4,6 +4,7 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -30,23 +31,42 @@ func SetLogLevel(cfg *config.Config) {
}
}

// CountAuthFiles returns the number of JSON auth files located under the provided directory.
// The function resolves leading tildes to the user's home directory and performs a case-insensitive
// match on the ".json" suffix so that files saved with uppercase extensions are also counted.
func CountAuthFiles(authDir string) int {
// ResolveAuthDir normalizes the auth directory path for consistent reuse throughout the app.
// It expands a leading tilde (~) to the user's home directory and returns a cleaned path.
func ResolveAuthDir(authDir string) (string, error) {
if authDir == "" {
return 0
return "", nil
}
if strings.HasPrefix(authDir, "~") {
home, err := os.UserHomeDir()
if err != nil {
log.Debugf("countAuthFiles: failed to resolve home directory: %v", err)
return 0
return "", fmt.Errorf("resolve auth dir: %w", err)
}
authDir = filepath.Join(home, authDir[1:])
remainder := strings.TrimPrefix(authDir, "~")
remainder = strings.TrimLeft(remainder, "/\\")
if remainder == "" {
return filepath.Clean(home), nil
}
normalized := strings.ReplaceAll(remainder, "\\", "/")
return filepath.Clean(filepath.Join(home, filepath.FromSlash(normalized))), nil
}
return filepath.Clean(authDir), nil
}

// CountAuthFiles returns the number of JSON auth files located under the provided directory.
// The function resolves leading tildes to the user's home directory and performs a case-insensitive
// match on the ".json" suffix so that files saved with uppercase extensions are also counted.
func CountAuthFiles(authDir string) int {
dir, err := ResolveAuthDir(authDir)
if err != nil {
log.Debugf("countAuthFiles: failed to resolve auth directory: %v", err)
return 0
}
if dir == "" {
return 0
}
count := 0
walkErr := filepath.WalkDir(authDir, func(path string, d fs.DirEntry, err error) error {
walkErr := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
log.Debugf("countAuthFiles: error accessing %s: %v", path, err)
return nil

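With this split, tilde handling lives in one place and CountAuthFiles (plus the watcher code further down) just consume the resolved path. Roughly, assuming a Linux home directory of /home/alice; the log call is illustrative:

    // ResolveAuthDir("~")        -> "/home/alice", nil
    // ResolveAuthDir("~/auths")  -> "/home/alice/auths", nil
    // ResolveAuthDir("~\auths")  -> "/home/alice/auths", nil (backslashes normalized)
    // ResolveAuthDir("")         -> "", nil (callers treat empty as nothing to scan)
    dir, err := util.ResolveAuthDir("~/auths")
    if err == nil && dir != "" {
        n := util.CountAuthFiles(dir) // counts *.json files, case-insensitive extension match
        log.Debugf("found %d auth files under %s", n, dir)
    }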
@@ -14,6 +14,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -28,11 +29,18 @@ import (
|
||||
// "github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
|
||||
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
log "github.com/sirupsen/logrus"
|
||||
// "github.com/tidwall/gjson"
|
||||
)
|
||||
|
||||
// gitCommitter captures the subset of git-backed token store capabilities used by the watcher.
|
||||
type gitCommitter interface {
|
||||
CommitConfig(ctx context.Context) error
|
||||
CommitPaths(ctx context.Context, message string, paths ...string) error
|
||||
}
|
||||
|
||||
// Watcher manages file watching for configuration and authentication files
|
||||
type Watcher struct {
|
||||
configPath string
|
||||
@@ -50,6 +58,40 @@ type Watcher struct {
pendingUpdates map[string]AuthUpdate
pendingOrder []string
dispatchCancel context.CancelFunc
gitCommitter gitCommitter
}

type stableIDGenerator struct {
counters map[string]int
}

func newStableIDGenerator() *stableIDGenerator {
return &stableIDGenerator{counters: make(map[string]int)}
}

func (g *stableIDGenerator) next(kind string, parts ...string) (string, string) {
if g == nil {
return kind + ":000000000000", "000000000000"
}
hasher := sha256.New()
hasher.Write([]byte(kind))
for _, part := range parts {
trimmed := strings.TrimSpace(part)
hasher.Write([]byte{0})
hasher.Write([]byte(trimmed))
}
digest := hex.EncodeToString(hasher.Sum(nil))
if len(digest) < 12 {
digest = fmt.Sprintf("%012s", digest)
}
short := digest[:12]
key := kind + ":" + short
index := g.counters[key]
g.counters[key] = index + 1
if index > 0 {
short = fmt.Sprintf("%s-%d", short, index)
}
return fmt.Sprintf("%s:%s", kind, short), short
}

// AuthUpdateAction represents the type of change detected in auth sources.
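The generator replaces the old positional IDs (gemini:apikey:0, claude:apikey:1, ...) used later in SnapshotCoreAuths with IDs derived from the credential itself, so reordering entries in the config no longer changes an auth's identity. A rough sketch of its behavior; the key value is a placeholder:

    gen := newStableIDGenerator()
    id, token := gen.next("gemini:apikey", "AIza-example-key")
    // id    == "gemini:apikey:" + first 12 hex chars of sha256("gemini:apikey" + 0x00 + key)
    // token == that same 12-character suffix, reused in the "source" attribute below
    dup, _ := gen.next("gemini:apikey", "AIza-example-key")
    // dup gains a "-1" suffix so identical inputs still yield distinct IDs
    _, _, _ = id, token, dup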
@@ -80,7 +122,6 @@ func NewWatcher(configPath, authDir string, reloadCallback func(*config.Config))
|
||||
if errNewWatcher != nil {
|
||||
return nil, errNewWatcher
|
||||
}
|
||||
|
||||
w := &Watcher{
|
||||
configPath: configPath,
|
||||
authDir: authDir,
|
||||
@@ -89,6 +130,12 @@ func NewWatcher(configPath, authDir string, reloadCallback func(*config.Config))
|
||||
lastAuthHashes: make(map[string]string),
|
||||
}
|
||||
w.dispatchCond = sync.NewCond(&w.dispatchMu)
|
||||
if store := sdkAuth.GetTokenStore(); store != nil {
|
||||
if committer, ok := store.(gitCommitter); ok {
|
||||
w.gitCommitter = committer
|
||||
log.Debug("gitstore mode detected; watcher will commit changes to remote repository")
|
||||
}
|
||||
}
|
||||
return w, nil
|
||||
}
|
||||
|
||||
@@ -112,7 +159,7 @@ func (w *Watcher) Start(ctx context.Context) error {
|
||||
go w.processEvents(ctx)
|
||||
|
||||
// Perform an initial full reload based on current config and auth dir
|
||||
w.reloadClients()
|
||||
w.reloadClients(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -302,6 +349,41 @@ func (w *Watcher) stopDispatch() {
w.clientsMutex.Unlock()
}

func (w *Watcher) commitConfigAsync() {
if w == nil || w.gitCommitter == nil {
return
}
go func() {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
if err := w.gitCommitter.CommitConfig(ctx); err != nil {
log.Errorf("failed to commit config change: %v", err)
}
}()
}

func (w *Watcher) commitAuthAsync(message string, paths ...string) {
if w == nil || w.gitCommitter == nil {
return
}
filtered := make([]string, 0, len(paths))
for _, p := range paths {
if trimmed := strings.TrimSpace(p); trimmed != "" {
filtered = append(filtered, trimmed)
}
}
if len(filtered) == 0 {
return
}
go func() {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
if err := w.gitCommitter.CommitPaths(ctx, message, filtered...); err != nil {
log.Errorf("failed to commit auth changes: %v", err)
}
}()
}

func authEqual(a, b *coreauth.Auth) bool {
return reflect.DeepEqual(normalizeAuth(a), normalizeAuth(b))
}
@@ -396,9 +478,17 @@ func (w *Watcher) handleEvent(event fsnotify.Event) {
|
||||
}
|
||||
fmt.Printf("config file changed, reloading: %s\n", w.configPath)
|
||||
if w.reloadConfig() {
|
||||
finalHash := newHash
|
||||
if updatedData, errRead := os.ReadFile(w.configPath); errRead == nil && len(updatedData) > 0 {
|
||||
sumUpdated := sha256.Sum256(updatedData)
|
||||
finalHash = hex.EncodeToString(sumUpdated[:])
|
||||
} else if errRead != nil {
|
||||
log.WithError(errRead).Debug("failed to compute updated config hash after reload")
|
||||
}
|
||||
w.clientsMutex.Lock()
|
||||
w.lastConfigHash = newHash
|
||||
w.lastConfigHash = finalHash
|
||||
w.clientsMutex.Unlock()
|
||||
w.commitConfigAsync()
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -422,6 +512,7 @@ func (w *Watcher) handleEvent(event fsnotify.Event) {
|
||||
|
||||
// reloadConfig reloads the configuration and triggers a full reload
|
||||
func (w *Watcher) reloadConfig() bool {
|
||||
log.Debug("=========================== CONFIG RELOAD ============================")
|
||||
log.Debugf("starting config reload from: %s", w.configPath)
|
||||
|
||||
newConfig, errLoadConfig := config.LoadConfig(w.configPath)
|
||||
@@ -430,6 +521,12 @@ func (w *Watcher) reloadConfig() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
if resolvedAuthDir, errResolveAuthDir := util.ResolveAuthDir(newConfig.AuthDir); errResolveAuthDir != nil {
|
||||
log.Errorf("failed to resolve auth directory from config: %v", errResolveAuthDir)
|
||||
} else {
|
||||
newConfig.AuthDir = resolvedAuthDir
|
||||
}
|
||||
|
||||
w.clientsMutex.Lock()
|
||||
oldConfig := w.config
|
||||
w.config = newConfig
|
||||
@@ -464,18 +561,6 @@ func (w *Watcher) reloadConfig() bool {
|
||||
if oldConfig.RequestRetry != newConfig.RequestRetry {
|
||||
log.Debugf(" request-retry: %d -> %d", oldConfig.RequestRetry, newConfig.RequestRetry)
|
||||
}
|
||||
if oldConfig.GeminiWeb.Context != newConfig.GeminiWeb.Context {
|
||||
log.Debugf(" gemini-web.context: %t -> %t", oldConfig.GeminiWeb.Context, newConfig.GeminiWeb.Context)
|
||||
}
|
||||
if oldConfig.GeminiWeb.MaxCharsPerRequest != newConfig.GeminiWeb.MaxCharsPerRequest {
|
||||
log.Debugf(" gemini-web.max-chars-per-request: %d -> %d", oldConfig.GeminiWeb.MaxCharsPerRequest, newConfig.GeminiWeb.MaxCharsPerRequest)
|
||||
}
|
||||
if oldConfig.GeminiWeb.DisableContinuationHint != newConfig.GeminiWeb.DisableContinuationHint {
|
||||
log.Debugf(" gemini-web.disable-continuation-hint: %t -> %t", oldConfig.GeminiWeb.DisableContinuationHint, newConfig.GeminiWeb.DisableContinuationHint)
|
||||
}
|
||||
if oldConfig.GeminiWeb.CodeMode != newConfig.GeminiWeb.CodeMode {
|
||||
log.Debugf(" gemini-web.code-mode: %t -> %t", oldConfig.GeminiWeb.CodeMode, newConfig.GeminiWeb.CodeMode)
|
||||
}
|
||||
if len(oldConfig.APIKeys) != len(newConfig.APIKeys) {
|
||||
log.Debugf(" api-keys count: %d -> %d", len(oldConfig.APIKeys), len(newConfig.APIKeys))
|
||||
}
|
||||
@@ -491,23 +576,49 @@ func (w *Watcher) reloadConfig() bool {
|
||||
if oldConfig.RemoteManagement.AllowRemote != newConfig.RemoteManagement.AllowRemote {
|
||||
log.Debugf(" remote-management.allow-remote: %t -> %t", oldConfig.RemoteManagement.AllowRemote, newConfig.RemoteManagement.AllowRemote)
|
||||
}
|
||||
if oldConfig.RemoteManagement.SecretKey != newConfig.RemoteManagement.SecretKey {
|
||||
switch {
|
||||
case oldConfig.RemoteManagement.SecretKey == "" && newConfig.RemoteManagement.SecretKey != "":
|
||||
log.Debug(" remote-management.secret-key: created")
|
||||
case oldConfig.RemoteManagement.SecretKey != "" && newConfig.RemoteManagement.SecretKey == "":
|
||||
log.Debug(" remote-management.secret-key: deleted")
|
||||
default:
|
||||
log.Debug(" remote-management.secret-key: updated")
|
||||
}
|
||||
if newConfig.RemoteManagement.SecretKey == "" {
|
||||
log.Info("management routes will be disabled after secret key removal")
|
||||
} else {
|
||||
log.Info("management routes will be enabled after secret key update")
|
||||
}
|
||||
}
|
||||
if oldConfig.RemoteManagement.DisableControlPanel != newConfig.RemoteManagement.DisableControlPanel {
|
||||
log.Debugf(" remote-management.disable-control-panel: %t -> %t", oldConfig.RemoteManagement.DisableControlPanel, newConfig.RemoteManagement.DisableControlPanel)
|
||||
}
|
||||
if oldConfig.LoggingToFile != newConfig.LoggingToFile {
|
||||
log.Debugf(" logging-to-file: %t -> %t", oldConfig.LoggingToFile, newConfig.LoggingToFile)
|
||||
}
|
||||
if oldConfig.UsageStatisticsEnabled != newConfig.UsageStatisticsEnabled {
|
||||
log.Debugf(" usage-statistics-enabled: %t -> %t", oldConfig.UsageStatisticsEnabled, newConfig.UsageStatisticsEnabled)
|
||||
}
|
||||
if changes := diffOpenAICompatibility(oldConfig.OpenAICompatibility, newConfig.OpenAICompatibility); len(changes) > 0 {
|
||||
log.Debugf(" openai-compatibility:")
|
||||
for _, change := range changes {
|
||||
log.Debugf(" %s", change)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
authDirChanged := oldConfig == nil || oldConfig.AuthDir != newConfig.AuthDir
|
||||
|
||||
log.Infof("config successfully reloaded, triggering client reload")
|
||||
// Reload clients with new config
|
||||
w.reloadClients()
|
||||
w.reloadClients(authDirChanged)
|
||||
return true
|
||||
}
|
||||
|
||||
// reloadClients performs a full scan and reload of all clients.
|
||||
func (w *Watcher) reloadClients() {
|
||||
log.Debugf("starting full client reload process")
|
||||
func (w *Watcher) reloadClients(rescanAuth bool) {
|
||||
log.Debugf("starting full client load process")
|
||||
|
||||
w.clientsMutex.RLock()
|
||||
cfg := w.config
|
||||
@@ -523,40 +634,60 @@ func (w *Watcher) reloadClients() {
|
||||
|
||||
// Create new API key clients based on the new config
|
||||
glAPIKeyCount, claudeAPIKeyCount, codexAPIKeyCount, openAICompatCount := BuildAPIKeyClients(cfg)
|
||||
log.Debugf("created %d new API key clients", 0)
|
||||
totalAPIKeyClients := glAPIKeyCount + claudeAPIKeyCount + codexAPIKeyCount + openAICompatCount
|
||||
log.Debugf("loaded %d API key clients", totalAPIKeyClients)
|
||||
|
||||
// Load file-based clients
|
||||
authFileCount := w.loadFileClients(cfg)
|
||||
log.Debugf("loaded %d new file-based clients", 0)
|
||||
var authFileCount int
|
||||
if rescanAuth {
|
||||
// Load file-based clients when explicitly requested (startup or authDir change)
|
||||
authFileCount = w.loadFileClients(cfg)
|
||||
log.Debugf("loaded %d file-based clients", authFileCount)
|
||||
} else {
|
||||
// Preserve existing auth hashes and only report current known count to avoid redundant scans.
|
||||
w.clientsMutex.RLock()
|
||||
authFileCount = len(w.lastAuthHashes)
|
||||
w.clientsMutex.RUnlock()
|
||||
log.Debugf("skipping auth directory rescan; retaining %d existing auth files", authFileCount)
|
||||
}
|
||||
|
||||
// no legacy file-based clients to unregister
|
||||
|
||||
// Update client maps
|
||||
w.clientsMutex.Lock()
|
||||
if rescanAuth {
|
||||
w.clientsMutex.Lock()
|
||||
|
||||
// Rebuild auth file hash cache for current clients
|
||||
w.lastAuthHashes = make(map[string]string)
|
||||
// Recompute hashes for current auth files
|
||||
_ = filepath.Walk(cfg.AuthDir, func(path string, info fs.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return nil
|
||||
// Rebuild auth file hash cache for current clients
|
||||
w.lastAuthHashes = make(map[string]string)
|
||||
if resolvedAuthDir, errResolveAuthDir := util.ResolveAuthDir(cfg.AuthDir); errResolveAuthDir != nil {
|
||||
log.Errorf("failed to resolve auth directory for hash cache: %v", errResolveAuthDir)
|
||||
} else if resolvedAuthDir != "" {
|
||||
_ = filepath.Walk(resolvedAuthDir, func(path string, info fs.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if !info.IsDir() && strings.HasSuffix(strings.ToLower(info.Name()), ".json") {
|
||||
if data, errReadFile := os.ReadFile(path); errReadFile == nil && len(data) > 0 {
|
||||
sum := sha256.Sum256(data)
|
||||
w.lastAuthHashes[path] = hex.EncodeToString(sum[:])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if !info.IsDir() && strings.HasSuffix(strings.ToLower(info.Name()), ".json") {
|
||||
if data, errReadFile := os.ReadFile(path); errReadFile == nil && len(data) > 0 {
|
||||
sum := sha256.Sum256(data)
|
||||
w.lastAuthHashes[path] = hex.EncodeToString(sum[:])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
w.clientsMutex.Unlock()
|
||||
w.clientsMutex.Unlock()
|
||||
}
|
||||
|
||||
totalNewClients := authFileCount + glAPIKeyCount + claudeAPIKeyCount + codexAPIKeyCount + openAICompatCount
|
||||
|
||||
// Ensure consumers observe the new configuration before auth updates dispatch.
|
||||
if w.reloadCallback != nil {
|
||||
log.Debugf("triggering server update callback before auth refresh")
|
||||
w.reloadCallback(cfg)
|
||||
}
|
||||
|
||||
w.refreshAuthState()
|
||||
|
||||
log.Infof("full client reload complete - old: %d clients, new: %d clients (%d auth files + %d GL API keys + %d Claude API keys + %d Codex keys + %d OpenAI-compat)",
|
||||
0,
|
||||
log.Infof("full client load complete - %d clients (%d auth files + %d GL API keys + %d Claude API keys + %d Codex keys + %d OpenAI-compat)",
|
||||
totalNewClients,
|
||||
authFileCount,
|
||||
glAPIKeyCount,
|
||||
@@ -564,12 +695,6 @@ func (w *Watcher) reloadClients() {
|
||||
codexAPIKeyCount,
|
||||
openAICompatCount,
|
||||
)
|
||||
|
||||
// Trigger the callback to update the server
|
||||
if w.reloadCallback != nil {
|
||||
log.Debugf("triggering server update callback")
|
||||
w.reloadCallback(cfg)
|
||||
}
|
||||
}
|
||||
|
||||
// createClientFromFile creates a single client instance from a given token file path.
|
||||
@@ -615,6 +740,7 @@ func (w *Watcher) addOrUpdateClient(path string) {
|
||||
log.Debugf("triggering server update callback after add/update")
|
||||
w.reloadCallback(cfg)
|
||||
}
|
||||
w.commitAuthAsync(fmt.Sprintf("Sync auth %s", filepath.Base(path)), path)
|
||||
}
|
||||
|
||||
// removeClient handles the removal of a single client.
|
||||
@@ -632,6 +758,7 @@ func (w *Watcher) removeClient(path string) {
|
||||
log.Debugf("triggering server update callback after removal")
|
||||
w.reloadCallback(cfg)
|
||||
}
|
||||
w.commitAuthAsync(fmt.Sprintf("Remove auth %s", filepath.Base(path)), path)
|
||||
}
|
||||
|
||||
// SnapshotCombinedClients returns a snapshot of current combined clients.
|
||||
@@ -641,6 +768,7 @@ func (w *Watcher) removeClient(path string) {
|
||||
func (w *Watcher) SnapshotCoreAuths() []*coreauth.Auth {
|
||||
out := make([]*coreauth.Auth, 0, 32)
|
||||
now := time.Now()
|
||||
idGen := newStableIDGenerator()
|
||||
// Also synthesize auth entries for OpenAI-compatibility providers directly from config
|
||||
w.clientsMutex.RLock()
|
||||
cfg := w.config
|
||||
@@ -648,14 +776,18 @@ func (w *Watcher) SnapshotCoreAuths() []*coreauth.Auth {
|
||||
if cfg != nil {
|
||||
// Gemini official API keys -> synthesize auths
|
||||
for i := range cfg.GlAPIKey {
|
||||
k := cfg.GlAPIKey[i]
|
||||
k := strings.TrimSpace(cfg.GlAPIKey[i])
|
||||
if k == "" {
|
||||
continue
|
||||
}
|
||||
id, token := idGen.next("gemini:apikey", k)
|
||||
a := &coreauth.Auth{
|
||||
ID: fmt.Sprintf("gemini:apikey:%d", i),
|
||||
ID: id,
|
||||
Provider: "gemini",
|
||||
Label: "gemini-apikey",
|
||||
Status: coreauth.StatusActive,
|
||||
Attributes: map[string]string{
|
||||
"source": fmt.Sprintf("config:gemini#%d", i),
|
||||
"source": fmt.Sprintf("config:gemini[%s]", token),
|
||||
"api_key": k,
|
||||
},
|
||||
CreatedAt: now,
|
||||
@@ -666,18 +798,25 @@ func (w *Watcher) SnapshotCoreAuths() []*coreauth.Auth {
|
||||
// Claude API keys -> synthesize auths
|
||||
for i := range cfg.ClaudeKey {
|
||||
ck := cfg.ClaudeKey[i]
|
||||
key := strings.TrimSpace(ck.APIKey)
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
id, token := idGen.next("claude:apikey", key, ck.BaseURL)
|
||||
attrs := map[string]string{
|
||||
"source": fmt.Sprintf("config:claude#%d", i),
|
||||
"api_key": ck.APIKey,
|
||||
"source": fmt.Sprintf("config:claude[%s]", token),
|
||||
"api_key": key,
|
||||
}
|
||||
if ck.BaseURL != "" {
|
||||
attrs["base_url"] = ck.BaseURL
|
||||
}
|
||||
proxyURL := strings.TrimSpace(ck.ProxyURL)
|
||||
a := &coreauth.Auth{
|
||||
ID: fmt.Sprintf("claude:apikey:%d", i),
|
||||
ID: id,
|
||||
Provider: "claude",
|
||||
Label: "claude-apikey",
|
||||
Status: coreauth.StatusActive,
|
||||
ProxyURL: proxyURL,
|
||||
Attributes: attrs,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
@@ -687,18 +826,25 @@ func (w *Watcher) SnapshotCoreAuths() []*coreauth.Auth {
|
||||
// Codex API keys -> synthesize auths
|
||||
for i := range cfg.CodexKey {
|
||||
ck := cfg.CodexKey[i]
|
||||
key := strings.TrimSpace(ck.APIKey)
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
id, token := idGen.next("codex:apikey", key, ck.BaseURL)
|
||||
attrs := map[string]string{
|
||||
"source": fmt.Sprintf("config:codex#%d", i),
|
||||
"api_key": ck.APIKey,
|
||||
"source": fmt.Sprintf("config:codex[%s]", token),
|
||||
"api_key": key,
|
||||
}
|
||||
if ck.BaseURL != "" {
|
||||
attrs["base_url"] = ck.BaseURL
|
||||
}
|
||||
proxyURL := strings.TrimSpace(ck.ProxyURL)
|
||||
a := &coreauth.Auth{
|
||||
ID: fmt.Sprintf("codex:apikey:%d", i),
|
||||
ID: id,
|
||||
Provider: "codex",
|
||||
Label: "codex-apikey",
|
||||
Status: coreauth.StatusActive,
|
||||
ProxyURL: proxyURL,
|
||||
Attributes: attrs,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
@@ -711,13 +857,80 @@ func (w *Watcher) SnapshotCoreAuths() []*coreauth.Auth {
|
||||
if providerName == "" {
|
||||
providerName = "openai-compatibility"
|
||||
}
|
||||
base := compat.BaseURL
|
||||
for j := range compat.APIKeys {
|
||||
key := compat.APIKeys[j]
|
||||
base := strings.TrimSpace(compat.BaseURL)
|
||||
|
||||
// Handle new APIKeyEntries format (preferred)
|
||||
createdEntries := 0
|
||||
if len(compat.APIKeyEntries) > 0 {
|
||||
for j := range compat.APIKeyEntries {
|
||||
entry := &compat.APIKeyEntries[j]
|
||||
key := strings.TrimSpace(entry.APIKey)
|
||||
proxyURL := strings.TrimSpace(entry.ProxyURL)
|
||||
idKind := fmt.Sprintf("openai-compatibility:%s", providerName)
|
||||
id, token := idGen.next(idKind, key, base, proxyURL)
|
||||
attrs := map[string]string{
|
||||
"source": fmt.Sprintf("config:%s[%s]", providerName, token),
|
||||
"base_url": base,
|
||||
"compat_name": compat.Name,
|
||||
"provider_key": providerName,
|
||||
}
|
||||
if key != "" {
|
||||
attrs["api_key"] = key
|
||||
}
|
||||
if hash := computeOpenAICompatModelsHash(compat.Models); hash != "" {
|
||||
attrs["models_hash"] = hash
|
||||
}
|
||||
a := &coreauth.Auth{
|
||||
ID: id,
|
||||
Provider: providerName,
|
||||
Label: compat.Name,
|
||||
Status: coreauth.StatusActive,
|
||||
ProxyURL: proxyURL,
|
||||
Attributes: attrs,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
}
|
||||
out = append(out, a)
|
||||
createdEntries++
|
||||
}
|
||||
} else {
|
||||
// Handle legacy APIKeys format for backward compatibility
|
||||
for j := range compat.APIKeys {
|
||||
key := strings.TrimSpace(compat.APIKeys[j])
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
idKind := fmt.Sprintf("openai-compatibility:%s", providerName)
|
||||
id, token := idGen.next(idKind, key, base)
|
||||
attrs := map[string]string{
|
||||
"source": fmt.Sprintf("config:%s[%s]", providerName, token),
|
||||
"base_url": base,
|
||||
"compat_name": compat.Name,
|
||||
"provider_key": providerName,
|
||||
}
|
||||
attrs["api_key"] = key
|
||||
if hash := computeOpenAICompatModelsHash(compat.Models); hash != "" {
|
||||
attrs["models_hash"] = hash
|
||||
}
|
||||
a := &coreauth.Auth{
|
||||
ID: id,
|
||||
Provider: providerName,
|
||||
Label: compat.Name,
|
||||
Status: coreauth.StatusActive,
|
||||
Attributes: attrs,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
}
|
||||
out = append(out, a)
|
||||
createdEntries++
|
||||
}
|
||||
}
|
||||
if createdEntries == 0 {
|
||||
idKind := fmt.Sprintf("openai-compatibility:%s", providerName)
|
||||
id, token := idGen.next(idKind, base)
|
||||
attrs := map[string]string{
|
||||
"source": fmt.Sprintf("config:%s#%d", compat.Name, j),
|
||||
"source": fmt.Sprintf("config:%s[%s]", providerName, token),
|
||||
"base_url": base,
|
||||
"api_key": key,
|
||||
"compat_name": compat.Name,
|
||||
"provider_key": providerName,
|
||||
}
|
||||
@@ -725,7 +938,7 @@ func (w *Watcher) SnapshotCoreAuths() []*coreauth.Auth {
|
||||
attrs["models_hash"] = hash
|
||||
}
|
||||
a := &coreauth.Auth{
|
||||
ID: fmt.Sprintf("openai-compatibility:%s:%d", compat.Name, j),
|
||||
ID: id,
|
||||
Provider: providerName,
|
||||
Label: compat.Name,
|
||||
Status: coreauth.StatusActive,
|
||||
@@ -774,6 +987,11 @@ func (w *Watcher) SnapshotCoreAuths() []*coreauth.Auth {
|
||||
id = rel
|
||||
}
|
||||
|
||||
proxyURL := ""
|
||||
if p, ok := metadata["proxy_url"].(string); ok {
|
||||
proxyURL = p
|
||||
}
|
||||
|
||||
a := &coreauth.Auth{
|
||||
ID: id,
|
||||
Provider: provider,
|
||||
@@ -783,6 +1001,7 @@ func (w *Watcher) SnapshotCoreAuths() []*coreauth.Auth {
|
||||
"source": full,
|
||||
"path": full,
|
||||
},
|
||||
ProxyURL: proxyURL,
|
||||
Metadata: metadata,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
@@ -803,14 +1022,13 @@ func (w *Watcher) loadFileClients(cfg *config.Config) int {
|
||||
authFileCount := 0
|
||||
successfulAuthCount := 0
|
||||
|
||||
authDir := cfg.AuthDir
|
||||
if strings.HasPrefix(authDir, "~") {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
log.Errorf("failed to get home directory: %v", err)
|
||||
return 0
|
||||
}
|
||||
authDir = filepath.Join(home, authDir[1:])
|
||||
authDir, errResolveAuthDir := util.ResolveAuthDir(cfg.AuthDir)
|
||||
if errResolveAuthDir != nil {
|
||||
log.Errorf("failed to resolve auth directory: %v", errResolveAuthDir)
|
||||
return 0
|
||||
}
|
||||
if authDir == "" {
|
||||
return 0
|
||||
}
|
||||
|
||||
errWalk := filepath.Walk(authDir, func(path string, info fs.FileInfo, err error) error {
|
||||
@@ -855,8 +1073,139 @@ func BuildAPIKeyClients(cfg *config.Config) (int, int, int, int) {
if len(cfg.OpenAICompatibility) > 0 {
// Do not construct legacy clients for OpenAI-compat providers; these are handled by the stateless executor.
for _, compatConfig := range cfg.OpenAICompatibility {
openAICompatCount += len(compatConfig.APIKeys)
// Count from new APIKeyEntries format if present, otherwise fall back to legacy APIKeys
if len(compatConfig.APIKeyEntries) > 0 {
openAICompatCount += len(compatConfig.APIKeyEntries)
} else {
openAICompatCount += len(compatConfig.APIKeys)
}
}
}
return glAPIKeyCount, claudeAPIKeyCount, codexAPIKeyCount, openAICompatCount
}

func diffOpenAICompatibility(oldList, newList []config.OpenAICompatibility) []string {
changes := make([]string, 0)
oldMap := make(map[string]config.OpenAICompatibility, len(oldList))
oldLabels := make(map[string]string, len(oldList))
for idx, entry := range oldList {
key, label := openAICompatKey(entry, idx)
oldMap[key] = entry
oldLabels[key] = label
}
newMap := make(map[string]config.OpenAICompatibility, len(newList))
newLabels := make(map[string]string, len(newList))
for idx, entry := range newList {
key, label := openAICompatKey(entry, idx)
newMap[key] = entry
newLabels[key] = label
}
keySet := make(map[string]struct{}, len(oldMap)+len(newMap))
for key := range oldMap {
keySet[key] = struct{}{}
}
for key := range newMap {
keySet[key] = struct{}{}
}
orderedKeys := make([]string, 0, len(keySet))
for key := range keySet {
orderedKeys = append(orderedKeys, key)
}
sort.Strings(orderedKeys)
for _, key := range orderedKeys {
oldEntry, oldOk := oldMap[key]
newEntry, newOk := newMap[key]
label := oldLabels[key]
if label == "" {
label = newLabels[key]
}
switch {
case !oldOk:
changes = append(changes, fmt.Sprintf("provider added: %s (api-keys=%d, models=%d)", label, countAPIKeys(newEntry), countOpenAIModels(newEntry.Models)))
case !newOk:
changes = append(changes, fmt.Sprintf("provider removed: %s (api-keys=%d, models=%d)", label, countAPIKeys(oldEntry), countOpenAIModels(oldEntry.Models)))
default:
if detail := describeOpenAICompatibilityUpdate(oldEntry, newEntry); detail != "" {
changes = append(changes, fmt.Sprintf("provider updated: %s %s", label, detail))
}
}
}
return changes
}

func describeOpenAICompatibilityUpdate(oldEntry, newEntry config.OpenAICompatibility) string {
oldKeyCount := countAPIKeys(oldEntry)
newKeyCount := countAPIKeys(newEntry)
oldModelCount := countOpenAIModels(oldEntry.Models)
newModelCount := countOpenAIModels(newEntry.Models)
details := make([]string, 0, 2)
if oldKeyCount != newKeyCount {
details = append(details, fmt.Sprintf("api-keys %d -> %d", oldKeyCount, newKeyCount))
}
if oldModelCount != newModelCount {
details = append(details, fmt.Sprintf("models %d -> %d", oldModelCount, newModelCount))
}
if len(details) == 0 {
return ""
}
return "(" + strings.Join(details, ", ") + ")"
}

func countAPIKeys(entry config.OpenAICompatibility) int {
// Prefer new APIKeyEntries format
if len(entry.APIKeyEntries) > 0 {
count := 0
for _, keyEntry := range entry.APIKeyEntries {
if strings.TrimSpace(keyEntry.APIKey) != "" {
count++
}
}
return count
}
// Fall back to legacy APIKeys format
return countNonEmptyStrings(entry.APIKeys)
}

func countNonEmptyStrings(values []string) int {
count := 0
for _, value := range values {
if strings.TrimSpace(value) != "" {
count++
}
}
return count
}

func countOpenAIModels(models []config.OpenAICompatibilityModel) int {
count := 0
for _, model := range models {
name := strings.TrimSpace(model.Name)
alias := strings.TrimSpace(model.Alias)
if name == "" && alias == "" {
continue
}
count++
}
return count
}

func openAICompatKey(entry config.OpenAICompatibility, index int) (string, string) {
name := strings.TrimSpace(entry.Name)
if name != "" {
return "name:" + name, name
}
base := strings.TrimSpace(entry.BaseURL)
if base != "" {
return "base:" + base, base
}
for _, model := range entry.Models {
alias := strings.TrimSpace(model.Alias)
if alias == "" {
alias = strings.TrimSpace(model.Name)
}
if alias != "" {
return "alias:" + alias, alias
}
}
return fmt.Sprintf("index:%d", index), fmt.Sprintf("entry-%d", index+1)
}

@@ -6,7 +6,7 @@ import (
"net/http"
"sync"

"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
"github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
)

// Provider validates credentials for incoming requests.
@@ -23,7 +23,7 @@ type Result struct {
}

// ProviderFactory builds a provider from configuration data.
type ProviderFactory func(cfg *config.AccessProvider, root *config.Config) (Provider, error)
type ProviderFactory func(cfg *config.AccessProvider, root *config.SDKConfig) (Provider, error)

var (
registryMu sync.RWMutex
@@ -40,7 +40,7 @@ func RegisterProvider(typ string, factory ProviderFactory) {
registryMu.Unlock()
}

func buildProvider(cfg *config.AccessProvider, root *config.Config) (Provider, error) {
func BuildProvider(cfg *config.AccessProvider, root *config.SDKConfig) (Provider, error) {
if cfg == nil {
return nil, fmt.Errorf("access: nil provider config")
}
@@ -58,7 +58,7 @@ func buildProvider(cfg *config.AccessProvider, root *config.Config) (Provider, e
}

// BuildProviders constructs providers declared in configuration.
func BuildProviders(root *config.Config) ([]Provider, error) {
func BuildProviders(root *config.SDKConfig) ([]Provider, error) {
if root == nil {
return nil, nil
}
@@ -68,16 +68,15 @@ func BuildProviders(root *config.Config) ([]Provider, error) {
if providerCfg.Type == "" {
continue
}
provider, err := buildProvider(providerCfg, root)
provider, err := BuildProvider(providerCfg, root)
if err != nil {
return nil, err
}
providers = append(providers, provider)
}
if len(providers) == 0 && len(root.APIKeys) > 0 {
config.SyncInlineAPIKeys(root, root.APIKeys)
if providerCfg := root.ConfigAPIKeyProvider(); providerCfg != nil {
provider, err := buildProvider(providerCfg, root)
if len(providers) == 0 {
if inline := config.MakeInlineAPIKeyProvider(root.APIKeys); inline != nil {
provider, err := BuildProvider(inline, root)
if err != nil {
return nil, err
}

@@ -14,10 +14,10 @@ import (
"time"

"github.com/gin-gonic/gin"
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers"
. "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
"github.com/tidwall/gjson"
)

@@ -14,10 +14,10 @@ import (
"time"

"github.com/gin-gonic/gin"
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers"
. "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
log "github.com/sirupsen/logrus"
"github.com/tidwall/gjson"
)
@@ -13,10 +13,10 @@ import (
"time"

"github.com/gin-gonic/gin"
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers"
. "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
)

// GeminiAPIHandler contains the handlers for Gemini API endpoints.
@@ -8,11 +8,11 @@ import (
"net/http"

"github.com/gin-gonic/gin"
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
coreexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
"github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
sdktranslator "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
"golang.org/x/net/context"
)
@@ -45,7 +45,7 @@ type BaseAPIHandler struct {
AuthManager *coreauth.Manager

// Cfg holds the current application configuration.
Cfg *config.Config
Cfg *config.SDKConfig
}

// NewBaseAPIHandlers creates a new API handlers instance.
@@ -57,7 +57,7 @@ type BaseAPIHandler struct {
//
// Returns:
// - *BaseAPIHandler: A new API handlers instance
func NewBaseAPIHandlers(cfg *config.Config, authManager *coreauth.Manager) *BaseAPIHandler {
func NewBaseAPIHandlers(cfg *config.SDKConfig, authManager *coreauth.Manager) *BaseAPIHandler {
return &BaseAPIHandler{
Cfg: cfg,
AuthManager: authManager,
@@ -70,7 +70,7 @@ func NewBaseAPIHandlers(cfg *config.Config, authManager *coreauth.Manager) *Base
// Parameters:
// - clients: The new slice of AI service clients
// - cfg: The new application configuration
func (h *BaseAPIHandler) UpdateClients(cfg *config.Config) { h.Cfg = cfg }
func (h *BaseAPIHandler) UpdateClients(cfg *config.SDKConfig) { h.Cfg = cfg }

// GetAlt extracts the 'alt' parameter from the request query string.
// It checks both 'alt' and '$alt' parameters and returns the appropriate value.
@@ -133,7 +133,7 @@ func (h *BaseAPIHandler) GetContextWithCancel(handler interfaces.APIHandler, c *
// ExecuteWithAuthManager executes a non-streaming request via the core auth manager.
// This path is the only supported execution route.
func (h *BaseAPIHandler) ExecuteWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) ([]byte, *interfaces.ErrorMessage) {
providers := util.GetProviderName(modelName, h.Cfg)
providers := util.GetProviderName(modelName)
if len(providers) == 0 {
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
}
@@ -157,7 +157,7 @@ func (h *BaseAPIHandler) ExecuteCountWithAuthManager(ctx context.Context, handle
// ExecuteCountWithAuthManager executes a non-streaming request via the core auth manager.
// This path is the only supported execution route.
func (h *BaseAPIHandler) ExecuteCountWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) ([]byte, *interfaces.ErrorMessage) {
providers := util.GetProviderName(modelName, h.Cfg)
providers := util.GetProviderName(modelName)
if len(providers) == 0 {
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
}
@@ -181,7 +181,7 @@ func (h *BaseAPIHandler) ExecuteCountWithAuthManager(ctx context.Context, handle
// ExecuteStreamWithAuthManager executes a streaming request via the core auth manager.
// This path is the only supported execution route.
func (h *BaseAPIHandler) ExecuteStreamWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) (<-chan []byte, <-chan *interfaces.ErrorMessage) {
providers := util.GetProviderName(modelName, h.Cfg)
providers := util.GetProviderName(modelName)
if len(providers) == 0 {
errChan := make(chan *interfaces.ErrorMessage, 1)
errChan <- &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
@@ -14,10 +14,10 @@ import (
"time"

"github.com/gin-gonic/gin"
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers"
. "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
)
@@ -14,10 +14,10 @@ import (
"time"

"github.com/gin-gonic/gin"
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers"
. "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
"github.com/tidwall/gjson"
)

@@ -13,6 +13,7 @@ import (
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
log "github.com/sirupsen/logrus"
)

@@ -35,7 +36,7 @@ func (a *ClaudeAuthenticator) RefreshLead() *time.Duration {
return &d
}

func (a *ClaudeAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*TokenRecord, error) {
func (a *ClaudeAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, error) {
if cfg == nil {
return nil, fmt.Errorf("cliproxy auth: configuration is required")
}
@@ -127,7 +128,7 @@ func (a *ClaudeAuthenticator) Login(ctx context.Context, cfg *config.Config, opt
}

fileName := fmt.Sprintf("claude-%s.json", tokenStorage.Email)
metadata := map[string]string{
metadata := map[string]any{
"email": tokenStorage.Email,
}

@@ -136,7 +137,8 @@ func (a *ClaudeAuthenticator) Login(ctx context.Context, cfg *config.Config, opt
fmt.Println("Claude API key obtained and stored")
}

return &TokenRecord{
return &coreauth.Auth{
ID: fileName,
Provider: a.Provider(),
FileName: fileName,
Storage: tokenStorage,

@@ -13,6 +13,7 @@ import (
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
log "github.com/sirupsen/logrus"
)

@@ -35,7 +36,7 @@ func (a *CodexAuthenticator) RefreshLead() *time.Duration {
return &d
}

func (a *CodexAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*TokenRecord, error) {
func (a *CodexAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, error) {
if cfg == nil {
return nil, fmt.Errorf("cliproxy auth: configuration is required")
}
@@ -126,7 +127,7 @@ func (a *CodexAuthenticator) Login(ctx context.Context, cfg *config.Config, opts
}

fileName := fmt.Sprintf("codex-%s.json", tokenStorage.Email)
metadata := map[string]string{
metadata := map[string]any{
"email": tokenStorage.Email,
}

@@ -135,7 +136,8 @@ func (a *CodexAuthenticator) Login(ctx context.Context, cfg *config.Config, opts
fmt.Println("Codex API key obtained and stored")
}

return &TokenRecord{
return &coreauth.Auth{
ID: fileName,
Provider: a.Provider(),
FileName: fileName,
Storage: tokenStorage,

@@ -11,7 +11,6 @@ import (
"sync"
"time"

"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)

@@ -35,27 +34,71 @@ func (s *FileTokenStore) SetBaseDir(dir string) {
s.dirLock.Unlock()
}

// Save writes the token storage to the resolved file path.
func (s *FileTokenStore) Save(ctx context.Context, cfg *config.Config, record *TokenRecord) (string, error) {
if record == nil || record.Storage == nil {
return "", fmt.Errorf("cliproxy auth: token record is incomplete")
// Save persists token storage and metadata to the resolved auth file path.
func (s *FileTokenStore) Save(ctx context.Context, auth *cliproxyauth.Auth) (string, error) {
if auth == nil {
return "", fmt.Errorf("auth filestore: auth is nil")
}
target := strings.TrimSpace(record.FileName)
if target == "" {
return "", fmt.Errorf("cliproxy auth: missing file name for provider %s", record.Provider)
}
if !filepath.IsAbs(target) {
baseDir := s.baseDirFromConfig(cfg)
if baseDir != "" {
target = filepath.Join(baseDir, target)
}
}
s.mu.Lock()
defer s.mu.Unlock()
if err := record.Storage.SaveTokenToFile(target); err != nil {

path, err := s.resolveAuthPath(auth)
if err != nil {
return "", err
}
return target, nil
if path == "" {
return "", fmt.Errorf("auth filestore: missing file path attribute for %s", auth.ID)
}

if auth.Disabled {
if _, statErr := os.Stat(path); os.IsNotExist(statErr) {
return "", nil
}
}

s.mu.Lock()
defer s.mu.Unlock()

if err = os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
return "", fmt.Errorf("auth filestore: create dir failed: %w", err)
}

switch {
case auth.Storage != nil:
if err = auth.Storage.SaveTokenToFile(path); err != nil {
return "", err
}
case auth.Metadata != nil:
raw, errMarshal := json.Marshal(auth.Metadata)
if errMarshal != nil {
return "", fmt.Errorf("auth filestore: marshal metadata failed: %w", errMarshal)
}
if existing, errRead := os.ReadFile(path); errRead == nil {
if jsonEqual(existing, raw) {
return path, nil
}
} else if errRead != nil && !os.IsNotExist(errRead) {
return "", fmt.Errorf("auth filestore: read existing failed: %w", errRead)
}
tmp := path + ".tmp"
if errWrite := os.WriteFile(tmp, raw, 0o600); errWrite != nil {
return "", fmt.Errorf("auth filestore: write temp failed: %w", errWrite)
}
if errRename := os.Rename(tmp, path); errRename != nil {
return "", fmt.Errorf("auth filestore: rename failed: %w", errRename)
}
default:
return "", fmt.Errorf("auth filestore: nothing to persist for %s", auth.ID)
}

if auth.Attributes == nil {
auth.Attributes = make(map[string]string)
}
auth.Attributes["path"] = path

if strings.TrimSpace(auth.FileName) == "" {
auth.FileName = auth.ID
}

return path, nil
}

// List enumerates all auth JSON files under the configured directory.
@@ -90,50 +133,6 @@ func (s *FileTokenStore) List(ctx context.Context) ([]*cliproxyauth.Auth, error)
return entries, nil
}

// SaveAuth writes the auth metadata back to its source file location.
func (s *FileTokenStore) SaveAuth(ctx context.Context, auth *cliproxyauth.Auth) error {
if auth == nil {
return fmt.Errorf("auth filestore: auth is nil")
}
path, err := s.resolveAuthPath(auth)
if err != nil {
return err
}
if path == "" {
return fmt.Errorf("auth filestore: missing file path attribute for %s", auth.ID)
}
// If the auth has been disabled and the original file was removed, avoid recreating it on disk.
if auth.Disabled {
if _, statErr := os.Stat(path); statErr != nil {
if os.IsNotExist(statErr) {
return nil
}
}
}
s.mu.Lock()
defer s.mu.Unlock()
if err = os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
return fmt.Errorf("auth filestore: create dir failed: %w", err)
}
raw, err := json.Marshal(auth.Metadata)
if err != nil {
return fmt.Errorf("auth filestore: marshal metadata failed: %w", err)
}
if existing, errRead := os.ReadFile(path); errRead == nil {
if jsonEqual(existing, raw) {
return nil
}
}
tmp := path + ".tmp"
if err = os.WriteFile(tmp, raw, 0o600); err != nil {
return fmt.Errorf("auth filestore: write temp failed: %w", err)
}
if err = os.Rename(tmp, path); err != nil {
return fmt.Errorf("auth filestore: rename failed: %w", err)
}
return nil
}

// Delete removes the auth file.
func (s *FileTokenStore) Delete(ctx context.Context, id string) error {
id = strings.TrimSpace(id)
@@ -185,6 +184,7 @@ func (s *FileTokenStore) readAuthFile(path, baseDir string) (*cliproxyauth.Auth,
auth := &cliproxyauth.Auth{
ID: id,
Provider: provider,
FileName: id,
Label: s.labelFor(metadata),
Status: cliproxyauth.StatusActive,
Attributes: map[string]string{"path": path},
@@ -220,6 +220,15 @@ func (s *FileTokenStore) resolveAuthPath(auth *cliproxyauth.Auth) (string, error
return p, nil
}
}
if fileName := strings.TrimSpace(auth.FileName); fileName != "" {
if filepath.IsAbs(fileName) {
return fileName, nil
}
if dir := s.baseDirSnapshot(); dir != "" {
return filepath.Join(dir, fileName), nil
}
return fileName, nil
}
if auth.ID == "" {
return "", fmt.Errorf("auth filestore: missing id")
}
@@ -249,13 +258,6 @@ func (s *FileTokenStore) labelFor(metadata map[string]any) string {
return ""
}

func (s *FileTokenStore) baseDirFromConfig(cfg *config.Config) string {
if cfg != nil && strings.TrimSpace(cfg.AuthDir) != "" {
return strings.TrimSpace(cfg.AuthDir)
}
return s.baseDirSnapshot()
}

func (s *FileTokenStore) baseDirSnapshot() string {
s.dirLock.RLock()
defer s.dirLock.RUnlock()

@@ -1,29 +0,0 @@
package auth

import (
"context"
"fmt"
"time"

"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
)

// GeminiWebAuthenticator provides a minimal wrapper so core components can treat
// Gemini Web credentials via the shared Authenticator contract.
type GeminiWebAuthenticator struct{}

func NewGeminiWebAuthenticator() *GeminiWebAuthenticator { return &GeminiWebAuthenticator{} }

func (a *GeminiWebAuthenticator) Provider() string { return "gemini-web" }

func (a *GeminiWebAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*TokenRecord, error) {
_ = ctx
_ = cfg
_ = opts
return nil, fmt.Errorf("gemini-web authenticator does not support scripted login; use CLI --gemini-web-auth")
}

func (a *GeminiWebAuthenticator) RefreshLead() *time.Duration {
d := 15 * time.Minute
return &d
}
@@ -8,6 +8,7 @@ import (
"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/gemini"
// legacy client removed
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)

// GeminiAuthenticator implements the login flow for Google Gemini CLI accounts.
@@ -26,7 +27,7 @@ func (a *GeminiAuthenticator) RefreshLead() *time.Duration {
return nil
}

func (a *GeminiAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*TokenRecord, error) {
func (a *GeminiAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, error) {
if cfg == nil {
return nil, fmt.Errorf("cliproxy auth: configuration is required")
}
@@ -51,14 +52,15 @@ func (a *GeminiAuthenticator) Login(ctx context.Context, cfg *config.Config, opt
// Skip onboarding here; rely on upstream configuration

fileName := fmt.Sprintf("%s-%s.json", ts.Email, ts.ProjectID)
metadata := map[string]string{
metadata := map[string]any{
"email": ts.Email,
"project_id": ts.ProjectID,
}

fmt.Println("Gemini authentication successful")

return &TokenRecord{
return &coreauth.Auth{
ID: fileName,
Provider: a.Provider(),
FileName: fileName,
Storage: &ts,

sdk/auth/iflow.go (Normal file, 131 lines added)
@@ -0,0 +1,131 @@
package auth

import (
"context"
"fmt"
"strings"
"time"

"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/iflow"
"github.com/router-for-me/CLIProxyAPI/v6/internal/browser"
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
log "github.com/sirupsen/logrus"
)

// IFlowAuthenticator implements the OAuth login flow for iFlow accounts.
type IFlowAuthenticator struct{}

// NewIFlowAuthenticator constructs a new authenticator instance.
func NewIFlowAuthenticator() *IFlowAuthenticator { return &IFlowAuthenticator{} }

// Provider returns the provider key for the authenticator.
func (a *IFlowAuthenticator) Provider() string { return "iflow" }

// RefreshLead indicates how soon before expiry a refresh should be attempted.
func (a *IFlowAuthenticator) RefreshLead() *time.Duration {
d := 3 * time.Hour
return &d
}

// Login performs the OAuth code flow using a local callback server.
func (a *IFlowAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, error) {
if cfg == nil {
return nil, fmt.Errorf("cliproxy auth: configuration is required")
}
if ctx == nil {
ctx = context.Background()
}
if opts == nil {
opts = &LoginOptions{}
}

authSvc := iflow.NewIFlowAuth(cfg)

oauthServer := iflow.NewOAuthServer(iflow.CallbackPort)
if err := oauthServer.Start(); err != nil {
if strings.Contains(err.Error(), "already in use") {
return nil, fmt.Errorf("iflow authentication server port in use: %w", err)
}
return nil, fmt.Errorf("iflow authentication server failed: %w", err)
}
defer func() {
stopCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
if stopErr := oauthServer.Stop(stopCtx); stopErr != nil {
log.Warnf("iflow oauth server stop error: %v", stopErr)
}
}()

state, err := misc.GenerateRandomState()
if err != nil {
return nil, fmt.Errorf("iflow auth: failed to generate state: %w", err)
}

authURL, redirectURI := authSvc.AuthorizationURL(state, iflow.CallbackPort)

if !opts.NoBrowser {
fmt.Println("Opening browser for iFlow authentication")
if !browser.IsAvailable() {
log.Warn("No browser available; please open the URL manually")
util.PrintSSHTunnelInstructions(iflow.CallbackPort)
fmt.Printf("Visit the following URL to continue authentication:\n%s\n", authURL)
} else if err = browser.OpenURL(authURL); err != nil {
log.Warnf("Failed to open browser automatically: %v", err)
util.PrintSSHTunnelInstructions(iflow.CallbackPort)
fmt.Printf("Visit the following URL to continue authentication:\n%s\n", authURL)
}
} else {
util.PrintSSHTunnelInstructions(iflow.CallbackPort)
fmt.Printf("Visit the following URL to continue authentication:\n%s\n", authURL)
}

fmt.Println("Waiting for iFlow authentication callback...")

result, err := oauthServer.WaitForCallback(5 * time.Minute)
if err != nil {
return nil, fmt.Errorf("iflow auth: callback wait failed: %w", err)
}
if result.Error != "" {
return nil, fmt.Errorf("iflow auth: provider returned error %s", result.Error)
}
if result.State != state {
return nil, fmt.Errorf("iflow auth: state mismatch")
}

tokenData, err := authSvc.ExchangeCodeForTokens(ctx, result.Code, redirectURI)
if err != nil {
return nil, fmt.Errorf("iflow authentication failed: %w", err)
}

tokenStorage := authSvc.CreateTokenStorage(tokenData)

email := strings.TrimSpace(tokenStorage.Email)
if email == "" {
return nil, fmt.Errorf("iflow authentication failed: missing account identifier")
}

fileName := fmt.Sprintf("iflow-%s.json", email)
metadata := map[string]any{
"email": email,
"api_key": tokenStorage.APIKey,
"access_token": tokenStorage.AccessToken,
"refresh_token": tokenStorage.RefreshToken,
"expired": tokenStorage.Expire,
}

fmt.Println("iFlow authentication successful")

return &coreauth.Auth{
ID: fileName,
Provider: a.Provider(),
FileName: fileName,
Storage: tokenStorage,
Metadata: metadata,
Attributes: map[string]string{
"api_key": tokenStorage.APIKey,
},
}, nil
}
@@ -5,8 +5,8 @@ import (
"errors"
"time"

baseauth "github.com/router-for-me/CLIProxyAPI/v6/internal/auth"
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)

var ErrRefreshNotSupported = errors.New("cliproxy auth: refresh not supported")
@@ -20,22 +20,9 @@ type LoginOptions struct {
Prompt func(prompt string) (string, error)
}

// TokenRecord represents credential material produced by an authenticator.
type TokenRecord struct {
Provider string
FileName string
Storage baseauth.TokenStorage
Metadata map[string]string
}

// TokenStore persists token records.
type TokenStore interface {
Save(ctx context.Context, cfg *config.Config, record *TokenRecord) (string, error)
}

// Authenticator manages login and optional refresh flows for a provider.
type Authenticator interface {
Provider() string
Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*TokenRecord, error)
Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, error)
RefreshLead() *time.Duration
}

@@ -5,17 +5,18 @@ import (
"fmt"

"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)

// Manager aggregates authenticators and coordinates persistence via a token store.
type Manager struct {
authenticators map[string]Authenticator
store TokenStore
store coreauth.Store
}

// NewManager constructs a manager with the provided token store and authenticators.
// If store is nil, the caller must set it later using SetStore.
func NewManager(store TokenStore, authenticators ...Authenticator) *Manager {
func NewManager(store coreauth.Store, authenticators ...Authenticator) *Manager {
mgr := &Manager{
authenticators: make(map[string]Authenticator),
store: store,
@@ -38,12 +39,12 @@ func (m *Manager) Register(a Authenticator) {
}

// SetStore updates the token store used for persistence.
func (m *Manager) SetStore(store TokenStore) {
func (m *Manager) SetStore(store coreauth.Store) {
m.store = store
}

// Login executes the provider login flow and persists the resulting token record.
func (m *Manager) Login(ctx context.Context, provider string, cfg *config.Config, opts *LoginOptions) (*TokenRecord, string, error) {
// Login executes the provider login flow and persists the resulting auth record.
func (m *Manager) Login(ctx context.Context, provider string, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, string, error) {
auth, ok := m.authenticators[provider]
if !ok {
return nil, "", fmt.Errorf("cliproxy auth: authenticator %s not registered", provider)
@@ -61,7 +62,13 @@ func (m *Manager) Login(ctx context.Context, provider string, cfg *config.Config
return record, "", nil
}

savedPath, err := m.store.Save(ctx, cfg, record)
if cfg != nil {
if dirSetter, ok := m.store.(interface{ SetBaseDir(string) }); ok {
dirSetter.SetBaseDir(cfg.AuthDir)
}
}

savedPath, err := m.store.Save(ctx, record)
if err != nil {
return record, "", err
}

@@ -10,6 +10,7 @@ import (
"github.com/router-for-me/CLIProxyAPI/v6/internal/browser"
// legacy client removed
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
log "github.com/sirupsen/logrus"
)

@@ -30,7 +31,7 @@ func (a *QwenAuthenticator) RefreshLead() *time.Duration {
return &d
}

func (a *QwenAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*TokenRecord, error) {
func (a *QwenAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, error) {
if cfg == nil {
return nil, fmt.Errorf("cliproxy auth: configuration is required")
}
@@ -97,13 +98,14 @@ func (a *QwenAuthenticator) Login(ctx context.Context, cfg *config.Config, opts
// no legacy client construction

fileName := fmt.Sprintf("qwen-%s.json", tokenStorage.Email)
metadata := map[string]string{
metadata := map[string]any{
"email": tokenStorage.Email,
}

fmt.Println("Qwen authentication successful")

return &TokenRecord{
return &coreauth.Auth{
ID: fileName,
Provider: a.Provider(),
FileName: fileName,
Storage: tokenStorage,

@@ -10,9 +10,9 @@ func init() {
registerRefreshLead("codex", func() Authenticator { return NewCodexAuthenticator() })
registerRefreshLead("claude", func() Authenticator { return NewClaudeAuthenticator() })
registerRefreshLead("qwen", func() Authenticator { return NewQwenAuthenticator() })
registerRefreshLead("iflow", func() Authenticator { return NewIFlowAuthenticator() })
registerRefreshLead("gemini", func() Authenticator { return NewGeminiAuthenticator() })
registerRefreshLead("gemini-cli", func() Authenticator { return NewGeminiAuthenticator() })
registerRefreshLead("gemini-web", func() Authenticator { return NewGeminiWebAuthenticator() })
}

func registerRefreshLead(provider string, factory func() Authenticator) {

@@ -1,31 +1,35 @@
package auth

import "sync"
import (
"sync"

coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)

var (
storeMu sync.RWMutex
registeredTokenStore TokenStore
storeMu sync.RWMutex
registeredStore coreauth.Store
)

// RegisterTokenStore sets the global token store used by the authentication helpers.
func RegisterTokenStore(store TokenStore) {
func RegisterTokenStore(store coreauth.Store) {
storeMu.Lock()
registeredTokenStore = store
registeredStore = store
storeMu.Unlock()
}

// GetTokenStore returns the globally registered token store.
func GetTokenStore() TokenStore {
func GetTokenStore() coreauth.Store {
storeMu.RLock()
s := registeredTokenStore
s := registeredStore
storeMu.RUnlock()
if s != nil {
return s
}
storeMu.Lock()
defer storeMu.Unlock()
if registeredTokenStore == nil {
registeredTokenStore = NewFileTokenStore()
if registeredStore == nil {
registeredStore = NewFileTokenStore()
}
return registeredTokenStore
return registeredStore
}

@@ -285,9 +285,6 @@ func (m *Manager) executeWithProvider(ctx context.Context, provider string, req
log.Debugf("Use API key %s for model %s", util.HideAPIKey(accountInfo), req.Model)
} else if accountType == "oauth" {
log.Debugf("Use OAuth %s for model %s", accountInfo, req.Model)
} else if accountType == "cookie" {
// Only Gemini Web uses cookie; print stable account label as-is.
log.Debugf("Use Cookie %s for model %s", accountInfo, req.Model)
}

tried[auth.ID] = struct{}{}
@@ -333,8 +330,6 @@ func (m *Manager) executeCountWithProvider(ctx context.Context, provider string,
log.Debugf("Use API key %s for model %s", util.HideAPIKey(accountInfo), req.Model)
} else if accountType == "oauth" {
log.Debugf("Use OAuth %s for model %s", accountInfo, req.Model)
} else if accountType == "cookie" {
log.Debugf("Use Cookie %s for model %s", accountInfo, req.Model)
}

tried[auth.ID] = struct{}{}
@@ -380,8 +375,6 @@ func (m *Manager) executeStreamWithProvider(ctx context.Context, provider string
log.Debugf("Use API key %s for model %s", util.HideAPIKey(accountInfo), req.Model)
} else if accountType == "oauth" {
log.Debugf("Use OAuth %s for model %s", accountInfo, req.Model)
} else if accountType == "cookie" {
log.Debugf("Use Cookie %s for model %s", accountInfo, req.Model)
}

tried[auth.ID] = struct{}{}
@@ -787,27 +780,31 @@ func (m *Manager) pickNext(ctx context.Context, provider, model string, opts cli
return nil, nil, &Error{Code: "executor_not_found", Message: "executor not registered"}
}
candidates := make([]*Auth, 0, len(m.auths))
for _, auth := range m.auths {
if auth.Provider != provider || auth.Disabled {
for _, candidate := range m.auths {
if candidate.Provider != provider || candidate.Disabled {
continue
}
if _, used := tried[auth.ID]; used {
if _, used := tried[candidate.ID]; used {
continue
}
candidates = append(candidates, auth.Clone())
candidates = append(candidates, candidate)
}
m.mu.RUnlock()
if len(candidates) == 0 {
m.mu.RUnlock()
return nil, nil, &Error{Code: "auth_not_found", Message: "no auth available"}
}
auth, errPick := m.selector.Pick(ctx, provider, model, opts, candidates)
selected, errPick := m.selector.Pick(ctx, provider, model, opts, candidates)
if errPick != nil {
m.mu.RUnlock()
return nil, nil, errPick
}
if auth == nil {
if selected == nil {
m.mu.RUnlock()
return nil, nil, &Error{Code: "auth_not_found", Message: "selector returned no auth"}
}
return auth, executor, nil
authCopy := selected.Clone()
m.mu.RUnlock()
return authCopy, executor, nil
}

func (m *Manager) persist(ctx context.Context, auth *Auth) error {
@@ -818,7 +815,8 @@ func (m *Manager) persist(ctx context.Context, auth *Auth) error {
if auth.Metadata == nil {
return nil
}
return m.store.SaveAuth(ctx, auth)
_, err := m.store.Save(ctx, auth)
return err
}

// StartAutoRefresh launches a background loop that evaluates auth freshness

@@ -2,6 +2,7 @@ package auth

import (
"context"
"sort"
"sync"
"time"

@@ -36,6 +37,10 @@ func (s *RoundRobinSelector) Pick(ctx context.Context, provider, model string, o
if len(available) == 0 {
return nil, &Error{Code: "auth_unavailable", Message: "no auth available"}
}
// Make round-robin deterministic even if caller's candidate order is unstable.
if len(available) > 1 {
sort.Slice(available, func(i, j int) bool { return available[i].ID < available[j].ID })
}
key := provider + ":" + model
s.mu.Lock()
index := s.cursors[key]

@@ -6,8 +6,8 @@ import "context"
type Store interface {
// List returns all auth records stored in the backend.
List(ctx context.Context) ([]*Auth, error)
// SaveAuth persists the provided auth record, replacing any existing one with same ID.
SaveAuth(ctx context.Context, auth *Auth) error
// Save persists the provided auth record, replacing any existing one with same ID.
Save(ctx context.Context, auth *Auth) (string, error)
// Delete removes the auth record identified by id.
Delete(ctx context.Context, id string) error
}

Some files were not shown because too many files have changed in this diff.