Mirror of https://github.com/router-for-me/CLIProxyAPI.git, synced 2026-02-03 13:00:52 +08:00
Compare commits
129 Commits
SHA1:
6d822cf309 d03a75dba5 9ff21b67a8 5546c9d872 fb760718e2 d6721e4e75 514f5a8ad4 a68e0dd8aa
75d7763c5c 9bb7df7af7 43665cb649 39337627b9 4bc8a52771 b727e4e12e 93588919e5 31659c790d
c62ecc2442 b1fee5d266 4a10cfacc3 bbdd68a8b4 ac3ecd567c 4fd70d5f1a 49c52a01b0 389c8ecef1
f1f24f542a 8ca041cfcf eac8b1a27f c8029b7166 64f4c18fea 9abcaf177f b839e351c4 6b413a299b
4657c98821 dd1e0da155 cf5476eb23 cf9a748159 2e328dd462 edd4b4d97f 608d745159 fd795caf76
9e2d76f3ce ae646fba4b 2eef6875e9 12c09f1a46 4a31f763af 6629cadb87 41975c9e2b c589c0d998
7c157d6ab1 7c642bee09 beba2a7aa0 f2201dabfa 108dcb7f70 8858e07d8b d33a89b89f 1d70336a91
6080527e9e 82187bffba f4977e5ef6 832268cae7 f6de2a709f de796ac1c2 6b5aefc27a 5010b09329
368fd27393 b2ca49376c 6d98a71796 1c91823308 352a67857b 644a3ad220 19c32f58b2 d01c4904ff
8cfa2282ef 8e88a61021 ad4d045101 5888e04654 19b10cb894 aa25820698 9e3b84939f 1dbb930660
6557d9b728 250628dae3 da72ac1f6d f9a170a3c4 88f06fc305 562a49a194 6136a77eb3 afff9216ea
b56edd4db0 d512f20c56 57c9ba49f4 40255b128e 6524d3a51e 92c8cd7c72 c678ca21d5 6d4b43dd7a
b0f2ad7cfe cd0b1be46c 08856a97fb b6d5ce2d4d 0f55e550cf e1de04230f a887a337a5 2717ba3e50
63af4c551d c675cf5e72 4fd95ead3b 514add4b85 3ca01b60a5 39e398ae02 9bbe64489f 7e54156f2f
9b80820b17 e836b4ac10 f228a4dcca 3297f75edd 25ba042493 483229779c 5a50856fc1 cf734f7e7b
72325f792c 9761ac5045 8fa52e9d31 80b6a95eba 96cebd2a35 fc103f6c17 a45d2109f3 7a30e65175
c63dc7fe2f

.gitignore (vendored, 3 changes)

@@ -10,5 +10,8 @@ auths/*
 .serena/*
 AGENTS.md
 CLAUDE.md
+GEMINI.md
 *.exe
 temp/*
+cli-proxy-api
+static/*

@@ -95,7 +95,7 @@ If a plaintext key is detected in the config at startup, it will be bcrypt‑has
 ```
 - Response:
 ```json
-{"debug":true,"proxy-url":"","api-keys":["1...5","JS...W"],"quota-exceeded":{"switch-project":true,"switch-preview-model":true},"generative-language-api-key":["AI...01", "AI...02", "AI...03"],"request-log":true,"request-retry":3,"claude-api-key":[{"api-key":"cr...56","base-url":"https://example.com/api"},{"api-key":"cr...e3","base-url":"http://example.com:3000/api"},{"api-key":"sk-...q2","base-url":"https://example.com"}],"codex-api-key":[{"api-key":"sk...01","base-url":"https://example/v1"}],"openai-compatibility":[{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":["sk...01"],"models":[{"name":"moonshotai/kimi-k2:free","alias":"kimi-k2"}]},{"name":"iflow","base-url":"https://apis.iflow.cn/v1","api-keys":["sk...7e"],"models":[{"name":"deepseek-v3.1","alias":"deepseek-v3.1"},{"name":"glm-4.5","alias":"glm-4.5"},{"name":"kimi-k2","alias":"kimi-k2"}]}],"allow-localhost-unauthenticated":true}
+{"debug":true,"proxy-url":"","api-keys":["1...5","JS...W"],"quota-exceeded":{"switch-project":true,"switch-preview-model":true},"generative-language-api-key":["AI...01","AI...02","AI...03"],"request-log":true,"request-retry":3,"claude-api-key":[{"api-key":"cr...56","base-url":"https://example.com/api","proxy-url":"socks5://proxy.example.com:1080"},{"api-key":"cr...e3","base-url":"http://example.com:3000/api","proxy-url":""},{"api-key":"sk-...q2","base-url":"https://example.com","proxy-url":""}],"codex-api-key":[{"api-key":"sk...01","base-url":"https://example/v1","proxy-url":""}],"openai-compatibility":[{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-key-entries":[{"api-key":"sk...01","proxy-url":""}],"models":[{"name":"moonshotai/kimi-k2:free","alias":"kimi-k2"}]},{"name":"iflow","base-url":"https://apis.iflow.cn/v1","api-key-entries":[{"api-key":"sk...7e","proxy-url":"socks5://proxy.example.com:1080"}],"models":[{"name":"deepseek-v3.1","alias":"deepseek-v3.1"},{"name":"glm-4.5","alias":"glm-4.5"},{"name":"kimi-k2","alias":"kimi-k2"}]}]}
 ```

 ### Debug
@@ -335,14 +335,14 @@ These endpoints update the inline `config-api-key` provider inside the `auth.pro
 ```
 - Response:
 ```json
-{ "codex-api-key": [ { "api-key": "sk-a", "base-url": "" } ] }
+{ "codex-api-key": [ { "api-key": "sk-a", "base-url": "", "proxy-url": "" } ] }
 ```
 - PUT `/codex-api-key` — Replace the list
 - Request:
 ```bash
 curl -X PUT -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '[{"api-key":"sk-a"},{"api-key":"sk-b","base-url":"https://c.example.com"}]' \
+-d '[{"api-key":"sk-a","proxy-url":"socks5://proxy.example.com:1080"},{"api-key":"sk-b","base-url":"https://c.example.com","proxy-url":""}]' \
 http://localhost:8317/v0/management/codex-api-key
 ```
 - Response:
@@ -354,14 +354,14 @@ These endpoints update the inline `config-api-key` provider inside the `auth.pro
 ```bash
 curl -X PATCH -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '{"index":1,"value":{"api-key":"sk-b2","base-url":"https://c.example.com"}}' \
+-d '{"index":1,"value":{"api-key":"sk-b2","base-url":"https://c.example.com","proxy-url":""}}' \
 http://localhost:8317/v0/management/codex-api-key
 ```
 - Request (by match):
 ```bash
 curl -X PATCH -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '{"match":"sk-a","value":{"api-key":"sk-a","base-url":""}}' \
+-d '{"match":"sk-a","value":{"api-key":"sk-a","base-url":"","proxy-url":"socks5://proxy.example.com:1080"}}' \
 http://localhost:8317/v0/management/codex-api-key
 ```
 - Response:
@@ -428,29 +428,6 @@ These endpoints update the inline `config-api-key` provider inside the `auth.pro
 { "status": "ok" }
 ```

-### Allow Localhost Unauthenticated
-- GET `/allow-localhost-unauthenticated` — Get boolean
-- Request:
-```bash
-curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/allow-localhost-unauthenticated
-```
-- Response:
-```json
-{ "allow-localhost-unauthenticated": false }
-```
-- PUT/PATCH `/allow-localhost-unauthenticated` — Set boolean
-- Request:
-```bash
-curl -X PUT -H 'Content-Type: application/json' \
--H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '{"value":true}' \
-http://localhost:8317/v0/management/allow-localhost-unauthenticated
-```
-- Response:
-```json
-{ "status": "ok" }
-```
-
 ### Claude API KEY (object array)
 - GET `/claude-api-key` — List all
 - Request:
@@ -459,14 +436,14 @@ These endpoints update the inline `config-api-key` provider inside the `auth.pro
 ```
 - Response:
 ```json
-{ "claude-api-key": [ { "api-key": "sk-a", "base-url": "" } ] }
+{ "claude-api-key": [ { "api-key": "sk-a", "base-url": "", "proxy-url": "" } ] }
 ```
 - PUT `/claude-api-key` — Replace the list
 - Request:
 ```bash
 curl -X PUT -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '[{"api-key":"sk-a"},{"api-key":"sk-b","base-url":"https://c.example.com"}]' \
+-d '[{"api-key":"sk-a","proxy-url":"socks5://proxy.example.com:1080"},{"api-key":"sk-b","base-url":"https://c.example.com","proxy-url":""}]' \
 http://localhost:8317/v0/management/claude-api-key
 ```
 - Response:
@@ -478,14 +455,14 @@ These endpoints update the inline `config-api-key` provider inside the `auth.pro
 ```bash
 curl -X PATCH -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '{"index":1,"value":{"api-key":"sk-b2","base-url":"https://c.example.com"}}' \
+-d '{"index":1,"value":{"api-key":"sk-b2","base-url":"https://c.example.com","proxy-url":""}}' \
 http://localhost:8317/v0/management/claude-api-key
 ```
 - Request (by match):
 ```bash
 curl -X PATCH -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '{"match":"sk-a","value":{"api-key":"sk-a","base-url":""}}' \
+-d '{"match":"sk-a","value":{"api-key":"sk-a","base-url":"","proxy-url":"socks5://proxy.example.com:1080"}}' \
 http://localhost:8317/v0/management/claude-api-key
 ```
 - Response:
@@ -514,14 +491,14 @@ These endpoints update the inline `config-api-key` provider inside the `auth.pro
 ```
 - Response:
 ```json
-{ "openai-compatibility": [ { "name": "openrouter", "base-url": "https://openrouter.ai/api/v1", "api-keys": [], "models": [] } ] }
+{ "openai-compatibility": [ { "name": "openrouter", "base-url": "https://openrouter.ai/api/v1", "api-key-entries": [ { "api-key": "sk", "proxy-url": "" } ], "models": [] } ] }
 ```
 - PUT `/openai-compatibility` — Replace the list
 - Request:
 ```bash
 curl -X PUT -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '[{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":["sk"],"models":[{"name":"m","alias":"a"}]}]' \
+-d '[{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-key-entries":[{"api-key":"sk","proxy-url":""}],"models":[{"name":"m","alias":"a"}]}]' \
 http://localhost:8317/v0/management/openai-compatibility
 ```
 - Response:
@@ -533,20 +510,23 @@ These endpoints update the inline `config-api-key` provider inside the `auth.pro
 ```bash
 curl -X PATCH -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '{"name":"openrouter","value":{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":[],"models":[]}}' \
+-d '{"name":"openrouter","value":{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-key-entries":[{"api-key":"sk","proxy-url":""}],"models":[]}}' \
 http://localhost:8317/v0/management/openai-compatibility
 ```
 - Request (by index):
 ```bash
 curl -X PATCH -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '{"index":0,"value":{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":[],"models":[]}}' \
+-d '{"index":0,"value":{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-key-entries":[{"api-key":"sk","proxy-url":""}],"models":[]}}' \
 http://localhost:8317/v0/management/openai-compatibility
 ```
 - Response:
 ```json
 { "status": "ok" }
 ```

+- Notes:
+  - Legacy `api-keys` input remains accepted; keys are migrated into `api-key-entries` automatically so the legacy field will eventually remain empty in responses.
 - DELETE `/openai-compatibility` — Delete (`?name=` or `?index=`)
 - Request (by name):
 ```bash
@@ -664,7 +644,7 @@ These endpoints initiate provider login flows and return a URL to open in a brow
 ```bash
 curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
 -H 'Content-Type: application/json' \
--d '{"secure_1psid": "<__Secure-1PSID>", "secure_1psidts": "<__Secure-1PSIDTS>"}' \
+-d '{"secure_1psid": "<__Secure-1PSID>", "secure_1psidts": "<__Secure-1PSIDTS>", "label": "<LABEL>"}' \
 http://localhost:8317/v0/management/gemini-web-token
 ```
 - Response:
@@ -683,6 +663,17 @@ These endpoints initiate provider login flows and return a URL to open in a brow
 { "status": "ok", "url": "https://..." }
 ```

+- GET `/iflow-auth-url` — Start iFlow login
+- Request:
+```bash
+curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
+http://localhost:8317/v0/management/iflow-auth-url
+```
+- Response:
+```json
+{ "status": "ok", "url": "https://..." }
+```
+
 - GET `/get-auth-status?state=<state>` — Poll OAuth flow status
 - Request:
 ```bash
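
The Notes entry added above says that legacy `api-keys` payloads are still accepted and are migrated into `api-key-entries` automatically. A minimal sketch of how a client could observe that migration; the endpoint and payload shapes are the ones shown in the hunks above, while the migration outcome is only what the note claims:

```bash
# Submit a provider using the legacy api-keys field (the old request shape).
curl -X PUT -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
  -d '[{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":["sk...01"],"models":[{"name":"moonshotai/kimi-k2:free","alias":"kimi-k2"}]}]' \
  http://localhost:8317/v0/management/openai-compatibility

# Read the list back; per the note above, the key should come back under api-key-entries.
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
  http://localhost:8317/v0/management/openai-compatibility
```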

@@ -95,7 +95,7 @@
 ```
 - Response:
 ```json
-{"debug":true,"proxy-url":"","api-keys":["1...5","JS...W"],"quota-exceeded":{"switch-project":true,"switch-preview-model":true},"generative-language-api-key":["AI...01", "AI...02", "AI...03"],"request-log":true,"request-retry":3,"claude-api-key":[{"api-key":"cr...56","base-url":"https://example.com/api"},{"api-key":"cr...e3","base-url":"http://example.com:3000/api"},{"api-key":"sk-...q2","base-url":"https://example.com"}],"codex-api-key":[{"api-key":"sk...01","base-url":"https://example/v1"}],"openai-compatibility":[{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":["sk...01"],"models":[{"name":"moonshotai/kimi-k2:free","alias":"kimi-k2"}]},{"name":"iflow","base-url":"https://apis.iflow.cn/v1","api-keys":["sk...7e"],"models":[{"name":"deepseek-v3.1","alias":"deepseek-v3.1"},{"name":"glm-4.5","alias":"glm-4.5"},{"name":"kimi-k2","alias":"kimi-k2"}]}],"allow-localhost-unauthenticated":true}
+{"debug":true,"proxy-url":"","api-keys":["1...5","JS...W"],"quota-exceeded":{"switch-project":true,"switch-preview-model":true},"generative-language-api-key":["AI...01","AI...02","AI...03"],"request-log":true,"request-retry":3,"claude-api-key":[{"api-key":"cr...56","base-url":"https://example.com/api","proxy-url":"socks5://proxy.example.com:1080"},{"api-key":"cr...e3","base-url":"http://example.com:3000/api","proxy-url":""},{"api-key":"sk-...q2","base-url":"https://example.com","proxy-url":""}],"codex-api-key":[{"api-key":"sk...01","base-url":"https://example/v1","proxy-url":""}],"openai-compatibility":[{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-key-entries":[{"api-key":"sk...01","proxy-url":""}],"models":[{"name":"moonshotai/kimi-k2:free","alias":"kimi-k2"}]},{"name":"iflow","base-url":"https://apis.iflow.cn/v1","api-key-entries":[{"api-key":"sk...7e","proxy-url":"socks5://proxy.example.com:1080"}],"models":[{"name":"deepseek-v3.1","alias":"deepseek-v3.1"},{"name":"glm-4.5","alias":"glm-4.5"},{"name":"kimi-k2","alias":"kimi-k2"}]}]}
 ```

 ### Debug
@@ -335,14 +335,14 @@
 ```
 - Response:
 ```json
-{ "codex-api-key": [ { "api-key": "sk-a", "base-url": "" } ] }
+{ "codex-api-key": [ { "api-key": "sk-a", "base-url": "", "proxy-url": "" } ] }
 ```
 - PUT `/codex-api-key` — Replace the entire list
 - Request:
 ```bash
 curl -X PUT -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '[{"api-key":"sk-a"},{"api-key":"sk-b","base-url":"https://c.example.com"}]' \
+-d '[{"api-key":"sk-a","proxy-url":"socks5://proxy.example.com:1080"},{"api-key":"sk-b","base-url":"https://c.example.com","proxy-url":""}]' \
 http://localhost:8317/v0/management/codex-api-key
 ```
 - Response:
@@ -354,14 +354,14 @@
 ```bash
 curl -X PATCH -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '{"index":1,"value":{"api-key":"sk-b2","base-url":"https://c.example.com"}}' \
+-d '{"index":1,"value":{"api-key":"sk-b2","base-url":"https://c.example.com","proxy-url":""}}' \
 http://localhost:8317/v0/management/codex-api-key
 ```
 - Request (by match):
 ```bash
 curl -X PATCH -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '{"match":"sk-a","value":{"api-key":"sk-a","base-url":""}}' \
+-d '{"match":"sk-a","value":{"api-key":"sk-a","base-url":"","proxy-url":"socks5://proxy.example.com:1080"}}' \
 http://localhost:8317/v0/management/codex-api-key
 ```
 - Response:
@@ -428,29 +428,6 @@
 { "status": "ok" }
 ```

-### Allow Localhost Unauthenticated Access
-- GET `/allow-localhost-unauthenticated` — Get boolean
-- Request:
-```bash
-curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/allow-localhost-unauthenticated
-```
-- Response:
-```json
-{ "allow-localhost-unauthenticated": false }
-```
-- PUT/PATCH `/allow-localhost-unauthenticated` — Set boolean
-- Request:
-```bash
-curl -X PUT -H 'Content-Type: application/json' \
--H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '{"value":true}' \
-http://localhost:8317/v0/management/allow-localhost-unauthenticated
-```
-- Response:
-```json
-{ "status": "ok" }
-```
-
 ### Claude API KEY (object array)
 - GET `/claude-api-key` — List all
 - Request:
@@ -459,14 +436,14 @@
 ```
 - Response:
 ```json
-{ "claude-api-key": [ { "api-key": "sk-a", "base-url": "" } ] }
+{ "claude-api-key": [ { "api-key": "sk-a", "base-url": "", "proxy-url": "" } ] }
 ```
 - PUT `/claude-api-key` — Replace the entire list
 - Request:
 ```bash
 curl -X PUT -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '[{"api-key":"sk-a"},{"api-key":"sk-b","base-url":"https://c.example.com"}]' \
+-d '[{"api-key":"sk-a","proxy-url":"socks5://proxy.example.com:1080"},{"api-key":"sk-b","base-url":"https://c.example.com","proxy-url":""}]' \
 http://localhost:8317/v0/management/claude-api-key
 ```
 - Response:
@@ -478,14 +455,14 @@
 ```bash
 curl -X PATCH -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '{"index":1,"value":{"api-key":"sk-b2","base-url":"https://c.example.com"}}' \
+-d '{"index":1,"value":{"api-key":"sk-b2","base-url":"https://c.example.com","proxy-url":""}}' \
 http://localhost:8317/v0/management/claude-api-key
 ```
 - Request (by match):
 ```bash
 curl -X PATCH -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '{"match":"sk-a","value":{"api-key":"sk-a","base-url":""}}' \
+-d '{"match":"sk-a","value":{"api-key":"sk-a","base-url":"","proxy-url":"socks5://proxy.example.com:1080"}}' \
 http://localhost:8317/v0/management/claude-api-key
 ```
 - Response:
@@ -514,14 +491,14 @@
 ```
 - Response:
 ```json
-{ "openai-compatibility": [ { "name": "openrouter", "base-url": "https://openrouter.ai/api/v1", "api-keys": [], "models": [] } ] }
+{ "openai-compatibility": [ { "name": "openrouter", "base-url": "https://openrouter.ai/api/v1", "api-key-entries": [ { "api-key": "sk", "proxy-url": "" } ], "models": [] } ] }
 ```
 - PUT `/openai-compatibility` — Replace the entire list
 - Request:
 ```bash
 curl -X PUT -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '[{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":["sk"],"models":[{"name":"m","alias":"a"}]}]' \
+-d '[{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-key-entries":[{"api-key":"sk","proxy-url":""}],"models":[{"name":"m","alias":"a"}]}]' \
 http://localhost:8317/v0/management/openai-compatibility
 ```
 - Response:
@@ -533,20 +510,23 @@
 ```bash
 curl -X PATCH -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '{"name":"openrouter","value":{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":[],"models":[]}}' \
+-d '{"name":"openrouter","value":{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-key-entries":[{"api-key":"sk","proxy-url":""}],"models":[]}}' \
 http://localhost:8317/v0/management/openai-compatibility
 ```
 - Request (by index):
 ```bash
 curl -X PATCH -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
--d '{"index":0,"value":{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":[],"models":[]}}' \
+-d '{"index":0,"value":{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-key-entries":[{"api-key":"sk","proxy-url":""}],"models":[]}}' \
 http://localhost:8317/v0/management/openai-compatibility
 ```
 - Response:
 ```json
 { "status": "ok" }
 ```

+- Notes:
+  - The legacy `api-keys` field is still accepted, but all keys are migrated into `api-key-entries` automatically, so `api-keys` in responses will gradually remain empty.
 - DELETE `/openai-compatibility` — Delete (`?name=` or `?index=`)
 - Request (by name):
 ```bash
@@ -664,7 +644,7 @@
 ```bash
 curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
 -H 'Content-Type: application/json' \
--d '{"secure_1psid": "<__Secure-1PSID>", "secure_1psidts": "<__Secure-1PSIDTS>"}' \
+-d '{"secure_1psid": "<__Secure-1PSID>", "secure_1psidts": "<__Secure-1PSIDTS>", "label": "<LABEL>"}' \
 http://localhost:8317/v0/management/gemini-web-token
 ```
 - Response:
@@ -683,6 +663,17 @@
 { "status": "ok", "url": "https://..." }
 ```

+- GET `/iflow-auth-url` — Start iFlow login
+- Request:
+```bash
+curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
+http://localhost:8317/v0/management/iflow-auth-url
+```
+- Response:
+```json
+{ "status": "ok", "url": "https://..." }
+```
+
 - GET `/get-auth-status?state=<state>` — Poll OAuth flow status
 - Request:
 ```bash

README.md (152 changes)

@@ -8,7 +8,7 @@ It now also supports OpenAI Codex (GPT models) and Claude Code via OAuth.

 So you can use local or multi-account CLI access with OpenAI(include Responses)/Gemini/Claude-compatible clients and SDKs.

-The first Chinese provider has now been added: [Qwen Code](https://github.com/QwenLM/qwen-code).
+Chinese providers have now been added: [Qwen Code](https://github.com/QwenLM/qwen-code), [iFlow](https://iflow.cn/).

 ## Features

@@ -16,19 +16,21 @@ The first Chinese provider has now been added: [Qwen Code](https://github.com/Qw
 - OpenAI Codex support (GPT models) via OAuth login
 - Claude Code support via OAuth login
 - Qwen Code support via OAuth login
+- iFlow support via OAuth login
 - Gemini Web support via cookie-based login
 - Streaming and non-streaming responses
 - Function calling/tools support
 - Multimodal input support (text and images)
-- Multiple accounts with round-robin load balancing (Gemini, OpenAI, Claude and Qwen)
-- Simple CLI authentication flows (Gemini, OpenAI, Claude and Qwen)
+- Multiple accounts with round-robin load balancing (Gemini, OpenAI, Claude, Qwen and iFlow)
+- Simple CLI authentication flows (Gemini, OpenAI, Claude, Qwen and iFlow)
 - Generative Language API Key support
 - Gemini CLI multi-account load balancing
 - Claude Code multi-account load balancing
 - Qwen Code multi-account load balancing
+- iFlow multi-account load balancing
 - OpenAI Codex multi-account load balancing
 - OpenAI-compatible upstream providers via config (e.g., OpenRouter)
-- Reusable Go SDK for embedding the proxy (see `docs/sdk-usage.md`, 中文: `docs/sdk-usage_CN.md`)
+- Reusable Go SDK for embedding the proxy (see `docs/sdk-usage.md`)

 ## Installation

@@ -39,6 +41,7 @@ The first Chinese provider has now been added: [Qwen Code](https://github.com/Qw
 - An OpenAI account for Codex/GPT access (optional)
 - An Anthropic account for Claude Code access (optional)
 - A Qwen Chat account for Qwen Code access (optional)
+- An iFlow account for iFlow access (optional)

 ### Building from Source

@@ -62,9 +65,21 @@ The first Chinese provider has now been added: [Qwen Code](https://github.com/Qw

 ## Usage

+### GUI Client & Official WebUI
+
+#### [EasyCLI](https://github.com/router-for-me/EasyCLI)
+
+A cross-platform desktop GUI client for CLIProxyAPI.
+
+#### [Cli-Proxy-API-Management-Center](https://github.com/router-for-me/Cli-Proxy-API-Management-Center)
+
+A web-based management center for CLIProxyAPI.
+
+Set `remote-management.disable-control-panel` to `true` if you prefer to host the management UI elsewhere; the server will skip downloading `management.html` and `/management.html` will return 404.
+
 ### Authentication

-You can authenticate for Gemini, OpenAI, and/or Claude. All can coexist in the same `auth-dir` and will be load balanced.
+You can authenticate for Gemini, OpenAI, Claude, Qwen, and/or iFlow. All can coexist in the same `auth-dir` and will be load balanced.

 - Gemini (Google):
 ```bash
@@ -103,6 +118,12 @@ You can authenticate for Gemini, OpenAI, and/or Claude. All can coexist in the s
 ```
 Options: add `--no-browser` to print the login URL instead of opening a browser. Use the Qwen Chat's OAuth device flow.

+- iFlow (iFlow via OAuth):
+```bash
+./cli-proxy-api --iflow-login
+```
+Options: add `--no-browser` to print the login URL instead of opening a browser. The local OAuth callback uses port `11451`.
+

 ### Starting the Server

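The Usage changes above document the new `remote-management.disable-control-panel` option. A quick way to sanity-check the behaviour described there (a sketch, assuming the server runs on the default port 8317 and the option has been set to `true` in `config.yaml`):

```bash
# With remote-management.disable-control-panel: true, the bundled
# management UI route is disabled and this request should return HTTP 404.
curl -i http://localhost:8317/management.html
```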
@@ -144,7 +165,7 @@ Request body example:
 ```

 Notes:
-- Use a `gemini-*` model for Gemini (e.g., "gemini-2.5-pro"), a `gpt-*` model for OpenAI (e.g., "gpt-5"), a `claude-*` model for Claude (e.g., "claude-3-5-sonnet-20241022"), or a `qwen-*` model for Qwen (e.g., "qwen3-coder-plus"). The proxy will route to the correct provider automatically.
+- Use a `gemini-*` model for Gemini (e.g., "gemini-2.5-pro"), a `gpt-*` model for OpenAI (e.g., "gpt-5"), a `claude-*` model for Claude (e.g., "claude-3-5-sonnet-20241022"), a `qwen-*` model for Qwen (e.g., "qwen3-coder-plus"), or an iFlow-supported model (e.g., "tstars2.0", "deepseek-v3.1", "kimi-k2", etc.). The proxy will route to the correct provider automatically.

 #### Claude Messages (SSE-compatible)

@@ -237,15 +258,27 @@ console.log(await claudeResponse.json());
 - gemini-2.5-pro
 - gemini-2.5-flash
 - gemini-2.5-flash-lite
+- gemini-2.5-flash-image-preview
 - gpt-5
 - gpt-5-codex
 - claude-opus-4-1-20250805
 - claude-opus-4-20250514
 - claude-sonnet-4-20250514
+- claude-sonnet-4-5-20250929
 - claude-3-7-sonnet-20250219
 - claude-3-5-haiku-20241022
 - qwen3-coder-plus
 - qwen3-coder-flash
+- qwen3-max
+- qwen3-vl-plus
+- deepseek-v3.2
+- deepseek-v3.1
+- deepseek-r1
+- deepseek-v3
+- kimi-k2
+- glm-4.5
+- tstars2.0
+- And other iFlow-supported models
 - Gemini models auto-switch to preview variants when needed

 ## Configuration

@@ -266,33 +299,36 @@ The server uses a YAML configuration file (`config.yaml`) located in the project
 | `request-retry` | integer | 0 | Number of times to retry a request. Retries will occur if the HTTP response code is 403, 408, 500, 502, 503, or 504. |
 | `remote-management.allow-remote` | boolean | false | Whether to allow remote (non-localhost) access to the management API. If false, only localhost can access. A management key is still required for localhost. |
 | `remote-management.secret-key` | string | "" | Management key. If a plaintext value is provided, it will be hashed on startup using bcrypt and persisted back to the config file. If empty, the entire management API is disabled (404). |
+| `remote-management.disable-control-panel` | boolean | false | When true, skip downloading `management.html` and return 404 for `/management.html`, effectively disabling the bundled management UI. |
 | `quota-exceeded` | object | {} | Configuration for handling quota exceeded. |
 | `quota-exceeded.switch-project` | boolean | true | Whether to automatically switch to another project when a quota is exceeded. |
 | `quota-exceeded.switch-preview-model` | boolean | true | Whether to automatically switch to a preview model when a quota is exceeded. |
 | `debug` | boolean | false | Enable debug mode for verbose logging. |
-| `auth` | object | {} | Request authentication configuration. |
-| `auth.providers` | object[] | [] | Authentication providers. Includes built-in `config-api-key` for inline keys. |
-| `auth.providers.*.name` | string | "" | Provider instance name. |
-| `auth.providers.*.type` | string | "" | Provider implementation identifier (for example `config-api-key`). |
-| `auth.providers.*.api-keys` | string[] | [] | Inline API keys consumed by the `config-api-key` provider. |
+| `logging-to-file` | boolean | true | Write application logs to rotating files instead of stdout. Set to `false` to log to stdout/stderr. |
+| `usage-statistics-enabled` | boolean | true | Enable in-memory usage aggregation for management APIs. Disable to drop all collected usage metrics. |
 | `api-keys` | string[] | [] | Legacy shorthand for inline API keys. Values are mirrored into the `config-api-key` provider for backwards compatibility. |
 | `generative-language-api-key` | string[] | [] | List of Generative Language API keys. |
 | `codex-api-key` | object | {} | List of Codex API keys. |
 | `codex-api-key.api-key` | string | "" | Codex API key. |
 | `codex-api-key.base-url` | string | "" | Custom Codex API endpoint, if you use a third-party API endpoint. |
+| `codex-api-key.proxy-url` | string | "" | Proxy URL for this specific API key. Overrides the global proxy-url setting. Supports socks5/http/https protocols. |
 | `claude-api-key` | object | {} | List of Claude API keys. |
 | `claude-api-key.api-key` | string | "" | Claude API key. |
 | `claude-api-key.base-url` | string | "" | Custom Claude API endpoint, if you use a third-party API endpoint. |
+| `claude-api-key.proxy-url` | string | "" | Proxy URL for this specific API key. Overrides the global proxy-url setting. Supports socks5/http/https protocols. |
 | `openai-compatibility` | object[] | [] | Upstream OpenAI-compatible providers configuration (name, base-url, api-keys, models). |
 | `openai-compatibility.*.name` | string | "" | The name of the provider. It will be used in the user agent and other places. |
 | `openai-compatibility.*.base-url` | string | "" | The base URL of the provider. |
-| `openai-compatibility.*.api-keys` | string[] | [] | The API keys for the provider. Add multiple keys if needed. Omit if unauthenticated access is allowed. |
+| `openai-compatibility.*.api-keys` | string[] | [] | (Deprecated) The API keys for the provider. Use api-key-entries instead for per-key proxy support. |
+| `openai-compatibility.*.api-key-entries` | object[] | [] | API key entries with optional per-key proxy configuration. Preferred over api-keys. |
+| `openai-compatibility.*.api-key-entries.*.api-key` | string | "" | The API key for this entry. |
+| `openai-compatibility.*.api-key-entries.*.proxy-url` | string | "" | Proxy URL for this specific API key. Overrides the global proxy-url setting. Supports socks5/http/https protocols. |
 | `openai-compatibility.*.models` | object[] | [] | The actual model name. |
 | `openai-compatibility.*.models.*.name` | string | "" | The models supported by the provider. |
 | `openai-compatibility.*.models.*.alias` | string | "" | The alias used in the API. |
 | `gemini-web` | object | {} | Configuration specific to the Gemini Web client. |
 | `gemini-web.context` | boolean | true | Enables conversation context reuse for continuous dialogue. |
-| `gemini-web.code-mode` | boolean | false | Enables code mode for optimized responses in coding-related tasks. |
+| `gemini-web.gem-mode` | string | "" | Selects a predefined Gem to attach for Gemini Web requests; allowed values: `coding-partner`, `writing-editor`. When empty, no Gem is attached. |
 | `gemini-web.max-chars-per-request` | integer | 1,000,000 | The maximum number of characters to send to Gemini Web in a single request. |
 | `gemini-web.disable-continuation-hint` | boolean | false | Disables the continuation hint for split prompts. |

@@ -313,12 +349,21 @@ remote-management:
   # Leave empty to disable the Management API entirely (404 for all /v0/management routes).
   secret-key: ""

+  # Disable the bundled management control panel asset download and HTTP route when true.
+  disable-control-panel: false
+
 # Authentication directory (supports ~ for home directory). If you use Windows, please set the directory like this: `C:/cli-proxy-api/`
 auth-dir: "~/.cli-proxy-api"

 # Enable debug logging
 debug: false

+# When true, write application logs to rotating files instead of stdout
+logging-to-file: true
+
+# When false, disable in-memory usage statistics aggregation
+usage-statistics-enabled: true
+
 # Proxy URL. Supports socks5/http/https protocols. Example: socks5://user:pass@192.168.1.1:1080/
 proxy-url: ""

@@ -333,18 +378,9 @@ quota-exceeded:
 # Gemini Web client configuration
 gemini-web:
   context: true # Enable conversation context reuse
-  code-mode: false # Enable code mode
+  gem-mode: "" # Select Gem: "coding-partner" or "writing-editor"; empty means no Gem
   max-chars-per-request: 1000000 # Max characters per request

-# Request authentication providers
-auth:
-  providers:
-    - name: "default"
-      type: "config-api-key"
-      api-keys:
-        - "your-api-key-1"
-        - "your-api-key-2"
-
 # API keys for official Generative Language API
 generative-language-api-key:
   - "AIzaSy...01"
@@ -356,20 +392,28 @@ generative-language-api-key:
 codex-api-key:
   - api-key: "sk-atSM..."
     base-url: "https://www.example.com" # use the custom codex API endpoint
+    proxy-url: "socks5://proxy.example.com:1080" # optional: per-key proxy override

 # Claude API keys
 claude-api-key:
   - api-key: "sk-atSM..." # use the official claude API key, no need to set the base url
   - api-key: "sk-atSM..."
     base-url: "https://www.example.com" # use the custom claude API endpoint
+    proxy-url: "socks5://proxy.example.com:1080" # optional: per-key proxy override

 # OpenAI compatibility providers
 openai-compatibility:
   - name: "openrouter" # The name of the provider; it will be used in the user agent and other places.
     base-url: "https://openrouter.ai/api/v1" # The base URL of the provider.
-    api-keys: # The API keys for the provider. Add multiple keys if needed. Omit if unauthenticated access is allowed.
-      - "sk-or-v1-...b780"
-      - "sk-or-v1-...b781"
+    # New format with per-key proxy support (recommended):
+    api-key-entries:
+      - api-key: "sk-or-v1-...b780"
+        proxy-url: "socks5://proxy.example.com:1080" # optional: per-key proxy override
+      - api-key: "sk-or-v1-...b781" # without proxy-url
+    # Legacy format (still supported, but cannot specify proxy per key):
+    # api-keys:
+    #   - "sk-or-v1-...b780"
+    #   - "sk-or-v1-...b781"
     models: # The models supported by the provider.
       - name: "moonshotai/kimi-k2:free" # The actual model name.
         alias: "kimi-k2" # The alias used in the API.
@@ -381,10 +425,26 @@ Configure upstream OpenAI-compatible providers (e.g., OpenRouter) via `openai-co

 - name: provider identifier used internally
 - base-url: provider base URL
-- api-keys: optional list of API keys (omit if provider allows unauthenticated requests)
+- api-key-entries: list of API key entries with optional per-key proxy configuration (recommended)
+- api-keys: (deprecated) simple list of API keys without proxy support
 - models: list of mappings from upstream model `name` to local `alias`

-Example:
+Example with per-key proxy support:

+```yaml
+openai-compatibility:
+  - name: "openrouter"
+    base-url: "https://openrouter.ai/api/v1"
+    api-key-entries:
+      - api-key: "sk-or-v1-...b780"
+        proxy-url: "socks5://proxy.example.com:1080"
+      - api-key: "sk-or-v1-...b781"
+    models:
+      - name: "moonshotai/kimi-k2:free"
+        alias: "kimi-k2"
+```
+
+Legacy format (still supported):
+
 ```yaml
 openai-compatibility:
@@ -492,6 +552,14 @@ export ANTHROPIC_MODEL=qwen3-coder-plus
 export ANTHROPIC_SMALL_FAST_MODEL=qwen3-coder-flash
 ```

+Using iFlow models:
+```bash
+export ANTHROPIC_BASE_URL=http://127.0.0.1:8317
+export ANTHROPIC_AUTH_TOKEN=sk-dummy
+export ANTHROPIC_MODEL=qwen3-max
+export ANTHROPIC_SMALL_FAST_MODEL=qwen3-235b-a22b-instruct
+```
+
 ## Codex with multiple account load balancing

 Start CLI Proxy API server, and then edit the `~/.codex/config.toml` and `~/.codex/auth.json` files.
@@ -547,6 +615,12 @@ Run the following command to login (Qwen OAuth):
 docker run -it -rm -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --qwen-login
 ```

+Run the following command to login (iFlow OAuth on port 11451):
+
+```bash
+docker run --rm -p 11451:11451 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --iflow-login
+```
+
 Run the following command to start the server:

 ```bash
@@ -609,6 +683,10 @@ docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.ya
 ```bash
 docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --qwen-login
 ```
+- **iFlow**:
+```bash
+docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --iflow-login
+```

 5. To view the server logs:
 ```bash
@@ -626,8 +704,11 @@ see [MANAGEMENT_API.md](MANAGEMENT_API.md)

 ## SDK Docs

-- Usage: `docs/sdk-usage.md` (中文: `docs/sdk-usage_CN.md`)
-- Advanced (executors & translators): `docs/sdk-advanced.md` (中文: `docs/sdk-advanced_CN.md`)
+- Usage: [docs/sdk-usage.md](docs/sdk-usage.md)
+- Advanced (executors & translators): [docs/sdk-advanced.md](docs/sdk-advanced.md)
+- Access: [docs/sdk-access.md](docs/sdk-access.md)
+- Watcher: [docs/sdk-watcher.md](docs/sdk-watcher.md)
+- Custom Provider Example: `examples/custom-provider`

 ## Contributing

@@ -639,6 +720,17 @@ Contributions are welcome! Please feel free to submit a Pull Request.
 4. Push to the branch (`git push origin feature/amazing-feature`)
 5. Open a Pull Request

+## Who is with us?
+
+Those projects are based on CLIProxyAPI:
+
+### [vibeproxy](https://github.com/automazeio/vibeproxy)
+
+Native macOS menu bar app to use your Claude Code & ChatGPT subscriptions with AI coding tools - no API keys needed
+
+> [!NOTE]
+> If you developed a project based on CLIProxyAPI, please open a PR to add it to this list.
+
 ## License

 This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
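
The README changes above add iFlow as a provider. Pulling the pieces together, an end-to-end flow might look like the sketch below: the port 8317, the `/v1/chat/completions` path, the `--iflow-login` flag, and the `kimi-k2` model name all come from the changes above, while starting the server by running the binary without flags and sending a Bearer key (only needed when inline `api-keys` are configured) are assumptions:

```bash
# One-time login (local OAuth callback on port 11451), then start the proxy.
./cli-proxy-api --iflow-login
./cli-proxy-api

# Call an iFlow-served model through the OpenAI-compatible endpoint;
# the proxy routes by model name (kimi-k2 is in the supported-model list above).
curl -X POST http://localhost:8317/v1/chat/completions \
  -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer <YOUR_API_KEY>' \
  -d '{"model":"kimi-k2","messages":[{"role":"user","content":"Hello"}]}'
```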
154
README_CN.md
154
README_CN.md
@@ -28,7 +28,7 @@
|
|||||||
|
|
||||||
您可以使用本地或多账户的CLI方式,通过任何与 OpenAI(包括Responses)/Gemini/Claude 兼容的客户端和SDK进行访问。
|
您可以使用本地或多账户的CLI方式,通过任何与 OpenAI(包括Responses)/Gemini/Claude 兼容的客户端和SDK进行访问。
|
||||||
|
|
||||||
现已新增首个中国提供商:[Qwen Code](https://github.com/QwenLM/qwen-code)。
|
现已新增国内提供商:[Qwen Code](https://github.com/QwenLM/qwen-code)、[iFlow](https://iflow.cn/)。
|
||||||
|
|
||||||
## 功能特性
|
## 功能特性
|
||||||
|
|
||||||
@@ -36,19 +36,21 @@
|
|||||||
- 新增 OpenAI Codex(GPT 系列)支持(OAuth 登录)
|
- 新增 OpenAI Codex(GPT 系列)支持(OAuth 登录)
|
||||||
- 新增 Claude Code 支持(OAuth 登录)
|
- 新增 Claude Code 支持(OAuth 登录)
|
||||||
- 新增 Qwen Code 支持(OAuth 登录)
|
- 新增 Qwen Code 支持(OAuth 登录)
|
||||||
|
- 新增 iFlow 支持(OAuth 登录)
|
||||||
- 新增 Gemini Web 支持(通过 Cookie 登录)
|
- 新增 Gemini Web 支持(通过 Cookie 登录)
|
||||||
- 支持流式与非流式响应
|
- 支持流式与非流式响应
|
||||||
- 函数调用/工具支持
|
- 函数调用/工具支持
|
||||||
- 多模态输入(文本、图片)
|
- 多模态输入(文本、图片)
|
||||||
- 多账户支持与轮询负载均衡(Gemini、OpenAI、Claude 与 Qwen)
|
- 多账户支持与轮询负载均衡(Gemini、OpenAI、Claude、Qwen 与 iFlow)
|
||||||
- 简单的 CLI 身份验证流程(Gemini、OpenAI、Claude 与 Qwen)
|
- 简单的 CLI 身份验证流程(Gemini、OpenAI、Claude、Qwen 与 iFlow)
|
||||||
- 支持 Gemini AIStudio API 密钥
|
- 支持 Gemini AIStudio API 密钥
|
||||||
- 支持 Gemini CLI 多账户轮询
|
- 支持 Gemini CLI 多账户轮询
|
||||||
- 支持 Claude Code 多账户轮询
|
- 支持 Claude Code 多账户轮询
|
||||||
- 支持 Qwen Code 多账户轮询
|
- 支持 Qwen Code 多账户轮询
|
||||||
|
- 支持 iFlow 多账户轮询
|
||||||
- 支持 OpenAI Codex 多账户轮询
|
- 支持 OpenAI Codex 多账户轮询
|
||||||
- 通过配置接入上游 OpenAI 兼容提供商(例如 OpenRouter)
|
- 通过配置接入上游 OpenAI 兼容提供商(例如 OpenRouter)
|
||||||
- 可复用的 Go SDK(见 `docs/sdk-usage.md`)
|
- 可复用的 Go SDK(见 `docs/sdk-usage_CN.md`)
|
||||||
|
|
||||||
## 安装
|
## 安装
|
||||||
|
|
||||||
@@ -59,6 +61,7 @@
|
|||||||
- 有权访问 OpenAI Codex/GPT 的 OpenAI 账户(可选)
|
- 有权访问 OpenAI Codex/GPT 的 OpenAI 账户(可选)
|
||||||
- 有权访问 Claude Code 的 Anthropic 账户(可选)
|
- 有权访问 Claude Code 的 Anthropic 账户(可选)
|
||||||
- 有权访问 Qwen Code 的 Qwen Chat 账户(可选)
|
- 有权访问 Qwen Code 的 Qwen Chat 账户(可选)
|
||||||
|
- 有权访问 iFlow 的 iFlow 账户(可选)
|
||||||
|
|
||||||
### 从源码构建
|
### 从源码构建
|
||||||
|
|
||||||
@@ -75,9 +78,21 @@
|
|||||||
|
|
||||||
## 使用方法
|
## 使用方法
|
||||||
|
|
||||||
|
### 图形客户端与官方 WebUI
|
||||||
|
|
||||||
|
#### [EasyCLI](https://github.com/router-for-me/EasyCLI)
|
||||||
|
|
||||||
|
CLIProxyAPI 的跨平台桌面图形客户端。
|
||||||
|
|
||||||
|
#### [Cli-Proxy-API-Management-Center](https://github.com/router-for-me/Cli-Proxy-API-Management-Center)
|
||||||
|
|
||||||
|
CLIProxyAPI 的基于 Web 的管理中心。
|
||||||
|
|
||||||
|
如果希望自行托管管理页面,可在配置中将 `remote-management.disable-control-panel` 设为 `true`,服务器将停止下载 `management.html`,并让 `/management.html` 返回 404。
|
||||||
|
|
||||||
### 身份验证
|
### 身份验证
|
||||||
|
|
||||||
您可以分别为 Gemini、OpenAI 和 Claude 进行身份验证,三者可同时存在于同一个 `auth-dir` 中并参与负载均衡。
|
您可以分别为 Gemini、OpenAI、Claude、Qwen 和 iFlow 进行身份验证,它们可同时存在于同一个 `auth-dir` 中并参与负载均衡。
|
||||||
|
|
||||||
- Gemini(Google):
|
- Gemini(Google):
|
||||||
```bash
|
```bash
|
||||||
@@ -116,6 +131,12 @@
|
|||||||
```
|
```
|
||||||
选项:加上 `--no-browser` 可打印登录地址而不自动打开浏览器。使用 Qwen Chat 的 OAuth 设备登录流程。
|
选项:加上 `--no-browser` 可打印登录地址而不自动打开浏览器。使用 Qwen Chat 的 OAuth 设备登录流程。
|
||||||
|
|
||||||
|
- iFlow(iFlow,OAuth):
|
||||||
|
```bash
|
||||||
|
./cli-proxy-api --iflow-login
|
||||||
|
```
|
||||||
|
选项:加上 `--no-browser` 可打印登录地址而不自动打开浏览器。本地 OAuth 回调端口为 `11451`。
|
||||||
|
|
||||||
### 启动服务器
|
### 启动服务器
|
||||||
|
|
||||||
身份验证完成后,启动服务器:
|
身份验证完成后,启动服务器:
|
||||||
@@ -156,7 +177,7 @@ POST http://localhost:8317/v1/chat/completions
|
|||||||
```
|
```
|
||||||
|
|
||||||
说明:
|
说明:
|
||||||
- 使用 "gemini-*" 模型(例如 "gemini-2.5-pro")来调用 Gemini,使用 "gpt-*" 模型(例如 "gpt-5")来调用 OpenAI,使用 "claude-*" 模型(例如 "claude-3-5-sonnet-20241022")来调用 Claude,或者使用 "qwen-*" 模型(例如 "qwen3-coder-plus")来调用 Qwen。代理服务会自动将请求路由到相应的提供商。
|
- 使用 "gemini-*" 模型(例如 "gemini-2.5-pro")来调用 Gemini,使用 "gpt-*" 模型(例如 "gpt-5")来调用 OpenAI,使用 "claude-*" 模型(例如 "claude-3-5-sonnet-20241022")来调用 Claude,使用 "qwen-*" 模型(例如 "qwen3-coder-plus")来调用 Qwen,或者使用 iFlow 支持的模型(例如 "tstars2.0"、"deepseek-v3.1"、"kimi-k2" 等)来调用 iFlow。代理服务会自动将请求路由到相应的提供商。
|
||||||
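
As a quick illustration of the routing note above, here is a minimal Go sketch that posts to the proxy's OpenAI-compatible endpoint; the model alias ("kimi-k2") and the placeholder API key are assumptions for the example, and the Bearer header simply follows the usual OpenAI convention for whatever key you configured.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// The proxy routes by model name, so only the "model" field decides the upstream provider.
	body, _ := json.Marshal(map[string]any{
		"model":    "kimi-k2", // assumed iFlow alias; any gemini-*/gpt-*/claude-*/qwen-* name works the same way
		"messages": []map[string]string{{"role": "user", "content": "Hello"}},
	})

	req, _ := http.NewRequest("POST", "http://localhost:8317/v1/chat/completions", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer your-api-key-1") // one of the keys from config.yaml

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}
```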
|
|
||||||
#### Claude 消息(SSE 兼容)
|
#### Claude 消息(SSE 兼容)
|
||||||
|
|
||||||
@@ -249,15 +270,27 @@ console.log(await claudeResponse.json());
|
|||||||
- gemini-2.5-pro
|
- gemini-2.5-pro
|
||||||
- gemini-2.5-flash
|
- gemini-2.5-flash
|
||||||
- gemini-2.5-flash-lite
|
- gemini-2.5-flash-lite
|
||||||
|
- gemini-2.5-flash-image-preview
|
||||||
- gpt-5
|
- gpt-5
|
||||||
- gpt-5-codex
|
- gpt-5-codex
|
||||||
- claude-opus-4-1-20250805
|
- claude-opus-4-1-20250805
|
||||||
- claude-opus-4-20250514
|
- claude-opus-4-20250514
|
||||||
- claude-sonnet-4-20250514
|
- claude-sonnet-4-20250514
|
||||||
|
- claude-sonnet-4-5-20250929
|
||||||
- claude-3-7-sonnet-20250219
|
- claude-3-7-sonnet-20250219
|
||||||
- claude-3-5-haiku-20241022
|
- claude-3-5-haiku-20241022
|
||||||
- qwen3-coder-plus
|
- qwen3-coder-plus
|
||||||
- qwen3-coder-flash
|
- qwen3-coder-flash
|
||||||
|
- qwen3-max
|
||||||
|
- qwen3-vl-plus
|
||||||
|
- deepseek-v3.2
|
||||||
|
- deepseek-v3.1
|
||||||
|
- deepseek-r1
|
||||||
|
- deepseek-v3
|
||||||
|
- kimi-k2
|
||||||
|
- glm-4.5
|
||||||
|
- tstars2.0
|
||||||
|
- 以及其他 iFlow 支持的模型
|
||||||
- Gemini 模型在需要时自动切换到对应的 preview 版本
|
- Gemini 模型在需要时自动切换到对应的 preview 版本
|
||||||
|
|
||||||
## 配置
|
## 配置
|
||||||
@@ -278,33 +311,36 @@ console.log(await claudeResponse.json());
|
|||||||
| `request-retry` | integer | 0 | 请求重试次数。如果HTTP响应码为403、408、500、502、503或504,将会触发重试。 |
|
| `request-retry` | integer | 0 | 请求重试次数。如果HTTP响应码为403、408、500、502、503或504,将会触发重试。 |
|
||||||
| `remote-management.allow-remote` | boolean | false | 是否允许远程(非localhost)访问管理接口。为false时仅允许本地访问;本地访问同样需要管理密钥。 |
|
| `remote-management.allow-remote` | boolean | false | 是否允许远程(非localhost)访问管理接口。为false时仅允许本地访问;本地访问同样需要管理密钥。 |
|
||||||
| `remote-management.secret-key` | string | "" | 管理密钥。若配置为明文,启动时会自动进行bcrypt加密并写回配置文件。若为空,管理接口整体不可用(404)。 |
|
| `remote-management.secret-key` | string | "" | 管理密钥。若配置为明文,启动时会自动进行bcrypt加密并写回配置文件。若为空,管理接口整体不可用(404)。 |
|
||||||
|
| `remote-management.disable-control-panel` | boolean | false | 当为 true 时,不再下载 `management.html`,且 `/management.html` 会返回 404,从而禁用内置管理界面。 |
|
||||||
| `quota-exceeded` | object | {} | 用于处理配额超限的配置。 |
|
| `quota-exceeded` | object | {} | 用于处理配额超限的配置。 |
|
||||||
| `quota-exceeded.switch-project` | boolean | true | 当配额超限时,是否自动切换到另一个项目。 |
|
| `quota-exceeded.switch-project` | boolean | true | 当配额超限时,是否自动切换到另一个项目。 |
|
||||||
| `quota-exceeded.switch-preview-model` | boolean | true | 当配额超限时,是否自动切换到预览模型。 |
|
| `quota-exceeded.switch-preview-model` | boolean | true | 当配额超限时,是否自动切换到预览模型。 |
|
||||||
| `debug` | boolean | false | 启用调试模式以获取详细日志。 |
|
| `debug` | boolean | false | 启用调试模式以获取详细日志。 |
|
||||||
| `auth` | object | {} | 请求鉴权配置。 |
|
| `logging-to-file` | boolean | true | 是否将应用日志写入滚动文件;设为 false 时输出到 stdout/stderr。 |
|
||||||
| `auth.providers` | object[] | [] | 鉴权提供方列表,内置 `config-api-key` 支持内联密钥。 |
|
| `usage-statistics-enabled` | boolean | true | 是否启用内存中的使用统计;设为 false 时直接丢弃所有统计数据。 |
|
||||||
| `auth.providers.*.name` | string | "" | 提供方实例名称。 |
|
|
||||||
| `auth.providers.*.type` | string | "" | 提供方实现标识(例如 `config-api-key`)。 |
|
|
||||||
| `auth.providers.*.api-keys` | string[] | [] | `config-api-key` 提供方使用的内联密钥。 |
|
|
||||||
| `api-keys` | string[] | [] | 兼容旧配置的简写,会自动同步到默认 `config-api-key` 提供方。 |
|
| `api-keys` | string[] | [] | 兼容旧配置的简写,会自动同步到默认 `config-api-key` 提供方。 |
|
||||||
| `generative-language-api-key` | string[] | [] | 生成式语言API密钥列表。 |
|
| `generative-language-api-key` | string[] | [] | 生成式语言API密钥列表。 |
|
||||||
| `codex-api-key` | object | {} | Codex API密钥列表。 |
|
| `codex-api-key` | object | {} | Codex API密钥列表。 |
|
||||||
| `codex-api-key.api-key` | string | "" | Codex API密钥。 |
|
| `codex-api-key.api-key` | string | "" | Codex API密钥。 |
|
||||||
| `codex-api-key.base-url` | string | "" | 自定义的Codex API端点 |
|
| `codex-api-key.base-url` | string | "" | 自定义的Codex API端点 |
|
||||||
|
| `codex-api-key.proxy-url` | string | "" | 针对该API密钥的代理URL。会覆盖全局proxy-url设置。支持socks5/http/https协议。 |
|
||||||
| `claude-api-key` | object | {} | Claude API密钥列表。 |
|
| `claude-api-key` | object | {} | Claude API密钥列表。 |
|
||||||
| `claude-api-key.api-key` | string | "" | Claude API密钥。 |
|
| `claude-api-key.api-key` | string | "" | Claude API密钥。 |
|
||||||
| `claude-api-key.base-url` | string | "" | 自定义的Claude API端点,如果您使用第三方的API端点。 |
|
| `claude-api-key.base-url` | string | "" | 自定义的Claude API端点,如果您使用第三方的API端点。 |
|
||||||
|
| `claude-api-key.proxy-url` | string | "" | 针对该API密钥的代理URL。会覆盖全局proxy-url设置。支持socks5/http/https协议。 |
|
||||||
| `openai-compatibility` | object[] | [] | 上游OpenAI兼容提供商的配置(名称、基础URL、API密钥、模型)。 |
|
| `openai-compatibility` | object[] | [] | 上游OpenAI兼容提供商的配置(名称、基础URL、API密钥、模型)。 |
|
||||||
| `openai-compatibility.*.name` | string | "" | 提供商的名称。它将被用于用户代理(User Agent)和其他地方。 |
|
| `openai-compatibility.*.name` | string | "" | 提供商的名称。它将被用于用户代理(User Agent)和其他地方。 |
|
||||||
| `openai-compatibility.*.base-url` | string | "" | 提供商的基础URL。 |
|
| `openai-compatibility.*.base-url` | string | "" | 提供商的基础URL。 |
|
||||||
| `openai-compatibility.*.api-keys` | string[] | [] | 提供商的API密钥。如果需要,可以添加多个密钥。如果允许未经身份验证的访问,则可以省略。 |
|
| `openai-compatibility.*.api-keys` | string[] | [] | (已弃用) 提供商的API密钥。建议改用api-key-entries以获得每密钥代理支持。 |
|
||||||
|
| `openai-compatibility.*.api-key-entries` | object[] | [] | API密钥条目,支持可选的每密钥代理配置。优先于api-keys。 |
|
||||||
|
| `openai-compatibility.*.api-key-entries.*.api-key` | string | "" | 该条目的API密钥。 |
|
||||||
|
| `openai-compatibility.*.api-key-entries.*.proxy-url` | string | "" | 针对该API密钥的代理URL。会覆盖全局proxy-url设置。支持socks5/http/https协议。 |
|
||||||
| `openai-compatibility.*.models` | object[] | [] | 实际的模型名称。 |
|
| `openai-compatibility.*.models` | object[] | [] | 实际的模型名称。 |
|
||||||
| `openai-compatibility.*.models.*.name` | string | "" | 提供商支持的模型。 |
|
| `openai-compatibility.*.models.*.name` | string | "" | 提供商支持的模型。 |
|
||||||
| `openai-compatibility.*.models.*.alias` | string | "" | 在API中使用的别名。 |
|
| `openai-compatibility.*.models.*.alias` | string | "" | 在API中使用的别名。 |
|
||||||
| `gemini-web` | object | {} | Gemini Web 客户端的特定配置。 |
|
| `gemini-web` | object | {} | Gemini Web 客户端的特定配置。 |
|
||||||
| `gemini-web.context` | boolean | true | 是否启用会话上下文重用,以实现连续对话。 |
|
| `gemini-web.context` | boolean | true | 是否启用会话上下文重用,以实现连续对话。 |
|
||||||
| `gemini-web.code-mode` | boolean | false | 是否启用代码模式,优化代码相关任务的响应。 |
|
| `gemini-web.gem-mode` | string | "" | 选择要附加的预设 Gem(`coding-partner` 或 `writing-editor`);为空表示不附加。 |
|
||||||
| `gemini-web.max-chars-per-request` | integer | 1,000,000 | 单次请求发送给 Gemini Web 的最大字符数。 |
|
| `gemini-web.max-chars-per-request` | integer | 1,000,000 | 单次请求发送给 Gemini Web 的最大字符数。 |
|
||||||
| `gemini-web.disable-continuation-hint` | boolean | false | 当提示被拆分时,是否禁用连续提示的暗示。 |
|
| `gemini-web.disable-continuation-hint` | boolean | false | 当提示被拆分时,是否禁用连续提示的暗示。 |
|
||||||
|
|
||||||
@@ -324,12 +360,21 @@ remote-management:
|
|||||||
# 若为空,/v0/management 整体处于 404(禁用)。
|
# 若为空,/v0/management 整体处于 404(禁用)。
|
||||||
secret-key: ""
|
secret-key: ""
|
||||||
|
|
||||||
|
# 当设为 true 时,不下载管理面板文件,/management.html 将直接返回 404。
|
||||||
|
disable-control-panel: false
|
||||||
|
|
||||||
# 身份验证目录(支持 ~ 表示主目录)。如果你使用Windows,建议设置成`C:/cli-proxy-api/`。
|
# 身份验证目录(支持 ~ 表示主目录)。如果你使用Windows,建议设置成`C:/cli-proxy-api/`。
|
||||||
auth-dir: "~/.cli-proxy-api"
|
auth-dir: "~/.cli-proxy-api"
|
||||||
|
|
||||||
# 启用调试日志
|
# 启用调试日志
|
||||||
debug: false
|
debug: false
|
||||||
|
|
||||||
|
# 为 true 时将应用日志写入滚动文件而不是 stdout
|
||||||
|
logging-to-file: true
|
||||||
|
|
||||||
|
# 为 false 时禁用内存中的使用统计并直接丢弃所有数据
|
||||||
|
usage-statistics-enabled: true
|
||||||
|
|
||||||
# 代理URL。支持socks5/http/https协议。例如:socks5://user:pass@192.168.1.1:1080/
|
# 代理URL。支持socks5/http/https协议。例如:socks5://user:pass@192.168.1.1:1080/
|
||||||
proxy-url: ""
|
proxy-url: ""
|
||||||
|
|
||||||
@@ -345,18 +390,9 @@ quota-exceeded:
|
|||||||
# Gemini Web 客户端配置
|
# Gemini Web 客户端配置
|
||||||
gemini-web:
|
gemini-web:
|
||||||
context: true # 启用会话上下文重用
|
context: true # 启用会话上下文重用
|
||||||
code-mode: false # 启用代码模式
|
gem-mode: "" # 选择 Gem:"coding-partner" 或 "writing-editor";为空表示不附加
|
||||||
max-chars-per-request: 1000000 # 单次请求最大字符数
|
max-chars-per-request: 1000000 # 单次请求最大字符数
|
||||||
|
|
||||||
# 请求鉴权提供方
|
|
||||||
auth:
|
|
||||||
providers:
|
|
||||||
- name: "default"
|
|
||||||
type: "config-api-key"
|
|
||||||
api-keys:
|
|
||||||
- "your-api-key-1"
|
|
||||||
- "your-api-key-2"
|
|
||||||
|
|
||||||
# AIStudio Gemini API 的 API 密钥
|
# AIStudio Gemini API 的 API 密钥
|
||||||
generative-language-api-key:
|
generative-language-api-key:
|
||||||
- "AIzaSy...01"
|
- "AIzaSy...01"
|
||||||
@@ -368,20 +404,28 @@ generative-language-api-key:
|
|||||||
codex-api-key:
|
codex-api-key:
|
||||||
- api-key: "sk-atSM..."
|
- api-key: "sk-atSM..."
|
||||||
base-url: "https://www.example.com" # 第三方 Codex API 中转服务端点
|
base-url: "https://www.example.com" # 第三方 Codex API 中转服务端点
|
||||||
|
proxy-url: "socks5://proxy.example.com:1080" # 可选:针对该密钥的代理设置
|
||||||
|
|
||||||
# Claude API 密钥
|
# Claude API 密钥
|
||||||
claude-api-key:
|
claude-api-key:
|
||||||
- api-key: "sk-atSM..." # 如果使用官方 Claude API,无需设置 base-url
|
- api-key: "sk-atSM..." # 如果使用官方 Claude API,无需设置 base-url
|
||||||
- api-key: "sk-atSM..."
|
- api-key: "sk-atSM..."
|
||||||
base-url: "https://www.example.com" # 第三方 Claude API 中转服务端点
|
base-url: "https://www.example.com" # 第三方 Claude API 中转服务端点
|
||||||
|
proxy-url: "socks5://proxy.example.com:1080" # 可选:针对该密钥的代理设置
|
||||||
|
|
||||||
# OpenAI 兼容提供商
|
# OpenAI 兼容提供商
|
||||||
openai-compatibility:
|
openai-compatibility:
|
||||||
- name: "openrouter" # 提供商的名称;它将被用于用户代理和其它地方。
|
- name: "openrouter" # 提供商的名称;它将被用于用户代理和其它地方。
|
||||||
base-url: "https://openrouter.ai/api/v1" # 提供商的基础URL。
|
base-url: "https://openrouter.ai/api/v1" # 提供商的基础URL。
|
||||||
api-keys: # 提供商的API密钥。如果需要,可以添加多个密钥。如果允许未经身份验证的访问,则可以省略。
|
# 新格式:支持每密钥代理配置(推荐):
|
||||||
- "sk-or-v1-...b780"
|
api-key-entries:
|
||||||
- "sk-or-v1-...b781"
|
- api-key: "sk-or-v1-...b780"
|
||||||
|
proxy-url: "socks5://proxy.example.com:1080" # 可选:针对该密钥的代理设置
|
||||||
|
- api-key: "sk-or-v1-...b781" # 不进行额外代理设置
|
||||||
|
# 旧格式(仍支持,但无法为每个密钥指定代理):
|
||||||
|
# api-keys:
|
||||||
|
# - "sk-or-v1-...b780"
|
||||||
|
# - "sk-or-v1-...b781"
|
||||||
models: # 提供商支持的模型。
|
models: # 提供商支持的模型。
|
||||||
- name: "moonshotai/kimi-k2:free" # 实际的模型名称。
|
- name: "moonshotai/kimi-k2:free" # 实际的模型名称。
|
||||||
alias: "kimi-k2" # 在API中使用的别名。
|
alias: "kimi-k2" # 在API中使用的别名。
|
||||||
@@ -393,10 +437,26 @@ openai-compatibility:
|
|||||||
|
|
||||||
- name:内部识别名
|
- name:内部识别名
|
||||||
- base-url:提供商基础地址
|
- base-url:提供商基础地址
|
||||||
- api-keys:可选,多密钥轮询(若提供商支持无鉴权可省略)
|
- api-key-entries:API密钥条目列表,支持可选的每密钥代理配置(推荐)
|
||||||
|
- api-keys:(已弃用) 简单的API密钥列表,不支持代理配置
|
||||||
- models:将上游模型 `name` 映射为本地可用 `alias`
|
- models:将上游模型 `name` 映射为本地可用 `alias`
|
||||||
|
|
||||||
示例:
|
支持每密钥代理配置的示例:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
openai-compatibility:
|
||||||
|
- name: "openrouter"
|
||||||
|
base-url: "https://openrouter.ai/api/v1"
|
||||||
|
api-key-entries:
|
||||||
|
- api-key: "sk-or-v1-...b780"
|
||||||
|
proxy-url: "socks5://proxy.example.com:1080"
|
||||||
|
- api-key: "sk-or-v1-...b781"
|
||||||
|
models:
|
||||||
|
- name: "moonshotai/kimi-k2:free"
|
||||||
|
alias: "kimi-k2"
|
||||||
|
```
|
||||||
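
For readers wondering how a per-key proxy-url takes effect, the sketch below shows one way such an override can be turned into a dedicated http.Client in Go. It is illustrative only and not the project's internal implementation; Go's net/http accepts http, https and socks5 proxy URLs in this position.

```go
package main

import (
	"net/http"
	"net/url"
	"time"
)

// newClientWithProxy is an illustrative helper: it shows how a per-key proxy-url
// such as "socks5://proxy.example.com:1080" can back a dedicated http.Client.
func newClientWithProxy(proxyURL string) (*http.Client, error) {
	transport := &http.Transport{}
	if proxyURL != "" {
		parsed, err := url.Parse(proxyURL)
		if err != nil {
			return nil, err
		}
		// http, https and socks5 schemes are all understood here.
		transport.Proxy = http.ProxyURL(parsed)
	}
	return &http.Client{Transport: transport, Timeout: 60 * time.Second}, nil
}

func main() {
	client, err := newClientWithProxy("socks5://proxy.example.com:1080")
	if err != nil {
		panic(err)
	}
	_ = client // requests made for this specific API key would use this client
}
```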
|
|
||||||
|
旧格式(仍支持):
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
openai-compatibility:
|
openai-compatibility:
|
||||||
@@ -500,6 +560,14 @@ export ANTHROPIC_MODEL=qwen3-coder-plus
|
|||||||
export ANTHROPIC_SMALL_FAST_MODEL=qwen3-coder-flash
|
export ANTHROPIC_SMALL_FAST_MODEL=qwen3-coder-flash
|
||||||
```
|
```
|
||||||
|
|
||||||
|
使用 iFlow 模型:
|
||||||
|
```bash
|
||||||
|
export ANTHROPIC_BASE_URL=http://127.0.0.1:8317
|
||||||
|
export ANTHROPIC_AUTH_TOKEN=sk-dummy
|
||||||
|
export ANTHROPIC_MODEL=qwen3-max
|
||||||
|
export ANTHROPIC_SMALL_FAST_MODEL=qwen3-235b-a22b-instruct
|
||||||
|
```
|
||||||
|
|
||||||
## Codex 多账户负载均衡
|
## Codex 多账户负载均衡
|
||||||
|
|
||||||
启动 CLI Proxy API 服务器, 修改 `~/.codex/config.toml` 和 `~/.codex/auth.json` 文件。
|
启动 CLI Proxy API 服务器, 修改 `~/.codex/config.toml` 和 `~/.codex/auth.json` 文件。
|
||||||
@@ -555,6 +623,12 @@ docker run --rm -p 54545:54545 -v /path/to/your/config.yaml:/CLIProxyAPI/config.
|
|||||||
docker run -it --rm -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --qwen-login
|
docker run -it --rm -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --qwen-login
|
||||||
```
|
```
|
||||||
|
|
||||||
|
运行以下命令进行登录(iFlow OAuth,端口 11451):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run --rm -p 11451:11451 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --iflow-login
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
运行以下命令启动服务器:
|
运行以下命令启动服务器:
|
||||||
|
|
||||||
@@ -618,6 +692,10 @@ docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.ya
|
|||||||
```bash
|
```bash
|
||||||
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --qwen-login
|
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --qwen-login
|
||||||
```
|
```
|
||||||
|
- **iFlow**:
|
||||||
|
```bash
|
||||||
|
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --iflow-login
|
||||||
|
```
|
||||||
|
|
||||||
5. 查看服务器日志:
|
5. 查看服务器日志:
|
||||||
```bash
|
```bash
|
||||||
@@ -635,8 +713,10 @@ docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.ya
|
|||||||
|
|
||||||
## SDK 文档
|
## SDK 文档
|
||||||
|
|
||||||
- 使用文档:`docs/sdk-usage_CN.md`(English: `docs/sdk-usage.md`)
|
- 使用文档:[docs/sdk-usage_CN.md](docs/sdk-usage_CN.md)
|
||||||
- 高级(执行器与翻译器):`docs/sdk-advanced_CN.md`(English: `docs/sdk-advanced.md`)
|
- 高级(执行器与翻译器):[docs/sdk-advanced_CN.md](docs/sdk-advanced_CN.md)
|
||||||
|
- 认证: [docs/sdk-access_CN.md](docs/sdk-access_CN.md)
|
||||||
|
- 凭据加载/更新: [docs/sdk-watcher_CN.md](docs/sdk-watcher_CN.md)
|
||||||
- 自定义 Provider 示例:`examples/custom-provider`
|
- 自定义 Provider 示例:`examples/custom-provider`
|
||||||
|
|
||||||
## 贡献
|
## 贡献
|
||||||
@@ -649,6 +729,18 @@ docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.ya
|
|||||||
4. 推送到分支(`git push origin feature/amazing-feature`)
|
4. 推送到分支(`git push origin feature/amazing-feature`)
|
||||||
5. 打开 Pull Request
|
5. 打开 Pull Request
|
||||||
|
|
||||||
|
## 谁与我们在一起?
|
||||||
|
|
||||||
|
这些项目基于 CLIProxyAPI:
|
||||||
|
|
||||||
|
### [vibeproxy](https://github.com/automazeio/vibeproxy)
|
||||||
|
|
||||||
|
一个原生 macOS 菜单栏应用,让您可以使用 Claude Code & ChatGPT 订阅服务和 AI 编程工具,无需 API 密钥。
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> 如果你开发了基于 CLIProxyAPI 的项目,请提交一个 PR(拉取请求)将其添加到此列表中。
|
||||||
|
|
||||||
|
|
||||||
## 许可证
|
## 许可证
|
||||||
|
|
||||||
此项目根据 MIT 许可证授权 - 有关详细信息,请参阅 [LICENSE](LICENSE) 文件。
|
此项目根据 MIT 许可证授权 - 有关详细信息,请参阅 [LICENSE](LICENSE) 文件。
|
||||||
|
|||||||
@@ -4,106 +4,32 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
configaccess "github.com/router-for-me/CLIProxyAPI/v6/internal/access/config_access"
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/cmd"
|
"github.com/router-for-me/CLIProxyAPI/v6/internal/cmd"
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||||
|
"github.com/router-for-me/CLIProxyAPI/v6/internal/logging"
|
||||||
_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator"
|
_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator"
|
||||||
|
"github.com/router-for-me/CLIProxyAPI/v6/internal/usage"
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||||
sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
|
sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"gopkg.in/natefinch/lumberjack.v2"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
Version = "dev"
|
Version = "dev"
|
||||||
Commit = "none"
|
Commit = "none"
|
||||||
BuildDate = "unknown"
|
BuildDate = "unknown"
|
||||||
logWriter *lumberjack.Logger
|
DefaultConfigPath = ""
|
||||||
ginInfoWriter *io.PipeWriter
|
|
||||||
ginErrorWriter *io.PipeWriter
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// LogFormatter defines a custom log format for logrus.
|
// init initializes the shared logger setup.
|
||||||
// This formatter adds timestamp, log level, and source location information
|
|
||||||
// to each log entry for better debugging and monitoring.
|
|
||||||
type LogFormatter struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
// Format renders a single log entry with custom formatting.
|
|
||||||
// It includes timestamp, log level, source file and line number, and the log message.
|
|
||||||
func (m *LogFormatter) Format(entry *log.Entry) ([]byte, error) {
|
|
||||||
var b *bytes.Buffer
|
|
||||||
if entry.Buffer != nil {
|
|
||||||
b = entry.Buffer
|
|
||||||
} else {
|
|
||||||
b = &bytes.Buffer{}
|
|
||||||
}
|
|
||||||
|
|
||||||
timestamp := entry.Time.Format("2006-01-02 15:04:05")
|
|
||||||
var newLog string
|
|
||||||
// Ensure message doesn't carry trailing newlines; formatter appends one.
|
|
||||||
msg := strings.TrimRight(entry.Message, "\r\n")
|
|
||||||
// Customize the log format to include timestamp, level, caller file/line, and message.
|
|
||||||
newLog = fmt.Sprintf("[%s] [%s] [%s:%d] %s\n", timestamp, entry.Level, filepath.Base(entry.Caller.File), entry.Caller.Line, msg)
|
|
||||||
|
|
||||||
b.WriteString(newLog)
|
|
||||||
return b.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// init initializes the logger configuration.
|
|
||||||
// It sets up the custom log formatter, enables caller reporting,
|
|
||||||
// and configures the log output destination.
|
|
||||||
func init() {
|
func init() {
|
||||||
logDir := "logs"
|
logging.SetupBaseLogger()
|
||||||
if err := os.MkdirAll(logDir, 0755); err != nil {
|
|
||||||
_, _ = fmt.Fprintf(os.Stderr, "failed to create log directory: %v\n", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
logWriter = &lumberjack.Logger{
|
|
||||||
Filename: filepath.Join(logDir, "main.log"),
|
|
||||||
MaxSize: 10,
|
|
||||||
MaxBackups: 0,
|
|
||||||
MaxAge: 0,
|
|
||||||
Compress: false,
|
|
||||||
}
|
|
||||||
|
|
||||||
log.SetOutput(logWriter)
|
|
||||||
// Enable reporting the caller function's file and line number.
|
|
||||||
log.SetReportCaller(true)
|
|
||||||
// Set the custom log formatter.
|
|
||||||
log.SetFormatter(&LogFormatter{})
|
|
||||||
|
|
||||||
ginInfoWriter = log.StandardLogger().Writer()
|
|
||||||
gin.DefaultWriter = ginInfoWriter
|
|
||||||
ginErrorWriter = log.StandardLogger().WriterLevel(log.ErrorLevel)
|
|
||||||
gin.DefaultErrorWriter = ginErrorWriter
|
|
||||||
gin.DebugPrintFunc = func(format string, values ...interface{}) {
|
|
||||||
// Trim trailing newlines from Gin's formatted messages to avoid blank lines.
|
|
||||||
// Gin's debug prints usually include a trailing "\n"; our formatter also appends one.
|
|
||||||
// Removing it here ensures a single newline per entry.
|
|
||||||
format = strings.TrimRight(format, "\r\n")
|
|
||||||
log.StandardLogger().Infof(format, values...)
|
|
||||||
}
|
|
||||||
log.RegisterExitHandler(func() {
|
|
||||||
if logWriter != nil {
|
|
||||||
_ = logWriter.Close()
|
|
||||||
}
|
|
||||||
if ginInfoWriter != nil {
|
|
||||||
_ = ginInfoWriter.Close()
|
|
||||||
}
|
|
||||||
if ginErrorWriter != nil {
|
|
||||||
_ = ginErrorWriter.Close()
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
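
The removed init() body above shows what used to happen inline: a custom formatter, caller reporting, gin writer redirection, and a lumberjack rolling file. The new code keeps only logging.SetupBaseLogger() here and defers file output to logging.ConfigureLogOutput(cfg.LoggingToFile) after the config is loaded. Below is a hypothetical sketch of such a helper, reconstructed from the removed lumberjack wiring; the real internal/logging implementation is not part of this diff and may differ.

```go
package main

import (
	"os"
	"path/filepath"

	log "github.com/sirupsen/logrus"
	"gopkg.in/natefinch/lumberjack.v2"
)

// configureLogOutputSketch mirrors the rolling-file setup removed from init() above.
func configureLogOutputSketch(loggingToFile bool) error {
	if !loggingToFile {
		log.SetOutput(os.Stdout) // assumption: plain stdout/stderr when file logging is off
		return nil
	}
	if err := os.MkdirAll("logs", 0o755); err != nil {
		return err
	}
	log.SetOutput(&lumberjack.Logger{
		Filename: filepath.Join("logs", "main.log"),
		MaxSize:  10, // megabytes per rotated file, as in the removed init() code
	})
	return nil
}

func main() {
	if err := configureLogOutputSketch(true); err != nil {
		panic(err)
	}
	log.Info("logging to logs/main.log")
}
```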
|
|
||||||
// main is the entry point of the application.
|
// main is the entry point of the application.
|
||||||
@@ -111,13 +37,13 @@ func init() {
|
|||||||
// service based on the provided flags (login, codex-login, or server mode).
|
// service based on the provided flags (login, codex-login, or server mode).
|
||||||
func main() {
|
func main() {
|
||||||
fmt.Printf("CLIProxyAPI Version: %s, Commit: %s, BuiltAt: %s\n", Version, Commit, BuildDate)
|
fmt.Printf("CLIProxyAPI Version: %s, Commit: %s, BuiltAt: %s\n", Version, Commit, BuildDate)
|
||||||
log.Infof("CLIProxyAPI Version: %s, Commit: %s, BuiltAt: %s", Version, Commit, BuildDate)
|
|
||||||
|
|
||||||
// Command-line flags to control the application's behavior.
|
// Command-line flags to control the application's behavior.
|
||||||
var login bool
|
var login bool
|
||||||
var codexLogin bool
|
var codexLogin bool
|
||||||
var claudeLogin bool
|
var claudeLogin bool
|
||||||
var qwenLogin bool
|
var qwenLogin bool
|
||||||
|
var iflowLogin bool
|
||||||
var geminiWebAuth bool
|
var geminiWebAuth bool
|
||||||
var noBrowser bool
|
var noBrowser bool
|
||||||
var projectID string
|
var projectID string
|
||||||
@@ -129,10 +55,11 @@ func main() {
|
|||||||
flag.BoolVar(&codexLogin, "codex-login", false, "Login to Codex using OAuth")
|
flag.BoolVar(&codexLogin, "codex-login", false, "Login to Codex using OAuth")
|
||||||
flag.BoolVar(&claudeLogin, "claude-login", false, "Login to Claude using OAuth")
|
flag.BoolVar(&claudeLogin, "claude-login", false, "Login to Claude using OAuth")
|
||||||
flag.BoolVar(&qwenLogin, "qwen-login", false, "Login to Qwen using OAuth")
|
flag.BoolVar(&qwenLogin, "qwen-login", false, "Login to Qwen using OAuth")
|
||||||
|
flag.BoolVar(&iflowLogin, "iflow-login", false, "Login to iFlow using OAuth")
|
||||||
flag.BoolVar(&geminiWebAuth, "gemini-web-auth", false, "Auth Gemini Web using cookies")
|
flag.BoolVar(&geminiWebAuth, "gemini-web-auth", false, "Auth Gemini Web using cookies")
|
||||||
flag.BoolVar(&noBrowser, "no-browser", false, "Don't open browser automatically for OAuth")
|
flag.BoolVar(&noBrowser, "no-browser", false, "Don't open browser automatically for OAuth")
|
||||||
flag.StringVar(&projectID, "project_id", "", "Project ID (Gemini only, not required)")
|
flag.StringVar(&projectID, "project_id", "", "Project ID (Gemini only, not required)")
|
||||||
flag.StringVar(&configPath, "config", "", "Configure File Path")
|
flag.StringVar(&configPath, "config", DefaultConfigPath, "Configure File Path")
|
||||||
flag.StringVar(&password, "password", "", "")
|
flag.StringVar(&password, "password", "", "")
|
||||||
|
|
||||||
flag.CommandLine.Usage = func() {
|
flag.CommandLine.Usage = func() {
|
||||||
@@ -143,7 +70,7 @@ func main() {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
s := fmt.Sprintf(" -%s", f.Name)
|
s := fmt.Sprintf(" -%s", f.Name)
|
||||||
name, usage := flag.UnquoteUsage(f)
|
name, unquoteUsage := flag.UnquoteUsage(f)
|
||||||
if name != "" {
|
if name != "" {
|
||||||
s += " " + name
|
s += " " + name
|
||||||
}
|
}
|
||||||
@@ -152,8 +79,8 @@ func main() {
|
|||||||
} else {
|
} else {
|
||||||
s += "\n "
|
s += "\n "
|
||||||
}
|
}
|
||||||
if usage != "" {
|
if unquoteUsage != "" {
|
||||||
s += usage
|
s += unquoteUsage
|
||||||
}
|
}
|
||||||
if f.DefValue != "" && f.DefValue != "false" && f.DefValue != "0" {
|
if f.DefValue != "" && f.DefValue != "false" && f.DefValue != "0" {
|
||||||
s += fmt.Sprintf(" (default %s)", f.DefValue)
|
s += fmt.Sprintf(" (default %s)", f.DefValue)
|
||||||
@@ -188,26 +115,21 @@ func main() {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("failed to load config: %v", err)
|
log.Fatalf("failed to load config: %v", err)
|
||||||
}
|
}
|
||||||
|
usage.SetStatisticsEnabled(cfg.UsageStatisticsEnabled)
|
||||||
|
|
||||||
|
if err = logging.ConfigureLogOutput(cfg.LoggingToFile); err != nil {
|
||||||
|
log.Fatalf("failed to configure log output: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("CLIProxyAPI Version: %s, Commit: %s, BuiltAt: %s", Version, Commit, BuildDate)
|
||||||
|
|
||||||
// Set the log level based on the configuration.
|
// Set the log level based on the configuration.
|
||||||
util.SetLogLevel(cfg)
|
util.SetLogLevel(cfg)
|
||||||
|
|
||||||
// Expand the tilde (~) in the auth directory path to the user's home directory.
|
if resolvedAuthDir, errResolveAuthDir := util.ResolveAuthDir(cfg.AuthDir); errResolveAuthDir != nil {
|
||||||
if strings.HasPrefix(cfg.AuthDir, "~") {
|
log.Fatalf("failed to resolve auth directory: %v", errResolveAuthDir)
|
||||||
home, errUserHomeDir := os.UserHomeDir()
|
|
||||||
if errUserHomeDir != nil {
|
|
||||||
log.Fatalf("failed to get home directory: %v", errUserHomeDir)
|
|
||||||
}
|
|
||||||
// Reconstruct the path by replacing the tilde with the user's home directory.
|
|
||||||
remainder := strings.TrimPrefix(cfg.AuthDir, "~")
|
|
||||||
remainder = strings.TrimLeft(remainder, "/\\")
|
|
||||||
if remainder == "" {
|
|
||||||
cfg.AuthDir = home
|
|
||||||
} else {
|
} else {
|
||||||
// Normalize any slash style in the remainder so Windows paths keep nested directories.
|
cfg.AuthDir = resolvedAuthDir
|
||||||
normalized := strings.ReplaceAll(remainder, "\\", "/")
|
|
||||||
cfg.AuthDir = filepath.Join(home, filepath.FromSlash(normalized))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
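
The tilde-expansion logic that used to live inline is now behind util.ResolveAuthDir, whose body is not shown in this diff. The sketch below is a hypothetical stand-in that mirrors the removed inline code (expand a leading ~, normalize slashes so Windows-style paths keep their nested directories); the real helper may differ.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// resolveAuthDirSketch is a hypothetical stand-in for util.ResolveAuthDir.
func resolveAuthDirSketch(dir string) (string, error) {
	if !strings.HasPrefix(dir, "~") {
		return dir, nil
	}
	home, err := os.UserHomeDir()
	if err != nil {
		return "", err
	}
	rest := strings.TrimLeft(strings.TrimPrefix(dir, "~"), "/\\")
	if rest == "" {
		return home, nil
	}
	// Normalize slashes so Windows-style input keeps nested directories.
	return filepath.Join(home, filepath.FromSlash(strings.ReplaceAll(rest, "\\", "/"))), nil
}

func main() {
	dir, err := resolveAuthDirSketch("~/.cli-proxy-api")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(dir)
}
```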
|
|
||||||
// Create login options to be used in authentication flows.
|
// Create login options to be used in authentication flows.
|
||||||
@@ -218,6 +140,9 @@ func main() {
|
|||||||
// Register the shared token store once so all components use the same persistence backend.
|
// Register the shared token store once so all components use the same persistence backend.
|
||||||
sdkAuth.RegisterTokenStore(sdkAuth.NewFileTokenStore())
|
sdkAuth.RegisterTokenStore(sdkAuth.NewFileTokenStore())
|
||||||
|
|
||||||
|
// Register built-in access providers before constructing services.
|
||||||
|
configaccess.Register()
|
||||||
|
|
||||||
// Handle different command modes based on the provided flags.
|
// Handle different command modes based on the provided flags.
|
||||||
|
|
||||||
if login {
|
if login {
|
||||||
@@ -231,6 +156,8 @@ func main() {
|
|||||||
cmd.DoClaudeLogin(cfg, options)
|
cmd.DoClaudeLogin(cfg, options)
|
||||||
} else if qwenLogin {
|
} else if qwenLogin {
|
||||||
cmd.DoQwenLogin(cfg, options)
|
cmd.DoQwenLogin(cfg, options)
|
||||||
|
} else if iflowLogin {
|
||||||
|
cmd.DoIFlowLogin(cfg, options)
|
||||||
} else if geminiWebAuth {
|
} else if geminiWebAuth {
|
||||||
cmd.DoGeminiWebAuth(cfg)
|
cmd.DoGeminiWebAuth(cfg)
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@@ -12,12 +12,26 @@ remote-management:
|
|||||||
# Leave empty to disable the Management API entirely (404 for all /v0/management routes).
|
# Leave empty to disable the Management API entirely (404 for all /v0/management routes).
|
||||||
secret-key: ""
|
secret-key: ""
|
||||||
|
|
||||||
|
# Disable the bundled management control panel asset download and HTTP route when true.
|
||||||
|
disable-control-panel: false
|
||||||
|
|
||||||
# Authentication directory (supports ~ for home directory)
|
# Authentication directory (supports ~ for home directory)
|
||||||
auth-dir: "~/.cli-proxy-api"
|
auth-dir: "~/.cli-proxy-api"
|
||||||
|
|
||||||
|
# API keys for authentication
|
||||||
|
api-keys:
|
||||||
|
- "your-api-key-1"
|
||||||
|
- "your-api-key-2"
|
||||||
|
|
||||||
# Enable debug logging
|
# Enable debug logging
|
||||||
debug: false
|
debug: false
|
||||||
|
|
||||||
|
# When true, write application logs to rotating files instead of stdout
|
||||||
|
logging-to-file: false
|
||||||
|
|
||||||
|
# When false, disable in-memory usage statistics aggregation
|
||||||
|
usage-statistics-enabled: false
|
||||||
|
|
||||||
# Proxy URL. Supports socks5/http/https protocols. Example: socks5://user:pass@192.168.1.1:1080/
|
# Proxy URL. Supports socks5/http/https protocols. Example: socks5://user:pass@192.168.1.1:1080/
|
||||||
proxy-url: ""
|
proxy-url: ""
|
||||||
|
|
||||||
@@ -29,58 +43,55 @@ quota-exceeded:
|
|||||||
switch-project: true # Whether to automatically switch to another project when a quota is exceeded
|
switch-project: true # Whether to automatically switch to another project when a quota is exceeded
|
||||||
switch-preview-model: true # Whether to automatically switch to a preview model when a quota is exceeded
|
switch-preview-model: true # Whether to automatically switch to a preview model when a quota is exceeded
|
||||||
|
|
||||||
# Request authentication providers
|
|
||||||
auth:
|
|
||||||
providers:
|
|
||||||
- name: "default"
|
|
||||||
type: "config-api-key"
|
|
||||||
api-keys:
|
|
||||||
- "your-api-key-1"
|
|
||||||
- "your-api-key-2"
|
|
||||||
|
|
||||||
# API keys for official Generative Language API
|
# API keys for official Generative Language API
|
||||||
generative-language-api-key:
|
#generative-language-api-key:
|
||||||
- "AIzaSy...01"
|
# - "AIzaSy...01"
|
||||||
- "AIzaSy...02"
|
# - "AIzaSy...02"
|
||||||
- "AIzaSy...03"
|
# - "AIzaSy...03"
|
||||||
- "AIzaSy...04"
|
# - "AIzaSy...04"
|
||||||
|
|
||||||
# Codex API keys
|
# Codex API keys
|
||||||
codex-api-key:
|
#codex-api-key:
|
||||||
- api-key: "sk-atSM..."
|
# - api-key: "sk-atSM..."
|
||||||
base-url: "https://www.example.com" # use the custom codex API endpoint
|
# base-url: "https://www.example.com" # use the custom codex API endpoint
|
||||||
|
# proxy-url: "socks5://proxy.example.com:1080" # optional: per-key proxy override
|
||||||
|
|
||||||
# Claude API keys
|
# Claude API keys
|
||||||
claude-api-key:
|
#claude-api-key:
|
||||||
- api-key: "sk-atSM..." # use the official claude API key, no need to set the base url
|
# - api-key: "sk-atSM..." # use the official claude API key, no need to set the base url
|
||||||
- api-key: "sk-atSM..."
|
# - api-key: "sk-atSM..."
|
||||||
base-url: "https://www.example.com" # use the custom claude API endpoint
|
# base-url: "https://www.example.com" # use the custom claude API endpoint
|
||||||
|
# proxy-url: "socks5://proxy.example.com:1080" # optional: per-key proxy override
|
||||||
|
|
||||||
# OpenAI compatibility providers
|
# OpenAI compatibility providers
|
||||||
openai-compatibility:
|
#openai-compatibility:
|
||||||
- name: "openrouter" # The name of the provider; it will be used in the user agent and other places.
|
# - name: "openrouter" # The name of the provider; it will be used in the user agent and other places.
|
||||||
base-url: "https://openrouter.ai/api/v1" # The base URL of the provider.
|
# base-url: "https://openrouter.ai/api/v1" # The base URL of the provider.
|
||||||
api-keys: # The API keys for the provider. Add multiple keys if needed. Omit if unauthenticated access is allowed.
|
# # New format with per-key proxy support (recommended):
|
||||||
- "sk-or-v1-...b780"
|
# api-key-entries:
|
||||||
- "sk-or-v1-...b781"
|
# - api-key: "sk-or-v1-...b780"
|
||||||
models: # The models supported by the provider.
|
# proxy-url: "socks5://proxy.example.com:1080" # optional: per-key proxy override
|
||||||
- name: "moonshotai/kimi-k2:free" # The actual model name.
|
# - api-key: "sk-or-v1-...b781" # without proxy-url
|
||||||
alias: "kimi-k2" # The alias used in the API.
|
# # Legacy format (still supported, but cannot specify proxy per key):
|
||||||
|
# # api-keys:
|
||||||
|
# # - "sk-or-v1-...b780"
|
||||||
|
# # - "sk-or-v1-...b781"
|
||||||
|
# models: # The models supported by the provider.
|
||||||
|
# - name: "moonshotai/kimi-k2:free" # The actual model name.
|
||||||
|
# alias: "kimi-k2" # The alias used in the API.
|
||||||
|
|
||||||
# Gemini Web settings
|
# Gemini Web settings
|
||||||
gemini-web:
|
#gemini-web:
|
||||||
# Conversation reuse: set to true to enable (default), false to disable.
|
# # Conversation reuse: set to true to enable (default), false to disable.
|
||||||
context: true
|
# context: true
|
||||||
# Maximum characters per single request to Gemini Web. Requests exceeding this
|
# # Maximum characters per single request to Gemini Web. Requests exceeding this
|
||||||
# size split into chunks. Only the last chunk carries files and yields the final answer.
|
# # size split into chunks. Only the last chunk carries files and yields the final answer.
|
||||||
max-chars-per-request: 1000000
|
# max-chars-per-request: 1000000
|
||||||
# Disable the short continuation hint appended to intermediate chunks
|
# # Disable the short continuation hint appended to intermediate chunks
|
||||||
# when splitting long prompts. Default is false (hint enabled by default).
|
# # when splitting long prompts. Default is false (hint enabled by default).
|
||||||
disable-continuation-hint: false
|
# disable-continuation-hint: false
|
||||||
# Code mode:
|
# # Gem selection (Gem Mode):
|
||||||
# - true: enable XML wrapping hint and attach the coding-partner Gem.
|
# # - "coding-partner": attach the predefined Coding partner Gem
|
||||||
# Thought merging (<think> into visible content) applies to STREAMING only;
|
# # - "writing-editor": attach the predefined Writing editor Gem
|
||||||
# non-stream responses keep reasoning/thought parts separate for clients
|
# # - empty: do not attach any Gem
|
||||||
# that expect explicit reasoning fields.
|
# gem-mode: ""
|
||||||
# - false: disable XML hint and keep <think> separate
|
|
||||||
code-mode: false
|
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ services:
|
|||||||
- "8085:8085"
|
- "8085:8085"
|
||||||
- "1455:1455"
|
- "1455:1455"
|
||||||
- "54545:54545"
|
- "54545:54545"
|
||||||
|
- "11451:11451"
|
||||||
volumes:
|
volumes:
|
||||||
- ./config.yaml:/CLIProxyAPI/config.yaml
|
- ./config.yaml:/CLIProxyAPI/config.yaml
|
||||||
- ./auths:/root/.cli-proxy-api
|
- ./auths:/root/.cli-proxy-api
|
||||||
|
|||||||
@@ -160,11 +160,7 @@ func main() {
|
|||||||
if dirSetter, ok := tokenStore.(interface{ SetBaseDir(string) }); ok {
|
if dirSetter, ok := tokenStore.(interface{ SetBaseDir(string) }); ok {
|
||||||
dirSetter.SetBaseDir(cfg.AuthDir)
|
dirSetter.SetBaseDir(cfg.AuthDir)
|
||||||
}
|
}
|
||||||
store, ok := tokenStore.(coreauth.Store)
|
core := coreauth.NewManager(tokenStore, nil, nil)
|
||||||
if !ok {
|
|
||||||
panic("token store does not implement coreauth.Store")
|
|
||||||
}
|
|
||||||
core := coreauth.NewManager(store, nil, nil)
|
|
||||||
core.RegisterExecutor(MyExecutor{})
|
core.RegisterExecutor(MyExecutor{})
|
||||||
|
|
||||||
hooks := cliproxy.Hooks{
|
hooks := cliproxy.Hooks{
|
||||||
|
|||||||
@@ -1,27 +1,33 @@
|
|||||||
package configapikey
|
package configaccess
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
|
||||||
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
||||||
|
sdkconfig "github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var registerOnce sync.Once
|
||||||
|
|
||||||
|
// Register ensures the config-access provider is available to the access manager.
|
||||||
|
func Register() {
|
||||||
|
registerOnce.Do(func() {
|
||||||
|
sdkaccess.RegisterProvider(sdkconfig.AccessProviderTypeConfigAPIKey, newProvider)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
type provider struct {
|
type provider struct {
|
||||||
name string
|
name string
|
||||||
keys map[string]struct{}
|
keys map[string]struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func newProvider(cfg *sdkconfig.AccessProvider, _ *sdkconfig.SDKConfig) (sdkaccess.Provider, error) {
|
||||||
sdkaccess.RegisterProvider(config.AccessProviderTypeConfigAPIKey, newProvider)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newProvider(cfg *config.AccessProvider, _ *config.Config) (sdkaccess.Provider, error) {
|
|
||||||
name := cfg.Name
|
name := cfg.Name
|
||||||
if name == "" {
|
if name == "" {
|
||||||
name = config.DefaultAccessProviderName
|
name = sdkconfig.DefaultAccessProviderName
|
||||||
}
|
}
|
||||||
keys := make(map[string]struct{}, len(cfg.APIKeys))
|
keys := make(map[string]struct{}, len(cfg.APIKeys))
|
||||||
for _, key := range cfg.APIKeys {
|
for _, key := range cfg.APIKeys {
|
||||||
@@ -35,7 +41,7 @@ func newProvider(cfg *config.AccessProvider, _ *config.Config) (sdkaccess.Provid
|
|||||||
|
|
||||||
func (p *provider) Identifier() string {
|
func (p *provider) Identifier() string {
|
||||||
if p == nil || p.name == "" {
|
if p == nil || p.name == "" {
|
||||||
return config.DefaultAccessProviderName
|
return sdkconfig.DefaultAccessProviderName
|
||||||
}
|
}
|
||||||
return p.name
|
return p.name
|
||||||
}
|
}
|
||||||
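
To show how the renamed configaccess package is meant to be wired, here is a hedged sketch that registers the provider factory and then builds a provider from an inline key list, the same path the server takes for the `api-keys` shorthand. The empty sdkconfig.SDKConfig literal is an assumption, and because internal/... packages are only importable from inside this repository, the snippet would have to live in-tree.

```go
package main

import (
	"fmt"

	configaccess "github.com/router-for-me/CLIProxyAPI/v6/internal/access/config_access"
	sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
	sdkconfig "github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
)

func main() {
	// Make the config-api-key provider factory available to the access manager.
	configaccess.Register()

	providerCfg := &sdkconfig.AccessProvider{
		Type:    sdkconfig.AccessProviderTypeConfigAPIKey,
		APIKeys: []string{"your-api-key-1"},
	}
	provider, err := sdkaccess.BuildProvider(providerCfg, &sdkconfig.SDKConfig{})
	if err != nil {
		panic(err)
	}
	fmt.Println("registered provider:", provider.Identifier())
}
```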
270 internal/access/reconcile.go (new file)
@@ -0,0 +1,270 @@
|
|||||||
|
package access
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||||
|
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
||||||
|
sdkConfig "github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ReconcileProviders builds the desired provider list by reusing existing providers when possible
|
||||||
|
// and creating or removing providers only when their configuration changed. It returns the final
|
||||||
|
// ordered provider slice along with the identifiers of providers that were added, updated, or
|
||||||
|
// removed compared to the previous configuration.
|
||||||
|
func ReconcileProviders(oldCfg, newCfg *config.Config, existing []sdkaccess.Provider) (result []sdkaccess.Provider, added, updated, removed []string, err error) {
|
||||||
|
if newCfg == nil {
|
||||||
|
return nil, nil, nil, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
existingMap := make(map[string]sdkaccess.Provider, len(existing))
|
||||||
|
for _, provider := range existing {
|
||||||
|
if provider == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
existingMap[provider.Identifier()] = provider
|
||||||
|
}
|
||||||
|
|
||||||
|
oldCfgMap := accessProviderMap(oldCfg)
|
||||||
|
newEntries := collectProviderEntries(newCfg)
|
||||||
|
|
||||||
|
result = make([]sdkaccess.Provider, 0, len(newEntries))
|
||||||
|
finalIDs := make(map[string]struct{}, len(newEntries))
|
||||||
|
|
||||||
|
isInlineProvider := func(id string) bool {
|
||||||
|
return strings.EqualFold(id, sdkConfig.DefaultAccessProviderName)
|
||||||
|
}
|
||||||
|
appendChange := func(list *[]string, id string) {
|
||||||
|
if isInlineProvider(id) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
*list = append(*list, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, providerCfg := range newEntries {
|
||||||
|
key := providerIdentifier(providerCfg)
|
||||||
|
if key == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
forceRebuild := strings.EqualFold(strings.TrimSpace(providerCfg.Type), sdkConfig.AccessProviderTypeConfigAPIKey)
|
||||||
|
if oldCfgProvider, ok := oldCfgMap[key]; ok {
|
||||||
|
isAliased := oldCfgProvider == providerCfg
|
||||||
|
if !forceRebuild && !isAliased && providerConfigEqual(oldCfgProvider, providerCfg) {
|
||||||
|
if existingProvider, okExisting := existingMap[key]; okExisting {
|
||||||
|
result = append(result, existingProvider)
|
||||||
|
finalIDs[key] = struct{}{}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
provider, buildErr := sdkaccess.BuildProvider(providerCfg, &newCfg.SDKConfig)
|
||||||
|
if buildErr != nil {
|
||||||
|
return nil, nil, nil, nil, buildErr
|
||||||
|
}
|
||||||
|
if _, ok := oldCfgMap[key]; ok {
|
||||||
|
if _, existed := existingMap[key]; existed {
|
||||||
|
appendChange(&updated, key)
|
||||||
|
} else {
|
||||||
|
appendChange(&added, key)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
appendChange(&added, key)
|
||||||
|
}
|
||||||
|
result = append(result, provider)
|
||||||
|
finalIDs[key] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(result) == 0 {
|
||||||
|
if inline := sdkConfig.MakeInlineAPIKeyProvider(newCfg.APIKeys); inline != nil {
|
||||||
|
key := providerIdentifier(inline)
|
||||||
|
if key != "" {
|
||||||
|
if oldCfgProvider, ok := oldCfgMap[key]; ok {
|
||||||
|
if providerConfigEqual(oldCfgProvider, inline) {
|
||||||
|
if existingProvider, okExisting := existingMap[key]; okExisting {
|
||||||
|
result = append(result, existingProvider)
|
||||||
|
finalIDs[key] = struct{}{}
|
||||||
|
goto inlineDone
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
provider, buildErr := sdkaccess.BuildProvider(inline, &newCfg.SDKConfig)
|
||||||
|
if buildErr != nil {
|
||||||
|
return nil, nil, nil, nil, buildErr
|
||||||
|
}
|
||||||
|
if _, existed := existingMap[key]; existed {
|
||||||
|
appendChange(&updated, key)
|
||||||
|
} else if _, hadOld := oldCfgMap[key]; hadOld {
|
||||||
|
appendChange(&updated, key)
|
||||||
|
} else {
|
||||||
|
appendChange(&added, key)
|
||||||
|
}
|
||||||
|
result = append(result, provider)
|
||||||
|
finalIDs[key] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
inlineDone:
|
||||||
|
}
|
||||||
|
|
||||||
|
removedSet := make(map[string]struct{})
|
||||||
|
for id := range existingMap {
|
||||||
|
if _, ok := finalIDs[id]; !ok {
|
||||||
|
if isInlineProvider(id) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
removedSet[id] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
removed = make([]string, 0, len(removedSet))
|
||||||
|
for id := range removedSet {
|
||||||
|
removed = append(removed, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Strings(added)
|
||||||
|
sort.Strings(updated)
|
||||||
|
sort.Strings(removed)
|
||||||
|
|
||||||
|
return result, added, updated, removed, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplyAccessProviders reconciles the configured access providers against the
|
||||||
|
// currently registered providers and updates the manager. It logs a concise
|
||||||
|
// summary of the detected changes and returns whether any provider changed.
|
||||||
|
func ApplyAccessProviders(manager *sdkaccess.Manager, oldCfg, newCfg *config.Config) (bool, error) {
|
||||||
|
if manager == nil || newCfg == nil {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
existing := manager.Providers()
|
||||||
|
providers, added, updated, removed, err := ReconcileProviders(oldCfg, newCfg, existing)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("failed to reconcile request auth providers: %v", err)
|
||||||
|
return false, fmt.Errorf("reconciling access providers: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
manager.SetProviders(providers)
|
||||||
|
|
||||||
|
if len(added)+len(updated)+len(removed) > 0 {
|
||||||
|
log.Debugf("auth providers reconciled (added=%d updated=%d removed=%d)", len(added), len(updated), len(removed))
|
||||||
|
log.Debugf("auth providers changes details - added=%v updated=%v removed=%v", added, updated, removed)
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("auth providers unchanged after config update")
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func accessProviderMap(cfg *config.Config) map[string]*sdkConfig.AccessProvider {
|
||||||
|
result := make(map[string]*sdkConfig.AccessProvider)
|
||||||
|
if cfg == nil {
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
for i := range cfg.Access.Providers {
|
||||||
|
providerCfg := &cfg.Access.Providers[i]
|
||||||
|
if providerCfg.Type == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
key := providerIdentifier(providerCfg)
|
||||||
|
if key == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
result[key] = providerCfg
|
||||||
|
}
|
||||||
|
if len(result) == 0 && len(cfg.APIKeys) > 0 {
|
||||||
|
if provider := sdkConfig.MakeInlineAPIKeyProvider(cfg.APIKeys); provider != nil {
|
||||||
|
if key := providerIdentifier(provider); key != "" {
|
||||||
|
result[key] = provider
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func collectProviderEntries(cfg *config.Config) []*sdkConfig.AccessProvider {
|
||||||
|
entries := make([]*sdkConfig.AccessProvider, 0, len(cfg.Access.Providers))
|
||||||
|
for i := range cfg.Access.Providers {
|
||||||
|
providerCfg := &cfg.Access.Providers[i]
|
||||||
|
if providerCfg.Type == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if key := providerIdentifier(providerCfg); key != "" {
|
||||||
|
entries = append(entries, providerCfg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(entries) == 0 && len(cfg.APIKeys) > 0 {
|
||||||
|
if inline := sdkConfig.MakeInlineAPIKeyProvider(cfg.APIKeys); inline != nil {
|
||||||
|
entries = append(entries, inline)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return entries
|
||||||
|
}
|
||||||
|
|
||||||
|
func providerIdentifier(provider *sdkConfig.AccessProvider) string {
|
||||||
|
if provider == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
if name := strings.TrimSpace(provider.Name); name != "" {
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
typ := strings.TrimSpace(provider.Type)
|
||||||
|
if typ == "" {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
if strings.EqualFold(typ, sdkConfig.AccessProviderTypeConfigAPIKey) {
|
||||||
|
return sdkConfig.DefaultAccessProviderName
|
||||||
|
}
|
||||||
|
return typ
|
||||||
|
}
|
||||||
|
|
||||||
|
func providerConfigEqual(a, b *sdkConfig.AccessProvider) bool {
|
||||||
|
if a == nil || b == nil {
|
||||||
|
return a == nil && b == nil
|
||||||
|
}
|
||||||
|
if !strings.EqualFold(strings.TrimSpace(a.Type), strings.TrimSpace(b.Type)) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if strings.TrimSpace(a.SDK) != strings.TrimSpace(b.SDK) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !stringSetEqual(a.APIKeys, b.APIKeys) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(a.Config) != len(b.Config) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(a.Config) > 0 && !reflect.DeepEqual(a.Config, b.Config) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func stringSetEqual(a, b []string) bool {
|
||||||
|
if len(a) != len(b) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(a) == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
seen := make(map[string]int, len(a))
|
||||||
|
for _, val := range a {
|
||||||
|
seen[val]++
|
||||||
|
}
|
||||||
|
for _, val := range b {
|
||||||
|
count := seen[val]
|
||||||
|
if count == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if count == 1 {
|
||||||
|
delete(seen, val)
|
||||||
|
} else {
|
||||||
|
seen[val] = count - 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return len(seen) == 0
|
||||||
|
}
|
||||||
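
ApplyAccessProviders is the piece most callers will touch; a minimal sketch of the expected call site on a config reload follows. The surrounding reload plumbing (watcher, old/new config handoff) is assumed and simplified here, and the snippet would need to live inside this repository because it imports internal packages.

```go
package reloadexample

import (
	"github.com/router-for-me/CLIProxyAPI/v6/internal/access"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
	log "github.com/sirupsen/logrus"
)

// onConfigReload reconciles request-auth providers after a configuration change.
func onConfigReload(manager *sdkaccess.Manager, oldCfg, newCfg *config.Config) {
	changed, err := access.ApplyAccessProviders(manager, oldCfg, newCfg)
	if err != nil {
		log.Errorf("access provider reload failed: %v", err)
		return
	}
	if changed {
		log.Info("request auth providers updated")
	}
}
```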
File diff suppressed because it is too large
@@ -12,6 +12,22 @@ func (h *Handler) GetConfig(c *gin.Context) {
|
|||||||
func (h *Handler) GetDebug(c *gin.Context) { c.JSON(200, gin.H{"debug": h.cfg.Debug}) }
|
func (h *Handler) GetDebug(c *gin.Context) { c.JSON(200, gin.H{"debug": h.cfg.Debug}) }
|
||||||
func (h *Handler) PutDebug(c *gin.Context) { h.updateBoolField(c, func(v bool) { h.cfg.Debug = v }) }
|
func (h *Handler) PutDebug(c *gin.Context) { h.updateBoolField(c, func(v bool) { h.cfg.Debug = v }) }
|
||||||
|
|
||||||
|
// UsageStatisticsEnabled
|
||||||
|
func (h *Handler) GetUsageStatisticsEnabled(c *gin.Context) {
|
||||||
|
c.JSON(200, gin.H{"usage-statistics-enabled": h.cfg.UsageStatisticsEnabled})
|
||||||
|
}
|
||||||
|
func (h *Handler) PutUsageStatisticsEnabled(c *gin.Context) {
|
||||||
|
h.updateBoolField(c, func(v bool) { h.cfg.UsageStatisticsEnabled = v })
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoggingToFile
|
||||||
|
func (h *Handler) GetLoggingToFile(c *gin.Context) {
|
||||||
|
c.JSON(200, gin.H{"logging-to-file": h.cfg.LoggingToFile})
|
||||||
|
}
|
||||||
|
func (h *Handler) PutLoggingToFile(c *gin.Context) {
|
||||||
|
h.updateBoolField(c, func(v bool) { h.cfg.LoggingToFile = v })
|
||||||
|
}
|
||||||
|
|
||||||
// Request log
|
// Request log
|
||||||
func (h *Handler) GetRequestLog(c *gin.Context) { c.JSON(200, gin.H{"request-log": h.cfg.RequestLog}) }
|
func (h *Handler) GetRequestLog(c *gin.Context) { c.JSON(200, gin.H{"request-log": h.cfg.RequestLog}) }
|
||||||
func (h *Handler) PutRequestLog(c *gin.Context) {
|
func (h *Handler) PutRequestLog(c *gin.Context) {
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ package management
|
|||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||||
@@ -106,13 +107,16 @@ func (h *Handler) deleteFromStringList(c *gin.Context, target *[]string, after f
|
|||||||
// api-keys
|
// api-keys
|
||||||
func (h *Handler) GetAPIKeys(c *gin.Context) { c.JSON(200, gin.H{"api-keys": h.cfg.APIKeys}) }
|
func (h *Handler) GetAPIKeys(c *gin.Context) { c.JSON(200, gin.H{"api-keys": h.cfg.APIKeys}) }
|
||||||
func (h *Handler) PutAPIKeys(c *gin.Context) {
|
func (h *Handler) PutAPIKeys(c *gin.Context) {
|
||||||
h.putStringList(c, func(v []string) { config.SyncInlineAPIKeys(h.cfg, v) }, nil)
|
h.putStringList(c, func(v []string) {
|
||||||
|
h.cfg.APIKeys = append([]string(nil), v...)
|
||||||
|
h.cfg.Access.Providers = nil
|
||||||
|
}, nil)
|
||||||
}
|
}
|
||||||
func (h *Handler) PatchAPIKeys(c *gin.Context) {
|
func (h *Handler) PatchAPIKeys(c *gin.Context) {
|
||||||
h.patchStringList(c, &h.cfg.APIKeys, func() { config.SyncInlineAPIKeys(h.cfg, h.cfg.APIKeys) })
|
h.patchStringList(c, &h.cfg.APIKeys, func() { h.cfg.Access.Providers = nil })
|
||||||
}
|
}
|
||||||
func (h *Handler) DeleteAPIKeys(c *gin.Context) {
|
func (h *Handler) DeleteAPIKeys(c *gin.Context) {
|
||||||
h.deleteFromStringList(c, &h.cfg.APIKeys, func() { config.SyncInlineAPIKeys(h.cfg, h.cfg.APIKeys) })
|
h.deleteFromStringList(c, &h.cfg.APIKeys, func() { h.cfg.Access.Providers = nil })
|
||||||
}
|
}
|
||||||
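
PutAPIKeys now stores a defensive copy of the incoming key list via `append([]string(nil), v...)` and clears Access.Providers so the inline keys become authoritative again. The self-contained snippet below only illustrates why the copy matters; the variable names are made up for the demo.

```go
package main

import "fmt"

func main() {
	incoming := []string{"key-1", "key-2"}

	aliased := incoming                          // shares the caller's backing array
	copied := append([]string(nil), incoming...) // independent copy, as in PutAPIKeys

	incoming[0] = "mutated"
	fmt.Println(aliased[0]) // "mutated" — later changes would leak into the stored config
	fmt.Println(copied[0])  // "key-1"  — the stored config stays stable
}
```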
|
|
||||||
// generative-language-api-key
|
// generative-language-api-key
|
||||||
@@ -201,7 +205,7 @@ func (h *Handler) DeleteClaudeKey(c *gin.Context) {
|
|||||||
|
|
||||||
// openai-compatibility: []OpenAICompatibility
|
// openai-compatibility: []OpenAICompatibility
|
||||||
func (h *Handler) GetOpenAICompat(c *gin.Context) {
|
func (h *Handler) GetOpenAICompat(c *gin.Context) {
|
||||||
c.JSON(200, gin.H{"openai-compatibility": h.cfg.OpenAICompatibility})
|
c.JSON(200, gin.H{"openai-compatibility": normalizedOpenAICompatibilityEntries(h.cfg.OpenAICompatibility)})
|
||||||
}
|
}
|
||||||
func (h *Handler) PutOpenAICompat(c *gin.Context) {
|
func (h *Handler) PutOpenAICompat(c *gin.Context) {
|
||||||
data, err := c.GetRawData()
|
data, err := c.GetRawData()
|
||||||
@@ -220,6 +224,9 @@ func (h *Handler) PutOpenAICompat(c *gin.Context) {
|
|||||||
}
|
}
|
||||||
arr = obj.Items
|
arr = obj.Items
|
||||||
}
|
}
|
||||||
|
for i := range arr {
|
||||||
|
normalizeOpenAICompatibilityEntry(&arr[i])
|
||||||
|
}
|
||||||
h.cfg.OpenAICompatibility = arr
|
h.cfg.OpenAICompatibility = arr
|
||||||
h.persist(c)
|
h.persist(c)
|
||||||
}
|
}
|
||||||
@@ -233,6 +240,7 @@ func (h *Handler) PatchOpenAICompat(c *gin.Context) {
|
|||||||
c.JSON(400, gin.H{"error": "invalid body"})
|
c.JSON(400, gin.H{"error": "invalid body"})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
normalizeOpenAICompatibilityEntry(body.Value)
|
||||||
if body.Index != nil && *body.Index >= 0 && *body.Index < len(h.cfg.OpenAICompatibility) {
|
if body.Index != nil && *body.Index >= 0 && *body.Index < len(h.cfg.OpenAICompatibility) {
|
||||||
h.cfg.OpenAICompatibility[*body.Index] = *body.Value
|
h.cfg.OpenAICompatibility[*body.Index] = *body.Value
|
||||||
h.persist(c)
|
h.persist(c)
|
||||||
@@ -346,3 +354,51 @@ func (h *Handler) DeleteCodexKey(c *gin.Context) {
|
|||||||
}
|
}
|
||||||
c.JSON(400, gin.H{"error": "missing api-key or index"})
|
c.JSON(400, gin.H{"error": "missing api-key or index"})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func normalizeOpenAICompatibilityEntry(entry *config.OpenAICompatibility) {
|
||||||
|
if entry == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
existing := make(map[string]struct{}, len(entry.APIKeyEntries))
|
||||||
|
for i := range entry.APIKeyEntries {
|
||||||
|
trimmed := strings.TrimSpace(entry.APIKeyEntries[i].APIKey)
|
||||||
|
entry.APIKeyEntries[i].APIKey = trimmed
|
||||||
|
if trimmed != "" {
|
||||||
|
existing[trimmed] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(entry.APIKeys) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, legacyKey := range entry.APIKeys {
|
||||||
|
trimmed := strings.TrimSpace(legacyKey)
|
||||||
|
if trimmed == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok := existing[trimmed]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
entry.APIKeyEntries = append(entry.APIKeyEntries, config.OpenAICompatibilityAPIKey{APIKey: trimmed})
|
||||||
|
existing[trimmed] = struct{}{}
|
||||||
|
}
|
||||||
|
entry.APIKeys = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func normalizedOpenAICompatibilityEntries(entries []config.OpenAICompatibility) []config.OpenAICompatibility {
|
||||||
|
if len(entries) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := make([]config.OpenAICompatibility, len(entries))
|
||||||
|
for i := range entries {
|
||||||
|
copyEntry := entries[i]
|
||||||
|
if len(copyEntry.APIKeyEntries) > 0 {
|
||||||
|
copyEntry.APIKeyEntries = append([]config.OpenAICompatibilityAPIKey(nil), copyEntry.APIKeyEntries...)
|
||||||
|
}
|
||||||
|
if len(copyEntry.APIKeys) > 0 {
|
||||||
|
copyEntry.APIKeys = append([]string(nil), copyEntry.APIKeys...)
|
||||||
|
}
|
||||||
|
normalizeOpenAICompatibilityEntry(©Entry)
|
||||||
|
out[i] = copyEntry
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
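
The normalization helpers above migrate legacy `api-keys` entries into `api-key-entries` while preserving per-key proxy settings. The following self-contained sketch re-implements that behavior with trimmed-down, hypothetical stand-in types so the transformation is easy to run and inspect; the real types live in internal/config.

```go
package main

import (
	"fmt"
	"strings"
)

// Hypothetical stand-ins for the real config types, for illustration only.
type apiKeyEntry struct {
	APIKey   string
	ProxyURL string
}

type compatEntry struct {
	APIKeys       []string      // legacy flat list
	APIKeyEntries []apiKeyEntry // new per-key entries
}

// normalize mirrors normalizeOpenAICompatibilityEntry above: trim entry keys,
// migrate non-duplicate legacy keys into APIKeyEntries, then drop the legacy list.
func normalize(e *compatEntry) {
	seen := make(map[string]struct{}, len(e.APIKeyEntries))
	for i := range e.APIKeyEntries {
		k := strings.TrimSpace(e.APIKeyEntries[i].APIKey)
		e.APIKeyEntries[i].APIKey = k
		if k != "" {
			seen[k] = struct{}{}
		}
	}
	for _, legacy := range e.APIKeys {
		k := strings.TrimSpace(legacy)
		if k == "" {
			continue
		}
		if _, ok := seen[k]; ok {
			continue
		}
		e.APIKeyEntries = append(e.APIKeyEntries, apiKeyEntry{APIKey: k})
		seen[k] = struct{}{}
	}
	e.APIKeys = nil
}

func main() {
	e := compatEntry{
		APIKeys:       []string{" sk-or-v1-...b780 ", "sk-or-v1-...b781"},
		APIKeyEntries: []apiKeyEntry{{APIKey: "sk-or-v1-...b781", ProxyURL: "socks5://proxy.example.com:1080"}},
	}
	normalize(&e)
	fmt.Printf("%+v\n", e.APIKeyEntries) // b781 keeps its proxy; b780 is migrated without one
}
```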
|||||||
@@ -33,7 +33,7 @@ type Handler struct {
|
|||||||
failedAttempts map[string]*attemptInfo // keyed by client IP
|
failedAttempts map[string]*attemptInfo // keyed by client IP
|
||||||
authManager *coreauth.Manager
|
authManager *coreauth.Manager
|
||||||
usageStats *usage.RequestStatistics
|
usageStats *usage.RequestStatistics
|
||||||
tokenStore sdkAuth.TokenStore
|
tokenStore coreauth.Store
|
||||||
|
|
||||||
localPassword string
|
localPassword string
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ package middleware
 import (
 	"bytes"
 	"io"
+	"strings"

 	"github.com/gin-gonic/gin"
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/logging"
@@ -17,6 +18,12 @@ import (
 // logger, the middleware has minimal overhead.
 func RequestLoggingMiddleware(logger logging.RequestLogger) gin.HandlerFunc {
 	return func(c *gin.Context) {
+		path := c.Request.URL.Path
+		if strings.HasPrefix(path, "/v0/management") || path == "/keep-alive" {
+			c.Next()
+			return
+		}
+
 		// Early return if logging is disabled (zero overhead)
 		if !logger.IsEnabled() {
 			c.Next()
@@ -6,34 +6,45 @@ package api

 import (
 	"context"
+	"crypto/subtle"
 	"errors"
 	"fmt"
 	"net/http"
 	"os"
 	"path/filepath"
 	"strings"
+	"sync/atomic"
+	"time"

 	"github.com/gin-gonic/gin"
-	"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers"
-	"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers/claude"
-	"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers/gemini"
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/access"
 	managementHandlers "github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers/management"
-	"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers/openai"
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/api/middleware"
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/logging"
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/managementasset"
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/usage"
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
 	sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
+	"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
+	"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers/claude"
+	"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers/gemini"
+	"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers/openai"
 	"github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
 	log "github.com/sirupsen/logrus"
 )

+const oauthCallbackSuccessHTML = `<html><head><meta charset="utf-8"><title>Authentication successful</title><script>setTimeout(function(){window.close();},5000);</script></head><body><h1>Authentication successful!</h1><p>You can close this window.</p><p>This window will close automatically in 5 seconds.</p></body></html>`
+
 type serverOptionConfig struct {
 	extraMiddleware []gin.HandlerFunc
 	engineConfigurator func(*gin.Engine)
 	routerConfigurator func(*gin.Engine, *handlers.BaseAPIHandler, *config.Config)
 	requestLoggerFactory func(*config.Config, string) logging.RequestLogger
 	localPassword string
+	keepAliveEnabled bool
+	keepAliveTimeout time.Duration
+	keepAliveOnTimeout func()
 }

 // ServerOption customises HTTP server construction.
@@ -71,6 +82,18 @@ func WithLocalManagementPassword(password string) ServerOption {
 	}
 }

+// WithKeepAliveEndpoint enables a keep-alive endpoint with the provided timeout and callback.
+func WithKeepAliveEndpoint(timeout time.Duration, onTimeout func()) ServerOption {
+	return func(cfg *serverOptionConfig) {
+		if timeout <= 0 || onTimeout == nil {
+			return
+		}
+		cfg.keepAliveEnabled = true
+		cfg.keepAliveTimeout = timeout
+		cfg.keepAliveOnTimeout = onTimeout
+	}
+}
+
 // WithRequestLoggerFactory customises request logger creation.
 func WithRequestLoggerFactory(factory func(*config.Config, string) logging.RequestLogger) ServerOption {
 	return func(cfg *serverOptionConfig) {
@@ -105,6 +128,19 @@ type Server struct {

 	// management handler
 	mgmt *managementHandlers.Handler
+
+	// managementRoutesRegistered tracks whether the management routes have been attached to the engine.
+	managementRoutesRegistered atomic.Bool
+	// managementRoutesEnabled controls whether management endpoints serve real handlers.
+	managementRoutesEnabled atomic.Bool
+
+	localPassword string
+
+	keepAliveEnabled bool
+	keepAliveTimeout time.Duration
+	keepAliveOnTimeout func()
+	keepAliveHeartbeat chan struct{}
+	keepAliveStop chan struct{}
 }

 // NewServer creates and initializes a new API server instance.
@@ -161,19 +197,20 @@ func NewServer(cfg *config.Config, authManager *auth.Manager, accessManager *sdk
 	// Create server instance
 	s := &Server{
 		engine: engine,
-		handlers: handlers.NewBaseAPIHandlers(cfg, authManager),
+		handlers: handlers.NewBaseAPIHandlers(&cfg.SDKConfig, authManager),
 		cfg: cfg,
 		accessManager: accessManager,
 		requestLogger: requestLogger,
 		loggerToggle: toggle,
 		configFilePath: configFilePath,
 	}
-	s.applyAccessConfig(cfg)
+	s.applyAccessConfig(nil, cfg)
 	// Initialize management handler
 	s.mgmt = managementHandlers.NewHandler(cfg, configFilePath, authManager)
 	if optionState.localPassword != "" {
 		s.mgmt.SetLocalPassword(optionState.localPassword)
 	}
+	s.localPassword = optionState.localPassword

 	// Setup routes
 	s.setupRoutes()
@@ -181,6 +218,16 @@ func NewServer(cfg *config.Config, authManager *auth.Manager, accessManager *sdk
 		optionState.routerConfigurator(engine, s.handlers, cfg)
 	}

+	// Register management routes only when a secret is present at startup.
+	s.managementRoutesEnabled.Store(cfg.RemoteManagement.SecretKey != "")
+	if cfg.RemoteManagement.SecretKey != "" {
+		s.registerManagementRoutes()
+	}
+
+	if optionState.keepAliveEnabled {
+		s.enableKeepAlive(optionState.keepAliveTimeout, optionState.keepAliveOnTimeout)
+	}
+
 	// Create HTTP server
 	s.server = &http.Server{
 		Addr: fmt.Sprintf(":%d", cfg.Port),
@@ -193,6 +240,7 @@ func NewServer(cfg *config.Config, authManager *auth.Manager, accessManager *sdk
 // setupRoutes configures the API routes for the server.
 // It defines the endpoints and associates them with their respective handlers.
 func (s *Server) setupRoutes() {
+	s.engine.GET("/management.html", s.serveManagementControlPanel)
 	openaiHandlers := openai.NewOpenAIAPIHandler(s.handlers)
 	geminiHandlers := gemini.NewGeminiAPIHandler(s.handlers)
 	geminiCLIHandlers := gemini.NewGeminiCLIAPIHandler(s.handlers)
@@ -247,7 +295,7 @@ func (s *Server) setupRoutes() {
 			_ = os.WriteFile(file, []byte(fmt.Sprintf(`{"code":"%s","state":"%s","error":"%s"}`, code, state, errStr)), 0o600)
 		}
 		c.Header("Content-Type", "text/html; charset=utf-8")
-		c.String(http.StatusOK, "<html><body><h1>Authentication successful!</h1><p>You can close this window.</p></body></html>")
+		c.String(http.StatusOK, oauthCallbackSuccessHTML)
 	})

 	s.engine.GET("/codex/callback", func(c *gin.Context) {
@@ -259,7 +307,7 @@ func (s *Server) setupRoutes() {
 			_ = os.WriteFile(file, []byte(fmt.Sprintf(`{"code":"%s","state":"%s","error":"%s"}`, code, state, errStr)), 0o600)
 		}
 		c.Header("Content-Type", "text/html; charset=utf-8")
-		c.String(http.StatusOK, "<html><body><h1>Authentication successful!</h1><p>You can close this window.</p></body></html>")
+		c.String(http.StatusOK, oauthCallbackSuccessHTML)
 	})

 	s.engine.GET("/google/callback", func(c *gin.Context) {
@@ -271,14 +319,36 @@ func (s *Server) setupRoutes() {
 			_ = os.WriteFile(file, []byte(fmt.Sprintf(`{"code":"%s","state":"%s","error":"%s"}`, code, state, errStr)), 0o600)
 		}
 		c.Header("Content-Type", "text/html; charset=utf-8")
-		c.String(http.StatusOK, "<html><body><h1>Authentication successful!</h1><p>You can close this window.</p></body></html>")
+		c.String(http.StatusOK, oauthCallbackSuccessHTML)
 	})

-	// Management API routes (delegated to management handlers)
-	// New logic: if remote-management-key is empty, do not expose any management endpoint (404).
-	if s.cfg.RemoteManagement.SecretKey != "" {
+	s.engine.GET("/iflow/callback", func(c *gin.Context) {
+		code := c.Query("code")
+		state := c.Query("state")
+		errStr := c.Query("error")
+		if state != "" {
+			file := fmt.Sprintf("%s/.oauth-iflow-%s.oauth", s.cfg.AuthDir, state)
+			_ = os.WriteFile(file, []byte(fmt.Sprintf(`{"code":"%s","state":"%s","error":"%s"}`, code, state, errStr)), 0o600)
+		}
+		c.Header("Content-Type", "text/html; charset=utf-8")
+		c.String(http.StatusOK, oauthCallbackSuccessHTML)
+	})
+
+	// Management routes are registered lazily by registerManagementRoutes when a secret is configured.
+}
+
+func (s *Server) registerManagementRoutes() {
+	if s == nil || s.engine == nil || s.mgmt == nil {
+		return
+	}
+	if !s.managementRoutesRegistered.CompareAndSwap(false, true) {
+		return
+	}
+
+	log.Info("management routes registered after secret key configuration")
+
 	mgmt := s.engine.Group("/v0/management")
-	mgmt.Use(s.mgmt.Middleware())
+	mgmt.Use(s.managementAvailabilityMiddleware(), s.mgmt.Middleware())
 	{
 		mgmt.GET("/usage", s.mgmt.GetUsageStatistics)
 		mgmt.GET("/config", s.mgmt.GetConfig)
@@ -287,6 +357,14 @@ func (s *Server) setupRoutes() {
 		mgmt.PUT("/debug", s.mgmt.PutDebug)
 		mgmt.PATCH("/debug", s.mgmt.PutDebug)

+		mgmt.GET("/logging-to-file", s.mgmt.GetLoggingToFile)
+		mgmt.PUT("/logging-to-file", s.mgmt.PutLoggingToFile)
+		mgmt.PATCH("/logging-to-file", s.mgmt.PutLoggingToFile)
+
+		mgmt.GET("/usage-statistics-enabled", s.mgmt.GetUsageStatisticsEnabled)
+		mgmt.PUT("/usage-statistics-enabled", s.mgmt.PutUsageStatisticsEnabled)
+		mgmt.PATCH("/usage-statistics-enabled", s.mgmt.PutUsageStatisticsEnabled)
+
 		mgmt.GET("/proxy-url", s.mgmt.GetProxyURL)
 		mgmt.PUT("/proxy-url", s.mgmt.PutProxyURL)
 		mgmt.PATCH("/proxy-url", s.mgmt.PutProxyURL)
@@ -343,9 +421,125 @@ func (s *Server) setupRoutes() {
 		mgmt.GET("/gemini-cli-auth-url", s.mgmt.RequestGeminiCLIToken)
 		mgmt.POST("/gemini-web-token", s.mgmt.CreateGeminiWebToken)
 		mgmt.GET("/qwen-auth-url", s.mgmt.RequestQwenToken)
+		mgmt.GET("/iflow-auth-url", s.mgmt.RequestIFlowToken)
 		mgmt.GET("/get-auth-status", s.mgmt.GetAuthStatus)
 	}
 }
+
+func (s *Server) managementAvailabilityMiddleware() gin.HandlerFunc {
+	return func(c *gin.Context) {
+		if !s.managementRoutesEnabled.Load() {
+			c.AbortWithStatus(http.StatusNotFound)
+			return
+		}
+		c.Next()
+	}
+}
+
+func (s *Server) serveManagementControlPanel(c *gin.Context) {
+	cfg := s.cfg
+	if cfg == nil || cfg.RemoteManagement.DisableControlPanel {
+		c.AbortWithStatus(http.StatusNotFound)
+		return
+	}
+
+	filePath := managementasset.FilePath(s.configFilePath)
+	if strings.TrimSpace(filePath) == "" {
+		c.AbortWithStatus(http.StatusNotFound)
+		return
+	}
+
+	if _, err := os.Stat(filePath); err != nil {
+		if os.IsNotExist(err) {
+			go managementasset.EnsureLatestManagementHTML(context.Background(), managementasset.StaticDir(s.configFilePath), cfg.ProxyURL)
+			c.AbortWithStatus(http.StatusNotFound)
+			return
+		}
+
+		log.WithError(err).Error("failed to stat management control panel asset")
+		c.AbortWithStatus(http.StatusInternalServerError)
+		return
+	}
+
+	c.File(filePath)
+}
+
+func (s *Server) enableKeepAlive(timeout time.Duration, onTimeout func()) {
+	if timeout <= 0 || onTimeout == nil {
+		return
+	}
+
+	s.keepAliveEnabled = true
+	s.keepAliveTimeout = timeout
+	s.keepAliveOnTimeout = onTimeout
+	s.keepAliveHeartbeat = make(chan struct{}, 1)
+	s.keepAliveStop = make(chan struct{}, 1)
+
+	s.engine.GET("/keep-alive", s.handleKeepAlive)
+
+	go s.watchKeepAlive()
+}
+
+func (s *Server) handleKeepAlive(c *gin.Context) {
+	if s.localPassword != "" {
+		provided := strings.TrimSpace(c.GetHeader("Authorization"))
+		if provided != "" {
+			parts := strings.SplitN(provided, " ", 2)
+			if len(parts) == 2 && strings.EqualFold(parts[0], "bearer") {
+				provided = parts[1]
+			}
+		}
+		if provided == "" {
+			provided = strings.TrimSpace(c.GetHeader("X-Local-Password"))
+		}
+		if subtle.ConstantTimeCompare([]byte(provided), []byte(s.localPassword)) != 1 {
+			c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "invalid password"})
+			return
+		}
+	}
+
+	s.signalKeepAlive()
+	c.JSON(http.StatusOK, gin.H{"status": "ok"})
+}
+
+func (s *Server) signalKeepAlive() {
+	if !s.keepAliveEnabled {
+		return
+	}
+	select {
+	case s.keepAliveHeartbeat <- struct{}{}:
+	default:
+	}
+}
+
+func (s *Server) watchKeepAlive() {
+	if !s.keepAliveEnabled {
+		return
+	}
+
+	timer := time.NewTimer(s.keepAliveTimeout)
+	defer timer.Stop()
+
+	for {
+		select {
+		case <-timer.C:
+			log.Warnf("keep-alive endpoint idle for %s, shutting down", s.keepAliveTimeout)
+			if s.keepAliveOnTimeout != nil {
+				s.keepAliveOnTimeout()
+			}
+			return
+		case <-s.keepAliveHeartbeat:
+			if !timer.Stop() {
+				select {
+				case <-timer.C:
+				default:
+				}
+			}
+			timer.Reset(s.keepAliveTimeout)
+		case <-s.keepAliveStop:
+			return
+		}
+	}
 }

 // unifiedModelsHandler creates a unified handler for the /v1/models endpoint
@@ -394,6 +588,13 @@ func (s *Server) Start() error {
 func (s *Server) Stop(ctx context.Context) error {
 	log.Debug("Stopping API server...")

+	if s.keepAliveEnabled {
+		select {
+		case s.keepAliveStop <- struct{}{}:
+		default:
+		}
+	}
+
 	// Shutdown the HTTP server.
 	if err := s.server.Shutdown(ctx); err != nil {
 		return fmt.Errorf("failed to shutdown HTTP server: %v", err)
@@ -423,16 +624,13 @@ func corsMiddleware() gin.HandlerFunc {
 	}
 }

-func (s *Server) applyAccessConfig(cfg *config.Config) {
-	if s == nil || s.accessManager == nil {
+func (s *Server) applyAccessConfig(oldCfg, newCfg *config.Config) {
+	if s == nil || s.accessManager == nil || newCfg == nil {
 		return
 	}
-	providers, err := sdkaccess.BuildProviders(cfg)
-	if err != nil {
-		log.Errorf("failed to update request auth providers: %v", err)
+	if _, err := access.ApplyAccessProviders(s.accessManager, oldCfg, newCfg); err != nil {
 		return
 	}
-	s.accessManager.SetProviders(providers)
 }

 // UpdateClients updates the server's client list and configuration.
@@ -442,29 +640,88 @@ func (s *Server) applyAccessConfig(cfg *config.Config) {
 // - clients: The new slice of AI service clients
 // - cfg: The new application configuration
 func (s *Server) UpdateClients(cfg *config.Config) {
+	oldCfg := s.cfg
+
 	// Update request logger enabled state if it has changed
-	if s.requestLogger != nil && s.cfg.RequestLog != cfg.RequestLog {
+	previousRequestLog := false
+	if oldCfg != nil {
+		previousRequestLog = oldCfg.RequestLog
+	}
+	if s.requestLogger != nil && (oldCfg == nil || previousRequestLog != cfg.RequestLog) {
 		if s.loggerToggle != nil {
 			s.loggerToggle(cfg.RequestLog)
 		} else if toggler, ok := s.requestLogger.(interface{ SetEnabled(bool) }); ok {
 			toggler.SetEnabled(cfg.RequestLog)
 		}
-		log.Debugf("request logging updated from %t to %t", s.cfg.RequestLog, cfg.RequestLog)
+		if oldCfg != nil {
+			log.Debugf("request logging updated from %t to %t", previousRequestLog, cfg.RequestLog)
+		} else {
+			log.Debugf("request logging toggled to %t", cfg.RequestLog)
+		}
+	}
+
+	if oldCfg != nil && oldCfg.LoggingToFile != cfg.LoggingToFile {
+		if err := logging.ConfigureLogOutput(cfg.LoggingToFile); err != nil {
+			log.Errorf("failed to reconfigure log output: %v", err)
+		} else {
+			log.Debugf("logging_to_file updated from %t to %t", oldCfg.LoggingToFile, cfg.LoggingToFile)
+		}
+	}
+
+	if oldCfg == nil || oldCfg.UsageStatisticsEnabled != cfg.UsageStatisticsEnabled {
+		usage.SetStatisticsEnabled(cfg.UsageStatisticsEnabled)
+		if oldCfg != nil {
+			log.Debugf("usage_statistics_enabled updated from %t to %t", oldCfg.UsageStatisticsEnabled, cfg.UsageStatisticsEnabled)
+		} else {
+			log.Debugf("usage_statistics_enabled toggled to %t", cfg.UsageStatisticsEnabled)
+		}
 	}

 	// Update log level dynamically when debug flag changes
-	if s.cfg.Debug != cfg.Debug {
+	if oldCfg == nil || oldCfg.Debug != cfg.Debug {
 		util.SetLogLevel(cfg)
-		log.Debugf("debug mode updated from %t to %t", s.cfg.Debug, cfg.Debug)
+		if oldCfg != nil {
+			log.Debugf("debug mode updated from %t to %t", oldCfg.Debug, cfg.Debug)
+		} else {
+			log.Debugf("debug mode toggled to %t", cfg.Debug)
+		}
 	}

+	prevSecretEmpty := true
+	if oldCfg != nil {
+		prevSecretEmpty = oldCfg.RemoteManagement.SecretKey == ""
+	}
+	newSecretEmpty := cfg.RemoteManagement.SecretKey == ""
+	switch {
+	case prevSecretEmpty && !newSecretEmpty:
+		s.registerManagementRoutes()
+		if s.managementRoutesEnabled.CompareAndSwap(false, true) {
+			log.Info("management routes enabled after secret key update")
+		} else {
+			s.managementRoutesEnabled.Store(true)
+		}
+	case !prevSecretEmpty && newSecretEmpty:
+		if s.managementRoutesEnabled.CompareAndSwap(true, false) {
+			log.Info("management routes disabled after secret key removal")
+		} else {
+			s.managementRoutesEnabled.Store(false)
+		}
+	default:
+		s.managementRoutesEnabled.Store(!newSecretEmpty)
+	}
+
+	s.applyAccessConfig(oldCfg, cfg)
 	s.cfg = cfg
-	s.handlers.UpdateClients(cfg)
+	s.handlers.UpdateClients(&cfg.SDKConfig)
+
+	if !cfg.RemoteManagement.DisableControlPanel {
+		staticDir := managementasset.StaticDir(s.configFilePath)
+		go managementasset.EnsureLatestManagementHTML(context.Background(), staticDir, cfg.ProxyURL)
+	}
 	if s.mgmt != nil {
 		s.mgmt.SetConfig(cfg)
 		s.mgmt.SetAuthManager(s.handlers.AuthManager)
 	}
-	s.applyAccessConfig(cfg)

 	// Count client sources from configuration and auth directory
 	authFiles := util.CountAuthFiles(cfg.AuthDir)
@@ -473,11 +730,16 @@ func (s *Server) UpdateClients(cfg *config.Config) {
 	codexAPIKeyCount := len(cfg.CodexKey)
 	openAICompatCount := 0
 	for i := range cfg.OpenAICompatibility {
-		openAICompatCount += len(cfg.OpenAICompatibility[i].APIKeys)
+		entry := cfg.OpenAICompatibility[i]
+		if len(entry.APIKeyEntries) > 0 {
+			openAICompatCount += len(entry.APIKeyEntries)
+			continue
+		}
+		openAICompatCount += len(entry.APIKeys)
 	}

 	total := authFiles + glAPIKeyCount + claudeAPIKeyCount + codexAPIKeyCount + openAICompatCount
-	log.Infof("server clients and configuration updated: %d clients (%d auth files + %d GL API keys + %d Claude API keys + %d Codex keys + %d OpenAI-compat)",
+	fmt.Printf("server clients and configuration updated: %d clients (%d auth files + %d GL API keys + %d Claude API keys + %d Codex keys + %d OpenAI-compat)\n",
 		total,
 		authFiles,
 		glAPIKeyCount,
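For context on the keep-alive wiring above: `WithKeepAliveEndpoint` arms a watchdog that calls `onTimeout` unless `GET /keep-alive` is hit within the configured timeout, and `handleKeepAlive` accepts the local management password either as a bearer token or via `X-Local-Password`. A minimal client-side heartbeat sketch (the endpoint path and headers come from the diff; the address, interval, and password values are illustrative assumptions):

```go
package main

import (
	"net/http"
	"time"
)

// Hypothetical supervisor-side heartbeat: ping /keep-alive more often than
// the server's keep-alive timeout so watchKeepAlive never fires onTimeout.
func main() {
	const (
		target   = "http://127.0.0.1:8317/keep-alive" // illustrative address
		password = "local-secret"                     // illustrative local password
	)
	client := &http.Client{Timeout: 5 * time.Second}
	for range time.Tick(30 * time.Second) {
		req, err := http.NewRequest(http.MethodGet, target, nil)
		if err != nil {
			continue
		}
		// handleKeepAlive checks "Authorization: Bearer <password>" or the X-Local-Password header.
		req.Header.Set("Authorization", "Bearer "+password)
		if resp, errDo := client.Do(req); errDo == nil {
			_ = resp.Body.Close()
		}
	}
}
```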
@@ -59,7 +59,7 @@ type ClaudeAuth struct {
 // - *ClaudeAuth: A new Claude authentication service instance
 func NewClaudeAuth(cfg *config.Config) *ClaudeAuth {
 	return &ClaudeAuth{
-		httpClient: util.SetProxy(cfg, &http.Client{}),
+		httpClient: util.SetProxy(&cfg.SDKConfig, &http.Client{}),
 	}
 }

@@ -37,7 +37,7 @@ type CodexAuth struct {
 // It initializes an HTTP client with proxy settings from the provided configuration.
 func NewCodexAuth(cfg *config.Config) *CodexAuth {
 	return &CodexAuth{
-		httpClient: util.SetProxy(cfg, &http.Client{}),
+		httpClient: util.SetProxy(&cfg.SDKConfig, &http.Client{}),
 	}
 }
@@ -107,7 +107,7 @@ func (g *GeminiAuth) GetAuthenticatedClient(ctx context.Context, ts *GeminiToken

 	// If no token is found in storage, initiate the web-based OAuth flow.
 	if ts.Token == nil {
-		log.Info("Could not load token from file, starting OAuth flow.")
+		fmt.Printf("Could not load token from file, starting OAuth flow.\n")
 		token, err = g.getTokenFromWeb(ctx, conf, noBrowser...)
 		if err != nil {
 			return nil, fmt.Errorf("failed to get token from web: %w", err)
@@ -169,9 +169,9 @@ func (g *GeminiAuth) createTokenStorage(ctx context.Context, config *oauth2.Conf

 	emailResult := gjson.GetBytes(bodyBytes, "email")
 	if emailResult.Exists() && emailResult.Type == gjson.String {
-		log.Infof("Authenticated user email: %s", emailResult.String())
+		fmt.Printf("Authenticated user email: %s\n", emailResult.String())
 	} else {
-		log.Info("Failed to get user email from token")
+		fmt.Println("Failed to get user email from token")
 	}

 	var ifToken map[string]any
@@ -246,19 +246,19 @@ func (g *GeminiAuth) getTokenFromWeb(ctx context.Context, config *oauth2.Config,
 	authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline, oauth2.SetAuthURLParam("prompt", "consent"))

 	if len(noBrowser) == 1 && !noBrowser[0] {
-		log.Info("Opening browser for authentication...")
+		fmt.Println("Opening browser for authentication...")

 		// Check if browser is available
 		if !browser.IsAvailable() {
 			log.Warn("No browser available on this system")
 			util.PrintSSHTunnelInstructions(8085)
-			log.Infof("Please manually open this URL in your browser:\n\n%s\n", authURL)
+			fmt.Printf("Please manually open this URL in your browser:\n\n%s\n", authURL)
 		} else {
 			if err := browser.OpenURL(authURL); err != nil {
 				authErr := codex.NewAuthenticationError(codex.ErrBrowserOpenFailed, err)
 				log.Warn(codex.GetUserFriendlyMessage(authErr))
 				util.PrintSSHTunnelInstructions(8085)
-				log.Infof("Please manually open this URL in your browser:\n\n%s\n", authURL)
+				fmt.Printf("Please manually open this URL in your browser:\n\n%s\n", authURL)

 				// Log platform info for debugging
 				platformInfo := browser.GetPlatformInfo()
@@ -269,10 +269,10 @@ func (g *GeminiAuth) getTokenFromWeb(ctx context.Context, config *oauth2.Config,
 		}
 	} else {
 		util.PrintSSHTunnelInstructions(8085)
-		log.Infof("Please open this URL in your browser:\n\n%s\n", authURL)
+		fmt.Printf("Please open this URL in your browser:\n\n%s\n", authURL)
 	}

-	log.Info("Waiting for authentication callback...")
+	fmt.Println("Waiting for authentication callback...")

 	// Wait for the authorization code or an error.
 	var authCode string
@@ -296,6 +296,6 @@ func (g *GeminiAuth) getTokenFromWeb(ctx context.Context, config *oauth2.Config,
 		return nil, fmt.Errorf("failed to exchange token: %w", err)
 	}

-	log.Info("Authentication successful.")
+	fmt.Println("Authentication successful.")
 	return token, nil
 }
internal/auth/iflow/iflow_auth.go (new file, 275 lines)
@@ -0,0 +1,275 @@
+package iflow
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
+	log "github.com/sirupsen/logrus"
+)
+
+const (
+	// OAuth endpoints and client metadata are derived from the reference Python implementation.
+	iFlowOAuthTokenEndpoint     = "https://iflow.cn/oauth/token"
+	iFlowOAuthAuthorizeEndpoint = "https://iflow.cn/oauth"
+	iFlowUserInfoEndpoint       = "https://iflow.cn/api/oauth/getUserInfo"
+	iFlowSuccessRedirectURL     = "https://iflow.cn/oauth/success"
+
+	// Client credentials provided by iFlow for the Code Assist integration.
+	iFlowOAuthClientID     = "10009311001"
+	iFlowOAuthClientSecret = "4Z3YjXycVsQvyGF1etiNlIBB4RsqSDtW"
+)
+
+// DefaultAPIBaseURL is the canonical chat completions endpoint.
+const DefaultAPIBaseURL = "https://apis.iflow.cn/v1"
+
+// SuccessRedirectURL is exposed for consumers needing the official success page.
+const SuccessRedirectURL = iFlowSuccessRedirectURL
+
+// CallbackPort defines the local port used for OAuth callbacks.
+const CallbackPort = 11451
+
+// IFlowAuth encapsulates the HTTP client helpers for the OAuth flow.
+type IFlowAuth struct {
+	httpClient *http.Client
+}
+
+// NewIFlowAuth constructs a new IFlowAuth with proxy-aware transport.
+func NewIFlowAuth(cfg *config.Config) *IFlowAuth {
+	client := &http.Client{Timeout: 30 * time.Second}
+	return &IFlowAuth{httpClient: util.SetProxy(&cfg.SDKConfig, client)}
+}
+
+// AuthorizationURL builds the authorization URL and matching redirect URI.
+func (ia *IFlowAuth) AuthorizationURL(state string, port int) (authURL, redirectURI string) {
+	redirectURI = fmt.Sprintf("http://localhost:%d/oauth2callback", port)
+	values := url.Values{}
+	values.Set("loginMethod", "phone")
+	values.Set("type", "phone")
+	values.Set("redirect", redirectURI)
+	values.Set("state", state)
+	values.Set("client_id", iFlowOAuthClientID)
+	authURL = fmt.Sprintf("%s?%s", iFlowOAuthAuthorizeEndpoint, values.Encode())
+	return authURL, redirectURI
+}
+
+// ExchangeCodeForTokens exchanges an authorization code for access and refresh tokens.
+func (ia *IFlowAuth) ExchangeCodeForTokens(ctx context.Context, code, redirectURI string) (*IFlowTokenData, error) {
+	form := url.Values{}
+	form.Set("grant_type", "authorization_code")
+	form.Set("code", code)
+	form.Set("redirect_uri", redirectURI)
+	form.Set("client_id", iFlowOAuthClientID)
+	form.Set("client_secret", iFlowOAuthClientSecret)
+
+	req, err := ia.newTokenRequest(ctx, form)
+	if err != nil {
+		return nil, err
+	}
+
+	return ia.doTokenRequest(ctx, req)
+}
+
+// RefreshTokens exchanges a refresh token for a new access token.
+func (ia *IFlowAuth) RefreshTokens(ctx context.Context, refreshToken string) (*IFlowTokenData, error) {
+	form := url.Values{}
+	form.Set("grant_type", "refresh_token")
+	form.Set("refresh_token", refreshToken)
+	form.Set("client_id", iFlowOAuthClientID)
+	form.Set("client_secret", iFlowOAuthClientSecret)
+
+	req, err := ia.newTokenRequest(ctx, form)
+	if err != nil {
+		return nil, err
+	}
+
+	return ia.doTokenRequest(ctx, req)
+}
+
+func (ia *IFlowAuth) newTokenRequest(ctx context.Context, form url.Values) (*http.Request, error) {
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, iFlowOAuthTokenEndpoint, strings.NewReader(form.Encode()))
+	if err != nil {
+		return nil, fmt.Errorf("iflow token: create request failed: %w", err)
+	}
+
+	basic := base64.StdEncoding.EncodeToString([]byte(iFlowOAuthClientID + ":" + iFlowOAuthClientSecret))
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	req.Header.Set("Accept", "application/json")
+	req.Header.Set("Authorization", "Basic "+basic)
+	return req, nil
+}
+
+func (ia *IFlowAuth) doTokenRequest(ctx context.Context, req *http.Request) (*IFlowTokenData, error) {
+	resp, err := ia.httpClient.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("iflow token: request failed: %w", err)
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("iflow token: read response failed: %w", err)
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		log.Debugf("iflow token request failed: status=%d body=%s", resp.StatusCode, string(body))
+		return nil, fmt.Errorf("iflow token: %d %s", resp.StatusCode, strings.TrimSpace(string(body)))
+	}
+
+	var tokenResp IFlowTokenResponse
+	if err = json.Unmarshal(body, &tokenResp); err != nil {
+		return nil, fmt.Errorf("iflow token: decode response failed: %w", err)
+	}
+
+	data := &IFlowTokenData{
+		AccessToken:  tokenResp.AccessToken,
+		RefreshToken: tokenResp.RefreshToken,
+		TokenType:    tokenResp.TokenType,
+		Scope:        tokenResp.Scope,
+		Expire:       time.Now().Add(time.Duration(tokenResp.ExpiresIn) * time.Second).Format(time.RFC3339),
+	}
+
+	if tokenResp.AccessToken == "" {
+		return nil, fmt.Errorf("iflow token: missing access token in response")
+	}
+
+	info, errAPI := ia.FetchUserInfo(ctx, tokenResp.AccessToken)
+	if errAPI != nil {
+		return nil, fmt.Errorf("iflow token: fetch user info failed: %w", errAPI)
+	}
+	if strings.TrimSpace(info.APIKey) == "" {
+		return nil, fmt.Errorf("iflow token: empty api key returned")
+	}
+	email := strings.TrimSpace(info.Email)
+	if email == "" {
+		email = strings.TrimSpace(info.Phone)
+	}
+	if email == "" {
+		return nil, fmt.Errorf("iflow token: missing account email/phone in user info")
+	}
+	data.APIKey = info.APIKey
+	data.Email = email
+
+	return data, nil
+}
+
+// FetchUserInfo retrieves account metadata (including API key) for the provided access token.
+func (ia *IFlowAuth) FetchUserInfo(ctx context.Context, accessToken string) (*userInfoData, error) {
+	if strings.TrimSpace(accessToken) == "" {
+		return nil, fmt.Errorf("iflow api key: access token is empty")
+	}
+
+	endpoint := fmt.Sprintf("%s?accessToken=%s", iFlowUserInfoEndpoint, url.QueryEscape(accessToken))
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
+	if err != nil {
+		return nil, fmt.Errorf("iflow api key: create request failed: %w", err)
+	}
+	req.Header.Set("Accept", "application/json")
+
+	resp, err := ia.httpClient.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("iflow api key: request failed: %w", err)
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("iflow api key: read response failed: %w", err)
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		log.Debugf("iflow api key failed: status=%d body=%s", resp.StatusCode, string(body))
+		return nil, fmt.Errorf("iflow api key: %d %s", resp.StatusCode, strings.TrimSpace(string(body)))
+	}
+
+	var result userInfoResponse
+	if err = json.Unmarshal(body, &result); err != nil {
+		return nil, fmt.Errorf("iflow api key: decode body failed: %w", err)
+	}
+
+	if !result.Success {
+		return nil, fmt.Errorf("iflow api key: request not successful")
+	}
+
+	if result.Data.APIKey == "" {
+		return nil, fmt.Errorf("iflow api key: missing api key in response")
+	}
+
+	return &result.Data, nil
+}
+
+// CreateTokenStorage converts token data into persistence storage.
+func (ia *IFlowAuth) CreateTokenStorage(data *IFlowTokenData) *IFlowTokenStorage {
+	if data == nil {
+		return nil
+	}
+	return &IFlowTokenStorage{
+		AccessToken:  data.AccessToken,
+		RefreshToken: data.RefreshToken,
+		LastRefresh:  time.Now().Format(time.RFC3339),
+		Expire:       data.Expire,
+		APIKey:       data.APIKey,
+		Email:        data.Email,
+		TokenType:    data.TokenType,
+		Scope:        data.Scope,
+	}
+}
+
+// UpdateTokenStorage updates the persisted token storage with latest token data.
+func (ia *IFlowAuth) UpdateTokenStorage(storage *IFlowTokenStorage, data *IFlowTokenData) {
+	if storage == nil || data == nil {
+		return
+	}
+	storage.AccessToken = data.AccessToken
+	storage.RefreshToken = data.RefreshToken
+	storage.LastRefresh = time.Now().Format(time.RFC3339)
+	storage.Expire = data.Expire
+	if data.APIKey != "" {
+		storage.APIKey = data.APIKey
+	}
+	if data.Email != "" {
+		storage.Email = data.Email
+	}
+	storage.TokenType = data.TokenType
+	storage.Scope = data.Scope
+}
+
+// IFlowTokenResponse models the OAuth token endpoint response.
+type IFlowTokenResponse struct {
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token"`
+	ExpiresIn    int    `json:"expires_in"`
+	TokenType    string `json:"token_type"`
+	Scope        string `json:"scope"`
+}
+
+// IFlowTokenData captures processed token details.
+type IFlowTokenData struct {
+	AccessToken  string
+	RefreshToken string
+	TokenType    string
+	Scope        string
+	Expire       string
+	APIKey       string
+	Email        string
+}
+
+// userInfoResponse represents the structure returned by the user info endpoint.
+type userInfoResponse struct {
+	Success bool         `json:"success"`
+	Data    userInfoData `json:"data"`
+}
+
+type userInfoData struct {
+	APIKey string `json:"apiKey"`
+	Email  string `json:"email"`
+	Phone  string `json:"phone"`
+}
internal/auth/iflow/iflow_token.go (new file, 43 lines)
@@ -0,0 +1,43 @@
+package iflow
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
+)
+
+// IFlowTokenStorage persists iFlow OAuth credentials alongside the derived API key.
+type IFlowTokenStorage struct {
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token"`
+	LastRefresh  string `json:"last_refresh"`
+	Expire       string `json:"expired"`
+	APIKey       string `json:"api_key"`
+	Email        string `json:"email"`
+	TokenType    string `json:"token_type"`
+	Scope        string `json:"scope"`
+	Type         string `json:"type"`
+}
+
+// SaveTokenToFile serialises the token storage to disk.
+func (ts *IFlowTokenStorage) SaveTokenToFile(authFilePath string) error {
+	misc.LogSavingCredentials(authFilePath)
+	ts.Type = "iflow"
+	if err := os.MkdirAll(filepath.Dir(authFilePath), 0o700); err != nil {
+		return fmt.Errorf("iflow token: create directory failed: %w", err)
+	}
+
+	f, err := os.Create(authFilePath)
+	if err != nil {
+		return fmt.Errorf("iflow token: create file failed: %w", err)
+	}
+	defer func() { _ = f.Close() }()
+
+	if err = json.NewEncoder(f).Encode(ts); err != nil {
+		return fmt.Errorf("iflow token: encode token failed: %w", err)
+	}
+	return nil
+}
internal/auth/iflow/oauth_server.go (new file, 143 lines)
@@ -0,0 +1,143 @@
+package iflow
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	log "github.com/sirupsen/logrus"
+)
+
+const errorRedirectURL = "https://iflow.cn/oauth/error"
+
+// OAuthResult captures the outcome of the local OAuth callback.
+type OAuthResult struct {
+	Code  string
+	State string
+	Error string
+}
+
+// OAuthServer provides a minimal HTTP server for handling the iFlow OAuth callback.
+type OAuthServer struct {
+	server  *http.Server
+	port    int
+	result  chan *OAuthResult
+	errChan chan error
+	mu      sync.Mutex
+	running bool
+}
+
+// NewOAuthServer constructs a new OAuthServer bound to the provided port.
+func NewOAuthServer(port int) *OAuthServer {
+	return &OAuthServer{
+		port:    port,
+		result:  make(chan *OAuthResult, 1),
+		errChan: make(chan error, 1),
+	}
+}
+
+// Start launches the callback listener.
+func (s *OAuthServer) Start() error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.running {
+		return fmt.Errorf("iflow oauth server already running")
+	}
+	if !s.isPortAvailable() {
+		return fmt.Errorf("port %d is already in use", s.port)
+	}
+
+	mux := http.NewServeMux()
+	mux.HandleFunc("/oauth2callback", s.handleCallback)
+
+	s.server = &http.Server{
+		Addr:         fmt.Sprintf(":%d", s.port),
+		Handler:      mux,
+		ReadTimeout:  10 * time.Second,
+		WriteTimeout: 10 * time.Second,
+	}
+
+	s.running = true
+
+	go func() {
+		if err := s.server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
+			s.errChan <- err
+		}
+	}()
+
+	time.Sleep(100 * time.Millisecond)
+	return nil
+}
+
+// Stop gracefully terminates the callback listener.
+func (s *OAuthServer) Stop(ctx context.Context) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.running || s.server == nil {
+		return nil
+	}
+	defer func() {
+		s.running = false
+		s.server = nil
+	}()
+	return s.server.Shutdown(ctx)
+}
+
+// WaitForCallback blocks until a callback result, server error, or timeout occurs.
+func (s *OAuthServer) WaitForCallback(timeout time.Duration) (*OAuthResult, error) {
+	select {
+	case res := <-s.result:
+		return res, nil
+	case err := <-s.errChan:
+		return nil, err
+	case <-time.After(timeout):
+		return nil, fmt.Errorf("timeout waiting for OAuth callback")
+	}
+}
+
+func (s *OAuthServer) handleCallback(w http.ResponseWriter, r *http.Request) {
+	if r.Method != http.MethodGet {
+		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+		return
+	}
+
+	query := r.URL.Query()
+	if errParam := strings.TrimSpace(query.Get("error")); errParam != "" {
+		s.sendResult(&OAuthResult{Error: errParam})
+		http.Redirect(w, r, errorRedirectURL, http.StatusFound)
+		return
+	}
+
+	code := strings.TrimSpace(query.Get("code"))
+	if code == "" {
+		s.sendResult(&OAuthResult{Error: "missing_code"})
+		http.Redirect(w, r, errorRedirectURL, http.StatusFound)
+		return
+	}
+
+	state := query.Get("state")
+	s.sendResult(&OAuthResult{Code: code, State: state})
+	http.Redirect(w, r, SuccessRedirectURL, http.StatusFound)
+}
+
+func (s *OAuthServer) sendResult(res *OAuthResult) {
+	select {
+	case s.result <- res:
+	default:
+		log.Debug("iflow oauth result channel full, dropping result")
+	}
+}
+
+func (s *OAuthServer) isPortAvailable() bool {
+	addr := fmt.Sprintf(":%d", s.port)
+	listener, err := net.Listen("tcp", addr)
+	if err != nil {
+		return false
+	}
+	_ = listener.Close()
+	return true
+}
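Taken together, the three new files above form a complete local OAuth flow: a callback listener on port 11451, the code-for-token (and API key) exchange, and on-disk persistence. A condensed sketch of how a caller might wire them up (the orchestration, state value, and file path below are illustrative assumptions; the types, constants, and methods are the ones introduced in this diff):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/iflow"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
)

func main() {
	cfg := &config.Config{} // illustrative; normally loaded from config.yaml
	ia := iflow.NewIFlowAuth(cfg)

	// Start the local callback listener on the fixed iFlow port (11451).
	srv := iflow.NewOAuthServer(iflow.CallbackPort)
	if err := srv.Start(); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = srv.Stop(context.Background()) }()

	// Build the authorization URL and hand it to the user.
	authURL, redirectURI := ia.AuthorizationURL("some-state", iflow.CallbackPort)
	fmt.Println("Open this URL in your browser:", authURL)

	// Wait for the browser redirect, then trade the code for tokens plus the API key.
	res, err := srv.WaitForCallback(5 * time.Minute)
	if err != nil {
		log.Fatal(err)
	}
	if res.Error != "" {
		log.Fatalf("callback returned error: %s", res.Error)
	}
	data, err := ia.ExchangeCodeForTokens(context.Background(), res.Code, redirectURI)
	if err != nil {
		log.Fatal(err)
	}

	// Persist the credentials in the same shape the CLI uses.
	storage := ia.CreateTokenStorage(data)
	if err := storage.SaveTokenToFile("auths/iflow-" + data.Email + ".json"); err != nil {
		log.Fatal(err)
	}
}
```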
@@ -85,7 +85,7 @@ type QwenAuth struct {
 // NewQwenAuth creates a new QwenAuth instance with a proxy-configured HTTP client.
 func NewQwenAuth(cfg *config.Config) *QwenAuth {
 	return &QwenAuth{
-		httpClient: util.SetProxy(cfg, &http.Client{}),
+		httpClient: util.SetProxy(&cfg.SDKConfig, &http.Client{}),
 	}
 }

@@ -260,7 +260,7 @@ func (qa *QwenAuth) PollForToken(deviceCode, codeVerifier string) (*QwenTokenDat
 		switch errorType {
 		case "authorization_pending":
 			// User has not yet approved the authorization request. Continue polling.
-			log.Infof("Polling attempt %d/%d...\n", attempt+1, maxAttempts)
+			fmt.Printf("Polling attempt %d/%d...\n\n", attempt+1, maxAttempts)
 			time.Sleep(pollInterval)
 			continue
 		case "slow_down":
@@ -269,7 +269,7 @@ func (qa *QwenAuth) PollForToken(deviceCode, codeVerifier string) (*QwenTokenDat
 			if pollInterval > 10*time.Second {
 				pollInterval = 10 * time.Second
 			}
-			log.Infof("Server requested to slow down, increasing poll interval to %v\n", pollInterval)
+			fmt.Printf("Server requested to slow down, increasing poll interval to %v\n\n", pollInterval)
 			time.Sleep(pollInterval)
 			continue
 		case "expired_token":
@@ -21,7 +21,7 @@ import (
 // Returns:
 // - An error if the URL cannot be opened, otherwise nil.
 func OpenURL(url string) error {
-	log.Infof("Attempting to open URL in browser: %s", url)
+	fmt.Printf("Attempting to open URL in browser: %s\n", url)

 	// Try using the open-golang library first
 	err := open.Run(url)
@@ -17,6 +17,7 @@ func newAuthManager() *sdkAuth.Manager {
 		sdkAuth.NewCodexAuthenticator(),
 		sdkAuth.NewClaudeAuthenticator(),
 		sdkAuth.NewQwenAuthenticator(),
+		sdkAuth.NewIFlowAuthenticator(),
 	)
 	return manager
 }
@@ -6,42 +6,147 @@ import (
 	"context"
 	"crypto/sha256"
 	"encoding/hex"
+	"encoding/json"
 	"fmt"
+	"net/http"
 	"os"
+	"runtime"
 	"strings"
+	"time"
 
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/gemini"
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
 	sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
-	log "github.com/sirupsen/logrus"
+	coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
 )
 
+// banner prints a simple ASCII banner for clarity without ANSI colors.
+func banner(title string) {
+	line := strings.Repeat("=", len(title)+8)
+	fmt.Println(line)
+	fmt.Println("=== " + title + " ===")
+	fmt.Println(line)
+}
+
 // DoGeminiWebAuth handles the process of creating a Gemini Web token file.
-// It prompts the user for their cookie values and saves them to a JSON file.
+// New flow:
+// 1. Prompt user to paste the full cookie string.
+// 2. Extract __Secure-1PSID and __Secure-1PSIDTS from the cookie string.
+// 3. Call https://accounts.google.com/ListAccounts with the cookie to obtain email.
+// 4. Save auth file with the same structure, and set Label to the email.
 func DoGeminiWebAuth(cfg *config.Config) {
+	var secure1psid, secure1psidts, email string
+
 	reader := bufio.NewReader(os.Stdin)
+	isMacOS := strings.HasPrefix(runtime.GOOS, "darwin")
+	cookieProvided := false
+	banner("Gemini Web Cookie Sign-in")
+	if !isMacOS {
+		// NOTE: Provide extra guidance for macOS users or anyone unsure about retrieving cookies.
+		fmt.Println("--- Cookie Input ---")
+		fmt.Println(">> Paste your full Google Cookie and press Enter")
+		fmt.Println("Tip: If you are on macOS, or don't know how to get the cookie, just press Enter and follow the prompts.")
+		fmt.Print("Cookie: ")
+		rawCookie, _ := reader.ReadString('\n')
+		rawCookie = strings.TrimSpace(rawCookie)
+		if rawCookie == "" {
+			// Skip cookie-based parsing; fall back to manual field prompts.
+			fmt.Println("==> No cookie provided. Proceeding with manual input.")
+		} else {
+			cookieProvided = true
+			// Parse K=V cookie pairs separated by ';'
+			cookieMap := make(map[string]string)
+			parts := strings.Split(rawCookie, ";")
+			for _, p := range parts {
+				p = strings.TrimSpace(p)
+				if p == "" {
+					continue
+				}
+				if eq := strings.Index(p, "="); eq > 0 {
+					k := strings.TrimSpace(p[:eq])
+					v := strings.TrimSpace(p[eq+1:])
+					if k != "" {
+						cookieMap[k] = v
+					}
+				}
+			}
+			secure1psid = strings.TrimSpace(cookieMap["__Secure-1PSID"])
+			secure1psidts = strings.TrimSpace(cookieMap["__Secure-1PSIDTS"])
 
-	fmt.Print("Enter your __Secure-1PSID cookie value: ")
-	secure1psid, _ := reader.ReadString('\n')
-	secure1psid = strings.TrimSpace(secure1psid)
+			// Build HTTP client with proxy settings respected.
+			httpClient := &http.Client{Timeout: 15 * time.Second}
+			httpClient = util.SetProxy(&cfg.SDKConfig, httpClient)
 
+			// Request ListAccounts to extract email as label (use POST per upstream behavior).
+			req, err := http.NewRequest(http.MethodPost, "https://accounts.google.com/ListAccounts", nil)
+			if err != nil {
+				fmt.Println("!! Failed to create request:", err)
+			} else {
+				req.Header.Set("Cookie", rawCookie)
+				req.Header.Set("Accept", "application/json, text/plain, */*")
+				req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36")
+				req.Header.Set("Origin", "https://accounts.google.com")
+				req.Header.Set("Content-Type", "application/x-www-form-urlencoded;charset=UTF-8")
+
+				resp, errDo := httpClient.Do(req)
+				if errDo != nil {
+					fmt.Println("!! Request to ListAccounts failed:", err)
+				} else {
+					defer func() { _ = resp.Body.Close() }()
+					if resp.StatusCode != http.StatusOK {
+						fmt.Printf("!! ListAccounts returned status code: %d\n", resp.StatusCode)
+					} else {
+						var payload []any
+						if err = json.NewDecoder(resp.Body).Decode(&payload); err != nil {
+							fmt.Println("!! Failed to parse ListAccounts response:", err)
+						} else {
+							// Expected structure like: ["gaia.l.a.r", [["gaia.l.a",1,"Name","email@example.com", ... ]]]
+							if len(payload) >= 2 {
+								if accounts, ok := payload[1].([]any); ok && len(accounts) >= 1 {
+									if first, ok1 := accounts[0].([]any); ok1 && len(first) >= 4 {
+										if em, ok2 := first[3].(string); ok2 {
+											email = strings.TrimSpace(em)
+										}
+									}
+								}
+							}
+							if email == "" {
+								fmt.Println("!! Failed to parse email from ListAccounts response")
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
+	// Fallback: prompt user to input missing values
 	if secure1psid == "" {
-		log.Fatal("The __Secure-1PSID value cannot be empty.")
-		return
+		if cookieProvided && !isMacOS {
+			fmt.Println("!! Cookie missing __Secure-1PSID.")
+		}
+		fmt.Print("Enter __Secure-1PSID: ")
+		v, _ := reader.ReadString('\n')
+		secure1psid = strings.TrimSpace(v)
 	}
 
-	fmt.Print("Enter your __Secure-1PSIDTS cookie value: ")
-	secure1psidts, _ := reader.ReadString('\n')
-	secure1psidts = strings.TrimSpace(secure1psidts)
-
 	if secure1psidts == "" {
-		fmt.Println("The __Secure-1PSIDTS value cannot be empty.")
+		if cookieProvided && !isMacOS {
+			fmt.Println("!! Cookie missing __Secure-1PSIDTS.")
+		}
+		fmt.Print("Enter __Secure-1PSIDTS: ")
+		v, _ := reader.ReadString('\n')
+		secure1psidts = strings.TrimSpace(v)
+	}
+
+	if secure1psid == "" || secure1psidts == "" {
+		// Use print instead of logger to avoid log redirection.
+		fmt.Println("!! __Secure-1PSID and __Secure-1PSIDTS cannot be empty")
 		return
 	}
 
-	tokenStorage := &gemini.GeminiWebTokenStorage{
-		Secure1PSID:   secure1psid,
-		Secure1PSIDTS: secure1psidts,
+	if isMacOS {
+		fmt.Print("Enter your account email: ")
+		v, _ := reader.ReadString('\n')
+		email = strings.TrimSpace(v)
 	}
 
 	// Generate a filename based on the SHA256 hash of the PSID
@@ -49,21 +154,44 @@ func DoGeminiWebAuth(cfg *config.Config) {
 	hasher.Write([]byte(secure1psid))
 	hash := hex.EncodeToString(hasher.Sum(nil))
 	fileName := fmt.Sprintf("gemini-web-%s.json", hash[:16])
-	// Set a stable label for logging, e.g. gemini-web-<hash>
-	if tokenStorage != nil {
-		tokenStorage.Label = strings.TrimSuffix(fileName, ".json")
+	// Decide label: prefer email; fallback prompt then file name without .json
+	defaultLabel := strings.TrimSuffix(fileName, ".json")
+	label := email
+	if label == "" {
+		fmt.Print(fmt.Sprintf("Enter label for this auth (default: %s): ", defaultLabel))
+		v, _ := reader.ReadString('\n')
+		v = strings.TrimSpace(v)
+		if v != "" {
+			label = v
+		} else {
+			label = defaultLabel
+		}
 	}
-	record := &sdkAuth.TokenRecord{
+
+	tokenStorage := &gemini.GeminiWebTokenStorage{
+		Secure1PSID:   secure1psid,
+		Secure1PSIDTS: secure1psidts,
+		Label:         label,
+	}
+	record := &coreauth.Auth{
+		ID:       fileName,
 		Provider: "gemini-web",
 		FileName: fileName,
 		Storage:  tokenStorage,
 	}
 	store := sdkAuth.GetTokenStore()
-	savedPath, err := store.Save(context.Background(), cfg, record)
+	if cfg != nil {
+		if dirSetter, ok := store.(interface{ SetBaseDir(string) }); ok {
+			dirSetter.SetBaseDir(cfg.AuthDir)
+		}
+	}
+	savedPath, err := store.Save(context.Background(), record)
 	if err != nil {
-		fmt.Printf("Failed to save Gemini Web token to file: %v\n", err)
+		fmt.Println("!! Failed to save Gemini Web token to file:", err)
 		return
 	}
 
-	fmt.Printf("Successfully saved Gemini Web token to: %s\n", savedPath)
+	fmt.Println("==> Successfully saved Gemini Web token!")
+	fmt.Println("==> Saved to:", savedPath)
 }
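The cookie handling above is plain `name=value; name=value` splitting. A minimal standalone sketch of the same parsing rule (the helper name is illustrative, not part of the diff):

```go
package main

import (
	"fmt"
	"strings"
)

// parseCookieHeader splits a raw Cookie header into a name-to-value map,
// mirroring the parsing used by DoGeminiWebAuth above.
func parseCookieHeader(raw string) map[string]string {
	out := make(map[string]string)
	for _, p := range strings.Split(raw, ";") {
		p = strings.TrimSpace(p)
		if eq := strings.Index(p, "="); eq > 0 {
			out[strings.TrimSpace(p[:eq])] = strings.TrimSpace(p[eq+1:])
		}
	}
	return out
}

func main() {
	m := parseCookieHeader("__Secure-1PSID=abc; __Secure-1PSIDTS=def; NID=xyz")
	fmt.Println(m["__Secure-1PSID"], m["__Secure-1PSIDTS"]) // abc def
}
```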
internal/cmd/iflow_login.go (new file, 54 lines)
@@ -0,0 +1,54 @@
package cmd

import (
	"context"
	"errors"
	"fmt"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
	log "github.com/sirupsen/logrus"
)

// DoIFlowLogin performs the iFlow OAuth login via the shared authentication manager.
func DoIFlowLogin(cfg *config.Config, options *LoginOptions) {
	if options == nil {
		options = &LoginOptions{}
	}

	manager := newAuthManager()

	promptFn := options.Prompt
	if promptFn == nil {
		promptFn = func(prompt string) (string, error) {
			fmt.Println()
			fmt.Println(prompt)
			var value string
			_, err := fmt.Scanln(&value)
			return value, err
		}
	}

	authOpts := &sdkAuth.LoginOptions{
		NoBrowser: options.NoBrowser,
		Metadata:  map[string]string{},
		Prompt:    promptFn,
	}

	_, savedPath, err := manager.Login(context.Background(), "iflow", cfg, authOpts)
	if err != nil {
		var emailErr *sdkAuth.EmailRequiredError
		if errors.As(err, &emailErr) {
			log.Error(emailErr.Error())
			return
		}
		fmt.Printf("iFlow authentication failed: %v\n", err)
		return
	}

	if savedPath != "" {
		fmt.Printf("Authentication saved to %s\n", savedPath)
	}

	fmt.Println("iFlow authentication successful!")
}
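A sketch of how this command could be driven from other code inside the same module, assuming only the `LoginOptions` fields the function actually reads (`NoBrowser`, `Prompt`); the config path is an example value:

```go
package main

import (
	"fmt"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/cmd"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
)

func main() {
	cfg, err := config.LoadConfig("config.yaml") // example path
	if err != nil {
		fmt.Println("load config:", err)
		return
	}
	// NoBrowser prints the OAuth URL instead of launching a browser.
	cmd.DoIFlowLogin(cfg, &cmd.LoginOptions{NoBrowser: true})
}
```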
@@ -4,18 +4,45 @@
 package cmd
 
 import (
+	"bufio"
+	"bytes"
 	"context"
+	"encoding/json"
 	"errors"
 	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"strconv"
+	"strings"
+	"time"
 
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/gemini"
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
 	sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
+	cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
 	log "github.com/sirupsen/logrus"
+	"github.com/tidwall/gjson"
 )
 
+const (
+	geminiCLIEndpoint       = "https://cloudcode-pa.googleapis.com"
+	geminiCLIVersion        = "v1internal"
+	geminiCLIUserAgent      = "google-api-nodejs-client/9.15.1"
+	geminiCLIApiClient      = "gl-node/22.17.0"
+	geminiCLIClientMetadata = "ideType=IDE_UNSPECIFIED,platform=PLATFORM_UNSPECIFIED,pluginType=GEMINI"
+)
+
+type projectSelectionRequiredError struct{}
+
+func (e *projectSelectionRequiredError) Error() string {
+	return "gemini cli: project selection required"
+}
+
 // DoLogin handles Google Gemini authentication using the shared authentication manager.
-// It initiates the OAuth flow for Google Gemini services and saves the authentication
-// tokens to the configured auth directory.
+// It initiates the OAuth flow for Google Gemini services, performs the legacy CLI user setup,
+// and saves the authentication tokens to the configured auth directory.
 //
 // Parameters:
 //   - cfg: The application configuration
@@ -26,26 +53,369 @@ func DoLogin(cfg *config.Config, projectID string, options *LoginOptions) {
 		options = &LoginOptions{}
 	}
 
-	manager := newAuthManager()
+	ctx := context.Background()
 
-	metadata := map[string]string{}
-	if projectID != "" {
-		metadata["project_id"] = projectID
-	}
-
-	authOpts := &sdkAuth.LoginOptions{
+	loginOpts := &sdkAuth.LoginOptions{
 		NoBrowser: options.NoBrowser,
-		ProjectID: projectID,
-		Metadata:  metadata,
+		ProjectID: strings.TrimSpace(projectID),
+		Metadata:  map[string]string{},
 		Prompt:    options.Prompt,
 	}
 
-	_, savedPath, err := manager.Login(context.Background(), "gemini", cfg, authOpts)
-	if err != nil {
-		var selectionErr *sdkAuth.ProjectSelectionError
-		if errors.As(err, &selectionErr) {
-			fmt.Println(selectionErr.Error())
-			projects := selectionErr.ProjectsDisplay()
+	authenticator := sdkAuth.NewGeminiAuthenticator()
+	record, errLogin := authenticator.Login(ctx, cfg, loginOpts)
+	if errLogin != nil {
+		log.Fatalf("Gemini authentication failed: %v", errLogin)
+		return
+	}
 
+	storage, okStorage := record.Storage.(*gemini.GeminiTokenStorage)
+	if !okStorage || storage == nil {
+		log.Fatal("Gemini authentication failed: unsupported token storage")
+		return
+	}
+
+	geminiAuth := gemini.NewGeminiAuth()
+	httpClient, errClient := geminiAuth.GetAuthenticatedClient(ctx, storage, cfg, options.NoBrowser)
+	if errClient != nil {
+		log.Fatalf("Gemini authentication failed: %v", errClient)
+		return
+	}
+
+	log.Info("Authentication successful.")
+
+	projects, errProjects := fetchGCPProjects(ctx, httpClient)
+	if errProjects != nil {
+		log.Fatalf("Failed to get project list: %v", errProjects)
+		return
+	}
+
+	promptFn := options.Prompt
+	if promptFn == nil {
+		promptFn = defaultProjectPrompt()
+	}
+
+	selectedProjectID := promptForProjectSelection(projects, strings.TrimSpace(projectID), promptFn)
+	if strings.TrimSpace(selectedProjectID) == "" {
+		log.Fatal("No project selected; aborting login.")
+		return
+	}
+
+	if errSetup := performGeminiCLISetup(ctx, httpClient, storage, selectedProjectID); errSetup != nil {
+		var projectErr *projectSelectionRequiredError
+		if errors.As(errSetup, &projectErr) {
+			log.Error("Failed to start user onboarding: A project ID is required.")
+			showProjectSelectionHelp(storage.Email, projects)
+			return
+		}
+		log.Fatalf("Failed to complete user setup: %v", errSetup)
+		return
+	}
+
+	storage.Auto = false
+
+	if !storage.Auto && !storage.Checked {
+		isChecked, errCheck := checkCloudAPIIsEnabled(ctx, httpClient, storage.ProjectID)
+		if errCheck != nil {
+			log.Fatalf("Failed to check if Cloud AI API is enabled: %v", errCheck)
+			return
+		}
+		storage.Checked = isChecked
+		if !isChecked {
+			log.Fatal("Failed to check if Cloud AI API is enabled. If you encounter an error message, please create an issue.")
+			return
+		}
+	}
+
+	updateAuthRecord(record, storage)
+
+	store := sdkAuth.GetTokenStore()
+	if setter, okSetter := store.(interface{ SetBaseDir(string) }); okSetter && cfg != nil {
+		setter.SetBaseDir(cfg.AuthDir)
+	}
+
+	savedPath, errSave := store.Save(ctx, record)
+	if errSave != nil {
+		log.Fatalf("Failed to save token to file: %v", errSave)
+		return
+	}
+
+	if savedPath != "" {
+		fmt.Printf("Authentication saved to %s\n", savedPath)
+	}
+
+	fmt.Println("Gemini authentication successful!")
+}
+
+func performGeminiCLISetup(ctx context.Context, httpClient *http.Client, storage *gemini.GeminiTokenStorage, requestedProject string) error {
+	metadata := map[string]string{
+		"ideType":    "IDE_UNSPECIFIED",
+		"platform":   "PLATFORM_UNSPECIFIED",
+		"pluginType": "GEMINI",
+	}
+
+	trimmedRequest := strings.TrimSpace(requestedProject)
+	explicitProject := trimmedRequest != ""
+
+	loadReqBody := map[string]any{
+		"metadata": metadata,
+	}
+	if explicitProject {
+		loadReqBody["cloudaicompanionProject"] = trimmedRequest
+	}
+
+	var loadResp map[string]any
+	if errLoad := callGeminiCLI(ctx, httpClient, "loadCodeAssist", loadReqBody, &loadResp); errLoad != nil {
+		return fmt.Errorf("load code assist: %w", errLoad)
+	}
+
+	tierID := "legacy-tier"
+	if tiers, okTiers := loadResp["allowedTiers"].([]any); okTiers {
+		for _, rawTier := range tiers {
+			tier, okTier := rawTier.(map[string]any)
+			if !okTier {
+				continue
+			}
+			if isDefault, okDefault := tier["isDefault"].(bool); okDefault && isDefault {
+				if id, okID := tier["id"].(string); okID && strings.TrimSpace(id) != "" {
+					tierID = strings.TrimSpace(id)
+					break
+				}
+			}
+		}
+	}
+
+	projectID := trimmedRequest
+	if projectID == "" {
+		if id, okProject := loadResp["cloudaicompanionProject"].(string); okProject {
+			projectID = strings.TrimSpace(id)
+		}
+		if projectID == "" {
+			if projectMap, okProject := loadResp["cloudaicompanionProject"].(map[string]any); okProject {
+				if id, okID := projectMap["id"].(string); okID {
+					projectID = strings.TrimSpace(id)
+				}
+			}
+		}
+	}
+	if projectID == "" {
+		return &projectSelectionRequiredError{}
+	}
+
+	onboardReqBody := map[string]any{
+		"tierId":                  tierID,
+		"metadata":                metadata,
+		"cloudaicompanionProject": projectID,
+	}
+
+	// Store the requested project as a fallback in case the response omits it.
+	storage.ProjectID = projectID
+
+	for {
+		var onboardResp map[string]any
+		if errOnboard := callGeminiCLI(ctx, httpClient, "onboardUser", onboardReqBody, &onboardResp); errOnboard != nil {
+			return fmt.Errorf("onboard user: %w", errOnboard)
+		}
+
+		if done, okDone := onboardResp["done"].(bool); okDone && done {
+			responseProjectID := ""
+			if resp, okResp := onboardResp["response"].(map[string]any); okResp {
+				switch projectValue := resp["cloudaicompanionProject"].(type) {
+				case map[string]any:
+					if id, okID := projectValue["id"].(string); okID {
+						responseProjectID = strings.TrimSpace(id)
+					}
+				case string:
+					responseProjectID = strings.TrimSpace(projectValue)
+				}
+			}
+
+			finalProjectID := projectID
+			if responseProjectID != "" {
+				if explicitProject && !strings.EqualFold(responseProjectID, projectID) {
+					log.Warnf("Gemini onboarding returned project %s instead of requested %s; keeping requested project ID.", responseProjectID, projectID)
+				} else {
+					finalProjectID = responseProjectID
+				}
+			}
+
+			storage.ProjectID = strings.TrimSpace(finalProjectID)
+			if storage.ProjectID == "" {
+				storage.ProjectID = strings.TrimSpace(projectID)
+			}
+			if storage.ProjectID == "" {
+				return fmt.Errorf("onboard user completed without project id")
+			}
+			log.Infof("Onboarding complete. Using Project ID: %s", storage.ProjectID)
+			return nil
+		}
+
+		log.Println("Onboarding in progress, waiting 5 seconds...")
+		time.Sleep(5 * time.Second)
+	}
+}
+
+func callGeminiCLI(ctx context.Context, httpClient *http.Client, endpoint string, body any, result any) error {
+	url := fmt.Sprintf("%s/%s:%s", geminiCLIEndpoint, geminiCLIVersion, endpoint)
+	if strings.HasPrefix(endpoint, "operations/") {
+		url = fmt.Sprintf("%s/%s", geminiCLIEndpoint, endpoint)
+	}
+
+	var reader io.Reader
+	if body != nil {
+		rawBody, errMarshal := json.Marshal(body)
+		if errMarshal != nil {
+			return fmt.Errorf("marshal request body: %w", errMarshal)
+		}
+		reader = bytes.NewReader(rawBody)
+	}
+
+	req, errRequest := http.NewRequestWithContext(ctx, http.MethodPost, url, reader)
+	if errRequest != nil {
+		return fmt.Errorf("create request: %w", errRequest)
+	}
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("User-Agent", geminiCLIUserAgent)
+	req.Header.Set("X-Goog-Api-Client", geminiCLIApiClient)
+	req.Header.Set("Client-Metadata", geminiCLIClientMetadata)
+
+	resp, errDo := httpClient.Do(req)
+	if errDo != nil {
+		return fmt.Errorf("execute request: %w", errDo)
+	}
+	defer func() {
+		if errClose := resp.Body.Close(); errClose != nil {
+			log.Errorf("response body close error: %v", errClose)
+		}
+	}()
+
+	if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
+		bodyBytes, _ := io.ReadAll(resp.Body)
+		return fmt.Errorf("api request failed with status %d: %s", resp.StatusCode, strings.TrimSpace(string(bodyBytes)))
+	}
+
+	if result == nil {
+		_, _ = io.Copy(io.Discard, resp.Body)
+		return nil
+	}
+
+	if errDecode := json.NewDecoder(resp.Body).Decode(result); errDecode != nil {
+		return fmt.Errorf("decode response body: %w", errDecode)
+	}
+
+	return nil
+}
+
+func fetchGCPProjects(ctx context.Context, httpClient *http.Client) ([]interfaces.GCPProjectProjects, error) {
+	req, errRequest := http.NewRequestWithContext(ctx, http.MethodGet, "https://cloudresourcemanager.googleapis.com/v1/projects", nil)
+	if errRequest != nil {
+		return nil, fmt.Errorf("could not create project list request: %w", errRequest)
+	}
+
+	resp, errDo := httpClient.Do(req)
+	if errDo != nil {
+		return nil, fmt.Errorf("failed to execute project list request: %w", errDo)
+	}
+	defer func() {
+		if errClose := resp.Body.Close(); errClose != nil {
+			log.Errorf("response body close error: %v", errClose)
+		}
+	}()
+
+	if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
+		bodyBytes, _ := io.ReadAll(resp.Body)
+		return nil, fmt.Errorf("project list request failed with status %d: %s", resp.StatusCode, strings.TrimSpace(string(bodyBytes)))
+	}
+
+	var projects interfaces.GCPProject
+	if errDecode := json.NewDecoder(resp.Body).Decode(&projects); errDecode != nil {
+		return nil, fmt.Errorf("failed to unmarshal project list: %w", errDecode)
+	}
+
+	return projects.Projects, nil
+}
+
+// promptForProjectSelection prints available projects and returns the chosen project ID.
+func promptForProjectSelection(projects []interfaces.GCPProjectProjects, presetID string, promptFn func(string) (string, error)) string {
+	trimmedPreset := strings.TrimSpace(presetID)
+	if len(projects) == 0 {
+		if trimmedPreset != "" {
+			return trimmedPreset
+		}
+		fmt.Println("No Google Cloud projects are available for selection.")
+		return ""
+	}
+
+	fmt.Println("Available Google Cloud projects:")
+	defaultIndex := 0
+	for idx, project := range projects {
+		fmt.Printf("[%d] %s (%s)\n", idx+1, project.ProjectID, project.Name)
+		if trimmedPreset != "" && project.ProjectID == trimmedPreset {
+			defaultIndex = idx
+		}
+	}
+
+	defaultID := projects[defaultIndex].ProjectID
+
+	if trimmedPreset != "" {
+		for _, project := range projects {
+			if project.ProjectID == trimmedPreset {
+				return trimmedPreset
+			}
+		}
+		log.Warnf("Provided project ID %s not found in available projects; please choose from the list.", trimmedPreset)
+	}
+
+	for {
+		promptMsg := fmt.Sprintf("Enter project ID [%s]: ", defaultID)
+		answer, errPrompt := promptFn(promptMsg)
+		if errPrompt != nil {
+			log.Errorf("Project selection prompt failed: %v", errPrompt)
+			return defaultID
+		}
+		answer = strings.TrimSpace(answer)
+		if answer == "" {
+			return defaultID
+		}
+
+		for _, project := range projects {
+			if project.ProjectID == answer {
+				return project.ProjectID
+			}
+		}
+
+		if idx, errAtoi := strconv.Atoi(answer); errAtoi == nil {
+			if idx >= 1 && idx <= len(projects) {
+				return projects[idx-1].ProjectID
+			}
+		}
+
+		fmt.Println("Invalid selection, enter a project ID or a number from the list.")
+	}
+}
+
+func defaultProjectPrompt() func(string) (string, error) {
+	reader := bufio.NewReader(os.Stdin)
+	return func(prompt string) (string, error) {
+		fmt.Print(prompt)
+		line, errRead := reader.ReadString('\n')
+		if errRead != nil {
+			if errors.Is(errRead, io.EOF) {
+				return strings.TrimSpace(line), nil
+			}
+			return "", errRead
+		}
+		return strings.TrimSpace(line), nil
+	}
+}
+
+func showProjectSelectionHelp(email string, projects []interfaces.GCPProjectProjects) {
+	if email != "" {
+		log.Infof("Your account %s needs to specify a project ID.", email)
+	} else {
+		log.Info("You need to specify a project ID.")
+	}
+
 	if len(projects) > 0 {
 		fmt.Println("========================================================================")
 		for _, p := range projects {
@@ -53,17 +423,89 @@ func DoLogin(cfg *config.Config, projectID string, options *LoginOptions) {
 			fmt.Printf("Project Name: %s\n", p.Name)
 			fmt.Println("------------------------------------------------------------------------")
 		}
-		fmt.Println("Please rerun the login command with --project_id <project_id>.")
+	} else {
+		fmt.Println("No active projects were returned for this account.")
 	}
-	return
+
+	fmt.Printf("Please run this command to login again with a specific project:\n\n%s --login --project_id <project_id>\n", os.Args[0])
 }
-	log.Fatalf("Gemini authentication failed: %v", err)
+
+func checkCloudAPIIsEnabled(ctx context.Context, httpClient *http.Client, projectID string) (bool, error) {
+	serviceUsageURL := "https://serviceusage.googleapis.com"
+	requiredServices := []string{
+		// "geminicloudassist.googleapis.com", // Gemini Cloud Assist API
+		"cloudaicompanion.googleapis.com", // Gemini for Google Cloud API
+	}
+	for _, service := range requiredServices {
+		checkUrl := fmt.Sprintf("%s/v1/projects/%s/services/%s", serviceUsageURL, projectID, service)
+		req, errRequest := http.NewRequestWithContext(ctx, http.MethodGet, checkUrl, nil)
+		if errRequest != nil {
+			return false, fmt.Errorf("failed to create request: %w", errRequest)
+		}
+		req.Header.Set("Content-Type", "application/json")
+		req.Header.Set("User-Agent", geminiCLIUserAgent)
+		resp, errDo := httpClient.Do(req)
+		if errDo != nil {
+			return false, fmt.Errorf("failed to execute request: %w", errDo)
+		}
+
+		if resp.StatusCode == http.StatusOK {
+			bodyBytes, _ := io.ReadAll(resp.Body)
+			if gjson.GetBytes(bodyBytes, "state").String() == "ENABLED" {
+				_ = resp.Body.Close()
+				continue
+			}
+		}
+		_ = resp.Body.Close()
+
+		enableUrl := fmt.Sprintf("%s/v1/projects/%s/services/%s:enable", serviceUsageURL, projectID, service)
+		req, errRequest = http.NewRequestWithContext(ctx, http.MethodPost, enableUrl, strings.NewReader("{}"))
+		if errRequest != nil {
+			return false, fmt.Errorf("failed to create request: %w", errRequest)
+		}
+		req.Header.Set("Content-Type", "application/json")
+		req.Header.Set("User-Agent", geminiCLIUserAgent)
+		resp, errDo = httpClient.Do(req)
+		if errDo != nil {
+			return false, fmt.Errorf("failed to execute request: %w", errDo)
+		}
+
+		bodyBytes, _ := io.ReadAll(resp.Body)
+		errMessage := string(bodyBytes)
+		errMessageResult := gjson.GetBytes(bodyBytes, "error.message")
+		if errMessageResult.Exists() {
+			errMessage = errMessageResult.String()
+		}
+		if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusCreated {
+			_ = resp.Body.Close()
+			continue
+		} else if resp.StatusCode == http.StatusBadRequest {
+			_ = resp.Body.Close()
+			if strings.Contains(strings.ToLower(errMessage), "already enabled") {
+				continue
+			}
+		}
+		return false, fmt.Errorf("project activation required: %s", errMessage)
+	}
+	return true, nil
+}
+
+func updateAuthRecord(record *cliproxyauth.Auth, storage *gemini.GeminiTokenStorage) {
+	if record == nil || storage == nil {
 		return
 	}
 
-	if savedPath != "" {
-		log.Infof("Authentication saved to %s", savedPath)
-	}
+	finalName := fmt.Sprintf("%s-%s.json", storage.Email, storage.ProjectID)
 
-	log.Info("Gemini authentication successful!")
+	if record.Metadata == nil {
+		record.Metadata = make(map[string]any)
+	}
+	record.Metadata["email"] = storage.Email
+	record.Metadata["project_id"] = storage.ProjectID
+	record.Metadata["auto"] = storage.Auto
+	record.Metadata["checked"] = storage.Checked
+
+	record.ID = finalName
+	record.FileName = finalName
+	record.Storage = storage
 }
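promptForProjectSelection accepts either a literal project ID or a 1-based index into the printed list. A standalone sketch of that resolution rule, simplified to plain strings (the real code works on `interfaces.GCPProjectProjects` values):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// resolveProject mirrors the selection rules used above: an exact project ID
// wins, then a 1-based list index, otherwise no match.
func resolveProject(projects []string, answer string) (string, bool) {
	answer = strings.TrimSpace(answer)
	for _, id := range projects {
		if id == answer {
			return id, true
		}
	}
	if idx, err := strconv.Atoi(answer); err == nil && idx >= 1 && idx <= len(projects) {
		return projects[idx-1], true
	}
	return "", false
}

func main() {
	projects := []string{"alpha-123", "beta-456"}
	fmt.Println(resolveProject(projects, "2"))         // beta-456 true
	fmt.Println(resolveProject(projects, "alpha-123")) // alpha-123 true
	fmt.Println(resolveProject(projects, "gamma"))     // "" false
}
```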
@@ -8,7 +8,9 @@ import (
 	"errors"
 	"os/signal"
 	"syscall"
+	"time"
 
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/api"
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
 	"github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy"
 	log "github.com/sirupsen/logrus"
@@ -23,19 +25,30 @@ import (
 // - configPath: The path to the configuration file
 // - localPassword: Optional password accepted for local management requests
 func StartService(cfg *config.Config, configPath string, localPassword string) {
-	service, err := cliproxy.NewBuilder().
+	builder := cliproxy.NewBuilder().
 		WithConfig(cfg).
 		WithConfigPath(configPath).
-		WithLocalManagementPassword(localPassword).
-		Build()
+		WithLocalManagementPassword(localPassword)
+
+	ctxSignal, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
+	defer cancel()
+
+	runCtx := ctxSignal
+	if localPassword != "" {
+		var keepAliveCancel context.CancelFunc
+		runCtx, keepAliveCancel = context.WithCancel(ctxSignal)
+		builder = builder.WithServerOptions(api.WithKeepAliveEndpoint(10*time.Second, func() {
+			log.Warn("keep-alive endpoint idle for 10s, shutting down")
+			keepAliveCancel()
+		}))
+	}
+
+	service, err := builder.Build()
 	if err != nil {
 		log.Fatalf("failed to build proxy service: %v", err)
 	}
 
-	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
-	defer cancel()
-
-	err = service.Run(ctx)
+	err = service.Run(runCtx)
 	if err != nil && !errors.Is(err, context.Canceled) {
 		log.Fatalf("proxy service exited with error: %v", err)
 	}
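The keep-alive wiring above cancels the run context once the endpoint has been idle too long. A standalone sketch of that idle-watchdog idea; this is only an illustration of the pattern, not the `api.WithKeepAliveEndpoint` implementation:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// idleWatchdog cancels ctx unless ping() is called within every window.
func idleWatchdog(parent context.Context, window time.Duration) (ctx context.Context, ping func()) {
	ctx, cancel := context.WithCancel(parent)
	timer := time.AfterFunc(window, cancel) // fires if no ping arrives in time
	return ctx, func() { timer.Reset(window) }
}

func main() {
	ctx, ping := idleWatchdog(context.Background(), 200*time.Millisecond)
	ping() // a keep-alive request arrived, so the deadline is pushed back
	<-ctx.Done()
	fmt.Println("idle window elapsed:", ctx.Err())
}
```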
@@ -8,12 +8,14 @@ import (
 	"fmt"
 	"os"
 
+	"github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
 	"golang.org/x/crypto/bcrypt"
 	"gopkg.in/yaml.v3"
 )
 
 // Config represents the application's configuration, loaded from a YAML file.
 type Config struct {
+	config.SDKConfig `yaml:",inline"`
 	// Port is the network port on which the API server will listen.
 	Port int `yaml:"port" json:"-"`
 
@@ -23,14 +25,11 @@ type Config struct {
 	// Debug enables or disables debug-level logging and other debug features.
 	Debug bool `yaml:"debug" json:"debug"`
 
-	// ProxyURL is the URL of an optional proxy server to use for outbound requests.
-	ProxyURL string `yaml:"proxy-url" json:"proxy-url"`
+	// LoggingToFile controls whether application logs are written to rotating files or stdout.
+	LoggingToFile bool `yaml:"logging-to-file" json:"logging-to-file"`
 
-	// APIKeys is a list of keys for authenticating clients to this proxy server.
-	APIKeys []string `yaml:"api-keys" json:"api-keys"`
+	// UsageStatisticsEnabled toggles in-memory usage aggregation; when false, usage data is discarded.
+	UsageStatisticsEnabled bool `yaml:"usage-statistics-enabled" json:"usage-statistics-enabled"`
 
-	// Access holds request authentication provider configuration.
-	Access AccessConfig `yaml:"auth" json:"auth"`
-
 	// QuotaExceeded defines the behavior when a quota is exceeded.
 	QuotaExceeded QuotaExceeded `yaml:"quota-exceeded" json:"quota-exceeded"`
@@ -38,9 +37,6 @@ type Config struct {
 	// GlAPIKey is the API key for the generative language API.
 	GlAPIKey []string `yaml:"generative-language-api-key" json:"generative-language-api-key"`
 
-	// RequestLog enables or disables detailed request logging functionality.
-	RequestLog bool `yaml:"request-log" json:"request-log"`
-
 	// RequestRetry defines the retry times when the request failed.
 	RequestRetry int `yaml:"request-retry" json:"request-retry"`
@@ -60,48 +56,25 @@ type Config struct {
 	GeminiWeb GeminiWebConfig `yaml:"gemini-web" json:"gemini-web"`
 }
 
-// AccessConfig groups request authentication providers.
-type AccessConfig struct {
-	// Providers lists configured authentication providers.
-	Providers []AccessProvider `yaml:"providers" json:"providers"`
-}
-
-// AccessProvider describes a request authentication provider entry.
-type AccessProvider struct {
-	// Name is the instance identifier for the provider.
-	Name string `yaml:"name" json:"name"`
-
-	// Type selects the provider implementation registered via the SDK.
-	Type string `yaml:"type" json:"type"`
-
-	// SDK optionally names a third-party SDK module providing this provider.
-	SDK string `yaml:"sdk,omitempty" json:"sdk,omitempty"`
-
-	// APIKeys lists inline keys for providers that require them.
-	APIKeys []string `yaml:"api-keys,omitempty" json:"api-keys,omitempty"`
-
-	// Config passes provider-specific options to the implementation.
-	Config map[string]any `yaml:"config,omitempty" json:"config,omitempty"`
-}
-
-const (
-	// AccessProviderTypeConfigAPIKey is the built-in provider validating inline API keys.
-	AccessProviderTypeConfigAPIKey = "config-api-key"
-
-	// DefaultAccessProviderName is applied when no provider name is supplied.
-	DefaultAccessProviderName = "config-inline"
-)
-
 // GeminiWebConfig nests Gemini Web related options under 'gemini-web'.
 type GeminiWebConfig struct {
 	// Context enables JSON-based conversation reuse.
 	// Defaults to true if not set in YAML (see LoadConfig).
 	Context bool `yaml:"context" json:"context"`
 
-	// CodeMode, when true, enables coding mode behaviors for Gemini Web:
-	// - Attach the predefined "Coding partner" Gem
-	// - Enable XML wrapping hint for tool markup
-	// - Merge <think> content into visible content for tool-friendly output
+	// GemMode selects a predefined Gem to attach for Gemini Web requests.
+	// Allowed values:
+	// - "coding-partner"
+	// - "writing-editor"
+	// When empty, no Gem is attached by configuration.
+	// This is independent from CodeMode below, which is kept for backwards compatibility.
+	GemMode string `yaml:"gem-mode" json:"gem-mode"`
+
+	// CodeMode enables legacy coding-mode behaviors for Gemini Web.
+	// Backwards compatibility: when true, the service behaves as before by
+	// attaching the predefined "Coding partner" Gem and enabling extra
+	// conveniences (e.g., XML wrapping hints). Prefer GemMode for selecting
+	// a Gem going forward.
 	CodeMode bool `yaml:"code-mode" json:"code-mode"`
 
 	// MaxCharsPerRequest caps the number of characters (runes) sent to
@@ -122,6 +95,8 @@ type RemoteManagement struct {
 	AllowRemote bool `yaml:"allow-remote"`
 	// SecretKey is the management key (plaintext or bcrypt hashed). YAML key intentionally 'secret-key'.
 	SecretKey string `yaml:"secret-key"`
+	// DisableControlPanel skips serving and syncing the bundled management UI when true.
+	DisableControlPanel bool `yaml:"disable-control-panel"`
 }
 
 // QuotaExceeded defines the behavior when API quota limits are exceeded.
@@ -143,6 +118,9 @@ type ClaudeKey struct {
 	// BaseURL is the base URL for the Claude API endpoint.
 	// If empty, the default Claude API URL will be used.
 	BaseURL string `yaml:"base-url" json:"base-url"`
+
+	// ProxyURL overrides the global proxy setting for this API key if provided.
+	ProxyURL string `yaml:"proxy-url" json:"proxy-url"`
 }
 
 // CodexKey represents the configuration for a Codex API key,
@@ -154,6 +132,9 @@ type CodexKey struct {
 	// BaseURL is the base URL for the Codex API endpoint.
 	// If empty, the default Codex API URL will be used.
 	BaseURL string `yaml:"base-url" json:"base-url"`
+
+	// ProxyURL overrides the global proxy setting for this API key if provided.
+	ProxyURL string `yaml:"proxy-url" json:"proxy-url"`
 }
 
 // OpenAICompatibility represents the configuration for OpenAI API compatibility
@@ -166,12 +147,25 @@ type OpenAICompatibility struct {
 	BaseURL string `yaml:"base-url" json:"base-url"`
 
 	// APIKeys are the authentication keys for accessing the external API services.
-	APIKeys []string `yaml:"api-keys" json:"api-keys"`
+	// Deprecated: Use APIKeyEntries instead to support per-key proxy configuration.
+	APIKeys []string `yaml:"api-keys,omitempty" json:"api-keys,omitempty"`
+
+	// APIKeyEntries defines API keys with optional per-key proxy configuration.
+	APIKeyEntries []OpenAICompatibilityAPIKey `yaml:"api-key-entries,omitempty" json:"api-key-entries,omitempty"`
+
 	// Models defines the model configurations including aliases for routing.
 	Models []OpenAICompatibilityModel `yaml:"models" json:"models"`
 }
 
+// OpenAICompatibilityAPIKey represents an API key configuration with optional proxy setting.
+type OpenAICompatibilityAPIKey struct {
+	// APIKey is the authentication key for accessing the external API services.
+	APIKey string `yaml:"api-key" json:"api-key"`
+
+	// ProxyURL overrides the global proxy setting for this API key if provided.
+	ProxyURL string `yaml:"proxy-url,omitempty" json:"proxy-url,omitempty"`
+}
+
 // OpenAICompatibilityModel represents a model configuration for OpenAI compatibility,
 // including the actual model name and its alias for API routing.
 type OpenAICompatibilityModel struct {
@@ -200,21 +194,23 @@ func LoadConfig(configFile string) (*Config, error) {
 	}
 
 	// Unmarshal the YAML data into the Config struct.
-	var config Config
+	var cfg Config
 	// Set defaults before unmarshal so that absent keys keep defaults.
-	config.GeminiWeb.Context = true
-	if err = yaml.Unmarshal(data, &config); err != nil {
+	cfg.LoggingToFile = true
+	cfg.UsageStatisticsEnabled = true
+	cfg.GeminiWeb.Context = true
+	if err = yaml.Unmarshal(data, &cfg); err != nil {
 		return nil, fmt.Errorf("failed to parse config file: %w", err)
 	}
 
 	// Hash remote management key if plaintext is detected (nested)
 	// We consider a value to be already hashed if it looks like a bcrypt hash ($2a$, $2b$, or $2y$ prefix).
-	if config.RemoteManagement.SecretKey != "" && !looksLikeBcrypt(config.RemoteManagement.SecretKey) {
-		hashed, errHash := hashSecret(config.RemoteManagement.SecretKey)
+	if cfg.RemoteManagement.SecretKey != "" && !looksLikeBcrypt(cfg.RemoteManagement.SecretKey) {
+		hashed, errHash := hashSecret(cfg.RemoteManagement.SecretKey)
 		if errHash != nil {
 			return nil, fmt.Errorf("failed to hash remote management key: %w", errHash)
 		}
-		config.RemoteManagement.SecretKey = hashed
+		cfg.RemoteManagement.SecretKey = hashed
 
 		// Persist the hashed value back to the config file to avoid re-hashing on next startup.
 		// Preserve YAML comments and ordering; update only the nested key.
@@ -222,81 +218,23 @@ func LoadConfig(configFile string) (*Config, error) {
 	}
 
 	// Sync request authentication providers with inline API keys for backwards compatibility.
-	syncInlineAccessProvider(&config)
+	syncInlineAccessProvider(&cfg)
 
 	// Return the populated configuration struct.
-	return &config, nil
-}
-
-// SyncInlineAPIKeys updates the inline API key provider and top-level APIKeys field.
-func SyncInlineAPIKeys(cfg *Config, keys []string) {
-	if cfg == nil {
-		return
-	}
-	cloned := append([]string(nil), keys...)
-	cfg.APIKeys = cloned
-	if provider := cfg.ConfigAPIKeyProvider(); provider != nil {
-		if provider.Name == "" {
-			provider.Name = DefaultAccessProviderName
-		}
-		provider.APIKeys = cloned
-		return
-	}
-	cfg.Access.Providers = append(cfg.Access.Providers, AccessProvider{
-		Name:    DefaultAccessProviderName,
-		Type:    AccessProviderTypeConfigAPIKey,
-		APIKeys: cloned,
-	})
-}
-
-// ConfigAPIKeyProvider returns the first inline API key provider if present.
-func (c *Config) ConfigAPIKeyProvider() *AccessProvider {
-	if c == nil {
-		return nil
-	}
-	for i := range c.Access.Providers {
-		if c.Access.Providers[i].Type == AccessProviderTypeConfigAPIKey {
-			if c.Access.Providers[i].Name == "" {
-				c.Access.Providers[i].Name = DefaultAccessProviderName
-			}
-			return &c.Access.Providers[i]
-		}
-	}
-	return nil
+	return &cfg, nil
 }
 
 func syncInlineAccessProvider(cfg *Config) {
 	if cfg == nil {
 		return
 	}
-	if len(cfg.Access.Providers) == 0 {
-		if len(cfg.APIKeys) == 0 {
-			return
-		}
-		cfg.Access.Providers = append(cfg.Access.Providers, AccessProvider{
-			Name:    DefaultAccessProviderName,
-			Type:    AccessProviderTypeConfigAPIKey,
-			APIKeys: append([]string(nil), cfg.APIKeys...),
-		})
-		return
-	}
-	provider := cfg.ConfigAPIKeyProvider()
-	if provider == nil {
-		if len(cfg.APIKeys) == 0 {
-			return
-		}
-		cfg.Access.Providers = append(cfg.Access.Providers, AccessProvider{
-			Name:    DefaultAccessProviderName,
-			Type:    AccessProviderTypeConfigAPIKey,
-			APIKeys: append([]string(nil), cfg.APIKeys...),
-		})
-		return
-	}
-	if len(provider.APIKeys) == 0 && len(cfg.APIKeys) > 0 {
-		provider.APIKeys = append([]string(nil), cfg.APIKeys...)
-	}
-	cfg.APIKeys = append([]string(nil), provider.APIKeys...)
+	if len(cfg.APIKeys) == 0 {
+		if provider := cfg.ConfigAPIKeyProvider(); provider != nil && len(provider.APIKeys) > 0 {
+			cfg.APIKeys = append([]string(nil), provider.APIKeys...)
+		}
+	}
+	cfg.Access.Providers = nil
 }
 
 // looksLikeBcrypt returns true if the provided string appears to be a bcrypt hash.
 func looksLikeBcrypt(s string) bool {
@@ -316,6 +254,7 @@ func hashSecret(secret string) (string, error) {
 // SaveConfigPreserveComments writes the config back to YAML while preserving existing comments
 // and key ordering by loading the original file into a yaml.Node tree and updating values in-place.
 func SaveConfigPreserveComments(configFile string, cfg *Config) error {
+	persistCfg := sanitizeConfigForPersist(cfg)
 	// Load original YAML as a node tree to preserve comments and ordering.
 	data, err := os.ReadFile(configFile)
 	if err != nil {
@@ -334,7 +273,7 @@ func SaveConfigPreserveComments(configFile string, cfg *Config) error {
 	}
 
 	// Marshal the current cfg to YAML, then unmarshal to a yaml.Node we can merge from.
-	rendered, err := yaml.Marshal(cfg)
+	rendered, err := yaml.Marshal(persistCfg)
 	if err != nil {
 		return err
 	}
@@ -349,6 +288,9 @@ func SaveConfigPreserveComments(configFile string, cfg *Config) error {
 		return fmt.Errorf("expected generated root mapping node")
 	}
 
+	// Remove deprecated auth block before merging to avoid persisting it again.
+	removeMapKey(original.Content[0], "auth")
+
 	// Merge generated into original in-place, preserving comments/order of existing nodes.
 	mergeMappingPreserve(original.Content[0], generated.Content[0])
 
@@ -367,6 +309,16 @@ func SaveConfigPreserveComments(configFile string, cfg *Config) error {
 	return enc.Close()
 }
 
+func sanitizeConfigForPersist(cfg *Config) *Config {
+	if cfg == nil {
+		return nil
+	}
+	clone := *cfg
+	clone.SDKConfig = cfg.SDKConfig
+	clone.SDKConfig.Access = config.AccessConfig{}
+	return &clone
+}
+
 // SaveConfigPreserveCommentsUpdateNestedScalar updates a nested scalar key path like ["a","b"]
 // while preserving comments and positions.
 func SaveConfigPreserveCommentsUpdateNestedScalar(configFile string, path []string, value string) error {
@@ -569,3 +521,15 @@ func copyNodeShallow(dst, src *yaml.Node) {
 		dst.Content = nil
 	}
 }
+
+func removeMapKey(mapNode *yaml.Node, key string) {
+	if mapNode == nil || mapNode.Kind != yaml.MappingNode || key == "" {
+		return
+	}
+	for i := 0; i+1 < len(mapNode.Content); i += 2 {
+		if mapNode.Content[i] != nil && mapNode.Content[i].Value == key {
+			mapNode.Content = append(mapNode.Content[:i], mapNode.Content[i+2:]...)
+			return
+		}
+	}
+}
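The new `api-key-entries` field allows a per-key `proxy-url` override. A sketch of the YAML shape it produces, using local stand-in types rather than the real `internal/config` structs; the key and proxy values are placeholders:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Local copies of the new config shapes for illustration only.
type apiKeyEntry struct {
	APIKey   string `yaml:"api-key"`
	ProxyURL string `yaml:"proxy-url,omitempty"`
}

type openAICompat struct {
	Name          string        `yaml:"name"`
	BaseURL       string        `yaml:"base-url"`
	APIKeyEntries []apiKeyEntry `yaml:"api-key-entries,omitempty"`
}

func main() {
	entry := openAICompat{
		Name:    "openrouter",
		BaseURL: "https://openrouter.ai/api/v1",
		APIKeyEntries: []apiKeyEntry{
			{APIKey: "sk-example", ProxyURL: "socks5://127.0.0.1:1080"},
		},
	}
	out, _ := yaml.Marshal([]openAICompat{entry})
	fmt.Print(string(out)) // prints the list form expected under openai-compatibility
}
```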
117 internal/logging/global_logger.go Normal file
@@ -0,0 +1,117 @@
package logging

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/gin-gonic/gin"
	log "github.com/sirupsen/logrus"
	"gopkg.in/natefinch/lumberjack.v2"
)

var (
	setupOnce      sync.Once
	writerMu       sync.Mutex
	logWriter      *lumberjack.Logger
	ginInfoWriter  *io.PipeWriter
	ginErrorWriter *io.PipeWriter
)

// LogFormatter defines a custom log format for logrus.
// This formatter adds timestamp, level, and source location to each log entry.
type LogFormatter struct{}

// Format renders a single log entry with custom formatting.
func (m *LogFormatter) Format(entry *log.Entry) ([]byte, error) {
	var buffer *bytes.Buffer
	if entry.Buffer != nil {
		buffer = entry.Buffer
	} else {
		buffer = &bytes.Buffer{}
	}

	timestamp := entry.Time.Format("2006-01-02 15:04:05")
	message := strings.TrimRight(entry.Message, "\r\n")
	formatted := fmt.Sprintf("[%s] [%s] [%s:%d] %s\n", timestamp, entry.Level, filepath.Base(entry.Caller.File), entry.Caller.Line, message)
	buffer.WriteString(formatted)

	return buffer.Bytes(), nil
}

// SetupBaseLogger configures the shared logrus instance and Gin writers.
// It is safe to call multiple times; initialization happens only once.
func SetupBaseLogger() {
	setupOnce.Do(func() {
		log.SetOutput(os.Stdout)
		log.SetReportCaller(true)
		log.SetFormatter(&LogFormatter{})

		ginInfoWriter = log.StandardLogger().Writer()
		gin.DefaultWriter = ginInfoWriter
		ginErrorWriter = log.StandardLogger().WriterLevel(log.ErrorLevel)
		gin.DefaultErrorWriter = ginErrorWriter
		gin.DebugPrintFunc = func(format string, values ...interface{}) {
			format = strings.TrimRight(format, "\r\n")
			log.StandardLogger().Infof(format, values...)
		}

		log.RegisterExitHandler(closeLogOutputs)
	})
}

// ConfigureLogOutput switches the global log destination between rotating files and stdout.
func ConfigureLogOutput(loggingToFile bool) error {
	SetupBaseLogger()

	writerMu.Lock()
	defer writerMu.Unlock()

	if loggingToFile {
		const logDir = "logs"
		if err := os.MkdirAll(logDir, 0o755); err != nil {
			return fmt.Errorf("logging: failed to create log directory: %w", err)
		}
		if logWriter != nil {
			_ = logWriter.Close()
		}
		logWriter = &lumberjack.Logger{
			Filename:   filepath.Join(logDir, "main.log"),
			MaxSize:    10,
			MaxBackups: 0,
			MaxAge:     0,
			Compress:   false,
		}
		log.SetOutput(logWriter)
		return nil
	}

	if logWriter != nil {
		_ = logWriter.Close()
		logWriter = nil
	}
	log.SetOutput(os.Stdout)
	return nil
}

func closeLogOutputs() {
	writerMu.Lock()
	defer writerMu.Unlock()

	if logWriter != nil {
		_ = logWriter.Close()
		logWriter = nil
	}
	if ginInfoWriter != nil {
		_ = ginInfoWriter.Close()
		ginInfoWriter = nil
	}
	if ginErrorWriter != nil {
		_ = ginErrorWriter.Close()
		ginErrorWriter = nil
	}
}
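A minimal sketch of how a caller might use this new logging package, assuming it runs inside this module (the `internal/logging` import path is only reachable from within the repository, and the `loggingToFile` flag name is illustrative):

```go
package main

import (
	"github.com/router-for-me/CLIProxyAPI/v6/internal/logging"
	log "github.com/sirupsen/logrus"
)

func main() {
	// Install the shared formatter and Gin writers once.
	logging.SetupBaseLogger()

	// In practice this flag would come from the YAML config; true routes
	// output to logs/main.log via lumberjack, false keeps stdout.
	loggingToFile := true
	if err := logging.ConfigureLogOutput(loggingToFile); err != nil {
		log.WithError(err).Warn("falling back to stdout logging")
	}

	log.Info("logging configured")
}
```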
256 internal/managementasset/updater.go Normal file
@@ -0,0 +1,256 @@
package managementasset

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
	sdkconfig "github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
	log "github.com/sirupsen/logrus"
)

const (
	managementReleaseURL = "https://api.github.com/repos/router-for-me/Cli-Proxy-API-Management-Center/releases/latest"
	managementAssetName  = "management.html"
	httpUserAgent        = "CLIProxyAPI-management-updater"
)

// ManagementFileName exposes the control panel asset filename.
const ManagementFileName = managementAssetName

func newHTTPClient(proxyURL string) *http.Client {
	client := &http.Client{Timeout: 15 * time.Second}

	sdkCfg := &sdkconfig.SDKConfig{ProxyURL: strings.TrimSpace(proxyURL)}
	util.SetProxy(sdkCfg, client)

	return client
}

type releaseAsset struct {
	Name               string `json:"name"`
	BrowserDownloadURL string `json:"browser_download_url"`
	Digest             string `json:"digest"`
}

type releaseResponse struct {
	Assets []releaseAsset `json:"assets"`
}

// StaticDir resolves the directory that stores the management control panel asset.
func StaticDir(configFilePath string) string {
	configFilePath = strings.TrimSpace(configFilePath)
	if configFilePath == "" {
		return ""
	}
	base := filepath.Dir(configFilePath)
	return filepath.Join(base, "static")
}

// FilePath resolves the absolute path to the management control panel asset.
func FilePath(configFilePath string) string {
	dir := StaticDir(configFilePath)
	if dir == "" {
		return ""
	}
	return filepath.Join(dir, ManagementFileName)
}

// EnsureLatestManagementHTML checks the latest management.html asset and updates the local copy when needed.
// The function is designed to run in a background goroutine and will never panic.
func EnsureLatestManagementHTML(ctx context.Context, staticDir string, proxyURL string) {
	if ctx == nil {
		ctx = context.Background()
	}

	staticDir = strings.TrimSpace(staticDir)
	if staticDir == "" {
		log.Debug("management asset sync skipped: empty static directory")
		return
	}

	if err := os.MkdirAll(staticDir, 0o755); err != nil {
		log.WithError(err).Warn("failed to prepare static directory for management asset")
		return
	}

	client := newHTTPClient(proxyURL)

	localPath := filepath.Join(staticDir, managementAssetName)
	localHash, err := fileSHA256(localPath)
	if err != nil {
		if !errors.Is(err, os.ErrNotExist) {
			log.WithError(err).Debug("failed to read local management asset hash")
		}
		localHash = ""
	}

	asset, remoteHash, err := fetchLatestAsset(ctx, client)
	if err != nil {
		log.WithError(err).Warn("failed to fetch latest management release information")
		return
	}

	if remoteHash != "" && localHash != "" && strings.EqualFold(remoteHash, localHash) {
		log.Debug("management asset is already up to date")
		return
	}

	data, downloadedHash, err := downloadAsset(ctx, client, asset.BrowserDownloadURL)
	if err != nil {
		log.WithError(err).Warn("failed to download management asset")
		return
	}

	if remoteHash != "" && !strings.EqualFold(remoteHash, downloadedHash) {
		log.Warnf("remote digest mismatch for management asset: expected %s got %s", remoteHash, downloadedHash)
	}

	if err = atomicWriteFile(localPath, data); err != nil {
		log.WithError(err).Warn("failed to update management asset on disk")
		return
	}

	log.Infof("management asset updated successfully (hash=%s)", downloadedHash)
}

func fetchLatestAsset(ctx context.Context, client *http.Client) (*releaseAsset, string, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, managementReleaseURL, nil)
	if err != nil {
		return nil, "", fmt.Errorf("create release request: %w", err)
	}
	req.Header.Set("Accept", "application/vnd.github+json")
	req.Header.Set("User-Agent", httpUserAgent)

	resp, err := client.Do(req)
	if err != nil {
		return nil, "", fmt.Errorf("execute release request: %w", err)
	}
	defer func() {
		_ = resp.Body.Close()
	}()

	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
		return nil, "", fmt.Errorf("unexpected release status %d: %s", resp.StatusCode, strings.TrimSpace(string(body)))
	}

	var release releaseResponse
	if err = json.NewDecoder(resp.Body).Decode(&release); err != nil {
		return nil, "", fmt.Errorf("decode release response: %w", err)
	}

	for i := range release.Assets {
		asset := &release.Assets[i]
		if strings.EqualFold(asset.Name, managementAssetName) {
			remoteHash := parseDigest(asset.Digest)
			return asset, remoteHash, nil
		}
	}

	return nil, "", fmt.Errorf("management asset %s not found in latest release", managementAssetName)
}

func downloadAsset(ctx context.Context, client *http.Client, downloadURL string) ([]byte, string, error) {
	if strings.TrimSpace(downloadURL) == "" {
		return nil, "", fmt.Errorf("empty download url")
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadURL, nil)
	if err != nil {
		return nil, "", fmt.Errorf("create download request: %w", err)
	}
	req.Header.Set("User-Agent", httpUserAgent)

	resp, err := client.Do(req)
	if err != nil {
		return nil, "", fmt.Errorf("execute download request: %w", err)
	}
	defer func() {
		_ = resp.Body.Close()
	}()

	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
		return nil, "", fmt.Errorf("unexpected download status %d: %s", resp.StatusCode, strings.TrimSpace(string(body)))
	}

	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, "", fmt.Errorf("read download body: %w", err)
	}

	sum := sha256.Sum256(data)
	return data, hex.EncodeToString(sum[:]), nil
}

func fileSHA256(path string) (string, error) {
	file, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer func() {
		_ = file.Close()
	}()

	h := sha256.New()
	if _, err = io.Copy(h, file); err != nil {
		return "", err
	}

	return hex.EncodeToString(h.Sum(nil)), nil
}

func atomicWriteFile(path string, data []byte) error {
	tmpFile, err := os.CreateTemp(filepath.Dir(path), "management-*.html")
	if err != nil {
		return err
	}

	tmpName := tmpFile.Name()
	defer func() {
		_ = tmpFile.Close()
		_ = os.Remove(tmpName)
	}()

	if _, err = tmpFile.Write(data); err != nil {
		return err
	}

	if err = tmpFile.Chmod(0o644); err != nil {
		return err
	}

	if err = tmpFile.Close(); err != nil {
		return err
	}

	if err = os.Rename(tmpName, path); err != nil {
		return err
	}

	return nil
}

func parseDigest(digest string) string {
	digest = strings.TrimSpace(digest)
	if digest == "" {
		return ""
	}

	if idx := strings.Index(digest, ":"); idx >= 0 {
		digest = digest[idx+1:]
	}

	return strings.ToLower(strings.TrimSpace(digest))
}
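A sketch of how this updater could be invoked at startup, based only on the signatures above; the config path, timeout, and empty proxy value are placeholders:

```go
package main

import (
	"context"
	"time"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/managementasset"
)

func main() {
	configPath := "/etc/cliproxy/config.yaml" // placeholder path
	staticDir := managementasset.StaticDir(configPath)

	// Run the sync in the background with a bounded timeout so a slow
	// GitHub API call cannot block startup or shutdown.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	go managementasset.EnsureLatestManagementHTML(ctx, staticDir, "" /* proxy URL */)

	// ... start the HTTP server, which serves staticDir/management.html ...
	select {}
}
```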
File diff suppressed because one or more lines are too long
@@ -1,13 +1,15 @@
 package misc
 
 import (
+	"fmt"
 	"path/filepath"
 	"strings"
 
 	log "github.com/sirupsen/logrus"
 )
 
-var credentialSeparator = strings.Repeat("-", 70)
+// Separator used to visually group related log lines.
+var credentialSeparator = strings.Repeat("-", 67)
 
 // LogSavingCredentials emits a consistent log message when persisting auth material.
 func LogSavingCredentials(path string) {
@@ -15,10 +17,10 @@ func LogSavingCredentials(path string) {
 		return
 	}
 	// Use filepath.Clean so logs remain stable even if callers pass redundant separators.
-	log.Infof("Saving credentials to %s", filepath.Clean(path))
+	fmt.Printf("Saving credentials to %s\n", filepath.Clean(path))
 }
 
 // LogCredentialSeparator adds a visual separator to group auth/key processing logs.
 func LogCredentialSeparator() {
-	log.Info(credentialSeparator)
+	log.Debug(credentialSeparator)
 }
@@ -9,8 +9,6 @@ import (
 	"net/http"
 	"net/http/cookiejar"
 	"net/url"
-	"os"
-	"path/filepath"
 	"regexp"
 	"strings"
 	"time"
@@ -126,19 +124,6 @@ func getAccessToken(baseCookies map[string]string, proxy string, verbose bool, i
 		}
 	}
 
-	cacheDir := "temp"
-	_ = os.MkdirAll(cacheDir, 0o755)
-	if v1, ok1 := baseCookies["__Secure-1PSID"]; ok1 {
-		cacheFile := filepath.Join(cacheDir, ".cached_1psidts_"+v1+".txt")
-		if b, err := os.ReadFile(cacheFile); err == nil {
-			cv := strings.TrimSpace(string(b))
-			if cv != "" {
-				merged := map[string]string{"__Secure-1PSID": v1, "__Secure-1PSIDTS": cv}
-				trySets = append(trySets, merged)
-			}
-		}
-	}
-
 	if len(extraCookies) > 0 {
 		trySets = append(trySets, extraCookies)
 	}
@@ -162,7 +147,7 @@ func getAccessToken(baseCookies map[string]string, proxy string, verbose bool, i
 		if len(matches) >= 2 {
 			token := matches[1]
 			if verbose {
-				log.Infof("Gemini access token acquired.")
+				fmt.Println("Gemini access token acquired.")
 			}
 			return token, mergedCookies, nil
 		}
@@ -240,7 +225,7 @@ func MaskToken28(s string) string {
 }
 
 var NanoBananaModel = map[string]struct{}{
-	"gemini-2.5-flash-image-preview": {},
+	"gemini-2.5-flash-image-web": {},
 }
 
 // NewGeminiClient creates a client. Pass empty strings to auto-detect via browser cookies (not implemented in Go port).
@@ -295,7 +280,7 @@ func (c *GeminiClient) Init(timeoutSec float64, verbose bool) error {
 
 	c.Timeout = time.Duration(timeoutSec * float64(time.Second))
 	if verbose {
-		log.Infof("Gemini client initialized successfully.")
+		fmt.Println("Gemini client initialized successfully.")
 	}
 	return nil
 }
@@ -307,7 +292,7 @@ func (c *GeminiClient) Close(delaySec float64) {
 	c.Running = false
 }
 
-// ensureRunning mirrors the Python decorator behavior and retries on APIError.
+// ensureRunning mirrors the decorator behavior and retries on APIError.
 func (c *GeminiClient) ensureRunning() error {
 	if c.Running {
 		return nil
@@ -395,6 +380,15 @@ func (c *GeminiClient) generateOnce(prompt string, files []string, model Model,
 	}
 
 	inner := []any{item0, nil, item2}
+	// Attach Gem first to keep index alignment with reference implementation
+	// so the Gemini Web UI can recognize the selected Gem.
+	if gem != nil {
+		// pad with 16 nils then gem ID
+		for i := 0; i < 16; i++ {
+			inner = append(inner, nil)
+		}
+		inner = append(inner, gem.ID)
+	}
 	requestedModel := strings.ToLower(model.Name)
 	if chat != nil && chat.RequestedModel() != "" {
 		requestedModel = chat.RequestedModel()
@@ -403,13 +397,6 @@ func (c *GeminiClient) generateOnce(prompt string, files []string, model Model,
 		inner = ensureAnyLen(inner, 49)
 		inner[49] = 14
 	}
-	if gem != nil {
-		// pad with 16 nils then gem ID
-		for i := 0; i < 16; i++ {
-			inner = append(inner, nil)
-		}
-		inner = append(inner, gem.ID)
-	}
 	innerJSON, _ := json.Marshal(inner)
 	outer := []any{nil, string(innerJSON)}
 	outerJSON, _ := json.Marshal(outer)
@@ -434,7 +421,7 @@ func (c *GeminiClient) generateOnce(prompt string, files []string, model Model,
 	}()
 
 	if resp.StatusCode == 429 {
-		// Surface 429 as TemporarilyBlocked to match Python behavior
+		// Surface 429 as TemporarilyBlocked to match reference behavior
 		c.Close(0)
 		return empty, &TemporarilyBlocked{GeminiError{Msg: "Too many requests. IP temporarily blocked."}}
 	}
@@ -514,7 +501,7 @@ func (c *GeminiClient) generateOnce(prompt string, files []string, model Model,
 			}
 		}
 	}
-	// Parse nested error code to align with Python mapping
+	// Parse nested error code to align with error mapping
 	var top []any
 	// Prefer lastTop from fallback scan; otherwise try parts[2]
 	if len(lastTop) > 0 {
@@ -537,7 +524,7 @@ func (c *GeminiClient) generateOnce(prompt string, files []string, model Model,
 		}
 	}
 	// Debug("Invalid response: control frames only; no body found")
-	// Close the client to force re-initialization on next request (parity with Python client behavior)
+	// Close the client to force re-initialization on next request (parity with reference client behavior)
 	c.Close(0)
 	return empty, &APIError{Msg: "Failed to generate contents. Invalid response data received."}
 }
@@ -760,7 +747,7 @@ func (c *GeminiClient) generateOnce(prompt string, files []string, model Model,
 }
 
 // extractErrorCode attempts to navigate the known nested error structure and fetch the integer code.
-// Mirrors Python path: response_json[0][5][2][0][1][0]
+// Mirrors reference path: response_json[0][5][2][0][1][0]
 func extractErrorCode(top []any) (int, bool) {
 	if len(top) == 0 {
 		return 0, false
80 internal/provider/gemini-web/conversation/alias.go Normal file
@@ -0,0 +1,80 @@
package conversation

import (
	"strings"
	"sync"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
)

var (
	aliasOnce sync.Once
	aliasMap  map[string]string
)

// EnsureGeminiWebAliasMap populates the alias map once.
func EnsureGeminiWebAliasMap() {
	aliasOnce.Do(func() {
		aliasMap = make(map[string]string)
		for _, m := range registry.GetGeminiModels() {
			if m.ID == "gemini-2.5-flash-lite" {
				continue
			}
			if m.ID == "gemini-2.5-flash" {
				aliasMap["gemini-2.5-flash-image-web"] = "gemini-2.5-flash"
			}
			alias := AliasFromModelID(m.ID)
			aliasMap[strings.ToLower(alias)] = strings.ToLower(m.ID)
		}
	})
}

// MapAliasToUnderlying normalizes a model alias to its underlying identifier.
func MapAliasToUnderlying(name string) string {
	EnsureGeminiWebAliasMap()
	n := strings.ToLower(strings.TrimSpace(name))
	if n == "" {
		return n
	}
	if u, ok := aliasMap[n]; ok {
		return u
	}
	const suffix = "-web"
	if strings.HasSuffix(n, suffix) {
		return strings.TrimSuffix(n, suffix)
	}
	return n
}

// AliasFromModelID mirrors the original helper for deriving alias IDs.
func AliasFromModelID(modelID string) string {
	return modelID + "-web"
}

// NormalizeModel returns the canonical identifier used for hashing.
func NormalizeModel(model string) string {
	return MapAliasToUnderlying(model)
}

// GetGeminiWebAliasedModels returns alias metadata for registry exposure.
func GetGeminiWebAliasedModels() []*registry.ModelInfo {
	EnsureGeminiWebAliasMap()
	aliased := make([]*registry.ModelInfo, 0)
	for _, m := range registry.GetGeminiModels() {
		if m.ID == "gemini-2.5-flash-lite" {
			continue
		} else if m.ID == "gemini-2.5-flash" {
			cpy := *m
			cpy.ID = "gemini-2.5-flash-image-web"
			cpy.Name = "gemini-2.5-flash-image-web"
			cpy.DisplayName = "Nano Banana"
			cpy.Description = "Gemini 2.5 Flash Preview Image"
			aliased = append(aliased, &cpy)
		}
		cpy := *m
		cpy.ID = AliasFromModelID(m.ID)
		cpy.Name = cpy.ID
		aliased = append(aliased, &cpy)
	}
	return aliased
}
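The alias convention is simply `<model-id>-web`, plus one special case for the image model. A standalone sketch of the same mapping rule, kept independent of the registry types (the model list below is illustrative only):

```go
package main

import (
	"fmt"
	"strings"
)

// mapAlias mirrors the fallback rule in MapAliasToUnderlying: known aliases
// win, otherwise a trailing "-web" suffix is stripped.
func mapAlias(aliases map[string]string, name string) string {
	n := strings.ToLower(strings.TrimSpace(name))
	if u, ok := aliases[n]; ok {
		return u
	}
	return strings.TrimSuffix(n, "-web")
}

func main() {
	aliases := map[string]string{
		"gemini-2.5-pro-web":         "gemini-2.5-pro", // illustrative entries
		"gemini-2.5-flash-image-web": "gemini-2.5-flash",
	}
	fmt.Println(mapAlias(aliases, "gemini-2.5-pro-web"))         // gemini-2.5-pro
	fmt.Println(mapAlias(aliases, "gemini-2.5-flash-image-web")) // gemini-2.5-flash
	fmt.Println(mapAlias(aliases, "some-other-model-web"))       // some-other-model
}
```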
74 internal/provider/gemini-web/conversation/hash.go Normal file
@@ -0,0 +1,74 @@
package conversation

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// Message represents a minimal role-text pair used for hashing and comparison.
type Message struct {
	Role string `json:"role"`
	Text string `json:"text"`
}

// StoredMessage mirrors the persisted conversation message structure.
type StoredMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
	Name    string `json:"name,omitempty"`
}

// Sha256Hex computes SHA-256 hex digest for the specified string.
func Sha256Hex(s string) string {
	sum := sha256.Sum256([]byte(s))
	return hex.EncodeToString(sum[:])
}

// ToStoredMessages converts in-memory messages into the persisted representation.
func ToStoredMessages(msgs []Message) []StoredMessage {
	out := make([]StoredMessage, 0, len(msgs))
	for _, m := range msgs {
		out = append(out, StoredMessage{Role: m.Role, Content: m.Text})
	}
	return out
}

// StoredToMessages converts stored messages back into the in-memory representation.
func StoredToMessages(msgs []StoredMessage) []Message {
	out := make([]Message, 0, len(msgs))
	for _, m := range msgs {
		out = append(out, Message{Role: m.Role, Text: m.Content})
	}
	return out
}

// hashMessage normalizes message data and returns a stable digest.
func hashMessage(m StoredMessage) string {
	s := fmt.Sprintf(`{"content":%q,"role":%q}`, m.Content, strings.ToLower(m.Role))
	return Sha256Hex(s)
}

// HashConversationWithPrefix computes a conversation hash using the provided prefix (client identifier) and model.
func HashConversationWithPrefix(prefix, model string, msgs []StoredMessage) string {
	var b strings.Builder
	b.WriteString(strings.ToLower(strings.TrimSpace(prefix)))
	b.WriteString("|")
	b.WriteString(strings.ToLower(strings.TrimSpace(model)))
	for _, m := range msgs {
		b.WriteString("|")
		b.WriteString(hashMessage(m))
	}
	return Sha256Hex(b.String())
}

// HashConversationForAccount keeps compatibility with the per-account hash previously used.
func HashConversationForAccount(clientID, model string, msgs []StoredMessage) string {
	return HashConversationWithPrefix(clientID, model, msgs)
}

// HashConversationGlobal produces a hash suitable for cross-account lookups.
func HashConversationGlobal(model string, msgs []StoredMessage) string {
	return HashConversationWithPrefix("global", model, msgs)
}
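The hash is order-sensitive: the prefix, the normalized model, and one digest per message are joined with `|` and hashed again. A self-contained sketch of the same scheme, for illustration only:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

type stored struct{ Role, Content string }

func sha256Hex(s string) string {
	sum := sha256.Sum256([]byte(s))
	return hex.EncodeToString(sum[:])
}

// hashConversation mirrors HashConversationWithPrefix: each message is hashed
// on its own, then prefix, model, and message digests are chained and hashed.
func hashConversation(prefix, model string, msgs []stored) string {
	var b strings.Builder
	b.WriteString(strings.ToLower(strings.TrimSpace(prefix)))
	b.WriteString("|")
	b.WriteString(strings.ToLower(strings.TrimSpace(model)))
	for _, m := range msgs {
		b.WriteString("|")
		b.WriteString(sha256Hex(fmt.Sprintf(`{"content":%q,"role":%q}`, m.Content, strings.ToLower(m.Role))))
	}
	return sha256Hex(b.String())
}

func main() {
	msgs := []stored{{"user", "hello"}, {"assistant", "hi there"}}
	fmt.Println(hashConversation("global", "gemini-2.5-pro", msgs))
}
```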
280 internal/provider/gemini-web/conversation/index.go Normal file
@@ -0,0 +1,280 @@
package conversation

import (
	"bytes"
	"encoding/json"
	"errors"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	bolt "go.etcd.io/bbolt"
)

const (
	bucketMatches    = "matches"
	defaultIndexFile = "gemini-web-index.bolt"
)

// MatchRecord stores persisted mapping metadata for a conversation prefix.
type MatchRecord struct {
	AccountLabel string   `json:"account_label"`
	Metadata     []string `json:"metadata,omitempty"`
	PrefixLen    int      `json:"prefix_len"`
	UpdatedAt    int64    `json:"updated_at"`
}

// MatchResult combines a persisted record with the hash that produced it.
type MatchResult struct {
	Hash   string
	Record MatchRecord
	Model  string
}

var (
	indexOnce sync.Once
	indexDB   *bolt.DB
	indexErr  error
)

func openIndex() (*bolt.DB, error) {
	indexOnce.Do(func() {
		path := indexPath()
		if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
			indexErr = err
			return
		}
		db, err := bolt.Open(path, 0o600, &bolt.Options{Timeout: 2 * time.Second})
		if err != nil {
			indexErr = err
			return
		}
		indexDB = db
	})
	return indexDB, indexErr
}

func indexPath() string {
	wd, err := os.Getwd()
	if err != nil || wd == "" {
		wd = "."
	}
	return filepath.Join(wd, "conv", defaultIndexFile)
}

// StoreMatch persists or updates a conversation hash mapping.
func StoreMatch(hash string, record MatchRecord) error {
	if strings.TrimSpace(hash) == "" {
		return errors.New("gemini-web conversation: empty hash")
	}
	db, err := openIndex()
	if err != nil {
		return err
	}
	record.UpdatedAt = time.Now().UTC().Unix()
	payload, err := json.Marshal(record)
	if err != nil {
		return err
	}
	return db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(bucketMatches))
		if err != nil {
			return err
		}
		// Namespace by account label to avoid cross-account collisions.
		label := strings.ToLower(strings.TrimSpace(record.AccountLabel))
		if label == "" {
			return errors.New("gemini-web conversation: empty account label")
		}
		key := []byte(hash + ":" + label)
		if err := bucket.Put(key, payload); err != nil {
			return err
		}
		// Best-effort cleanup of legacy single-key format (hash -> MatchRecord).
		// We do not know its label; leave it for lookup fallback/cleanup elsewhere.
		return nil
	})
}

// LookupMatch retrieves a stored mapping.
// It prefers namespaced entries (hash:label). If multiple labels exist for the same
// hash, it returns not found to avoid redirecting to the wrong credential.
// Falls back to legacy single-key entries if present.
func LookupMatch(hash string) (MatchRecord, bool, error) {
	db, err := openIndex()
	if err != nil {
		return MatchRecord{}, false, err
	}
	var foundOne bool
	var ambiguous bool
	var firstLabel string
	var single MatchRecord
	err = db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(bucketMatches))
		if bucket == nil {
			return nil
		}
		// Scan namespaced keys with prefix "hash:"
		prefix := []byte(hash + ":")
		c := bucket.Cursor()
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			if len(v) == 0 {
				continue
			}
			var rec MatchRecord
			if err := json.Unmarshal(v, &rec); err != nil {
				// Ignore malformed; removal is handled elsewhere.
				continue
			}
			if strings.TrimSpace(rec.AccountLabel) == "" || rec.PrefixLen <= 0 {
				continue
			}
			label := strings.ToLower(strings.TrimSpace(rec.AccountLabel))
			if !foundOne {
				firstLabel = label
				single = rec
				foundOne = true
				continue
			}
			if label != firstLabel {
				ambiguous = true
				// Early exit scan; ambiguity detected.
				return nil
			}
		}
		if foundOne {
			return nil
		}
		// Fallback to legacy single-key format
		raw := bucket.Get([]byte(hash))
		if len(raw) == 0 {
			return nil
		}
		return json.Unmarshal(raw, &single)
	})
	if err != nil {
		return MatchRecord{}, false, err
	}
	if ambiguous {
		return MatchRecord{}, false, nil
	}
	if strings.TrimSpace(single.AccountLabel) == "" || single.PrefixLen <= 0 {
		return MatchRecord{}, false, nil
	}
	return single, true, nil
}

// RemoveMatch deletes all mappings for the given hash (all labels and legacy key).
func RemoveMatch(hash string) error {
	db, err := openIndex()
	if err != nil {
		return err
	}
	return db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(bucketMatches))
		if bucket == nil {
			return nil
		}
		// Delete namespaced entries
		prefix := []byte(hash + ":")
		c := bucket.Cursor()
		for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, _ = c.Next() {
			if err := bucket.Delete(k); err != nil {
				return err
			}
		}
		// Delete legacy entry
		_ = bucket.Delete([]byte(hash))
		return nil
	})
}

// RemoveMatchForLabel deletes the mapping for the given hash and label only.
func RemoveMatchForLabel(hash, label string) error {
	label = strings.ToLower(strings.TrimSpace(label))
	if strings.TrimSpace(hash) == "" || label == "" {
		return nil
	}
	db, err := openIndex()
	if err != nil {
		return err
	}
	return db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(bucketMatches))
		if bucket == nil {
			return nil
		}
		// Remove namespaced key
		_ = bucket.Delete([]byte(hash + ":" + label))
		// If legacy single-key exists and matches label, remove it as well.
		if raw := bucket.Get([]byte(hash)); len(raw) > 0 {
			var rec MatchRecord
			if err := json.Unmarshal(raw, &rec); err == nil {
				if strings.EqualFold(strings.TrimSpace(rec.AccountLabel), label) {
					_ = bucket.Delete([]byte(hash))
				}
			}
		}
		return nil
	})
}

// RemoveMatchesByLabel removes all entries associated with the specified label.
func RemoveMatchesByLabel(label string) error {
	label = strings.TrimSpace(label)
	if label == "" {
		return nil
	}
	db, err := openIndex()
	if err != nil {
		return err
	}
	return db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(bucketMatches))
		if bucket == nil {
			return nil
		}
		cursor := bucket.Cursor()
		for k, v := cursor.First(); k != nil; k, v = cursor.Next() {
			if len(v) == 0 {
				continue
			}
			var record MatchRecord
			if err := json.Unmarshal(v, &record); err != nil {
				_ = bucket.Delete(k)
				continue
			}
			if strings.EqualFold(strings.TrimSpace(record.AccountLabel), label) {
				if err := bucket.Delete(k); err != nil {
					return err
				}
			}
		}
		return nil
	})
}

// StoreConversation updates all hashes representing the provided conversation snapshot.
func StoreConversation(label, model string, msgs []Message, metadata []string) error {
	label = strings.TrimSpace(label)
	if label == "" || len(msgs) == 0 {
		return nil
	}
	hashes := BuildStorageHashes(model, msgs)
	if len(hashes) == 0 {
		return nil
	}
	for _, h := range hashes {
		rec := MatchRecord{
			AccountLabel: label,
			Metadata:     append([]string(nil), metadata...),
			PrefixLen:    h.PrefixLen,
		}
		if err := StoreMatch(h.Hash, rec); err != nil {
			return err
		}
	}
	return nil
}
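Taken together, StoreConversation writes one record per qualifying snapshot hash and LookupMatch resolves a hash back to an account label, deliberately returning not-found when two accounts claim the same hash. A sketch of the round trip, assuming it runs inside this module (the label and metadata values are placeholders; the BoltDB index is created under `./conv/` in the working directory):

```go
package main

import (
	"fmt"

	conversation "github.com/router-for-me/CLIProxyAPI/v6/internal/provider/gemini-web/conversation"
)

func main() {
	msgs := []conversation.Message{
		{Role: "user", Text: "hello"},
		{Role: "assistant", Text: "hi there"},
	}

	// Persist every qualifying hash for this account.
	if err := conversation.StoreConversation("account-a", "gemini-2.5-pro", msgs, []string{"c_123"}); err != nil {
		fmt.Println("store failed:", err)
		return
	}

	// Later, a lookup built from the same snapshot should resolve to the label.
	for _, cand := range conversation.BuildLookupHashes("gemini-2.5-pro", msgs) {
		if rec, ok, err := conversation.LookupMatch(cand.Hash); err == nil && ok {
			fmt.Printf("matched %s with prefix length %d\n", rec.AccountLabel, rec.PrefixLen)
			return
		}
	}
	fmt.Println("no stored match")
}
```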
64 internal/provider/gemini-web/conversation/lookup.go Normal file
@@ -0,0 +1,64 @@
package conversation

import "strings"

// PrefixHash represents a hash candidate for a specific prefix length.
type PrefixHash struct {
	Hash      string
	PrefixLen int
}

// BuildLookupHashes generates hash candidates ordered from longest to shortest prefix.
func BuildLookupHashes(model string, msgs []Message) []PrefixHash {
	if len(msgs) < 2 {
		return nil
	}
	model = NormalizeModel(model)
	sanitized := SanitizeAssistantMessages(msgs)
	result := make([]PrefixHash, 0, len(sanitized))
	for end := len(sanitized); end >= 2; end-- {
		tailRole := strings.ToLower(strings.TrimSpace(sanitized[end-1].Role))
		if tailRole != "assistant" && tailRole != "system" {
			continue
		}
		prefix := sanitized[:end]
		hash := HashConversationGlobal(model, ToStoredMessages(prefix))
		result = append(result, PrefixHash{Hash: hash, PrefixLen: end})
	}
	return result
}

// BuildStorageHashes returns hashes representing the full conversation snapshot.
func BuildStorageHashes(model string, msgs []Message) []PrefixHash {
	if len(msgs) == 0 {
		return nil
	}
	model = NormalizeModel(model)
	sanitized := SanitizeAssistantMessages(msgs)
	if len(sanitized) == 0 {
		return nil
	}
	result := make([]PrefixHash, 0, len(sanitized))
	seen := make(map[string]struct{}, len(sanitized))
	for start := 0; start < len(sanitized); start++ {
		segment := sanitized[start:]
		if len(segment) < 2 {
			continue
		}
		tailRole := strings.ToLower(strings.TrimSpace(segment[len(segment)-1].Role))
		if tailRole != "assistant" && tailRole != "system" {
			continue
		}
		hash := HashConversationGlobal(model, ToStoredMessages(segment))
		if _, exists := seen[hash]; exists {
			continue
		}
		seen[hash] = struct{}{}
		result = append(result, PrefixHash{Hash: hash, PrefixLen: len(segment)})
	}
	if len(result) == 0 {
		hash := HashConversationGlobal(model, ToStoredMessages(sanitized))
		return []PrefixHash{{Hash: hash, PrefixLen: len(sanitized)}}
	}
	return result
}
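Candidate enumeration only stops at prefixes whose final message comes from the assistant (or system), walking from the longest prefix down. A tiny standalone illustration of which prefix lengths qualify:

```go
package main

import (
	"fmt"
	"strings"
)

type msg struct{ Role, Text string }

// qualifyingPrefixLens mirrors the loop in BuildLookupHashes: a prefix is a
// candidate only when it has at least two messages and ends on an assistant
// or system turn.
func qualifyingPrefixLens(msgs []msg) []int {
	var lens []int
	for end := len(msgs); end >= 2; end-- {
		role := strings.ToLower(strings.TrimSpace(msgs[end-1].Role))
		if role == "assistant" || role == "system" {
			lens = append(lens, end)
		}
	}
	return lens
}

func main() {
	history := []msg{
		{"user", "q1"}, {"assistant", "a1"},
		{"user", "q2"}, {"assistant", "a2"},
		{"user", "q3"}, // trailing user turn does not qualify
	}
	fmt.Println(qualifyingPrefixLens(history)) // [4 2]
}
```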
6 internal/provider/gemini-web/conversation/metadata.go Normal file
@@ -0,0 +1,6 @@
package conversation

const (
	MetadataMessagesKey = "gemini_web_messages"
	MetadataMatchKey    = "gemini_web_match"
)
110 internal/provider/gemini-web/conversation/parse.go Normal file
@@ -0,0 +1,110 @@
package conversation

import (
	"strings"

	"github.com/tidwall/gjson"
)

// ExtractMessages attempts to build a message list from the inbound request payload.
func ExtractMessages(handlerType string, raw []byte) []Message {
	if len(raw) == 0 {
		return nil
	}
	if msgs := extractOpenAIStyle(raw); len(msgs) > 0 {
		return msgs
	}
	if msgs := extractGeminiContents(raw); len(msgs) > 0 {
		return msgs
	}
	return nil
}

func extractOpenAIStyle(raw []byte) []Message {
	root := gjson.ParseBytes(raw)
	messages := root.Get("messages")
	if !messages.Exists() {
		return nil
	}
	out := make([]Message, 0, 8)
	messages.ForEach(func(_, entry gjson.Result) bool {
		role := strings.ToLower(strings.TrimSpace(entry.Get("role").String()))
		if role == "" {
			return true
		}
		if role == "system" {
			return true
		}
		// Ignore OpenAI tool messages to keep hashing aligned with
		// persistence (which only keeps text/inlineData for Gemini contents).
		// This avoids mismatches when a tool response is present: the
		// storage path drops tool payloads while the lookup path would
		// otherwise include them, causing sticky selection to fail.
		if role == "tool" {
			return true
		}
		var contentBuilder strings.Builder
		content := entry.Get("content")
		if !content.Exists() {
			out = append(out, Message{Role: role, Text: ""})
			return true
		}
		switch content.Type {
		case gjson.String:
			contentBuilder.WriteString(content.String())
		case gjson.JSON:
			if content.IsArray() {
				content.ForEach(func(_, part gjson.Result) bool {
					if text := part.Get("text"); text.Exists() {
						if contentBuilder.Len() > 0 {
							contentBuilder.WriteString("\n")
						}
						contentBuilder.WriteString(text.String())
					}
					return true
				})
			}
		}
		out = append(out, Message{Role: role, Text: contentBuilder.String()})
		return true
	})
	if len(out) == 0 {
		return nil
	}
	return out
}

func extractGeminiContents(raw []byte) []Message {
	contents := gjson.GetBytes(raw, "contents")
	if !contents.Exists() {
		return nil
	}
	out := make([]Message, 0, 8)
	contents.ForEach(func(_, entry gjson.Result) bool {
		role := strings.TrimSpace(entry.Get("role").String())
		if role == "" {
			role = "user"
		} else {
			role = strings.ToLower(role)
			if role == "model" {
				role = "assistant"
			}
		}
		var builder strings.Builder
		entry.Get("parts").ForEach(func(_, part gjson.Result) bool {
			if text := part.Get("text"); text.Exists() {
				if builder.Len() > 0 {
					builder.WriteString("\n")
				}
				builder.WriteString(text.String())
			}
			return true
		})
		out = append(out, Message{Role: role, Text: builder.String()})
		return true
	})
	if len(out) == 0 {
		return nil
	}
	return out
}
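A sketch of what ExtractMessages yields for an OpenAI-style body, assuming it runs inside this module; the handler type string passed here is only a placeholder since the shown code does not branch on it:

```go
package main

import (
	"fmt"

	conversation "github.com/router-for-me/CLIProxyAPI/v6/internal/provider/gemini-web/conversation"
)

func main() {
	openAIBody := []byte(`{"messages":[
		{"role":"system","content":"be brief"},
		{"role":"user","content":[{"type":"text","text":"hello"},{"type":"text","text":"world"}]},
		{"role":"assistant","content":"hi"}
	]}`)

	// System and tool messages are skipped; array content parts are joined with newlines.
	for _, m := range conversation.ExtractMessages("openai", openAIBody) {
		fmt.Printf("%s: %q\n", m.Role, m.Text)
	}
	// user: "hello\nworld"
	// assistant: "hi"
}
```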
39 internal/provider/gemini-web/conversation/sanitize.go Normal file
@@ -0,0 +1,39 @@
package conversation

import (
	"regexp"
	"strings"
)

var reThink = regexp.MustCompile(`(?is)<think>.*?</think>`)

// RemoveThinkTags strips <think>...</think> blocks and trims whitespace.
func RemoveThinkTags(s string) string {
	return strings.TrimSpace(reThink.ReplaceAllString(s, ""))
}

// SanitizeAssistantMessages removes think tags from assistant messages while leaving others untouched.
func SanitizeAssistantMessages(msgs []Message) []Message {
	out := make([]Message, 0, len(msgs))
	for _, m := range msgs {
		if strings.EqualFold(strings.TrimSpace(m.Role), "assistant") {
			out = append(out, Message{Role: m.Role, Text: RemoveThinkTags(m.Text)})
			continue
		}
		out = append(out, m)
	}
	return out
}

// EqualMessages compares two message slices for equality.
func EqualMessages(a, b []Message) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i].Role != b[i].Role || a[i].Text != b[i].Text {
			return false
		}
	}
	return true
}
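A standalone sketch of the think-tag stripping rule above, using the same regular expression (case-insensitive, dot matches newline):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same pattern as the sanitize helper in the diff above.
var reThink = regexp.MustCompile(`(?is)<think>.*?</think>`)

func removeThinkTags(s string) string {
	return strings.TrimSpace(reThink.ReplaceAllString(s, ""))
}

func main() {
	raw := "<think>\nchain of thought...\n</think>\nFinal answer."
	fmt.Println(removeThinkTags(raw)) // "Final answer."
}
```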
@@ -52,7 +52,7 @@ func (i Image) Save(path string, filename string, cookies map[string]string, ver
 			filename = q[0]
 		}
 	}
-	// Regex validation (align with Python: ^(.*\.\w+)) to extract name with extension.
+	// Regex validation (pattern: ^(.*\.\w+)) to extract name with extension.
 	if filename != "" {
 		re := regexp.MustCompile(`^(.*\.\w+)`)
 		if m := re.FindStringSubmatch(filename); len(m) >= 2 {
@@ -70,7 +70,7 @@ func (i Image) Save(path string, filename string, cookies map[string]string, ver
 	client := newHTTPClient(httpOptions{ProxyURL: i.Proxy, Insecure: insecure, FollowRedirects: true})
 	client.Timeout = 120 * time.Second
 
-	// Helper to set raw Cookie header using provided cookies (to mirror Python client behavior).
+	// Helper to set raw Cookie header using provided cookies (parity with the reference client behavior).
 	buildCookieHeader := func(m map[string]string) string {
 		if len(m) == 0 {
 			return ""
@@ -136,7 +136,7 @@ func (i Image) Save(path string, filename string, cookies map[string]string, ver
 		return "", err
 	}
 	if verbose {
-		log.Infof("Image saved as %s", dest)
+		fmt.Printf("Image saved as %s\n", dest)
 	}
 	abspath, _ := filepath.Abs(dest)
 	return abspath, nil
@@ -4,10 +4,9 @@ import (
 	"fmt"
 	"html"
 	"net/http"
-	"strings"
-	"sync"
 	"time"
 
+	conversation "github.com/router-for-me/CLIProxyAPI/v6/internal/provider/gemini-web/conversation"
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
 )
 
@@ -105,76 +104,20 @@
 	ErrorIPTemporarilyBlocked = 1060
 )
 
-var (
-	GeminiWebAliasOnce sync.Once
-	GeminiWebAliasMap  map[string]string
-)
-
-func EnsureGeminiWebAliasMap() {
-	GeminiWebAliasOnce.Do(func() {
-		GeminiWebAliasMap = make(map[string]string)
-		for _, m := range registry.GetGeminiModels() {
-			if m.ID == "gemini-2.5-flash-lite" {
-				continue
-			} else if m.ID == "gemini-2.5-flash" {
-				GeminiWebAliasMap["gemini-2.5-flash-image-preview"] = "gemini-2.5-flash"
-			}
-			alias := AliasFromModelID(m.ID)
-			GeminiWebAliasMap[strings.ToLower(alias)] = strings.ToLower(m.ID)
-		}
-	})
-}
+func EnsureGeminiWebAliasMap() { conversation.EnsureGeminiWebAliasMap() }
 
 func GetGeminiWebAliasedModels() []*registry.ModelInfo {
-	EnsureGeminiWebAliasMap()
-	aliased := make([]*registry.ModelInfo, 0)
-	for _, m := range registry.GetGeminiModels() {
-		if m.ID == "gemini-2.5-flash-lite" {
-			continue
-		} else if m.ID == "gemini-2.5-flash" {
-			cpy := *m
-			cpy.ID = "gemini-2.5-flash-image-preview"
-			cpy.Name = "gemini-2.5-flash-image-preview"
-			cpy.DisplayName = "Nano Banana"
-			cpy.Description = "Gemini 2.5 Flash Preview Image"
-			aliased = append(aliased, &cpy)
-		}
-		cpy := *m
-		cpy.ID = AliasFromModelID(m.ID)
-		cpy.Name = cpy.ID
-		aliased = append(aliased, &cpy)
-	}
-	return aliased
+	return conversation.GetGeminiWebAliasedModels()
 }
 
-func MapAliasToUnderlying(name string) string {
-	EnsureGeminiWebAliasMap()
-	n := strings.ToLower(name)
-	if u, ok := GeminiWebAliasMap[n]; ok {
-		return u
-	}
-	const suffix = "-web"
-	if strings.HasSuffix(n, suffix) {
-		return strings.TrimSuffix(n, suffix)
-	}
-	return name
-}
+func MapAliasToUnderlying(name string) string { return conversation.MapAliasToUnderlying(name) }
 
-func AliasFromModelID(modelID string) string {
-	return modelID + "-web"
-}
+func AliasFromModelID(modelID string) string { return conversation.AliasFromModelID(modelID) }
 
 // Conversation domain structures -------------------------------------------
-type RoleText struct {
-	Role string
-	Text string
-}
+type RoleText = conversation.Message
 
-type StoredMessage struct {
-	Role    string `json:"role"`
-	Content string `json:"content"`
-	Name    string `json:"name,omitempty"`
-}
+type StoredMessage = conversation.StoredMessage
 
 type ConversationRecord struct {
 	Model string `json:"model"`
@@ -8,11 +8,11 @@ import (
 	"unicode/utf8"
 
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
+	conversation "github.com/router-for-me/CLIProxyAPI/v6/internal/provider/gemini-web/conversation"
 	"github.com/tidwall/gjson"
 )
 
 var (
-	reThink     = regexp.MustCompile(`(?s)^\s*<think>.*?</think>\s*`)
 	reXMLAnyTag = regexp.MustCompile(`(?s)<\s*[^>]+>`)
 )
 
@@ -77,20 +77,13 @@ func BuildPrompt(msgs []RoleText, tagged bool, appendAssistant bool) string {
 
 // RemoveThinkTags strips <think>...</think> blocks from a string.
 func RemoveThinkTags(s string) string {
-	return strings.TrimSpace(reThink.ReplaceAllString(s, ""))
+	return conversation.RemoveThinkTags(s)
 }
 
 // SanitizeAssistantMessages removes think tags from assistant messages.
 func SanitizeAssistantMessages(msgs []RoleText) []RoleText {
-	out := make([]RoleText, 0, len(msgs))
-	for _, m := range msgs {
-		if strings.ToLower(m.Role) == "assistant" {
-			out = append(out, RoleText{Role: m.Role, Text: RemoveThinkTags(m.Text)})
-		} else {
-			out = append(out, m)
-		}
-	}
-	return out
+	cleaned := conversation.SanitizeAssistantMessages(msgs)
+	return cleaned
 }
 
 // AppendXMLWrapHintIfNeeded appends an XML wrap hint to messages containing XML-like blocks.
@@ -3,8 +3,6 @@ package geminiwebapi
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -19,6 +17,7 @@ import (
|
|||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
|
"github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
|
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
|
||||||
|
conversation "github.com/router-for-me/CLIProxyAPI/v6/internal/provider/gemini-web/conversation"
|
||||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/translator/translator"
|
"github.com/router-for-me/CLIProxyAPI/v6/internal/translator/translator"
|
||||||
cliproxyexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
|
cliproxyexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
@@ -35,6 +34,7 @@ type GeminiWebState struct {
|
|||||||
cfg *config.Config
|
cfg *config.Config
|
||||||
token *gemini.GeminiWebTokenStorage
|
token *gemini.GeminiWebTokenStorage
|
||||||
storagePath string
|
storagePath string
|
||||||
|
authLabel string
|
||||||
|
|
||||||
stableClientID string
|
stableClientID string
|
||||||
accountID string
|
accountID string
|
||||||
@@ -51,18 +51,28 @@ type GeminiWebState struct {
|
|||||||
convIndex map[string]string
|
convIndex map[string]string
|
||||||
|
|
||||||
lastRefresh time.Time
|
lastRefresh time.Time
|
||||||
|
|
||||||
|
pendingMatchMu sync.Mutex
|
||||||
|
pendingMatch *conversation.MatchResult
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewGeminiWebState(cfg *config.Config, token *gemini.GeminiWebTokenStorage, storagePath string) *GeminiWebState {
|
type reuseComputation struct {
|
||||||
|
metadata []string
|
||||||
|
history []RoleText
|
||||||
|
overlap int
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewGeminiWebState(cfg *config.Config, token *gemini.GeminiWebTokenStorage, storagePath, authLabel string) *GeminiWebState {
|
||||||
state := &GeminiWebState{
|
state := &GeminiWebState{
|
||||||
cfg: cfg,
|
cfg: cfg,
|
||||||
token: token,
|
token: token,
|
||||||
storagePath: storagePath,
|
storagePath: storagePath,
|
||||||
|
authLabel: strings.TrimSpace(authLabel),
|
||||||
convStore: make(map[string][]string),
|
convStore: make(map[string][]string),
|
||||||
convData: make(map[string]ConversationRecord),
|
convData: make(map[string]ConversationRecord),
|
||||||
convIndex: make(map[string]string),
|
convIndex: make(map[string]string),
|
||||||
}
|
}
|
||||||
suffix := Sha256Hex(token.Secure1PSID)
|
suffix := conversation.Sha256Hex(token.Secure1PSID)
|
||||||
if len(suffix) > 16 {
|
if len(suffix) > 16 {
|
||||||
suffix = suffix[:16]
|
suffix = suffix[:16]
|
||||||
}
|
}
|
||||||
@@ -81,6 +91,28 @@ func NewGeminiWebState(cfg *config.Config, token *gemini.GeminiWebTokenStorage,
|
|||||||
return state
|
return state
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *GeminiWebState) setPendingMatch(match *conversation.MatchResult) {
|
||||||
|
if s == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.pendingMatchMu.Lock()
|
||||||
|
s.pendingMatch = match
|
||||||
|
s.pendingMatchMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *GeminiWebState) consumePendingMatch() *conversation.MatchResult {
|
||||||
|
s.pendingMatchMu.Lock()
|
||||||
|
defer s.pendingMatchMu.Unlock()
|
||||||
|
match := s.pendingMatch
|
||||||
|
s.pendingMatch = nil
|
||||||
|
return match
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPendingMatch makes a cached conversation match available for the next request.
|
||||||
|
func (s *GeminiWebState) SetPendingMatch(match *conversation.MatchResult) {
|
||||||
|
s.setPendingMatch(match)
|
||||||
|
}
|
||||||
|
|
||||||
 // Label returns a stable account label for logging and persistence.
 // If a storage file path is known, it uses the file base name (without extension).
 // Otherwise, it falls back to the stable client ID (e.g., "gemini-web-<hash>").
@@ -88,6 +120,14 @@ func (s *GeminiWebState) Label() string {
     if s == nil {
         return ""
     }
+    if s.token != nil {
+        if lbl := strings.TrimSpace(s.token.Label); lbl != "" {
+            return lbl
+        }
+    }
+    if lbl := strings.TrimSpace(s.authLabel); lbl != "" {
+        return lbl
+    }
     if s.storagePath != "" {
         base := strings.TrimSuffix(filepath.Base(s.storagePath), filepath.Ext(s.storagePath))
         if base != "" {
@@ -98,18 +138,18 @@ func (s *GeminiWebState) Label() string {
 }

 func (s *GeminiWebState) loadConversationCaches() {
-    if path := s.convPath(); path != "" {
+    path := s.convPath()
+    if path == "" {
+        return
+    }
     if store, err := LoadConvStore(path); err == nil {
         s.convStore = store
     }
-    }
-    if path := s.convPath(); path != "" {
     if items, index, err := LoadConvData(path); err == nil {
         s.convData = items
         s.convIndex = index
     }
     }
-}

 // convPath returns the BoltDB file path used for both account metadata and conversation data.
 func (s *GeminiWebState) convPath() string {
@@ -121,6 +161,78 @@ func (s *GeminiWebState) convPath() string {
     return ConvBoltPath(base)
 }

+func cloneRoleTextSlice(in []RoleText) []RoleText {
+    if len(in) == 0 {
+        return nil
+    }
+    out := make([]RoleText, len(in))
+    copy(out, in)
+    return out
+}
+
+func cloneStringSlice(in []string) []string {
+    if len(in) == 0 {
+        return nil
+    }
+    out := make([]string, len(in))
+    copy(out, in)
+    return out
+}
+
+func longestHistoryOverlap(history, incoming []RoleText) int {
+    max := len(history)
+    if len(incoming) < max {
+        max = len(incoming)
+    }
+    for overlap := max; overlap > 0; overlap-- {
+        if conversation.EqualMessages(history[len(history)-overlap:], incoming[:overlap]) {
+            return overlap
+        }
+    }
+    return 0
+}
+
+func equalStringSlice(a, b []string) bool {
+    if len(a) != len(b) {
+        return false
+    }
+    for i := range a {
+        if a[i] != b[i] {
+            return false
+        }
+    }
+    return true
+}
+
+func storedMessagesToRoleText(stored []conversation.StoredMessage) []RoleText {
+    if len(stored) == 0 {
+        return nil
+    }
+    converted := make([]RoleText, len(stored))
+    for i, msg := range stored {
+        converted[i] = RoleText{Role: msg.Role, Text: msg.Content}
+    }
+    return converted
+}
+
+func (s *GeminiWebState) findConversationByMetadata(model string, metadata []string) ([]RoleText, bool) {
+    if len(metadata) == 0 {
+        return nil, false
+    }
+    s.convMu.RLock()
+    defer s.convMu.RUnlock()
+    for _, rec := range s.convData {
+        if !strings.EqualFold(strings.TrimSpace(rec.Model), strings.TrimSpace(model)) {
+            continue
+        }
+        if !equalStringSlice(rec.Metadata, metadata) {
+            continue
+        }
+        return cloneRoleTextSlice(storedMessagesToRoleText(rec.Messages)), true
+    }
+    return nil, false
+}
+
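`longestHistoryOverlap` is the anchor for the reuse logic that follows, so a small in-package sketch of its contract helps: with a stored history of two turns and an incoming conversation that extends it by one user turn, the overlap is 2 and only the final turn needs to be sent. This assumes `conversation.EqualMessages` treats two `RoleText` slices as equal when roles and texts match pairwise; the concrete messages are made up.

```go
// In-package sketch; messages are illustrative.
func exampleTailDelta() []RoleText {
	history := []RoleText{{Role: "user", Text: "A"}, {Role: "assistant", Text: "B"}}
	incoming := []RoleText{{Role: "user", Text: "A"}, {Role: "assistant", Text: "B"}, {Role: "user", Text: "C"}}
	overlap := longestHistoryOverlap(history, incoming) // 2 under the stated assumption
	return incoming[overlap:]                           // just the trailing user turn
}
```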
 func (s *GeminiWebState) GetRequestMutex() *sync.Mutex { return &s.reqMu }

 func (s *GeminiWebState) EnsureClient() error {
@@ -169,8 +281,12 @@ func (s *GeminiWebState) Refresh(ctx context.Context) error {
             s.client.Cookies["__Secure-1PSIDTS"] = newTS
         }
         s.tokenMu.Unlock()
-        // Detailed debug log: provider and account.
-        log.Debugf("gemini web account %s rotated 1PSIDTS: %s", s.accountID, MaskToken28(newTS))
+        // Detailed debug log: provider and account label.
+        label := strings.TrimSpace(s.Label())
+        if label == "" {
+            label = s.accountID
+        }
+        log.Debugf("gemini web account %s rotated 1PSIDTS: %s", label, MaskToken28(newTS))
     }
     s.lastRefresh = time.Now()
     return nil
@@ -210,7 +326,7 @@ func (s *GeminiWebState) prepare(ctx context.Context, modelName string, rawJSON
         return nil, &interfaces.ErrorMessage{StatusCode: 400, Error: fmt.Errorf("bad request: %w", err)}
     }
     cleaned := SanitizeAssistantMessages(messages)
-    res.cleaned = cleaned
+    fullCleaned := cloneRoleTextSlice(cleaned)
     res.underlying = MapAliasToUnderlying(modelName)
     model, err := ModelFromName(res.underlying)
     if err != nil {
@@ -223,15 +339,27 @@ func (s *GeminiWebState) prepare(ctx context.Context, modelName string, rawJSON
     mimesSubset := mimes

     if s.useReusableContext() {
-        reuseMeta, remaining := s.findReusableSession(res.underlying, cleaned)
-        if len(reuseMeta) > 0 {
+        reusePlan := s.reuseFromPending(res.underlying, cleaned)
+        if reusePlan == nil {
+            reusePlan = s.findReusableSession(res.underlying, cleaned)
+        }
+        if reusePlan != nil {
             res.reuse = true
-            meta = reuseMeta
-            if len(remaining) == 1 {
-                useMsgs = []RoleText{remaining[0]}
-            } else if len(remaining) > 1 {
-                useMsgs = remaining
-            } else if len(cleaned) > 0 {
+            meta = cloneStringSlice(reusePlan.metadata)
+            overlap := reusePlan.overlap
+            if overlap > len(cleaned) {
+                overlap = len(cleaned)
+            } else if overlap < 0 {
+                overlap = 0
+            }
+            delta := cloneRoleTextSlice(cleaned[overlap:])
+            if len(reusePlan.history) > 0 {
+                fullCleaned = append(cloneRoleTextSlice(reusePlan.history), delta...)
+            } else {
+                fullCleaned = append(cloneRoleTextSlice(cleaned[:overlap]), delta...)
+            }
+            useMsgs = delta
+            if len(delta) == 0 && len(cleaned) > 0 {
                 useMsgs = []RoleText{cleaned[len(cleaned)-1]}
             }
         }
         if len(useMsgs) == 1 && len(messages) > 0 && len(msgFileIdx) == len(messages) {
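The hunk above clamps `reusePlan.overlap` into `[0, len(cleaned)]` before slicing off the delta. The same clamp as a standalone helper, purely for readability (the helper name is not in the diff):

```go
// Equivalent of the inline clamp above; the helper name is illustrative.
func clampOverlap(overlap, cleanedLen int) int {
	if overlap > cleanedLen {
		return cleanedLen
	}
	if overlap < 0 {
		return 0
	}
	return overlap
}
```

With the clamp applied, `cleaned[overlap:]` is the unsent suffix, and an empty suffix falls back to resending the last cleaned message.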
@@ -289,6 +417,8 @@ func (s *GeminiWebState) prepare(ctx context.Context, modelName string, rawJSON
         s.convMu.RUnlock()
     }

+    res.cleaned = fullCleaned
+
     res.tagged = NeedRoleTags(useMsgs)
     if res.reuse && len(useMsgs) == 1 {
         res.tagged = false
@@ -330,10 +460,10 @@ func (s *GeminiWebState) Send(ctx context.Context, modelName string, reqPayload
         return nil, s.wrapSendError(err), nil
     }

-    // Hook: For gemini-2.5-flash-image-preview, if the API returns only images without any text,
+    // Hook: For gemini-2.5-flash-image-web, if the API returns only images without any text,
     // inject a small textual summary so that conversation persistence has non-empty assistant text.
     // This helps conversation recovery (conv store) to match sessions reliably.
-    if strings.EqualFold(modelName, "gemini-2.5-flash-image-preview") {
+    if strings.EqualFold(modelName, "gemini-2.5-flash-image-web") {
         if len(output.Candidates) > 0 {
             c := output.Candidates[output.Chosen]
             hasNoText := strings.TrimSpace(c.Text) == ""
@@ -412,8 +542,22 @@ func (s *GeminiWebState) persistConversation(modelName string, prep *geminiWebPr
     if !ok {
         return
     }
-    stableHash := HashConversation(rec.ClientID, prep.underlying, rec.Messages)
-    accountHash := HashConversation(s.accountID, prep.underlying, rec.Messages)
+    label := strings.TrimSpace(s.Label())
+    if label == "" {
+        label = s.accountID
+    }
+    conversationMsgs := conversation.StoredToMessages(rec.Messages)
+    if err := conversation.StoreConversation(label, prep.underlying, conversationMsgs, metadata); err != nil {
+        log.Debugf("gemini web: failed to persist global conversation index: %v", err)
+    }
+    stableHash := conversation.HashConversationForAccount(rec.ClientID, prep.underlying, rec.Messages)
+    accountHash := conversation.HashConversationForAccount(s.accountID, prep.underlying, rec.Messages)
+
+    suffixSeen := make(map[string]struct{})
+    suffixSeen["hash:"+stableHash] = struct{}{}
+    if accountHash != stableHash {
+        suffixSeen["hash:"+accountHash] = struct{}{}
+    }

     s.convMu.Lock()
     s.convData[stableHash] = rec
@@ -421,6 +565,33 @@ func (s *GeminiWebState) persistConversation(modelName string, prep *geminiWebPr
     if accountHash != stableHash {
         s.convIndex["hash:"+accountHash] = stableHash
     }
+
+    sanitizedHistory := conversation.SanitizeAssistantMessages(conversation.StoredToMessages(rec.Messages))
+    for start := 1; start < len(sanitizedHistory); start++ {
+        segment := sanitizedHistory[start:]
+        if len(segment) < 2 {
+            continue
+        }
+        tailRole := strings.ToLower(strings.TrimSpace(segment[len(segment)-1].Role))
+        if tailRole != "assistant" && tailRole != "system" {
+            continue
+        }
+        storedSegment := conversation.ToStoredMessages(segment)
+        segmentStableHash := conversation.HashConversationForAccount(rec.ClientID, prep.underlying, storedSegment)
+        keyStable := "hash:" + segmentStableHash
+        if _, exists := suffixSeen[keyStable]; !exists {
+            s.convIndex[keyStable] = stableHash
+            suffixSeen[keyStable] = struct{}{}
+        }
+        segmentAccountHash := conversation.HashConversationForAccount(s.accountID, prep.underlying, storedSegment)
+        if segmentAccountHash != segmentStableHash {
+            keyAccount := "hash:" + segmentAccountHash
+            if _, exists := suffixSeen[keyAccount]; !exists {
+                s.convIndex[keyAccount] = stableHash
+                suffixSeen[keyAccount] = struct{}{}
+            }
+        }
+    }
     dataSnapshot := make(map[string]ConversationRecord, len(s.convData))
     for k, v := range s.convData {
         dataSnapshot[k] = v
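The segment loop above writes one extra index entry per assistant- or system-terminated tail of the sanitized history, so a client that trimmed older turns can still land on the same stored record. A sketch of which suffixes qualify, written over the in-package `RoleText` type purely for illustration (the persistence code itself walks the conversation package's message slice):

```go
// Sketch over RoleText; mirrors the qualification rules of the loop above.
func indexableTailSegments(msgs []RoleText) [][]RoleText {
	var out [][]RoleText
	for start := 1; start < len(msgs); start++ {
		segment := msgs[start:]
		if len(segment) < 2 {
			continue
		}
		role := strings.ToLower(strings.TrimSpace(segment[len(segment)-1].Role))
		if role == "assistant" || role == "system" {
			out = append(out, segment)
		}
	}
	return out
}
```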
@@ -484,16 +655,63 @@ func (s *GeminiWebState) useReusableContext() bool {
     return s.cfg.GeminiWeb.Context
 }

-func (s *GeminiWebState) findReusableSession(modelName string, msgs []RoleText) ([]string, []RoleText) {
+func (s *GeminiWebState) reuseFromPending(modelName string, msgs []RoleText) *reuseComputation {
+    match := s.consumePendingMatch()
+    if match == nil {
+        return nil
+    }
+    if !strings.EqualFold(strings.TrimSpace(match.Model), strings.TrimSpace(modelName)) {
+        return nil
+    }
+    metadata := cloneStringSlice(match.Record.Metadata)
+    if len(metadata) == 0 {
+        return nil
+    }
+    history, ok := s.findConversationByMetadata(modelName, metadata)
+    if !ok {
+        return nil
+    }
+    overlap := longestHistoryOverlap(history, msgs)
+    return &reuseComputation{metadata: metadata, history: history, overlap: overlap}
+}
+
+func (s *GeminiWebState) findReusableSession(modelName string, msgs []RoleText) *reuseComputation {
     s.convMu.RLock()
     items := s.convData
     index := s.convIndex
     s.convMu.RUnlock()
-    return FindReusableSessionIn(items, index, s.stableClientID, s.accountID, modelName, msgs)
+    rec, metadata, overlap, ok := FindReusableSessionIn(items, index, s.stableClientID, s.accountID, modelName, msgs)
+    if !ok {
+        return nil
+    }
+    history := cloneRoleTextSlice(storedMessagesToRoleText(rec.Messages))
+    if len(history) == 0 {
+        return nil
+    }
+    // Ensure overlap reflects the actual history alignment.
+    if computed := longestHistoryOverlap(history, msgs); computed > 0 {
+        overlap = computed
+    }
+    return &reuseComputation{metadata: cloneStringSlice(metadata), history: history, overlap: overlap}
 }

 func (s *GeminiWebState) getConfiguredGem() *Gem {
-    if s.cfg != nil && s.cfg.GeminiWeb.CodeMode {
+    if s.cfg == nil {
+        return nil
+    }
+    // New behavior: attach Gem based on explicit GemMode selection.
+    // Only attaches the Gem; does not toggle any other behavior.
+    if gm := strings.ToLower(strings.TrimSpace(s.cfg.GeminiWeb.GemMode)); gm != "" {
+        switch gm {
+        case "coding-partner":
+            return &Gem{ID: "coding-partner", Name: "Coding partner", Predefined: true}
+        case "writing-editor":
+            return &Gem{ID: "writing-editor", Name: "Writing editor", Predefined: true}
+        }
+    }
+    // Backwards compatibility: legacy CodeMode still attaches Coding partner
+    // and may enable extra behaviors elsewhere.
+    if s.cfg.GeminiWeb.CodeMode {
         return &Gem{ID: "coding-partner", Name: "Coding partner", Predefined: true}
     }
     return nil
@@ -531,42 +749,6 @@ func appendAPIResponseChunk(ctx context.Context, cfg *config.Config, chunk []byt
     }
 }

-// Persistence helpers --------------------------------------------------
-
-// Sha256Hex computes the SHA256 hash of a string and returns its hex representation.
-func Sha256Hex(s string) string {
-    sum := sha256.Sum256([]byte(s))
-    return hex.EncodeToString(sum[:])
-}
-
-func ToStoredMessages(msgs []RoleText) []StoredMessage {
-    out := make([]StoredMessage, 0, len(msgs))
-    for _, m := range msgs {
-        out = append(out, StoredMessage{
-            Role:    m.Role,
-            Content: m.Text,
-        })
-    }
-    return out
-}
-
-func HashMessage(m StoredMessage) string {
-    s := fmt.Sprintf(`{"content":%q,"role":%q}`, m.Content, strings.ToLower(m.Role))
-    return Sha256Hex(s)
-}
-
-func HashConversation(clientID, model string, msgs []StoredMessage) string {
-    var b strings.Builder
-    b.WriteString(clientID)
-    b.WriteString("|")
-    b.WriteString(model)
-    for _, m := range msgs {
-        b.WriteString("|")
-        b.WriteString(HashMessage(m))
-    }
-    return Sha256Hex(b.String())
-}
-
 // ConvBoltPath returns the BoltDB file path used for both account metadata and conversation data.
 // Different logical datasets are kept in separate buckets within this single DB file.
 func ConvBoltPath(tokenFilePath string) string {
@@ -781,7 +963,7 @@ func BuildConversationRecord(model, clientID string, history []RoleText, output
         Model:     model,
         ClientID:  clientID,
         Metadata:  metadata,
-        Messages:  ToStoredMessages(final),
+        Messages:  conversation.ToStoredMessages(final),
         CreatedAt: time.Now(),
         UpdatedAt: time.Now(),
     }
@@ -791,9 +973,9 @@ func BuildConversationRecord(model, clientID string, history []RoleText, output
 // FindByMessageListIn looks up a conversation record by hashed message list.
 // It attempts both the stable client ID and a legacy email-based ID.
 func FindByMessageListIn(items map[string]ConversationRecord, index map[string]string, stableClientID, email, model string, msgs []RoleText) (ConversationRecord, bool) {
-    stored := ToStoredMessages(msgs)
-    stableHash := HashConversation(stableClientID, model, stored)
-    fallbackHash := HashConversation(email, model, stored)
+    stored := conversation.ToStoredMessages(msgs)
+    stableHash := conversation.HashConversationForAccount(stableClientID, model, stored)
+    fallbackHash := conversation.HashConversationForAccount(email, model, stored)

     // Try stable hash via index indirection first
     if key, ok := index["hash:"+stableHash]; ok {
@@ -831,9 +1013,9 @@ func FindConversationIn(items map[string]ConversationRecord, index map[string]st
 }

 // FindReusableSessionIn returns reusable metadata and the remaining message suffix.
-func FindReusableSessionIn(items map[string]ConversationRecord, index map[string]string, stableClientID, email, model string, msgs []RoleText) ([]string, []RoleText) {
+func FindReusableSessionIn(items map[string]ConversationRecord, index map[string]string, stableClientID, email, model string, msgs []RoleText) (ConversationRecord, []string, int, bool) {
     if len(msgs) < 2 {
-        return nil, nil
+        return ConversationRecord{}, nil, 0, false
     }
     searchEnd := len(msgs)
     for searchEnd >= 2 {
@@ -841,11 +1023,17 @@ func FindReusableSessionIn(items map[string]ConversationRecord, index map[string
         tail := sub[len(sub)-1]
         if strings.EqualFold(tail.Role, "assistant") || strings.EqualFold(tail.Role, "system") {
             if rec, ok := FindConversationIn(items, index, stableClientID, email, model, sub); ok {
-                remain := msgs[searchEnd:]
-                return rec.Metadata, remain
+                return rec, rec.Metadata, searchEnd, true
             }
         }
         searchEnd--
     }
-    return nil, nil
+    return ConversationRecord{}, nil, 0, false
+}
+
+// SetConfig updates the configuration reference used by the state.
+// This allows hot-reload of configuration to take effect for existing
+// runtime states that were cached on auth during previous requests.
+func (s *GeminiWebState) SetConfig(cfg *config.Config) {
+    s.cfg = cfg
 }
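Callers of `FindReusableSessionIn` now receive the whole record plus the matched prefix length instead of the metadata and a remaining suffix. A sketch of the adaptation, essentially a condensed version of `findReusableSession` above (the wrapper name and parameter names are illustrative):

```go
// Illustrative caller of the new four-value return.
func reusePlanFromStore(items map[string]ConversationRecord, index map[string]string, clientID, email, model string, msgs []RoleText) *reuseComputation {
	rec, metadata, overlap, ok := FindReusableSessionIn(items, index, clientID, email, model, msgs)
	if !ok {
		return nil
	}
	return &reuseComputation{
		metadata: cloneStringSlice(metadata),
		history:  storedMessagesToRoleText(rec.Messages),
		overlap:  overlap,
	}
}
```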
@@ -8,6 +8,14 @@ import "time"
 // GetClaudeModels returns the standard Claude model definitions
 func GetClaudeModels() []*ModelInfo {
     return []*ModelInfo{
+        {
+            ID:          "claude-sonnet-4-5-20250929",
+            Object:      "model",
+            Created:     1759104000, // 2025-09-29
+            OwnedBy:     "anthropic",
+            Type:        "claude",
+            DisplayName: "Claude 4.5 Sonnet",
+        },
         {
             ID:     "claude-opus-4-1-20250805",
             Object: "model",
@@ -144,6 +152,20 @@ func GetGeminiCLIModels() []*ModelInfo {
             OutputTokenLimit:           65536,
             SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
         },
+        {
+            ID:                         "gemini-2.5-flash-image-preview",
+            Object:                     "model",
+            Created:                    time.Now().Unix(),
+            OwnedBy:                    "google",
+            Type:                       "gemini",
+            Name:                       "models/gemini-2.5-flash-image-preview",
+            Version:                    "2.5",
+            DisplayName:                "Gemini 2.5 Flash Image Preview",
+            Description:                "State-of-the-art image generation and editing model.",
+            InputTokenLimit:            1048576,
+            OutputTokenLimit:           8192,
+            SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
+        },
     }
 }

@@ -314,3 +336,45 @@ func GetQwenModels() []*ModelInfo {
         },
     }
 }
+
+// GetIFlowModels returns supported models for iFlow OAuth accounts.
+func GetIFlowModels() []*ModelInfo {
+    created := time.Now().Unix()
+    entries := []struct {
+        ID          string
+        DisplayName string
+        Description string
+    }{
+        {ID: "tstars2.0", DisplayName: "TStars-2.0", Description: "iFlow TStars-2.0 multimodal assistant"},
+        {ID: "qwen3-coder-plus", DisplayName: "Qwen3-Coder-Plus", Description: "Qwen3 Coder Plus code generation"},
+        {ID: "qwen3-coder", DisplayName: "Qwen3-Coder-480B-A35B", Description: "Qwen3 Coder 480B A35B"},
+        {ID: "qwen3-max", DisplayName: "Qwen3-Max", Description: "Qwen3 flagship model"},
+        {ID: "qwen3-vl-plus", DisplayName: "Qwen3-VL-Plus", Description: "Qwen3 multimodal vision-language"},
+        {ID: "qwen3-max-preview", DisplayName: "Qwen3-Max-Preview", Description: "Qwen3 Max preview build"},
+        {ID: "kimi-k2-0905", DisplayName: "Kimi-K2-Instruct-0905", Description: "Moonshot Kimi K2 instruct 0905"},
+        {ID: "glm-4.5", DisplayName: "GLM-4.5", Description: "Zhipu GLM 4.5 general model"},
+        {ID: "kimi-k2", DisplayName: "Kimi-K2", Description: "Moonshot Kimi K2 general model"},
+        {ID: "deepseek-v3.2", DisplayName: "DeepSeek-V3.2-Exp", Description: "DeepSeek V3.2 experimental"},
+        {ID: "deepseek-v3.1", DisplayName: "DeepSeek-V3.1-Terminus", Description: "DeepSeek V3.1 Terminus"},
+        {ID: "deepseek-r1", DisplayName: "DeepSeek-R1", Description: "DeepSeek reasoning model R1"},
+        {ID: "deepseek-v3", DisplayName: "DeepSeek-V3-671B", Description: "DeepSeek V3 671B"},
+        {ID: "qwen3-32b", DisplayName: "Qwen3-32B", Description: "Qwen3 32B"},
+        {ID: "qwen3-235b-a22b-thinking-2507", DisplayName: "Qwen3-235B-A22B-Thinking", Description: "Qwen3 235B A22B Thinking (2507)"},
+        {ID: "qwen3-235b-a22b-instruct", DisplayName: "Qwen3-235B-A22B-Instruct", Description: "Qwen3 235B A22B Instruct"},
+        {ID: "qwen3-235b", DisplayName: "Qwen3-235B-A22B", Description: "Qwen3 235B A22B"},
+    }
+    models := make([]*ModelInfo, 0, len(entries))
+    for _, entry := range entries {
+        models = append(models, &ModelInfo{
+            ID:          entry.ID,
+            Object:      "model",
+            Created:     created,
+            OwnedBy:     "iflow",
+            Type:        "iflow",
+            DisplayName: entry.DisplayName,
+            Description: entry.Description,
+        })
+    }
+    return models
+}
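`GetIFlowModels` only builds the catalogue; something still has to hand it to the model registry changed later in this diff. A sketch of that hand-off, ignoring package boundaries and using a placeholder client ID (the `RegisterClient` signature is taken from the registry hunks below):

```go
// Sketch only; r and the client ID are placeholders.
func registerIFlowCatalogue(r *ModelRegistry) {
	r.RegisterClient("iflow-oauth-example", "iflow", GetIFlowModels())
}
```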
@@ -9,6 +9,7 @@ import (
     "sync"
     "time"

+    misc "github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
     log "github.com/sirupsen/logrus"
 )

@@ -100,20 +101,194 @@ func (r *ModelRegistry) RegisterClient(clientID, clientProvider string, models [
     r.mutex.Lock()
     defer r.mutex.Unlock()

-    // Remove any existing registration for this client
-    r.unregisterClientInternal(clientID)
-
     provider := strings.ToLower(clientProvider)
-    modelIDs := make([]string, 0, len(models))
+    uniqueModelIDs := make([]string, 0, len(models))
+    rawModelIDs := make([]string, 0, len(models))
+    newModels := make(map[string]*ModelInfo, len(models))
+    newCounts := make(map[string]int, len(models))
+    for _, model := range models {
+        if model == nil || model.ID == "" {
+            continue
+        }
+        rawModelIDs = append(rawModelIDs, model.ID)
+        newCounts[model.ID]++
+        if _, exists := newModels[model.ID]; exists {
+            continue
+        }
+        newModels[model.ID] = model
+        uniqueModelIDs = append(uniqueModelIDs, model.ID)
+    }
+
+    if len(uniqueModelIDs) == 0 {
+        // No models supplied; unregister existing client state if present.
+        r.unregisterClientInternal(clientID)
+        delete(r.clientModels, clientID)
+        delete(r.clientProviders, clientID)
+        misc.LogCredentialSeparator()
+        return
+    }
+
     now := time.Now()

-    for _, model := range models {
-        modelIDs = append(modelIDs, model.ID)
-
-        if existing, exists := r.models[model.ID]; exists {
-            // Model already exists, increment count
+    oldModels, hadExisting := r.clientModels[clientID]
+    oldProvider, _ := r.clientProviders[clientID]
+    providerChanged := oldProvider != provider
+    if !hadExisting {
+        // Pure addition path.
+        for _, modelID := range rawModelIDs {
+            model := newModels[modelID]
+            r.addModelRegistration(modelID, provider, model, now)
+        }
+        r.clientModels[clientID] = append([]string(nil), rawModelIDs...)
+        if provider != "" {
+            r.clientProviders[clientID] = provider
+        } else {
+            delete(r.clientProviders, clientID)
+        }
+        log.Debugf("Registered client %s from provider %s with %d models", clientID, clientProvider, len(rawModelIDs))
+        misc.LogCredentialSeparator()
+        return
+    }
+
+    oldCounts := make(map[string]int, len(oldModels))
+    for _, id := range oldModels {
+        oldCounts[id]++
+    }
+
+    added := make([]string, 0)
+    for _, id := range uniqueModelIDs {
+        if oldCounts[id] == 0 {
+            added = append(added, id)
+        }
+    }
+
+    removed := make([]string, 0)
+    for id := range oldCounts {
+        if newCounts[id] == 0 {
+            removed = append(removed, id)
+        }
+    }
+
+    // Handle provider change for overlapping models before modifications.
+    if providerChanged && oldProvider != "" {
+        for id, newCount := range newCounts {
+            if newCount == 0 {
+                continue
+            }
+            oldCount := oldCounts[id]
+            if oldCount == 0 {
+                continue
+            }
+            toRemove := newCount
+            if oldCount < toRemove {
+                toRemove = oldCount
+            }
+            if reg, ok := r.models[id]; ok && reg.Providers != nil {
+                if count, okProv := reg.Providers[oldProvider]; okProv {
+                    if count <= toRemove {
+                        delete(reg.Providers, oldProvider)
+                    } else {
+                        reg.Providers[oldProvider] = count - toRemove
+                    }
+                }
+            }
+        }
+    }
+
+    // Apply removals first to keep counters accurate.
+    for _, id := range removed {
+        oldCount := oldCounts[id]
+        for i := 0; i < oldCount; i++ {
+            r.removeModelRegistration(clientID, id, oldProvider, now)
+        }
+    }
+
+    for id, oldCount := range oldCounts {
+        newCount := newCounts[id]
+        if newCount == 0 || oldCount <= newCount {
+            continue
+        }
+        overage := oldCount - newCount
+        for i := 0; i < overage; i++ {
+            r.removeModelRegistration(clientID, id, oldProvider, now)
+        }
+    }
+
+    // Apply additions.
+    for id, newCount := range newCounts {
+        oldCount := oldCounts[id]
+        if newCount <= oldCount {
+            continue
+        }
+        model := newModels[id]
+        diff := newCount - oldCount
+        for i := 0; i < diff; i++ {
+            r.addModelRegistration(id, provider, model, now)
+        }
+    }
+
+    // Update metadata for models that remain associated with the client.
+    addedSet := make(map[string]struct{}, len(added))
+    for _, id := range added {
+        addedSet[id] = struct{}{}
+    }
+    for _, id := range uniqueModelIDs {
+        model := newModels[id]
+        if reg, ok := r.models[id]; ok {
+            reg.Info = cloneModelInfo(model)
+            reg.LastUpdated = now
+            if reg.QuotaExceededClients != nil {
+                delete(reg.QuotaExceededClients, clientID)
+            }
+            if reg.SuspendedClients != nil {
+                delete(reg.SuspendedClients, clientID)
+            }
+            if providerChanged && provider != "" {
+                if _, newlyAdded := addedSet[id]; newlyAdded {
+                    continue
+                }
+                overlapCount := newCounts[id]
+                if oldCount := oldCounts[id]; oldCount < overlapCount {
+                    overlapCount = oldCount
+                }
+                if overlapCount <= 0 {
+                    continue
+                }
+                if reg.Providers == nil {
+                    reg.Providers = make(map[string]int)
+                }
+                reg.Providers[provider] += overlapCount
+            }
+        }
+    }
+
+    // Update client bookkeeping.
+    if len(rawModelIDs) > 0 {
+        r.clientModels[clientID] = append([]string(nil), rawModelIDs...)
+    }
+    if provider != "" {
+        r.clientProviders[clientID] = provider
+    } else {
+        delete(r.clientProviders, clientID)
+    }
+
+    if len(added) == 0 && len(removed) == 0 && !providerChanged {
+        // Only metadata (e.g., display name) changed; skip separator when no log output.
+        return
+    }
+
+    log.Debugf("Reconciled client %s (provider %s) models: +%d, -%d", clientID, provider, len(added), len(removed))
+    misc.LogCredentialSeparator()
+}
+
+func (r *ModelRegistry) addModelRegistration(modelID, provider string, model *ModelInfo, now time.Time) {
+    if model == nil || modelID == "" {
+        return
+    }
+    if existing, exists := r.models[modelID]; exists {
         existing.Count++
         existing.LastUpdated = now
+        existing.Info = cloneModelInfo(model)
         if existing.SuspendedClients == nil {
             existing.SuspendedClients = make(map[string]string)
         }
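The reconciliation above is count-based: it compares the multiset of model IDs the client previously registered against the new list and only touches the difference. A self-contained sketch of the added/removed computation it performs:

```go
// Minimal sketch of the count-based diffing used by RegisterClient.
func diffModelIDs(oldIDs, newIDs []string) (added, removed []string) {
	oldCounts := make(map[string]int, len(oldIDs))
	for _, id := range oldIDs {
		oldCounts[id]++
	}
	newCounts := make(map[string]int, len(newIDs))
	for _, id := range newIDs {
		newCounts[id]++
	}
	for id := range newCounts {
		if oldCounts[id] == 0 {
			added = append(added, id)
		}
	}
	for id := range oldCounts {
		if newCounts[id] == 0 {
			removed = append(removed, id)
		}
	}
	return added, removed
}
```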
@@ -123,11 +298,12 @@ func (r *ModelRegistry) RegisterClient(clientID, clientProvider string, models [
             }
             existing.Providers[provider]++
         }
-        log.Debugf("Incremented count for model %s, now %d clients", model.ID, existing.Count)
-    } else {
-        // New model, create registration
+        log.Debugf("Incremented count for model %s, now %d clients", modelID, existing.Count)
+        return
+    }
+
     registration := &ModelRegistration{
-        Info:                 model,
+        Info:                 cloneModelInfo(model),
         Count:                1,
         LastUpdated:          now,
         QuotaExceededClients: make(map[string]*time.Time),
@@ -136,18 +312,54 @@ func (r *ModelRegistry) RegisterClient(clientID, clientProvider string, models [
     if provider != "" {
         registration.Providers = map[string]int{provider: 1}
     }
-    r.models[model.ID] = registration
-    log.Debugf("Registered new model %s from provider %s", model.ID, clientProvider)
+    r.models[modelID] = registration
+    log.Debugf("Registered new model %s from provider %s", modelID, provider)
+}
+
+func (r *ModelRegistry) removeModelRegistration(clientID, modelID, provider string, now time.Time) {
+    registration, exists := r.models[modelID]
+    if !exists {
+        return
+    }
+    registration.Count--
+    registration.LastUpdated = now
+    if registration.QuotaExceededClients != nil {
+        delete(registration.QuotaExceededClients, clientID)
+    }
+    if registration.SuspendedClients != nil {
+        delete(registration.SuspendedClients, clientID)
+    }
+    if registration.Count < 0 {
+        registration.Count = 0
+    }
+    if provider != "" && registration.Providers != nil {
+        if count, ok := registration.Providers[provider]; ok {
+            if count <= 1 {
+                delete(registration.Providers, provider)
+            } else {
+                registration.Providers[provider] = count - 1
+            }
+        }
+    }
+    log.Debugf("Decremented count for model %s, now %d clients", modelID, registration.Count)
+    if registration.Count <= 0 {
+        delete(r.models, modelID)
+        log.Debugf("Removed model %s as no clients remain", modelID)
     }
 }

-    r.clientModels[clientID] = modelIDs
-    if provider != "" {
-        r.clientProviders[clientID] = provider
-    } else {
-        delete(r.clientProviders, clientID)
+func cloneModelInfo(model *ModelInfo) *ModelInfo {
+    if model == nil {
+        return nil
     }
-    log.Debugf("Registered client %s from provider %s with %d models", clientID, clientProvider, len(models))
+    copy := *model
+    if len(model.SupportedGenerationMethods) > 0 {
+        copy.SupportedGenerationMethods = append([]string(nil), model.SupportedGenerationMethods...)
+    }
+    if len(model.SupportedParameters) > 0 {
+        copy.SupportedParameters = append([]string(nil), model.SupportedParameters...)
+    }
+    return &copy
 }

 // UnregisterClient removes a client and decrements counts for its models
@@ -207,6 +419,8 @@ func (r *ModelRegistry) unregisterClientInternal(clientID string) {
         delete(r.clientProviders, clientID)
     }
     log.Debugf("Unregistered client %s", clientID)
+    // Separator line after completing client unregistration (after the summary line)
+    misc.LogCredentialSeparator()
 }

 // SetModelQuotaExceeded marks a model as quota exceeded for a specific client
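`cloneModelInfo` exists so the registry never aliases slices owned by the caller: the struct is copied by value and the two slice fields are re-allocated. A short sketch of the failure it prevents (identifiers are the ones used in the diff; the wrapper function is illustrative):

```go
// Illustrative: mutating the caller's ModelInfo no longer leaks into the registry's snapshot.
func cloneKeepsSnapshotIsolated() string {
	original := &ModelInfo{ID: "example", SupportedGenerationMethods: []string{"generateContent"}}
	snapshot := cloneModelInfo(original)
	original.SupportedGenerationMethods[0] = "mutated"
	return snapshot.SupportedGenerationMethods[0] // still "generateContent"
}
```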
@@ -61,10 +61,7 @@ func (e *ClaudeExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, r
     }
     applyClaudeHeaders(httpReq, apiKey, false)

-    httpClient := &http.Client{}
-    if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
-        httpClient.Transport = rt
-    }
+    httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
     resp, err := httpClient.Do(httpReq)
     if err != nil {
         return cliproxyexecutor.Response{}, err
@@ -130,10 +127,7 @@ func (e *ClaudeExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.A
     }
     applyClaudeHeaders(httpReq, apiKey, true)

-    httpClient := &http.Client{Timeout: 0}
-    if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
-        httpClient.Transport = rt
-    }
+    httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
     resp, err := httpClient.Do(httpReq)
     if err != nil {
         return nil, err
@@ -150,8 +144,8 @@ func (e *ClaudeExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.A
         defer close(out)
         defer func() { _ = resp.Body.Close() }()
         scanner := bufio.NewScanner(resp.Body)
-        buf := make([]byte, 1024*1024)
-        scanner.Buffer(buf, 1024*1024)
+        buf := make([]byte, 20_971_520)
+        scanner.Buffer(buf, 20_971_520)
         var param any
         for scanner.Scan() {
             line := scanner.Bytes()
@@ -196,10 +190,7 @@ func (e *ClaudeExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.Aut
     }
     applyClaudeHeaders(httpReq, apiKey, false)

-    httpClient := &http.Client{}
-    if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
-        httpClient.Transport = rt
-    }
+    httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
     resp, err := httpClient.Do(httpReq)
     if err != nil {
         return cliproxyexecutor.Response{}, err
@@ -284,6 +275,7 @@ func hasZSTDEcoding(contentEncoding string) bool {
 func applyClaudeHeaders(r *http.Request, apiKey string, stream bool) {
     r.Header.Set("Authorization", "Bearer "+apiKey)
     r.Header.Set("Content-Type", "application/json")
+    r.Header.Set("Anthropic-Beta", "claude-code-20250219,oauth-2025-04-20,interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14")

     var ginHeaders http.Header
     if ginCtx, ok := r.Context().Value("gin").(*gin.Context); ok && ginCtx != nil && ginCtx.Request != nil {
@@ -292,7 +284,6 @@ func applyClaudeHeaders(r *http.Request, apiKey string, stream bool) {

     misc.EnsureHeader(r.Header, ginHeaders, "Anthropic-Version", "2023-06-01")
     misc.EnsureHeader(r.Header, ginHeaders, "Anthropic-Dangerous-Direct-Browser-Access", "true")
-    misc.EnsureHeader(r.Header, ginHeaders, "Anthropic-Beta", "claude-code-20250219,oauth-2025-04-20,interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14")
     misc.EnsureHeader(r.Header, ginHeaders, "X-App", "cli")
     misc.EnsureHeader(r.Header, ginHeaders, "X-Stainless-Helper-Method", "stream")
     misc.EnsureHeader(r.Header, ginHeaders, "X-Stainless-Retry-Count", "0")
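`newProxyAwareHTTPClient` replaces the ad-hoc client construction in all of the executor paths, but its body is not part of these hunks. The sketch below is only a guess at the shape such a helper could take: the existing `cliproxy.roundtripper` context override (taken from the removed lines above) still wins, otherwise an optional proxy URL is applied. The parameter list and precedence are assumptions, not the project's actual implementation.

```go
package example // illustrative; not part of the diff

import (
	"context"
	"net/http"
	"net/url"
	"time"
)

// newProxyAwareHTTPClientSketch: injected round tripper first, then optional proxy.
func newProxyAwareHTTPClientSketch(ctx context.Context, proxyURL string, timeout time.Duration) *http.Client {
	client := &http.Client{Timeout: timeout}
	if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
		client.Transport = rt
		return client
	}
	if proxyURL != "" {
		if parsed, err := url.Parse(proxyURL); err == nil {
			client.Transport = &http.Transport{Proxy: http.ProxyURL(parsed)}
		}
	}
	return client
}
```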
@@ -54,8 +54,6 @@ func (e *CodexExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, re
     if util.InArray([]string{"gpt-5", "gpt-5-minimal", "gpt-5-low", "gpt-5-medium", "gpt-5-high"}, req.Model) {
         body, _ = sjson.SetBytes(body, "model", "gpt-5")
         switch req.Model {
-        case "gpt-5":
-            body, _ = sjson.DeleteBytes(body, "reasoning.effort")
         case "gpt-5-minimal":
             body, _ = sjson.SetBytes(body, "reasoning.effort", "minimal")
         case "gpt-5-low":
@@ -68,8 +66,6 @@ func (e *CodexExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, re
     } else if util.InArray([]string{"gpt-5-codex", "gpt-5-codex-low", "gpt-5-codex-medium", "gpt-5-codex-high"}, req.Model) {
         body, _ = sjson.SetBytes(body, "model", "gpt-5-codex")
         switch req.Model {
-        case "gpt-5-codex":
-            body, _ = sjson.DeleteBytes(body, "reasoning.effort")
         case "gpt-5-codex-low":
             body, _ = sjson.SetBytes(body, "reasoning.effort", "low")
         case "gpt-5-codex-medium":
@@ -80,6 +76,7 @@ func (e *CodexExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, re
     }

     body, _ = sjson.SetBytes(body, "stream", true)
+    body, _ = sjson.DeleteBytes(body, "previous_response_id")

     url := strings.TrimSuffix(baseURL, "/") + "/responses"
     recordAPIRequest(ctx, e.cfg, body)
@@ -89,10 +86,7 @@ func (e *CodexExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, re
     }
     applyCodexHeaders(httpReq, auth, apiKey)

-    httpClient := &http.Client{}
-    if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
-        httpClient.Transport = rt
-    }
+    httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
     resp, err := httpClient.Do(httpReq)
     if err != nil {
         return cliproxyexecutor.Response{}, err
@@ -147,8 +141,6 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
     if util.InArray([]string{"gpt-5", "gpt-5-minimal", "gpt-5-low", "gpt-5-medium", "gpt-5-high"}, req.Model) {
         body, _ = sjson.SetBytes(body, "model", "gpt-5")
         switch req.Model {
-        case "gpt-5":
-            body, _ = sjson.DeleteBytes(body, "reasoning.effort")
         case "gpt-5-minimal":
             body, _ = sjson.SetBytes(body, "reasoning.effort", "minimal")
         case "gpt-5-low":
@@ -161,8 +153,6 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
     } else if util.InArray([]string{"gpt-5-codex", "gpt-5-codex-low", "gpt-5-codex-medium", "gpt-5-codex-high"}, req.Model) {
         body, _ = sjson.SetBytes(body, "model", "gpt-5-codex")
         switch req.Model {
-        case "gpt-5-codex":
-            body, _ = sjson.DeleteBytes(body, "reasoning.effort")
         case "gpt-5-codex-low":
             body, _ = sjson.SetBytes(body, "reasoning.effort", "low")
         case "gpt-5-codex-medium":
@@ -172,6 +162,8 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
         }
     }

+    body, _ = sjson.DeleteBytes(body, "previous_response_id")
+
     url := strings.TrimSuffix(baseURL, "/") + "/responses"
     recordAPIRequest(ctx, e.cfg, body)
     httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body))
@@ -180,10 +172,7 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
     }
     applyCodexHeaders(httpReq, auth, apiKey)

-    httpClient := &http.Client{Timeout: 0}
-    if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
-        httpClient.Transport = rt
-    }
+    httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
     resp, err := httpClient.Do(httpReq)
     if err != nil {
         return nil, err
@@ -200,8 +189,8 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
         defer close(out)
         defer func() { _ = resp.Body.Close() }()
         scanner := bufio.NewScanner(resp.Body)
-        buf := make([]byte, 1024*1024)
-        scanner.Buffer(buf, 1024*1024)
+        buf := make([]byte, 20_971_520)
+        scanner.Buffer(buf, 20_971_520)
         var param any
         for scanner.Scan() {
             line := scanner.Bytes()
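With the bare "gpt-5" and "gpt-5-codex" cases removed, a `reasoning.effort` already present in the request body is now left untouched for those aliases; only the suffixed aliases force a value. A sketch of the alias-to-effort mapping the switches implement (the `-medium`/`-high` arms are inferred from the alias lists above, since those hunk lines are elided here):

```go
// Sketch; not the executor's code. "override" reports whether the alias pins an effort.
func reasoningEffortForAlias(alias string) (effort string, override bool) {
	switch alias {
	case "gpt-5-minimal":
		return "minimal", true
	case "gpt-5-low", "gpt-5-codex-low":
		return "low", true
	case "gpt-5-medium", "gpt-5-codex-medium":
		return "medium", true
	case "gpt-5-high", "gpt-5-codex-high":
		return "high", true
	default:
		return "", false // bare "gpt-5"/"gpt-5-codex": keep whatever the client sent
	}
}
```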
@@ -74,7 +74,7 @@ func (e *GeminiCLIExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth
         models = append([]string{req.Model}, models...)
     }

-    httpClient := newHTTPClient(ctx, 0)
+    httpClient := newHTTPClient(ctx, e.cfg, auth, 0)
     respCtx := context.WithValue(ctx, "alt", opts.Alt)

     var lastStatus int
@@ -89,6 +89,7 @@ func (e *GeminiCLIExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth
             payload = setJSONField(payload, "project", projectID)
             payload = setJSONField(payload, "model", attemptModel)
         }
+        payload = disableGeminiThinkingConfig(payload, attemptModel)

         tok, errTok := tokenSource.Token()
         if errTok != nil {
@@ -155,7 +156,7 @@ func (e *GeminiCLIExecutor) ExecuteStream(ctx context.Context, auth *cliproxyaut
         models = append([]string{req.Model}, models...)
     }

-    httpClient := newHTTPClient(ctx, 0)
+    httpClient := newHTTPClient(ctx, e.cfg, auth, 0)
     respCtx := context.WithValue(ctx, "alt", opts.Alt)

     var lastStatus int
@@ -165,6 +166,7 @@ func (e *GeminiCLIExecutor) ExecuteStream(ctx context.Context, auth *cliproxyaut
         payload := append([]byte(nil), basePayload...)
         payload = setJSONField(payload, "project", projectID)
         payload = setJSONField(payload, "model", attemptModel)
+        payload = disableGeminiThinkingConfig(payload, attemptModel)

         tok, errTok := tokenSource.Token()
         if errTok != nil {
@@ -212,8 +214,8 @@ func (e *GeminiCLIExecutor) ExecuteStream(ctx context.Context, auth *cliproxyaut
             defer func() { _ = resp.Body.Close() }()
             if opts.Alt == "" {
                 scanner := bufio.NewScanner(resp.Body)
-                buf := make([]byte, 1024*1024)
-                scanner.Buffer(buf, 1024*1024)
+                buf := make([]byte, 20_971_520)
+                scanner.Buffer(buf, 20_971_520)
                 var param any
                 for scanner.Scan() {
                     line := scanner.Bytes()
@@ -281,7 +283,7 @@ func (e *GeminiCLIExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.
         models = append([]string{req.Model}, models...)
     }

-    httpClient := newHTTPClient(ctx, 0)
+    httpClient := newHTTPClient(ctx, e.cfg, auth, 0)
     respCtx := context.WithValue(ctx, "alt", opts.Alt)

     var lastStatus int
@@ -291,6 +293,7 @@ func (e *GeminiCLIExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.
         payload := sdktranslator.TranslateRequest(from, to, attemptModel, bytes.Clone(req.Payload), false)
         payload = deleteJSONField(payload, "project")
         payload = deleteJSONField(payload, "model")
+        payload = disableGeminiThinkingConfig(payload, attemptModel)

         tok, errTok := tokenSource.Token()
         if errTok != nil {
@@ -438,15 +441,8 @@ func updateGeminiCLITokenMetadata(auth *cliproxyauth.Auth, base map[string]any,
     auth.Metadata["token"] = merged
 }

-func newHTTPClient(ctx context.Context, timeout time.Duration) *http.Client {
-    client := &http.Client{}
-    if timeout > 0 {
-        client.Timeout = timeout
-    }
-    if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
-        client.Transport = rt
-    }
-    return client
+func newHTTPClient(ctx context.Context, cfg *config.Config, auth *cliproxyauth.Auth, timeout time.Duration) *http.Client {
+    return newProxyAwareHTTPClient(ctx, cfg, auth, timeout)
 }

 func cloneMap(in map[string]any) map[string]any {
@@ -507,6 +503,29 @@ func cliPreviewFallbackOrder(model string) []string {
     }
 }

+func disableGeminiThinkingConfig(body []byte, model string) []byte {
+    if !geminiModelDisallowsThinking(model) {
+        return body
+    }
+
+    updated := deleteJSONField(body, "request.generationConfig.thinkingConfig")
+    updated = deleteJSONField(updated, "generationConfig.thinkingConfig")
+    return updated
+}
+
+func geminiModelDisallowsThinking(model string) bool {
+    if model == "" {
+        return false
+    }
+    lower := strings.ToLower(model)
+    for _, marker := range []string{"gemini-2.5-flash-image-preview"} {
+        if strings.Contains(lower, marker) {
+            return true
+        }
+    }
+    return false
+}
+
 // setJSONField sets a top-level JSON field on a byte slice payload via sjson.
 func setJSONField(body []byte, key, value string) []byte {
     if key == "" {
|
|||||||
from := opts.SourceFormat
|
from := opts.SourceFormat
|
||||||
to := sdktranslator.FromString("gemini")
|
to := sdktranslator.FromString("gemini")
|
||||||
body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
|
body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
|
||||||
|
body = disableGeminiThinkingConfig(body, req.Model)
|
||||||
|
|
||||||
action := "generateContent"
|
action := "generateContent"
|
||||||
if req.Metadata != nil {
|
if req.Metadata != nil {
|
||||||
@@ -103,10 +104,7 @@ func (e *GeminiExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, r
|
|||||||
httpReq.Header.Set("Authorization", "Bearer "+bearer)
|
httpReq.Header.Set("Authorization", "Bearer "+bearer)
|
||||||
}
|
}
|
||||||
|
|
||||||
httpClient := &http.Client{}
|
httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
|
||||||
if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
|
|
||||||
httpClient.Transport = rt
|
|
||||||
}
|
|
||||||
resp, err := httpClient.Do(httpReq)
|
resp, err := httpClient.Do(httpReq)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cliproxyexecutor.Response{}, err
|
return cliproxyexecutor.Response{}, err
|
||||||
@@ -137,6 +135,7 @@ func (e *GeminiExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.A
|
|||||||
from := opts.SourceFormat
|
from := opts.SourceFormat
|
||||||
to := sdktranslator.FromString("gemini")
|
to := sdktranslator.FromString("gemini")
|
||||||
body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), true)
|
body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), true)
|
||||||
|
body = disableGeminiThinkingConfig(body, req.Model)
|
||||||
|
|
||||||
url := fmt.Sprintf("%s/%s/models/%s:%s", glEndpoint, glAPIVersion, req.Model, "streamGenerateContent")
|
url := fmt.Sprintf("%s/%s/models/%s:%s", glEndpoint, glAPIVersion, req.Model, "streamGenerateContent")
|
||||||
if opts.Alt == "" {
|
if opts.Alt == "" {
|
||||||
@@ -159,10 +158,7 @@ func (e *GeminiExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.A
|
|||||||
httpReq.Header.Set("Authorization", "Bearer "+bearer)
|
httpReq.Header.Set("Authorization", "Bearer "+bearer)
|
||||||
}
|
}
|
||||||
|
|
||||||
httpClient := &http.Client{Timeout: 0}
|
httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
|
||||||
if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
|
|
||||||
httpClient.Transport = rt
|
|
||||||
}
|
|
||||||
resp, err := httpClient.Do(httpReq)
|
resp, err := httpClient.Do(httpReq)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -179,8 +175,8 @@ func (e *GeminiExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.A
|
|||||||
defer close(out)
|
defer close(out)
|
||||||
defer func() { _ = resp.Body.Close() }()
|
defer func() { _ = resp.Body.Close() }()
|
||||||
scanner := bufio.NewScanner(resp.Body)
|
scanner := bufio.NewScanner(resp.Body)
|
||||||
buf := make([]byte, 1024*1024)
|
buf := make([]byte, 20_971_520)
|
||||||
scanner.Buffer(buf, 1024*1024)
|
scanner.Buffer(buf, 20_971_520)
|
||||||
var param any
|
var param any
|
||||||
for scanner.Scan() {
|
for scanner.Scan() {
|
||||||
line := scanner.Bytes()
|
line := scanner.Bytes()
|
||||||
@@ -210,6 +206,7 @@ func (e *GeminiExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.Aut
|
|||||||
from := opts.SourceFormat
|
from := opts.SourceFormat
|
||||||
to := sdktranslator.FromString("gemini")
|
to := sdktranslator.FromString("gemini")
|
||||||
translatedReq := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
|
translatedReq := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
|
||||||
|
translatedReq = disableGeminiThinkingConfig(translatedReq, req.Model)
|
||||||
respCtx := context.WithValue(ctx, "alt", opts.Alt)
|
respCtx := context.WithValue(ctx, "alt", opts.Alt)
|
||||||
translatedReq, _ = sjson.DeleteBytes(translatedReq, "tools")
|
translatedReq, _ = sjson.DeleteBytes(translatedReq, "tools")
|
||||||
translatedReq, _ = sjson.DeleteBytes(translatedReq, "generationConfig")
|
translatedReq, _ = sjson.DeleteBytes(translatedReq, "generationConfig")
|
||||||
@@ -230,10 +227,7 @@ func (e *GeminiExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.Aut
|
|||||||
httpReq.Header.Set("Authorization", "Bearer "+bearer)
|
httpReq.Header.Set("Authorization", "Bearer "+bearer)
|
||||||
}
|
}
|
||||||
|
|
||||||
httpClient := &http.Client{}
|
httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
|
||||||
if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
|
|
||||||
httpClient.Transport = rt
|
|
||||||
}
|
|
||||||
resp, err := httpClient.Do(httpReq)
|
resp, err := httpClient.Do(httpReq)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cliproxyexecutor.Response{}, err
|
return cliproxyexecutor.Response{}, err
|
||||||
@@ -320,7 +314,7 @@ func (e *GeminiExecutor) Refresh(ctx context.Context, auth *cliproxyauth.Auth) (
|
|||||||
conf := &oauth2.Config{ClientID: clientID, ClientSecret: clientSecret, Endpoint: endpoint}
|
conf := &oauth2.Config{ClientID: clientID, ClientSecret: clientSecret, Endpoint: endpoint}
|
||||||
|
|
||||||
// Ensure proxy-aware HTTP client for token refresh
|
// Ensure proxy-aware HTTP client for token refresh
|
||||||
httpClient := util.SetProxy(e.cfg, &http.Client{})
|
httpClient := util.SetProxy(&e.cfg.SDKConfig, &http.Client{})
|
||||||
ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient)
|
ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient)
|
||||||
|
|
||||||
// Build base token
|
// Build base token
|
||||||
|
|||||||
@@ -13,6 +13,7 @@ import (
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
 	geminiwebapi "github.com/router-for-me/CLIProxyAPI/v6/internal/provider/gemini-web"
+	conversation "github.com/router-for-me/CLIProxyAPI/v6/internal/provider/gemini-web/conversation"
 	cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
 	cliproxyexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
 	sdktranslator "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
@@ -40,12 +41,18 @@ func (e *GeminiWebExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth
 	if err = state.EnsureClient(); err != nil {
 		return cliproxyexecutor.Response{}, err
 	}
+	match := extractGeminiWebMatch(opts.Metadata)
 	reporter := newUsageReporter(ctx, e.Identifier(), req.Model, auth)

 	mutex := state.GetRequestMutex()
 	if mutex != nil {
 		mutex.Lock()
 		defer mutex.Unlock()
+		if match != nil {
+			state.SetPendingMatch(match)
+		}
+	} else if match != nil {
+		state.SetPendingMatch(match)
 	}

 	payload := bytes.Clone(req.Payload)
@@ -72,11 +79,18 @@ func (e *GeminiWebExecutor) ExecuteStream(ctx context.Context, auth *cliproxyaut
 	if err = state.EnsureClient(); err != nil {
 		return nil, err
 	}
+	match := extractGeminiWebMatch(opts.Metadata)
 	reporter := newUsageReporter(ctx, e.Identifier(), req.Model, auth)

 	mutex := state.GetRequestMutex()
 	if mutex != nil {
 		mutex.Lock()
+		if match != nil {
+			state.SetPendingMatch(match)
+		}
+	}
+	if mutex == nil && match != nil {
+		state.SetPendingMatch(match)
 	}

 	gemBytes, errMsg, prep := state.Send(ctx, req.Model, bytes.Clone(req.Payload), opts)
@@ -154,6 +168,8 @@ func (e *GeminiWebExecutor) stateFor(auth *cliproxyauth.Auth) (*geminiwebapi.Gem
 		return nil, fmt.Errorf("gemini-web executor: auth is nil")
 	}
 	if runtime, ok := auth.Runtime.(*geminiWebRuntime); ok && runtime != nil && runtime.state != nil {
+		// Hot-reload: ensure cached state sees the latest config
+		runtime.state.SetConfig(e.cfg)
 		return runtime.state, nil
 	}

@@ -161,6 +177,8 @@ func (e *GeminiWebExecutor) stateFor(auth *cliproxyauth.Auth) (*geminiwebapi.Gem
 	defer e.mu.Unlock()

 	if runtime, ok := auth.Runtime.(*geminiWebRuntime); ok && runtime != nil && runtime.state != nil {
+		// Hot-reload: ensure cached state sees the latest config
+		runtime.state.SetConfig(e.cfg)
 		return runtime.state, nil
 	}

@@ -182,7 +200,7 @@ func (e *GeminiWebExecutor) stateFor(auth *cliproxyauth.Auth) (*geminiwebapi.Gem
 			storagePath = p
 		}
 	}
-	state := geminiwebapi.NewGeminiWebState(cfg, ts, storagePath)
+	state := geminiwebapi.NewGeminiWebState(cfg, ts, storagePath, auth.Label)
 	runtime := &geminiWebRuntime{state: state}
 	auth.Runtime = runtime
 	return state, nil
@@ -200,7 +218,8 @@ func parseGeminiWebToken(auth *cliproxyauth.Auth) (*gemini.GeminiWebTokenStorage
 	if psid == "" || psidts == "" {
 		return nil, fmt.Errorf("gemini-web executor: incomplete cookie metadata")
 	}
-	return &gemini.GeminiWebTokenStorage{Secure1PSID: psid, Secure1PSIDTS: psidts}, nil
+	label := strings.TrimSpace(stringFromMetadata(auth.Metadata, "label"))
+	return &gemini.GeminiWebTokenStorage{Secure1PSID: psid, Secure1PSIDTS: psidts, Label: label}, nil
 }

 func stringFromMetadata(meta map[string]any, keys ...string) string {
@@ -241,3 +260,21 @@ func (e geminiWebError) StatusCode() int {
 	}
 	return e.message.StatusCode
 }
+
+func extractGeminiWebMatch(metadata map[string]any) *conversation.MatchResult {
+	if metadata == nil {
+		return nil
+	}
+	value, ok := metadata[conversation.MetadataMatchKey]
+	if !ok {
+		return nil
+	}
+	switch v := value.(type) {
+	case *conversation.MatchResult:
+		return v
+	case conversation.MatchResult:
+		return &v
+	default:
+		return nil
+	}
+}
261
internal/runtime/executor/iflow_executor.go
Normal file
@@ -0,0 +1,261 @@
+package executor
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"time"
+
+	iflowauth "github.com/router-for-me/CLIProxyAPI/v6/internal/auth/iflow"
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
+	cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
+	cliproxyexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
+	sdktranslator "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
+	log "github.com/sirupsen/logrus"
+	"github.com/tidwall/gjson"
+	"github.com/tidwall/sjson"
+)
+
+const (
+	iflowDefaultEndpoint = "/chat/completions"
+	iflowUserAgent       = "iFlow-Cli"
+)
+
+// IFlowExecutor executes OpenAI-compatible chat completions against the iFlow API using API keys derived from OAuth.
+type IFlowExecutor struct {
+	cfg *config.Config
+}
+
+// NewIFlowExecutor constructs a new executor instance.
+func NewIFlowExecutor(cfg *config.Config) *IFlowExecutor { return &IFlowExecutor{cfg: cfg} }
+
+// Identifier returns the provider key.
+func (e *IFlowExecutor) Identifier() string { return "iflow" }
+
+// PrepareRequest implements ProviderExecutor but requires no preprocessing.
+func (e *IFlowExecutor) PrepareRequest(_ *http.Request, _ *cliproxyauth.Auth) error { return nil }
+
+// Execute performs a non-streaming chat completion request.
+func (e *IFlowExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (cliproxyexecutor.Response, error) {
+	apiKey, baseURL := iflowCreds(auth)
+	if strings.TrimSpace(apiKey) == "" {
+		return cliproxyexecutor.Response{}, fmt.Errorf("iflow executor: missing api key")
+	}
+	if baseURL == "" {
+		baseURL = iflowauth.DefaultAPIBaseURL
+	}
+
+	reporter := newUsageReporter(ctx, e.Identifier(), req.Model, auth)
+
+	from := opts.SourceFormat
+	to := sdktranslator.FromString("openai")
+	body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
+
+	endpoint := strings.TrimSuffix(baseURL, "/") + iflowDefaultEndpoint
+	recordAPIRequest(ctx, e.cfg, body)
+
+	httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body))
+	if err != nil {
+		return cliproxyexecutor.Response{}, err
+	}
+	applyIFlowHeaders(httpReq, apiKey, false)
+
+	httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
+	resp, err := httpClient.Do(httpReq)
+	if err != nil {
+		return cliproxyexecutor.Response{}, err
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		b, _ := io.ReadAll(resp.Body)
+		appendAPIResponseChunk(ctx, e.cfg, b)
+		log.Debugf("iflow request error: status %d body %s", resp.StatusCode, string(b))
+		return cliproxyexecutor.Response{}, statusErr{code: resp.StatusCode, msg: string(b)}
+	}
+
+	data, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return cliproxyexecutor.Response{}, err
+	}
+	appendAPIResponseChunk(ctx, e.cfg, data)
+	reporter.publish(ctx, parseOpenAIUsage(data))
+
+	var param any
+	out := sdktranslator.TranslateNonStream(ctx, to, from, req.Model, bytes.Clone(opts.OriginalRequest), body, data, &param)
+	return cliproxyexecutor.Response{Payload: []byte(out)}, nil
+}
+
+// ExecuteStream performs a streaming chat completion request.
+func (e *IFlowExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (<-chan cliproxyexecutor.StreamChunk, error) {
+	apiKey, baseURL := iflowCreds(auth)
+	if strings.TrimSpace(apiKey) == "" {
+		return nil, fmt.Errorf("iflow executor: missing api key")
+	}
+	if baseURL == "" {
+		baseURL = iflowauth.DefaultAPIBaseURL
+	}
+
+	reporter := newUsageReporter(ctx, e.Identifier(), req.Model, auth)
+
+	from := opts.SourceFormat
+	to := sdktranslator.FromString("openai")
+	body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), true)
+
+	// Ensure tools array exists to avoid provider quirks similar to Qwen's behaviour.
+	toolsResult := gjson.GetBytes(body, "tools")
+	if toolsResult.Exists() && toolsResult.IsArray() && len(toolsResult.Array()) == 0 {
+		body = ensureToolsArray(body)
+	}
+
+	endpoint := strings.TrimSuffix(baseURL, "/") + iflowDefaultEndpoint
+	recordAPIRequest(ctx, e.cfg, body)
+
+	httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body))
+	if err != nil {
+		return nil, err
+	}
+	applyIFlowHeaders(httpReq, apiKey, true)
+
+	httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
+	resp, err := httpClient.Do(httpReq)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		defer func() { _ = resp.Body.Close() }()
+		b, _ := io.ReadAll(resp.Body)
+		appendAPIResponseChunk(ctx, e.cfg, b)
+		log.Debugf("iflow streaming error: status %d body %s", resp.StatusCode, string(b))
+		return nil, statusErr{code: resp.StatusCode, msg: string(b)}
+	}
+
+	out := make(chan cliproxyexecutor.StreamChunk)
+	go func() {
+		defer close(out)
+		defer func() { _ = resp.Body.Close() }()
+
+		scanner := bufio.NewScanner(resp.Body)
+		buf := make([]byte, 20_971_520)
+		scanner.Buffer(buf, 20_971_520)
+		var param any
+		for scanner.Scan() {
+			line := scanner.Bytes()
+			appendAPIResponseChunk(ctx, e.cfg, line)
+			if detail, ok := parseOpenAIStreamUsage(line); ok {
+				reporter.publish(ctx, detail)
+			}
+			chunks := sdktranslator.TranslateStream(ctx, to, from, req.Model, bytes.Clone(opts.OriginalRequest), body, bytes.Clone(line), &param)
+			for i := range chunks {
+				out <- cliproxyexecutor.StreamChunk{Payload: []byte(chunks[i])}
+			}
+		}
+		if err := scanner.Err(); err != nil {
+			out <- cliproxyexecutor.StreamChunk{Err: err}
+		}
+	}()

+	return out, nil
+}
+
+// CountTokens is not implemented for iFlow.
+func (e *IFlowExecutor) CountTokens(context.Context, *cliproxyauth.Auth, cliproxyexecutor.Request, cliproxyexecutor.Options) (cliproxyexecutor.Response, error) {
+	return cliproxyexecutor.Response{Payload: nil}, fmt.Errorf("not implemented")
+}
+
+// Refresh refreshes OAuth tokens and updates the stored API key.
+func (e *IFlowExecutor) Refresh(ctx context.Context, auth *cliproxyauth.Auth) (*cliproxyauth.Auth, error) {
+	log.Debugf("iflow executor: refresh called")
+	if auth == nil {
+		return nil, fmt.Errorf("iflow executor: auth is nil")
+	}
+
+	refreshToken := ""
+	if auth.Metadata != nil {
+		if v, ok := auth.Metadata["refresh_token"].(string); ok {
+			refreshToken = strings.TrimSpace(v)
+		}
+	}
+	if refreshToken == "" {
+		return auth, nil
+	}
+
+	svc := iflowauth.NewIFlowAuth(e.cfg)
+	tokenData, err := svc.RefreshTokens(ctx, refreshToken)
+	if err != nil {
+		return nil, err
+	}
+
+	if auth.Metadata == nil {
+		auth.Metadata = make(map[string]any)
+	}
+	auth.Metadata["access_token"] = tokenData.AccessToken
+	if tokenData.RefreshToken != "" {
+		auth.Metadata["refresh_token"] = tokenData.RefreshToken
+	}
+	if tokenData.APIKey != "" {
+		auth.Metadata["api_key"] = tokenData.APIKey
+	}
+	auth.Metadata["expired"] = tokenData.Expire
+	auth.Metadata["type"] = "iflow"
+	auth.Metadata["last_refresh"] = time.Now().Format(time.RFC3339)
+
+	if auth.Attributes == nil {
+		auth.Attributes = make(map[string]string)
+	}
+	if tokenData.APIKey != "" {
+		auth.Attributes["api_key"] = tokenData.APIKey
+	}
+
+	return auth, nil
+}
+
+func applyIFlowHeaders(r *http.Request, apiKey string, stream bool) {
+	r.Header.Set("Content-Type", "application/json")
+	r.Header.Set("Authorization", "Bearer "+apiKey)
+	r.Header.Set("User-Agent", iflowUserAgent)
+	if stream {
+		r.Header.Set("Accept", "text/event-stream")
+	} else {
+		r.Header.Set("Accept", "application/json")
+	}
+}
+
+func iflowCreds(a *cliproxyauth.Auth) (apiKey, baseURL string) {
+	if a == nil {
+		return "", ""
+	}
+	if a.Attributes != nil {
+		if v := strings.TrimSpace(a.Attributes["api_key"]); v != "" {
+			apiKey = v
+		}
+		if v := strings.TrimSpace(a.Attributes["base_url"]); v != "" {
+			baseURL = v
+		}
+	}
+	if apiKey == "" && a.Metadata != nil {
+		if v, ok := a.Metadata["api_key"].(string); ok {
+			apiKey = strings.TrimSpace(v)
+		}
+	}
+	if baseURL == "" && a.Metadata != nil {
+		if v, ok := a.Metadata["base_url"].(string); ok {
+			baseURL = strings.TrimSpace(v)
+		}
+	}
+	return apiKey, baseURL
+}
+
+func ensureToolsArray(body []byte) []byte {
+	placeholder := `[{"type":"function","function":{"name":"noop","description":"Placeholder tool to stabilise streaming","parameters":{"type":"object"}}}]`
+	updated, err := sjson.SetRawBytes(body, "tools", []byte(placeholder))
+	if err != nil {
+		return body
+	}
+	return updated
+}
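For reference, a sketch of the request shape this new executor ends up sending: a POST to `<base-url>/chat/completions` with the iFlow-specific headers. The API key and body below are placeholders; the real executor resolves both from the stored auth record and the translated payload:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Assumed values for illustration; iflowCreds() supplies the real ones.
	apiKey := "sk-example"
	baseURL := "https://apis.iflow.cn/v1"
	body := []byte(`{"model":"glm-4.5","stream":true,"messages":[{"role":"user","content":"hi"}]}`)

	req, _ := http.NewRequest(http.MethodPost, baseURL+"/chat/completions", bytes.NewReader(body))
	// Mirrors applyIFlowHeaders(req, apiKey, true) for a streaming call.
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+apiKey)
	req.Header.Set("User-Agent", "iFlow-Cli")
	req.Header.Set("Accept", "text/event-stream")

	fmt.Println(req.Method, req.URL.String(), req.Header)
}
```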
@@ -40,8 +40,8 @@ func (e *OpenAICompatExecutor) PrepareRequest(_ *http.Request, _ *cliproxyauth.A

 func (e *OpenAICompatExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (cliproxyexecutor.Response, error) {
 	baseURL, apiKey := e.resolveCredentials(auth)
-	if baseURL == "" || apiKey == "" {
+	if baseURL == "" {
-		return cliproxyexecutor.Response{}, statusErr{code: http.StatusUnauthorized, msg: "missing provider baseURL or apiKey"}
+		return cliproxyexecutor.Response{}, statusErr{code: http.StatusUnauthorized, msg: "missing provider baseURL"}
 	}
 	reporter := newUsageReporter(ctx, e.Identifier(), req.Model, auth)

@@ -60,13 +60,12 @@ func (e *OpenAICompatExecutor) Execute(ctx context.Context, auth *cliproxyauth.A
 		return cliproxyexecutor.Response{}, err
 	}
 	httpReq.Header.Set("Content-Type", "application/json")
+	if apiKey != "" {
 		httpReq.Header.Set("Authorization", "Bearer "+apiKey)
+	}
 	httpReq.Header.Set("User-Agent", "cli-proxy-openai-compat")

-	httpClient := &http.Client{}
+	httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
-	if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
-		httpClient.Transport = rt
-	}
 	resp, err := httpClient.Do(httpReq)
 	if err != nil {
 		return cliproxyexecutor.Response{}, err
@@ -92,8 +91,8 @@ func (e *OpenAICompatExecutor) Execute(ctx context.Context, auth *cliproxyauth.A

 func (e *OpenAICompatExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (<-chan cliproxyexecutor.StreamChunk, error) {
 	baseURL, apiKey := e.resolveCredentials(auth)
-	if baseURL == "" || apiKey == "" {
+	if baseURL == "" {
-		return nil, statusErr{code: http.StatusUnauthorized, msg: "missing provider baseURL or apiKey"}
+		return nil, statusErr{code: http.StatusUnauthorized, msg: "missing provider baseURL"}
 	}
 	reporter := newUsageReporter(ctx, e.Identifier(), req.Model, auth)
 	from := opts.SourceFormat
@@ -110,15 +109,14 @@ func (e *OpenAICompatExecutor) ExecuteStream(ctx context.Context, auth *cliproxy
 		return nil, err
 	}
 	httpReq.Header.Set("Content-Type", "application/json")
+	if apiKey != "" {
 		httpReq.Header.Set("Authorization", "Bearer "+apiKey)
+	}
 	httpReq.Header.Set("User-Agent", "cli-proxy-openai-compat")
 	httpReq.Header.Set("Accept", "text/event-stream")
 	httpReq.Header.Set("Cache-Control", "no-cache")

-	httpClient := &http.Client{Timeout: 0}
+	httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
-	if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
-		httpClient.Transport = rt
-	}
 	resp, err := httpClient.Do(httpReq)
 	if err != nil {
 		return nil, err
@@ -135,8 +133,8 @@ func (e *OpenAICompatExecutor) ExecuteStream(ctx context.Context, auth *cliproxy
 		defer close(out)
 		defer func() { _ = resp.Body.Close() }()
 		scanner := bufio.NewScanner(resp.Body)
-		buf := make([]byte, 1024*1024)
+		buf := make([]byte, 20_971_520)
-		scanner.Buffer(buf, 1024*1024)
+		scanner.Buffer(buf, 20_971_520)
 		var param any
 		for scanner.Scan() {
 			line := scanner.Bytes()
@@ -177,8 +175,8 @@ func (e *OpenAICompatExecutor) resolveCredentials(auth *cliproxyauth.Auth) (base
 		return "", ""
 	}
 	if auth.Attributes != nil {
-		baseURL = auth.Attributes["base_url"]
+		baseURL = strings.TrimSpace(auth.Attributes["base_url"])
-		apiKey = auth.Attributes["api_key"]
+		apiKey = strings.TrimSpace(auth.Attributes["api_key"])
 	}
 	return
 }
116
internal/runtime/executor/proxy_helpers.go
Normal file
@@ -0,0 +1,116 @@
+package executor
+
+import (
+	"context"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
+	cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
+	log "github.com/sirupsen/logrus"
+	"golang.org/x/net/proxy"
+)
+
+// newProxyAwareHTTPClient creates an HTTP client with proper proxy configuration priority:
+// 1. Use auth.ProxyURL if configured (highest priority)
+// 2. Use cfg.ProxyURL if auth proxy is not configured
+// 3. Use RoundTripper from context if neither are configured
+//
+// Parameters:
+// - ctx: The context containing optional RoundTripper
+// - cfg: The application configuration
+// - auth: The authentication information
+// - timeout: The client timeout (0 means no timeout)
+//
+// Returns:
+// - *http.Client: An HTTP client with configured proxy or transport
+func newProxyAwareHTTPClient(ctx context.Context, cfg *config.Config, auth *cliproxyauth.Auth, timeout time.Duration) *http.Client {
+	httpClient := &http.Client{}
+	if timeout > 0 {
+		httpClient.Timeout = timeout
+	}
+
+	// Priority 1: Use auth.ProxyURL if configured
+	var proxyURL string
+	if auth != nil {
+		proxyURL = strings.TrimSpace(auth.ProxyURL)
+	}
+
+	// Priority 2: Use cfg.ProxyURL if auth proxy is not configured
+	if proxyURL == "" && cfg != nil {
+		proxyURL = strings.TrimSpace(cfg.ProxyURL)
+	}
+
+	// If we have a proxy URL configured, set up the transport
+	if proxyURL != "" {
+		transport := buildProxyTransport(proxyURL)
+		if transport != nil {
+			httpClient.Transport = transport
+			return httpClient
+		}
+		// If proxy setup failed, log and fall through to context RoundTripper
+		log.Debugf("failed to setup proxy from URL: %s, falling back to context transport", proxyURL)
+	}
+
+	// Priority 3: Use RoundTripper from context (typically from RoundTripperFor)
+	if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
+		httpClient.Transport = rt
+	}
+
+	return httpClient
+}
+
+// buildProxyTransport creates an HTTP transport configured for the given proxy URL.
+// It supports SOCKS5, HTTP, and HTTPS proxy protocols.
+//
+// Parameters:
+// - proxyURL: The proxy URL string (e.g., "socks5://user:pass@host:port", "http://host:port")
+//
+// Returns:
+// - *http.Transport: A configured transport, or nil if the proxy URL is invalid
+func buildProxyTransport(proxyURL string) *http.Transport {
+	if proxyURL == "" {
+		return nil
+	}
+
+	parsedURL, errParse := url.Parse(proxyURL)
+	if errParse != nil {
+		log.Errorf("parse proxy URL failed: %v", errParse)
+		return nil
+	}
+
+	var transport *http.Transport
+
+	// Handle different proxy schemes
+	if parsedURL.Scheme == "socks5" {
+		// Configure SOCKS5 proxy with optional authentication
+		var proxyAuth *proxy.Auth
+		if parsedURL.User != nil {
+			username := parsedURL.User.Username()
+			password, _ := parsedURL.User.Password()
+			proxyAuth = &proxy.Auth{User: username, Password: password}
+		}
+		dialer, errSOCKS5 := proxy.SOCKS5("tcp", parsedURL.Host, proxyAuth, proxy.Direct)
+		if errSOCKS5 != nil {
+			log.Errorf("create SOCKS5 dialer failed: %v", errSOCKS5)
+			return nil
+		}
+		// Set up a custom transport using the SOCKS5 dialer
+		transport = &http.Transport{
+			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
+				return dialer.Dial(network, addr)
+			},
+		}
+	} else if parsedURL.Scheme == "http" || parsedURL.Scheme == "https" {
+		// Configure HTTP or HTTPS proxy
+		transport = &http.Transport{Proxy: http.ProxyURL(parsedURL)}
+	} else {
+		log.Errorf("unsupported proxy scheme: %s", parsedURL.Scheme)
+		return nil
+	}
+
+	return transport
+}
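A hedged usage sketch for the helper above, written as if it lived in the same `executor` package: the per-auth proxy wins over the global `cfg.ProxyURL`, which in turn wins over any RoundTripper carried in the request context. The timeout value is an arbitrary choice for illustration.

```go
package executor

import (
	"net/http"
	"time"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)

// doWithAuthProxy is a sketch of how callers are expected to use the helper:
// if auth.ProxyURL is set, traffic for this auth record goes through its own
// proxy; otherwise cfg.ProxyURL applies; otherwise the context RoundTripper.
func doWithAuthProxy(cfg *config.Config, auth *cliproxyauth.Auth, httpReq *http.Request) (*http.Response, error) {
	client := newProxyAwareHTTPClient(httpReq.Context(), cfg, auth, 30*time.Second)
	return client.Do(httpReq)
}
```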
@@ -58,10 +58,7 @@ func (e *QwenExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, req
 	}
 	applyQwenHeaders(httpReq, token, false)

-	httpClient := &http.Client{}
+	httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
-	if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
-		httpClient.Transport = rt
-	}
 	resp, err := httpClient.Do(httpReq)
 	if err != nil {
 		return cliproxyexecutor.Response{}, err
@@ -112,10 +109,7 @@ func (e *QwenExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Aut
 	}
 	applyQwenHeaders(httpReq, token, true)

-	httpClient := &http.Client{Timeout: 0}
+	httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
-	if rt, ok := ctx.Value("cliproxy.roundtripper").(http.RoundTripper); ok && rt != nil {
-		httpClient.Transport = rt
-	}
 	resp, err := httpClient.Do(httpReq)
 	if err != nil {
 		return nil, err
@@ -132,8 +126,8 @@ func (e *QwenExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Aut
 		defer close(out)
 		defer func() { _ = resp.Body.Close() }()
 		scanner := bufio.NewScanner(resp.Body)
-		buf := make([]byte, 1024*1024)
+		buf := make([]byte, 20_971_520)
-		scanner.Buffer(buf, 1024*1024)
+		scanner.Buffer(buf, 20_971_520)
 		var param any
 		for scanner.Scan() {
 			line := scanner.Bytes()
@@ -8,15 +8,24 @@ package gemini
 import (
 	"bytes"
 	"crypto/rand"
+	"crypto/sha256"
+	"encoding/hex"
 	"fmt"
 	"math/big"
 	"strings"

+	"github.com/google/uuid"
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
 	"github.com/tidwall/gjson"
 	"github.com/tidwall/sjson"
 )

+var (
+	user    = ""
+	account = ""
+	session = ""
+)
+
 // ConvertGeminiRequestToClaude parses and transforms a Gemini API request into Claude Code API format.
 // It extracts the model name, system instruction, message contents, and tool declarations
 // from the raw JSON request and returns them in the format expected by the Claude Code API.
@@ -37,8 +46,23 @@ import (
 // - []byte: The transformed request data in Claude Code API format
 func ConvertGeminiRequestToClaude(modelName string, inputRawJSON []byte, stream bool) []byte {
 	rawJSON := bytes.Clone(inputRawJSON)
-	// Base Claude Code API template with default max_tokens value
-	out := `{"model":"","max_tokens":32000,"messages":[]}`
+	if account == "" {
+		u, _ := uuid.NewRandom()
+		account = u.String()
+	}
+	if session == "" {
+		u, _ := uuid.NewRandom()
+		session = u.String()
+	}
+	if user == "" {
+		sum := sha256.Sum256([]byte(account + session))
+		user = hex.EncodeToString(sum[:])
+	}
+	userID := fmt.Sprintf("user_%s_account_%s_session_%s", user, account, session)
+
+	// Base Claude message payload
+	out := fmt.Sprintf(`{"model":"","max_tokens":32000,"messages":[],"metadata":{"user_id":"%s"}}`, userID)
+
 	root := gjson.ParseBytes(rawJSON)

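The same `metadata.user_id` derivation is repeated in the OpenAI chat-completions and Responses converters below: one process-wide random account UUID, one session UUID, and a SHA-256 of their concatenation as the user hash. A standalone sketch of the derivation (the real converters cache the three values in package-level variables, so the ID stays stable for the process lifetime):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	account := uuid.NewString() // generated once, then reused
	session := uuid.NewString()
	sum := sha256.Sum256([]byte(account + session))
	user := hex.EncodeToString(sum[:])

	// Matches the format string used by the converters.
	fmt.Printf("user_%s_account_%s_session_%s\n", user, account, session)
}
```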
@@ -331,8 +331,8 @@ func ConvertClaudeResponseToGeminiNonStream(_ context.Context, modelName string,
 	streamingEvents := make([][]byte, 0)

 	scanner := bufio.NewScanner(bytes.NewReader(rawJSON))
-	buffer := make([]byte, 10240*1024)
+	buffer := make([]byte, 20_971_520)
-	scanner.Buffer(buffer, 10240*1024)
+	scanner.Buffer(buffer, 20_971_520)
 	for scanner.Scan() {
 		line := scanner.Bytes()
 		// log.Debug(string(line))
@@ -8,14 +8,24 @@ package chat_completions
 import (
 	"bytes"
 	"crypto/rand"
+	"crypto/sha256"
+	"encoding/hex"
 	"encoding/json"
+	"fmt"
 	"math/big"
 	"strings"

+	"github.com/google/uuid"
 	"github.com/tidwall/gjson"
 	"github.com/tidwall/sjson"
 )

+var (
+	user    = ""
+	account = ""
+	session = ""
+)
+
 // ConvertOpenAIRequestToClaude parses and transforms an OpenAI Chat Completions API request into Claude Code API format.
 // It extracts the model name, system instruction, message contents, and tool declarations
 // from the raw JSON request and returns them in the format expected by the Claude Code API.
@@ -36,8 +46,22 @@ import (
 func ConvertOpenAIRequestToClaude(modelName string, inputRawJSON []byte, stream bool) []byte {
 	rawJSON := bytes.Clone(inputRawJSON)

+	if account == "" {
+		u, _ := uuid.NewRandom()
+		account = u.String()
+	}
+	if session == "" {
+		u, _ := uuid.NewRandom()
+		session = u.String()
+	}
+	if user == "" {
+		sum := sha256.Sum256([]byte(account + session))
+		user = hex.EncodeToString(sum[:])
+	}
+	userID := fmt.Sprintf("user_%s_account_%s_session_%s", user, account, session)
+
 	// Base Claude Code API template with default max_tokens value
-	out := `{"model":"","max_tokens":32000,"messages":[]}`
+	out := fmt.Sprintf(`{"model":"","max_tokens":32000,"messages":[],"metadata":{"user_id":"%s"}}`, userID)

 	root := gjson.ParseBytes(rawJSON)

@@ -3,13 +3,23 @@ package responses
 import (
 	"bytes"
 	"crypto/rand"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
 	"math/big"
 	"strings"

+	"github.com/google/uuid"
 	"github.com/tidwall/gjson"
 	"github.com/tidwall/sjson"
 )

+var (
+	user    = ""
+	account = ""
+	session = ""
+)
+
 // ConvertOpenAIResponsesRequestToClaude transforms an OpenAI Responses API request
 // into a Claude Messages API request using only gjson/sjson for JSON handling.
 // It supports:
@@ -23,8 +33,22 @@ import (
 func ConvertOpenAIResponsesRequestToClaude(modelName string, inputRawJSON []byte, stream bool) []byte {
 	rawJSON := bytes.Clone(inputRawJSON)

+	if account == "" {
+		u, _ := uuid.NewRandom()
+		account = u.String()
+	}
+	if session == "" {
+		u, _ := uuid.NewRandom()
+		session = u.String()
+	}
+	if user == "" {
+		sum := sha256.Sum256([]byte(account + session))
+		user = hex.EncodeToString(sum[:])
+	}
+	userID := fmt.Sprintf("user_%s_account_%s_session_%s", user, account, session)
+
 	// Base Claude message payload
-	out := `{"model":"","max_tokens":32000,"messages":[]}`
+	out := fmt.Sprintf(`{"model":"","max_tokens":32000,"messages":[],"metadata":{"user_id":"%s"}}`, userID)

 	root := gjson.ParseBytes(rawJSON)

@@ -32,6 +32,10 @@ type claudeToResponsesState struct {
 	ReasoningBuf strings.Builder
 	ReasoningPartAdded bool
 	ReasoningIndex int
+	// usage aggregation
+	InputTokens int64
+	OutputTokens int64
+	UsageSeen bool
 }

 var dataTag = []byte("data:")
@@ -77,6 +81,19 @@ func ConvertClaudeResponseToOpenAIResponses(ctx context.Context, modelName strin
 		st.FuncArgsBuf = make(map[int]*strings.Builder)
 		st.FuncNames = make(map[int]string)
 		st.FuncCallIDs = make(map[int]string)
+		st.InputTokens = 0
+		st.OutputTokens = 0
+		st.UsageSeen = false
+		if usage := msg.Get("usage"); usage.Exists() {
+			if v := usage.Get("input_tokens"); v.Exists() {
+				st.InputTokens = v.Int()
+				st.UsageSeen = true
+			}
+			if v := usage.Get("output_tokens"); v.Exists() {
+				st.OutputTokens = v.Int()
+				st.UsageSeen = true
+			}
+		}
 		// response.created
 		created := `{"type":"response.created","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"in_progress","background":false,"error":null,"instructions":""}}`
 		created, _ = sjson.Set(created, "sequence_number", nextSeq())
@@ -227,7 +244,6 @@ func ConvertClaudeResponseToOpenAIResponses(ctx context.Context, modelName strin
 			out = append(out, emitEvent("response.output_item.done", itemDone))
 			st.InFuncBlock = false
 		} else if st.ReasoningActive {
-			// close reasoning
 			full := st.ReasoningBuf.String()
 			textDone := `{"type":"response.reasoning_summary_text.done","sequence_number":0,"item_id":"","output_index":0,"summary_index":0,"text":""}`
 			textDone, _ = sjson.Set(textDone, "sequence_number", nextSeq())
@@ -244,7 +260,19 @@ func ConvertClaudeResponseToOpenAIResponses(ctx context.Context, modelName strin
 			st.ReasoningActive = false
 			st.ReasoningPartAdded = false
 		}
+	case "message_delta":
+		if usage := root.Get("usage"); usage.Exists() {
+			if v := usage.Get("output_tokens"); v.Exists() {
+				st.OutputTokens = v.Int()
+				st.UsageSeen = true
+			}
+			if v := usage.Get("input_tokens"); v.Exists() {
+				st.InputTokens = v.Int()
+				st.UsageSeen = true
+			}
+		}
 	case "message_stop":

 		completed := `{"type":"response.completed","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"completed","background":false,"error":null}}`
 		completed, _ = sjson.Set(completed, "sequence_number", nextSeq())
 		completed, _ = sjson.Set(completed, "response.id", st.ResponseID)
@@ -381,6 +409,24 @@ func ConvertClaudeResponseToOpenAIResponses(ctx context.Context, modelName strin
 		if len(outputs) > 0 {
 			completed, _ = sjson.Set(completed, "response.output", outputs)
 		}
+
+		reasoningTokens := int64(0)
+		if st.ReasoningBuf.Len() > 0 {
+			reasoningTokens = int64(st.ReasoningBuf.Len() / 4)
+		}
+		usagePresent := st.UsageSeen || reasoningTokens > 0
+		if usagePresent {
+			completed, _ = sjson.Set(completed, "response.usage.input_tokens", st.InputTokens)
+			completed, _ = sjson.Set(completed, "response.usage.input_tokens_details.cached_tokens", 0)
+			completed, _ = sjson.Set(completed, "response.usage.output_tokens", st.OutputTokens)
+			if reasoningTokens > 0 {
+				completed, _ = sjson.Set(completed, "response.usage.output_tokens_details.reasoning_tokens", reasoningTokens)
+			}
+			total := st.InputTokens + st.OutputTokens
+			if total > 0 || st.UsageSeen {
+				completed, _ = sjson.Set(completed, "response.usage.total_tokens", total)
+			}
+		}
 		out = append(out, emitEvent("response.completed", completed))
 	}

@@ -399,8 +445,8 @@ func ConvertClaudeResponseToOpenAIResponsesNonStream(_ context.Context, _ string
 	// Use a simple scanner to iterate through raw bytes
 	// Note: extremely large responses may require increasing the buffer
 	scanner := bufio.NewScanner(bytes.NewReader(rawJSON))
-	buf := make([]byte, 10240*1024)
+	buf := make([]byte, 20_971_520)
-	scanner.Buffer(buf, 10240*1024)
+	scanner.Buffer(buf, 20_971_520)
 	for scanner.Scan() {
 		line := scanner.Bytes()
 		if !bytes.HasPrefix(line, dataTag) {
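With the aggregation above, the final `response.completed` event can now carry a usage block. A small standalone sketch of how that block is assembled with the same sjson calls (the token counts are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/tidwall/sjson"
)

func main() {
	completed := `{"type":"response.completed","response":{"status":"completed"}}`
	inputTokens, outputTokens, reasoningTokens := int64(120), int64(45), int64(12)

	// Mirrors the fields set on message_stop when usage was seen.
	completed, _ = sjson.Set(completed, "response.usage.input_tokens", inputTokens)
	completed, _ = sjson.Set(completed, "response.usage.input_tokens_details.cached_tokens", 0)
	completed, _ = sjson.Set(completed, "response.usage.output_tokens", outputTokens)
	completed, _ = sjson.Set(completed, "response.usage.output_tokens_details.reasoning_tokens", reasoningTokens)
	completed, _ = sjson.Set(completed, "response.usage.total_tokens", inputTokens+outputTokens)

	fmt.Println(completed)
}
```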
@@ -181,8 +181,8 @@ func ConvertCodexResponseToClaude(_ context.Context, _ string, originalRequestRa
 // - string: A Claude Code-compatible JSON response containing all message content and metadata
 func ConvertCodexResponseToClaudeNonStream(_ context.Context, _ string, originalRequestRawJSON, _ []byte, rawJSON []byte, _ *any) string {
 	scanner := bufio.NewScanner(bytes.NewReader(rawJSON))
-	buffer := make([]byte, 10240*1024)
+	buffer := make([]byte, 20_971_520)
-	scanner.Buffer(buffer, 10240*1024)
+	scanner.Buffer(buffer, 20_971_520)
 	revNames := buildReverseMapFromClaudeOriginalShortToOriginal(originalRequestRawJSON)

 	for scanner.Scan() {
@@ -153,8 +153,8 @@ func ConvertCodexResponseToGemini(_ context.Context, modelName string, originalR
 // - string: A Gemini-compatible JSON response containing all message content and metadata
 func ConvertCodexResponseToGeminiNonStream(_ context.Context, modelName string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, _ *any) string {
 	scanner := bufio.NewScanner(bytes.NewReader(rawJSON))
-	buffer := make([]byte, 10240*1024)
+	buffer := make([]byte, 20_971_520)
-	scanner.Buffer(buffer, 10240*1024)
+	scanner.Buffer(buffer, 20_971_520)
 	for scanner.Scan() {
 		line := scanner.Bytes()
 		// log.Debug(string(line))
@@ -17,6 +17,9 @@ func ConvertOpenAIResponsesRequestToCodex(modelName string, inputRawJSON []byte,
 	rawJSON, _ = sjson.SetBytes(rawJSON, "store", false)
 	rawJSON, _ = sjson.SetBytes(rawJSON, "parallel_tool_calls", true)
 	rawJSON, _ = sjson.SetBytes(rawJSON, "include", []string{"reasoning.encrypted_content"})
+	// Codex Responses rejects token limit fields, so strip them out before forwarding.
+	rawJSON, _ = sjson.DeleteBytes(rawJSON, "max_output_tokens")
+	rawJSON, _ = sjson.DeleteBytes(rawJSON, "max_completion_tokens")
 	rawJSON, _ = sjson.DeleteBytes(rawJSON, "temperature")
 	rawJSON, _ = sjson.DeleteBytes(rawJSON, "top_p")

@@ -31,9 +34,17 @@
 	}

 	inputResult := gjson.GetBytes(rawJSON, "input")
-	inputResults := []gjson.Result{}
+	var inputResults []gjson.Result
-	if inputResult.Exists() && inputResult.IsArray() {
+	if inputResult.Exists() {
+		if inputResult.IsArray() {
 			inputResults = inputResult.Array()
+		} else if inputResult.Type == gjson.String {
+			newInput := `[{"type":"message","role":"user","content":[{"type":"input_text","text":""}]}]`
+			newInput, _ = sjson.Set(newInput, "0.content.0.text", inputResult.String())
+			inputResults = gjson.Parse(newInput).Array()
+		}
+	} else {
+		inputResults = []gjson.Result{}
 	}

 	extractedSystemInstructions := false
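The change above also normalises a bare-string `input` into the single-message array Codex expects. A standalone sketch of that normalisation with gjson/sjson (the model name and prompt are placeholders):

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
	"github.com/tidwall/sjson"
)

func main() {
	// The Responses API allows "input" to be a bare string; Codex expects a message array.
	raw := `{"model":"example-model","input":"hello there"}`

	inputResult := gjson.Get(raw, "input")
	if inputResult.Type == gjson.String {
		newInput := `[{"type":"message","role":"user","content":[{"type":"input_text","text":""}]}]`
		newInput, _ = sjson.Set(newInput, "0.content.0.text", inputResult.String())
		raw, _ = sjson.SetRaw(raw, "input", newInput)
	}

	fmt.Println(raw)
	// {"model":"example-model","input":[{"type":"message","role":"user","content":[{"type":"input_text","text":"hello there"}]}]}
}
```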
@@ -30,8 +30,8 @@ func ConvertCodexResponseToOpenAIResponses(ctx context.Context, modelName string
 // from a non-streaming OpenAI Chat Completions response.
 func ConvertCodexResponseToOpenAIResponsesNonStream(_ context.Context, modelName string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, _ *any) string {
 	scanner := bufio.NewScanner(bytes.NewReader(rawJSON))
-	buffer := make([]byte, 10240*1024)
+	buffer := make([]byte, 20_971_520)
-	scanner.Buffer(buffer, 10240*1024)
+	scanner.Buffer(buffer, 20_971_520)
 	dataTag := []byte("data:")
 	for scanner.Scan() {
 		line := scanner.Bytes()
@@ -25,7 +25,6 @@ import (
 // Returns:
 // - []byte: The transformed request data in Gemini CLI API format
 func ConvertOpenAIRequestToGeminiCLI(modelName string, inputRawJSON []byte, _ bool) []byte {
-	log.Debug("ConvertOpenAIRequestToGeminiCLI")
 	rawJSON := bytes.Clone(inputRawJSON)
 	// Base envelope
 	out := []byte(`{"project":"","request":{"contents":[],"generationConfig":{"thinkingConfig":{"include_thoughts":true}}},"model":"gemini-2.5-pro"}`)
@@ -8,6 +8,7 @@ package chat_completions
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -100,6 +101,10 @@ func ConvertCliResponseToOpenAI(_ context.Context, _ string, originalRequestRawJ
|
|||||||
partResult := partResults[i]
|
partResult := partResults[i]
|
||||||
partTextResult := partResult.Get("text")
|
partTextResult := partResult.Get("text")
|
||||||
functionCallResult := partResult.Get("functionCall")
|
functionCallResult := partResult.Get("functionCall")
|
||||||
|
inlineDataResult := partResult.Get("inlineData")
|
||||||
|
if !inlineDataResult.Exists() {
|
||||||
|
inlineDataResult = partResult.Get("inline_data")
|
||||||
|
}
|
||||||
|
|
||||||
if partTextResult.Exists() {
|
if partTextResult.Exists() {
|
||||||
// Handle text content, distinguishing between regular content and reasoning/thoughts.
|
// Handle text content, distinguishing between regular content and reasoning/thoughts.
|
||||||
@@ -125,6 +130,34 @@ func ConvertCliResponseToOpenAI(_ context.Context, _ string, originalRequestRawJ
|
|||||||
}
|
}
|
||||||
template, _ = sjson.Set(template, "choices.0.delta.role", "assistant")
|
template, _ = sjson.Set(template, "choices.0.delta.role", "assistant")
|
||||||
template, _ = sjson.SetRaw(template, "choices.0.delta.tool_calls.-1", functionCallTemplate)
|
template, _ = sjson.SetRaw(template, "choices.0.delta.tool_calls.-1", functionCallTemplate)
|
||||||
|
} else if inlineDataResult.Exists() {
|
||||||
|
data := inlineDataResult.Get("data").String()
|
||||||
|
if data == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
mimeType := inlineDataResult.Get("mimeType").String()
|
||||||
|
if mimeType == "" {
|
||||||
|
mimeType = inlineDataResult.Get("mime_type").String()
|
||||||
|
}
|
||||||
|
if mimeType == "" {
|
||||||
|
mimeType = "image/png"
|
||||||
|
}
|
||||||
|
imageURL := fmt.Sprintf("data:%s;base64,%s", mimeType, data)
|
||||||
|
imagePayload, err := json.Marshal(map[string]any{
|
||||||
|
"type": "image_url",
|
||||||
|
"image_url": map[string]string{
|
||||||
|
"url": imageURL,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
imagesResult := gjson.Get(template, "choices.0.delta.images")
|
||||||
|
if !imagesResult.Exists() || !imagesResult.IsArray() {
|
||||||
|
template, _ = sjson.SetRaw(template, "choices.0.delta.images", `[]`)
|
||||||
|
}
|
||||||
|
template, _ = sjson.Set(template, "choices.0.delta.role", "assistant")
|
||||||
|
template, _ = sjson.SetRaw(template, "choices.0.delta.images.-1", string(imagePayload))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
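Note: the block added above forwards Gemini `inlineData` parts to OpenAI-style streaming output by wrapping the base64 payload in a data URL and appending an `image_url` entry. A hedged, standalone sketch of that conversion step (the sample input is invented):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// Pretend this came from a Gemini "inlineData" part.
	mimeType := "image/png"
	data := base64.StdEncoding.EncodeToString([]byte("fake image bytes"))

	// Embed the payload as a data URL, then wrap it in an image_url object.
	imageURL := fmt.Sprintf("data:%s;base64,%s", mimeType, data)
	payload, err := json.Marshal(map[string]any{
		"type": "image_url",
		"image_url": map[string]string{
			"url": imageURL,
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}
```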
@@ -78,12 +78,21 @@ func ConvertGeminiResponseToOpenAIResponses(_ context.Context, modelName string,
 textDone, _ = sjson.Set(textDone, "output_index", st.ReasoningIndex)
 textDone, _ = sjson.Set(textDone, "text", full)
 out = append(out, emitEvent("response.reasoning_summary_text.done", textDone))

 partDone := `{"type":"response.reasoning_summary_part.done","sequence_number":0,"item_id":"","output_index":0,"summary_index":0,"part":{"type":"summary_text","text":""}}`
 partDone, _ = sjson.Set(partDone, "sequence_number", nextSeq())
 partDone, _ = sjson.Set(partDone, "item_id", st.ReasoningItemID)
 partDone, _ = sjson.Set(partDone, "output_index", st.ReasoningIndex)
 partDone, _ = sjson.Set(partDone, "part.text", full)
 out = append(out, emitEvent("response.reasoning_summary_part.done", partDone))
+
+itemDone := `{"type":"response.output_item.done","sequence_number":0,"output_index":0,"item":{"id":"","type":"reasoning","encrypted_content":"","summary":[{"type":"summary_text","text":""}]}}`
+itemDone, _ = sjson.Set(itemDone, "sequence_number", nextSeq())
+itemDone, _ = sjson.Set(itemDone, "item.id", st.ReasoningItemID)
+itemDone, _ = sjson.Set(itemDone, "output_index", st.ReasoningIndex)
+itemDone, _ = sjson.Set(itemDone, "item.summary.0.text", full)
+out = append(out, emitEvent("response.output_item.done", itemDone))
+
 st.ReasoningClosed = true
 }

@@ -414,6 +423,25 @@ func ConvertGeminiResponseToOpenAIResponses(_ context.Context, modelName string,
 completed, _ = sjson.Set(completed, "response.output", outputs)
 }

+// usage mapping
+if um := root.Get("usageMetadata"); um.Exists() {
+// input tokens = prompt + thoughts
+input := um.Get("promptTokenCount").Int() + um.Get("thoughtsTokenCount").Int()
+completed, _ = sjson.Set(completed, "response.usage.input_tokens", input)
+// cached_tokens not provided by Gemini; default to 0 for structure compatibility
+completed, _ = sjson.Set(completed, "response.usage.input_tokens_details.cached_tokens", 0)
+// output tokens
+if v := um.Get("candidatesTokenCount"); v.Exists() {
+completed, _ = sjson.Set(completed, "response.usage.output_tokens", v.Int())
+}
+if v := um.Get("thoughtsTokenCount"); v.Exists() {
+completed, _ = sjson.Set(completed, "response.usage.output_tokens_details.reasoning_tokens", v.Int())
+}
+if v := um.Get("totalTokenCount"); v.Exists() {
+completed, _ = sjson.Set(completed, "response.usage.total_tokens", v.Int())
+}
+}
+
 out = append(out, emitEvent("response.completed", completed))
 }
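Note: the usage mapping above folds Gemini `usageMetadata` into a Responses-style `usage` object, counting thought tokens as input and reporting them again under `reasoning_tokens`. A small standalone sketch of the same mapping with gjson/sjson over an invented payload:

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
	"github.com/tidwall/sjson"
)

func main() {
	gemini := `{"usageMetadata":{"promptTokenCount":12,"thoughtsTokenCount":5,"candidatesTokenCount":40,"totalTokenCount":57}}`
	completed := `{"response":{"usage":{}}}`

	um := gjson.Get(gemini, "usageMetadata")
	// Input side: prompt tokens plus thought tokens, as in the diff above.
	input := um.Get("promptTokenCount").Int() + um.Get("thoughtsTokenCount").Int()
	completed, _ = sjson.Set(completed, "response.usage.input_tokens", input)
	completed, _ = sjson.Set(completed, "response.usage.output_tokens", um.Get("candidatesTokenCount").Int())
	completed, _ = sjson.Set(completed, "response.usage.output_tokens_details.reasoning_tokens", um.Get("thoughtsTokenCount").Int())
	completed, _ = sjson.Set(completed, "response.usage.total_tokens", um.Get("totalTokenCount").Int())

	fmt.Println(completed)
}
```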
@@ -32,6 +32,13 @@ type oaiToResponsesState struct {
 // function item done state
 FuncArgsDone map[int]bool
 FuncItemDone map[int]bool
+// usage aggregation
+PromptTokens int64
+CachedTokens int64
+CompletionTokens int64
+TotalTokens int64
+ReasoningTokens int64
+UsageSeen bool
 }

 func emitRespEvent(event string, payload string) string {

@@ -66,6 +73,35 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context,
 return []string{}
 }

+if usage := root.Get("usage"); usage.Exists() {
+if v := usage.Get("prompt_tokens"); v.Exists() {
+st.PromptTokens = v.Int()
+st.UsageSeen = true
+}
+if v := usage.Get("prompt_tokens_details.cached_tokens"); v.Exists() {
+st.CachedTokens = v.Int()
+st.UsageSeen = true
+}
+if v := usage.Get("completion_tokens"); v.Exists() {
+st.CompletionTokens = v.Int()
+st.UsageSeen = true
+} else if v := usage.Get("output_tokens"); v.Exists() {
+st.CompletionTokens = v.Int()
+st.UsageSeen = true
+}
+if v := usage.Get("output_tokens_details.reasoning_tokens"); v.Exists() {
+st.ReasoningTokens = v.Int()
+st.UsageSeen = true
+} else if v := usage.Get("completion_tokens_details.reasoning_tokens"); v.Exists() {
+st.ReasoningTokens = v.Int()
+st.UsageSeen = true
+}
+if v := usage.Get("total_tokens"); v.Exists() {
+st.TotalTokens = v.Int()
+st.UsageSeen = true
+}
+}
+
 nextSeq := func() int { st.Seq++; return st.Seq }
 var out []string

@@ -85,6 +121,12 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context,
 st.MsgItemDone = make(map[int]bool)
 st.FuncArgsDone = make(map[int]bool)
 st.FuncItemDone = make(map[int]bool)
+st.PromptTokens = 0
+st.CachedTokens = 0
+st.CompletionTokens = 0
+st.TotalTokens = 0
+st.ReasoningTokens = 0
+st.UsageSeen = false
 // response.created
 created := `{"type":"response.created","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"in_progress","background":false,"error":null}}`
 created, _ = sjson.Set(created, "sequence_number", nextSeq())

@@ -503,6 +545,19 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context,
 if len(outputs) > 0 {
 completed, _ = sjson.Set(completed, "response.output", outputs)
 }
+if st.UsageSeen {
+completed, _ = sjson.Set(completed, "response.usage.input_tokens", st.PromptTokens)
+completed, _ = sjson.Set(completed, "response.usage.input_tokens_details.cached_tokens", st.CachedTokens)
+completed, _ = sjson.Set(completed, "response.usage.output_tokens", st.CompletionTokens)
+if st.ReasoningTokens > 0 {
+completed, _ = sjson.Set(completed, "response.usage.output_tokens_details.reasoning_tokens", st.ReasoningTokens)
+}
+total := st.TotalTokens
+if total == 0 {
+total = st.PromptTokens + st.CompletionTokens
+}
+completed, _ = sjson.Set(completed, "response.usage.total_tokens", total)
+}
 out = append(out, emitRespEvent("response.completed", completed))
 }

@@ -7,13 +7,17 @@ import (
 "context"
 "fmt"
 "sync"
+"sync/atomic"
 "time"

 "github.com/gin-gonic/gin"
 coreusage "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/usage"
 )

+var statisticsEnabled atomic.Bool
+
 func init() {
+statisticsEnabled.Store(true)
 coreusage.RegisterPlugin(NewLoggerPlugin())
 }

@@ -36,12 +40,21 @@ func NewLoggerPlugin() *LoggerPlugin { return &LoggerPlugin{stats: defaultReques
 // - ctx: The context for the usage record
 // - record: The usage record to aggregate
 func (p *LoggerPlugin) HandleUsage(ctx context.Context, record coreusage.Record) {
+if !statisticsEnabled.Load() {
+return
+}
 if p == nil || p.stats == nil {
 return
 }
 p.stats.Record(ctx, record)
 }

+// SetStatisticsEnabled toggles whether in-memory statistics are recorded.
+func SetStatisticsEnabled(enabled bool) { statisticsEnabled.Store(enabled) }
+
+// StatisticsEnabled reports the current recording state.
+func StatisticsEnabled() bool { return statisticsEnabled.Load() }
+
 // RequestStatistics maintains aggregated request metrics in memory.
 type RequestStatistics struct {
 mu sync.RWMutex

@@ -138,6 +151,9 @@ func (s *RequestStatistics) Record(ctx context.Context, record coreusage.Record)
 if s == nil {
 return
 }
+if !statisticsEnabled.Load() {
+return
+}
 timestamp := record.RequestedAt
 if timestamp.IsZero() {
 timestamp = time.Now()
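Note: the statistics plugin above gates recording behind a package-level `atomic.Bool`, so the switch can be flipped at runtime without a mutex. A minimal illustration of that toggle pattern (names here are invented, not the repository's):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

var recordingEnabled atomic.Bool

func record(event string) {
	if !recordingEnabled.Load() {
		return // cheap early exit when the feature is switched off
	}
	fmt.Println("recorded:", event)
}

func main() {
	recordingEnabled.Store(true)
	record("request-1") // recorded

	recordingEnabled.Store(false)
	record("request-2") // silently dropped
}
```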
@@ -26,7 +26,7 @@ import (
 //
 // Returns:
 // - []string: All provider identifiers capable of serving the model, ordered by preference.
-func GetProviderName(modelName string, cfg *config.Config) []string {
+func GetProviderName(modelName string) []string {
 if modelName == "" {
 return nil
 }

@@ -9,7 +9,7 @@ import (
 "net/http"
 "net/url"

-"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
+"github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
 log "github.com/sirupsen/logrus"
 "golang.org/x/net/proxy"
 )

@@ -17,7 +17,7 @@ import (
 // SetProxy configures the provided HTTP client with proxy settings from the configuration.
 // It supports SOCKS5, HTTP, and HTTPS proxies. The function modifies the client's transport
 // to route requests through the configured proxy server.
-func SetProxy(cfg *config.Config, httpClient *http.Client) *http.Client {
+func SetProxy(cfg *config.SDKConfig, httpClient *http.Client) *http.Client {
 var transport *http.Transport
 // Attempt to parse the proxy URL from the configuration.
 proxyURL, errParse := url.Parse(cfg.ProxyURL)

@@ -25,9 +25,12 @@ func SetProxy(cfg *config.Config, httpClient *http.Client) *http.Client {
 // Handle different proxy schemes.
 if proxyURL.Scheme == "socks5" {
 // Configure SOCKS5 proxy with optional authentication.
+var proxyAuth *proxy.Auth
+if proxyURL.User != nil {
 username := proxyURL.User.Username()
 password, _ := proxyURL.User.Password()
-proxyAuth := &proxy.Auth{User: username, Password: password}
+proxyAuth = &proxy.Auth{User: username, Password: password}
+}
 dialer, errSOCKS5 := proxy.SOCKS5("tcp", proxyURL.Host, proxyAuth, proxy.Direct)
 if errSOCKS5 != nil {
 log.Errorf("create SOCKS5 dialer failed: %v", errSOCKS5)
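Note: the fix above declares `proxyAuth` as a nil `*proxy.Auth` and only fills it when the proxy URL actually carries user info, instead of always reading `proxyURL.User`. A standalone sketch of optional SOCKS5 authentication with `golang.org/x/net/proxy` (the surrounding helper is illustrative, not the repository's):

```go
package main

import (
	"fmt"
	"net/url"

	"golang.org/x/net/proxy"
)

func socks5Dialer(raw string) (proxy.Dialer, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return nil, err
	}
	// Only build credentials when the URL actually carries them;
	// otherwise pass nil so the dialer performs no authentication.
	var auth *proxy.Auth
	if u.User != nil {
		password, _ := u.User.Password()
		auth = &proxy.Auth{User: u.User.Username(), Password: password}
	}
	return proxy.SOCKS5("tcp", u.Host, auth, proxy.Direct)
}

func main() {
	d, err := socks5Dialer("socks5://127.0.0.1:1080")
	fmt.Println(d != nil, err)
}
```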
@@ -120,7 +120,7 @@ func GetIPAddress() string {
 func PrintSSHTunnelInstructions(port int) {
 ipAddress := GetIPAddress()
 border := "================================================================================"
-log.Infof("To authenticate from a remote machine, an SSH tunnel may be required.")
+fmt.Println("To authenticate from a remote machine, an SSH tunnel may be required.")
 fmt.Println(border)
 fmt.Println(" Run one of the following commands on your local machine (NOT the server):")
 fmt.Println()

@@ -4,6 +4,7 @@
 package util

 import (
+"fmt"
 "io/fs"
 "os"
 "path/filepath"

@@ -30,23 +31,42 @@ func SetLogLevel(cfg *config.Config) {
 }
 }

-// CountAuthFiles returns the number of JSON auth files located under the provided directory.
-// The function resolves leading tildes to the user's home directory and performs a case-insensitive
-// match on the ".json" suffix so that files saved with uppercase extensions are also counted.
-func CountAuthFiles(authDir string) int {
+// ResolveAuthDir normalizes the auth directory path for consistent reuse throughout the app.
+// It expands a leading tilde (~) to the user's home directory and returns a cleaned path.
+func ResolveAuthDir(authDir string) (string, error) {
 if authDir == "" {
-return 0
+return "", nil
 }
 if strings.HasPrefix(authDir, "~") {
 home, err := os.UserHomeDir()
 if err != nil {
-log.Debugf("countAuthFiles: failed to resolve home directory: %v", err)
+return "", fmt.Errorf("resolve auth dir: %w", err)
+}
+remainder := strings.TrimPrefix(authDir, "~")
+remainder = strings.TrimLeft(remainder, "/\\")
+if remainder == "" {
+return filepath.Clean(home), nil
+}
+normalized := strings.ReplaceAll(remainder, "\\", "/")
+return filepath.Clean(filepath.Join(home, filepath.FromSlash(normalized))), nil
+}
+return filepath.Clean(authDir), nil
+}
+
+// CountAuthFiles returns the number of JSON auth files located under the provided directory.
+// The function resolves leading tildes to the user's home directory and performs a case-insensitive
+// match on the ".json" suffix so that files saved with uppercase extensions are also counted.
+func CountAuthFiles(authDir string) int {
+dir, err := ResolveAuthDir(authDir)
+if err != nil {
+log.Debugf("countAuthFiles: failed to resolve auth directory: %v", err)
 return 0
 }
-authDir = filepath.Join(home, authDir[1:])
+if dir == "" {
+return 0
 }
 count := 0
-walkErr := filepath.WalkDir(authDir, func(path string, d fs.DirEntry, err error) error {
+walkErr := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
 if err != nil {
 log.Debugf("countAuthFiles: error accessing %s: %v", path, err)
 return nil
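Note: `ResolveAuthDir` above centralizes tilde expansion so the auth-file counter and the watcher resolve `~/...` the same way. The sketch below re-implements the idea as a standalone helper for illustration; it is not the repository's function:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// expandTilde is an illustrative stand-in for the ResolveAuthDir behaviour:
// "~" or "~/x" is rebased onto the user's home directory, everything else is cleaned.
func expandTilde(dir string) (string, error) {
	if dir == "" {
		return "", nil
	}
	if !strings.HasPrefix(dir, "~") {
		return filepath.Clean(dir), nil
	}
	home, err := os.UserHomeDir()
	if err != nil {
		return "", fmt.Errorf("resolve dir: %w", err)
	}
	rest := strings.TrimLeft(strings.TrimPrefix(dir, "~"), "/\\")
	if rest == "" {
		return filepath.Clean(home), nil
	}
	return filepath.Clean(filepath.Join(home, filepath.FromSlash(rest))), nil
}

func main() {
	for _, d := range []string{"~", "~/auths", "/etc/cliproxy", ""} {
		resolved, err := expandTilde(d)
		fmt.Printf("%-15q -> %q err=%v\n", d, resolved, err)
	}
}
```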
@@ -14,6 +14,7 @@ import (
 "os"
 "path/filepath"
 "reflect"
+"sort"
 "strings"
 "sync"
 "time"

@@ -52,6 +53,39 @@ type Watcher struct {
 dispatchCancel context.CancelFunc
 }

+type stableIDGenerator struct {
+counters map[string]int
+}
+
+func newStableIDGenerator() *stableIDGenerator {
+return &stableIDGenerator{counters: make(map[string]int)}
+}
+
+func (g *stableIDGenerator) next(kind string, parts ...string) (string, string) {
+if g == nil {
+return kind + ":000000000000", "000000000000"
+}
+hasher := sha256.New()
+hasher.Write([]byte(kind))
+for _, part := range parts {
+trimmed := strings.TrimSpace(part)
+hasher.Write([]byte{0})
+hasher.Write([]byte(trimmed))
+}
+digest := hex.EncodeToString(hasher.Sum(nil))
+if len(digest) < 12 {
+digest = fmt.Sprintf("%012s", digest)
+}
+short := digest[:12]
+key := kind + ":" + short
+index := g.counters[key]
+g.counters[key] = index + 1
+if index > 0 {
+short = fmt.Sprintf("%s-%d", short, index)
+}
+return fmt.Sprintf("%s:%s", kind, short), short
+}
+
 // AuthUpdateAction represents the type of change detected in auth sources.
 type AuthUpdateAction string
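Note: `stableIDGenerator` above derives auth IDs from a SHA-256 digest of the key material rather than the slice index, so reordering config entries should no longer change their identity; a counter suffix separates exact duplicates. A trimmed, standalone sketch of the hashing part:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// stableID returns "<kind>:<first 12 hex chars of sha256(kind, parts...)>".
func stableID(kind string, parts ...string) string {
	h := sha256.New()
	h.Write([]byte(kind))
	for _, p := range parts {
		h.Write([]byte{0}) // separator so ("ab","c") hashes differently from ("a","bc")
		h.Write([]byte(strings.TrimSpace(p)))
	}
	return kind + ":" + hex.EncodeToString(h.Sum(nil))[:12]
}

func main() {
	// Same inputs always give the same ID, regardless of config order.
	fmt.Println(stableID("claude:apikey", "sk-test", "https://example.com"))
	fmt.Println(stableID("claude:apikey", "sk-test", "https://example.com"))
}
```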
@@ -112,7 +146,7 @@ func (w *Watcher) Start(ctx context.Context) error {
 go w.processEvents(ctx)

 // Perform an initial full reload based on current config and auth dir
-w.reloadClients()
+w.reloadClients(true)
 return nil
 }

@@ -320,6 +354,20 @@ func normalizeAuth(a *coreauth.Auth) *coreauth.Auth {
 return clone
 }

+// computeOpenAICompatModelsHash returns a stable hash for the compatibility models so that
+// changes to the model list trigger auth updates during hot reload.
+func computeOpenAICompatModelsHash(models []config.OpenAICompatibilityModel) string {
+if len(models) == 0 {
+return ""
+}
+data, err := json.Marshal(models)
+if err != nil || len(data) == 0 {
+return ""
+}
+sum := sha256.Sum256(data)
+return hex.EncodeToString(sum[:])
+}
+
 // SetClients sets the file-based clients.
 // SetClients removed
 // SetAPIKeyClients removed

@@ -380,17 +428,24 @@ func (w *Watcher) handleEvent(event fsnotify.Event) {
 log.Debugf("config file content unchanged (hash match), skipping reload")
 return
 }
-log.Infof("config file changed, reloading: %s", w.configPath)
+fmt.Printf("config file changed, reloading: %s\n", w.configPath)
 if w.reloadConfig() {
+finalHash := newHash
+if updatedData, errRead := os.ReadFile(w.configPath); errRead == nil && len(updatedData) > 0 {
+sumUpdated := sha256.Sum256(updatedData)
+finalHash = hex.EncodeToString(sumUpdated[:])
+} else if errRead != nil {
+log.WithError(errRead).Debug("failed to compute updated config hash after reload")
+}
 w.clientsMutex.Lock()
-w.lastConfigHash = newHash
+w.lastConfigHash = finalHash
 w.clientsMutex.Unlock()
 }
 return
 }

 // Handle auth directory changes incrementally (.json only)
-log.Infof("auth file changed (%s): %s, processing incrementally", event.Op.String(), filepath.Base(event.Name))
+fmt.Printf("auth file changed (%s): %s, processing incrementally\n", event.Op.String(), filepath.Base(event.Name))
 if event.Op&fsnotify.Create == fsnotify.Create || event.Op&fsnotify.Write == fsnotify.Write {
 w.addOrUpdateClient(event.Name)
 } else if event.Op&fsnotify.Remove == fsnotify.Remove {
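Note: the hunk above re-reads the config after a successful reload and stores the resulting hash, so a reload that itself rewrites the file on disk is not immediately treated as another change. The underlying pattern is plain content-hash change detection; a minimal sketch:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"os"
)

// fileHash returns the hex SHA-256 of a file's contents, or "" on error.
func fileHash(path string) string {
	data, err := os.ReadFile(path)
	if err != nil || len(data) == 0 {
		return ""
	}
	sum := sha256.Sum256(data)
	return hex.EncodeToString(sum[:])
}

func main() {
	f, err := os.CreateTemp("", "config-*.yaml")
	if err != nil {
		panic(err)
	}
	path := f.Name()
	f.Close()
	defer os.Remove(path)

	_ = os.WriteFile(path, []byte("port: 8317\n"), 0o644)
	last := fileHash(path)

	// An identical rewrite produces the same hash, so a watcher can skip the reload.
	_ = os.WriteFile(path, []byte("port: 8317\n"), 0o644)
	fmt.Println("unchanged:", fileHash(path) == last)
}
```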
@@ -408,6 +463,7 @@ func (w *Watcher) handleEvent(event fsnotify.Event) {

 // reloadConfig reloads the configuration and triggers a full reload
 func (w *Watcher) reloadConfig() bool {
+log.Debug("=========================== CONFIG RELOAD ============================")
 log.Debugf("starting config reload from: %s", w.configPath)

 newConfig, errLoadConfig := config.LoadConfig(w.configPath)

@@ -416,6 +472,12 @@ func (w *Watcher) reloadConfig() bool {
 return false
 }

+if resolvedAuthDir, errResolveAuthDir := util.ResolveAuthDir(newConfig.AuthDir); errResolveAuthDir != nil {
+log.Errorf("failed to resolve auth directory from config: %v", errResolveAuthDir)
+} else {
+newConfig.AuthDir = resolvedAuthDir
+}
+
 w.clientsMutex.Lock()
 oldConfig := w.config
 w.config = newConfig

@@ -459,6 +521,9 @@ func (w *Watcher) reloadConfig() bool {
 if oldConfig.GeminiWeb.DisableContinuationHint != newConfig.GeminiWeb.DisableContinuationHint {
 log.Debugf(" gemini-web.disable-continuation-hint: %t -> %t", oldConfig.GeminiWeb.DisableContinuationHint, newConfig.GeminiWeb.DisableContinuationHint)
 }
+if oldConfig.GeminiWeb.GemMode != newConfig.GeminiWeb.GemMode {
+log.Debugf(" gemini-web.gem-mode: %s -> %s", oldConfig.GeminiWeb.GemMode, newConfig.GeminiWeb.GemMode)
+}
 if oldConfig.GeminiWeb.CodeMode != newConfig.GeminiWeb.CodeMode {
 log.Debugf(" gemini-web.code-mode: %t -> %t", oldConfig.GeminiWeb.CodeMode, newConfig.GeminiWeb.CodeMode)
 }

@@ -477,17 +542,49 @@ func (w *Watcher) reloadConfig() bool {
 if oldConfig.RemoteManagement.AllowRemote != newConfig.RemoteManagement.AllowRemote {
 log.Debugf(" remote-management.allow-remote: %t -> %t", oldConfig.RemoteManagement.AllowRemote, newConfig.RemoteManagement.AllowRemote)
 }
+if oldConfig.RemoteManagement.SecretKey != newConfig.RemoteManagement.SecretKey {
+switch {
+case oldConfig.RemoteManagement.SecretKey == "" && newConfig.RemoteManagement.SecretKey != "":
+log.Debug(" remote-management.secret-key: created")
+case oldConfig.RemoteManagement.SecretKey != "" && newConfig.RemoteManagement.SecretKey == "":
+log.Debug(" remote-management.secret-key: deleted")
+default:
+log.Debug(" remote-management.secret-key: updated")
 }
+if newConfig.RemoteManagement.SecretKey == "" {
+log.Info("management routes will be disabled after secret key removal")
+} else {
+log.Info("management routes will be enabled after secret key update")
+}
+}
+if oldConfig.RemoteManagement.DisableControlPanel != newConfig.RemoteManagement.DisableControlPanel {
+log.Debugf(" remote-management.disable-control-panel: %t -> %t", oldConfig.RemoteManagement.DisableControlPanel, newConfig.RemoteManagement.DisableControlPanel)
+}
+if oldConfig.LoggingToFile != newConfig.LoggingToFile {
+log.Debugf(" logging-to-file: %t -> %t", oldConfig.LoggingToFile, newConfig.LoggingToFile)
+}
+if oldConfig.UsageStatisticsEnabled != newConfig.UsageStatisticsEnabled {
+log.Debugf(" usage-statistics-enabled: %t -> %t", oldConfig.UsageStatisticsEnabled, newConfig.UsageStatisticsEnabled)
+}
+if changes := diffOpenAICompatibility(oldConfig.OpenAICompatibility, newConfig.OpenAICompatibility); len(changes) > 0 {
+log.Debugf(" openai-compatibility:")
+for _, change := range changes {
+log.Debugf(" %s", change)
+}
+}
+}

+authDirChanged := oldConfig == nil || oldConfig.AuthDir != newConfig.AuthDir
+
 log.Infof("config successfully reloaded, triggering client reload")
 // Reload clients with new config
-w.reloadClients()
+w.reloadClients(authDirChanged)
 return true
 }

 // reloadClients performs a full scan and reload of all clients.
-func (w *Watcher) reloadClients() {
+func (w *Watcher) reloadClients(rescanAuth bool) {
-log.Debugf("starting full client reload process")
+log.Debugf("starting full client load process")

 w.clientsMutex.RLock()
 cfg := w.config
@@ -503,21 +600,34 @@ func (w *Watcher) reloadClients() {

 // Create new API key clients based on the new config
 glAPIKeyCount, claudeAPIKeyCount, codexAPIKeyCount, openAICompatCount := BuildAPIKeyClients(cfg)
-log.Debugf("created %d new API key clients", 0)
+totalAPIKeyClients := glAPIKeyCount + claudeAPIKeyCount + codexAPIKeyCount + openAICompatCount
+log.Debugf("loaded %d API key clients", totalAPIKeyClients)

-// Load file-based clients
-authFileCount := w.loadFileClients(cfg)
-log.Debugf("loaded %d new file-based clients", 0)
+var authFileCount int
+if rescanAuth {
+// Load file-based clients when explicitly requested (startup or authDir change)
+authFileCount = w.loadFileClients(cfg)
+log.Debugf("loaded %d file-based clients", authFileCount)
+} else {
+// Preserve existing auth hashes and only report current known count to avoid redundant scans.
+w.clientsMutex.RLock()
+authFileCount = len(w.lastAuthHashes)
+w.clientsMutex.RUnlock()
+log.Debugf("skipping auth directory rescan; retaining %d existing auth files", authFileCount)
+}

 // no legacy file-based clients to unregister

 // Update client maps
+if rescanAuth {
 w.clientsMutex.Lock()

 // Rebuild auth file hash cache for current clients
 w.lastAuthHashes = make(map[string]string)
-// Recompute hashes for current auth files
-_ = filepath.Walk(cfg.AuthDir, func(path string, info fs.FileInfo, err error) error {
+if resolvedAuthDir, errResolveAuthDir := util.ResolveAuthDir(cfg.AuthDir); errResolveAuthDir != nil {
+log.Errorf("failed to resolve auth directory for hash cache: %v", errResolveAuthDir)
+} else if resolvedAuthDir != "" {
+_ = filepath.Walk(resolvedAuthDir, func(path string, info fs.FileInfo, err error) error {
 if err != nil {
 return nil
 }

@@ -529,14 +639,21 @@ func (w *Watcher) reloadClients() {
 }
 return nil
 })
+}
 w.clientsMutex.Unlock()
+}

 totalNewClients := authFileCount + glAPIKeyCount + claudeAPIKeyCount + codexAPIKeyCount + openAICompatCount

+// Ensure consumers observe the new configuration before auth updates dispatch.
+if w.reloadCallback != nil {
+log.Debugf("triggering server update callback before auth refresh")
+w.reloadCallback(cfg)
+}
+
 w.refreshAuthState()

-log.Infof("full client reload complete - old: %d clients, new: %d clients (%d auth files + %d GL API keys + %d Claude API keys + %d Codex keys + %d OpenAI-compat)",
+log.Infof("full client load complete - %d clients (%d auth files + %d GL API keys + %d Claude API keys + %d Codex keys + %d OpenAI-compat)",
-0,
 totalNewClients,
 authFileCount,
 glAPIKeyCount,

@@ -544,12 +661,6 @@ func (w *Watcher) reloadClients() {
 codexAPIKeyCount,
 openAICompatCount,
 )

-// Trigger the callback to update the server
-if w.reloadCallback != nil {
-log.Debugf("triggering server update callback")
-w.reloadCallback(cfg)
-}
 }

 // createClientFromFile creates a single client instance from a given token file path.

@@ -621,6 +732,7 @@ func (w *Watcher) removeClient(path string) {
 func (w *Watcher) SnapshotCoreAuths() []*coreauth.Auth {
 out := make([]*coreauth.Auth, 0, 32)
 now := time.Now()
+idGen := newStableIDGenerator()
 // Also synthesize auth entries for OpenAI-compatibility providers directly from config
 w.clientsMutex.RLock()
 cfg := w.config

@@ -628,14 +740,18 @@ func (w *Watcher) SnapshotCoreAuths() []*coreauth.Auth {
 if cfg != nil {
 // Gemini official API keys -> synthesize auths
 for i := range cfg.GlAPIKey {
-k := cfg.GlAPIKey[i]
+k := strings.TrimSpace(cfg.GlAPIKey[i])
+if k == "" {
+continue
+}
+id, token := idGen.next("gemini:apikey", k)
 a := &coreauth.Auth{
-ID: fmt.Sprintf("gemini:apikey:%d", i),
+ID: id,
 Provider: "gemini",
 Label: "gemini-apikey",
 Status: coreauth.StatusActive,
 Attributes: map[string]string{
-"source": fmt.Sprintf("config:gemini#%d", i),
+"source": fmt.Sprintf("config:gemini[%s]", token),
 "api_key": k,
 },
 CreatedAt: now,

@@ -646,18 +762,25 @@ func (w *Watcher) SnapshotCoreAuths() []*coreauth.Auth {
 // Claude API keys -> synthesize auths
 for i := range cfg.ClaudeKey {
 ck := cfg.ClaudeKey[i]
+key := strings.TrimSpace(ck.APIKey)
+if key == "" {
+continue
+}
+id, token := idGen.next("claude:apikey", key, ck.BaseURL)
 attrs := map[string]string{
-"source": fmt.Sprintf("config:claude#%d", i),
-"api_key": ck.APIKey,
+"source": fmt.Sprintf("config:claude[%s]", token),
+"api_key": key,
 }
 if ck.BaseURL != "" {
 attrs["base_url"] = ck.BaseURL
 }
+proxyURL := strings.TrimSpace(ck.ProxyURL)
 a := &coreauth.Auth{
-ID: fmt.Sprintf("claude:apikey:%d", i),
+ID: id,
 Provider: "claude",
 Label: "claude-apikey",
 Status: coreauth.StatusActive,
+ProxyURL: proxyURL,
 Attributes: attrs,
 CreatedAt: now,
 UpdatedAt: now,

@@ -667,18 +790,25 @@ func (w *Watcher) SnapshotCoreAuths() []*coreauth.Auth {
 // Codex API keys -> synthesize auths
 for i := range cfg.CodexKey {
 ck := cfg.CodexKey[i]
+key := strings.TrimSpace(ck.APIKey)
+if key == "" {
+continue
+}
+id, token := idGen.next("codex:apikey", key, ck.BaseURL)
 attrs := map[string]string{
-"source": fmt.Sprintf("config:codex#%d", i),
-"api_key": ck.APIKey,
+"source": fmt.Sprintf("config:codex[%s]", token),
+"api_key": key,
 }
 if ck.BaseURL != "" {
 attrs["base_url"] = ck.BaseURL
 }
+proxyURL := strings.TrimSpace(ck.ProxyURL)
 a := &coreauth.Auth{
-ID: fmt.Sprintf("codex:apikey:%d", i),
+ID: id,
 Provider: "codex",
 Label: "codex-apikey",
 Status: coreauth.StatusActive,
+ProxyURL: proxyURL,
 Attributes: attrs,
 CreatedAt: now,
 UpdatedAt: now,
@@ -691,21 +821,92 @@ func (w *Watcher) SnapshotCoreAuths() []*coreauth.Auth {
 if providerName == "" {
 providerName = "openai-compatibility"
 }
-base := compat.BaseURL
-for j := range compat.APIKeys {
-key := compat.APIKeys[j]
+base := strings.TrimSpace(compat.BaseURL)
+// Handle new APIKeyEntries format (preferred)
+createdEntries := 0
+if len(compat.APIKeyEntries) > 0 {
+for j := range compat.APIKeyEntries {
+entry := &compat.APIKeyEntries[j]
+key := strings.TrimSpace(entry.APIKey)
+proxyURL := strings.TrimSpace(entry.ProxyURL)
+idKind := fmt.Sprintf("openai-compatibility:%s", providerName)
+id, token := idGen.next(idKind, key, base, proxyURL)
+attrs := map[string]string{
+"source": fmt.Sprintf("config:%s[%s]", providerName, token),
+"base_url": base,
+"compat_name": compat.Name,
+"provider_key": providerName,
+}
+if key != "" {
+attrs["api_key"] = key
+}
+if hash := computeOpenAICompatModelsHash(compat.Models); hash != "" {
+attrs["models_hash"] = hash
+}
 a := &coreauth.Auth{
-ID: fmt.Sprintf("openai-compatibility:%s:%d", compat.Name, j),
+ID: id,
 Provider: providerName,
 Label: compat.Name,
 Status: coreauth.StatusActive,
-Attributes: map[string]string{
-"source": fmt.Sprintf("config:%s#%d", compat.Name, j),
+ProxyURL: proxyURL,
+Attributes: attrs,
+CreatedAt: now,
+UpdatedAt: now,
+}
+out = append(out, a)
+createdEntries++
+}
+} else {
+// Handle legacy APIKeys format for backward compatibility
+for j := range compat.APIKeys {
+key := strings.TrimSpace(compat.APIKeys[j])
+if key == "" {
+continue
+}
+idKind := fmt.Sprintf("openai-compatibility:%s", providerName)
+id, token := idGen.next(idKind, key, base)
+attrs := map[string]string{
+"source": fmt.Sprintf("config:%s[%s]", providerName, token),
 "base_url": base,
-"api_key": key,
 "compat_name": compat.Name,
 "provider_key": providerName,
-},
+}
+attrs["api_key"] = key
+if hash := computeOpenAICompatModelsHash(compat.Models); hash != "" {
+attrs["models_hash"] = hash
+}
+a := &coreauth.Auth{
+ID: id,
+Provider: providerName,
+Label: compat.Name,
+Status: coreauth.StatusActive,
+Attributes: attrs,
+CreatedAt: now,
+UpdatedAt: now,
+}
+out = append(out, a)
+createdEntries++
+}
+}
+if createdEntries == 0 {
+idKind := fmt.Sprintf("openai-compatibility:%s", providerName)
+id, token := idGen.next(idKind, base)
+attrs := map[string]string{
+"source": fmt.Sprintf("config:%s[%s]", providerName, token),
+"base_url": base,
+"compat_name": compat.Name,
+"provider_key": providerName,
+}
+if hash := computeOpenAICompatModelsHash(compat.Models); hash != "" {
+attrs["models_hash"] = hash
+}
+a := &coreauth.Auth{
+ID: id,
+Provider: providerName,
+Label: compat.Name,
+Status: coreauth.StatusActive,
+Attributes: attrs,
 CreatedAt: now,
 UpdatedAt: now,
 }
@@ -779,14 +980,13 @@ func (w *Watcher) loadFileClients(cfg *config.Config) int {
 authFileCount := 0
 successfulAuthCount := 0

-authDir := cfg.AuthDir
-if strings.HasPrefix(authDir, "~") {
-home, err := os.UserHomeDir()
-if err != nil {
-log.Errorf("failed to get home directory: %v", err)
+authDir, errResolveAuthDir := util.ResolveAuthDir(cfg.AuthDir)
+if errResolveAuthDir != nil {
+log.Errorf("failed to resolve auth directory: %v", errResolveAuthDir)
 return 0
 }
-authDir = filepath.Join(home, authDir[1:])
+if authDir == "" {
+return 0
 }

 errWalk := filepath.Walk(authDir, func(path string, info fs.FileInfo, err error) error {

@@ -831,8 +1031,139 @@ func BuildAPIKeyClients(cfg *config.Config) (int, int, int, int) {
 if len(cfg.OpenAICompatibility) > 0 {
 // Do not construct legacy clients for OpenAI-compat providers; these are handled by the stateless executor.
 for _, compatConfig := range cfg.OpenAICompatibility {
+// Count from new APIKeyEntries format if present, otherwise fall back to legacy APIKeys
+if len(compatConfig.APIKeyEntries) > 0 {
+openAICompatCount += len(compatConfig.APIKeyEntries)
+} else {
 openAICompatCount += len(compatConfig.APIKeys)
 }
 }
+}
 return glAPIKeyCount, claudeAPIKeyCount, codexAPIKeyCount, openAICompatCount
 }

+func diffOpenAICompatibility(oldList, newList []config.OpenAICompatibility) []string {
+changes := make([]string, 0)
+oldMap := make(map[string]config.OpenAICompatibility, len(oldList))
+oldLabels := make(map[string]string, len(oldList))
+for idx, entry := range oldList {
+key, label := openAICompatKey(entry, idx)
+oldMap[key] = entry
+oldLabels[key] = label
+}
+newMap := make(map[string]config.OpenAICompatibility, len(newList))
+newLabels := make(map[string]string, len(newList))
+for idx, entry := range newList {
+key, label := openAICompatKey(entry, idx)
+newMap[key] = entry
+newLabels[key] = label
+}
+keySet := make(map[string]struct{}, len(oldMap)+len(newMap))
+for key := range oldMap {
+keySet[key] = struct{}{}
+}
+for key := range newMap {
+keySet[key] = struct{}{}
+}
+orderedKeys := make([]string, 0, len(keySet))
+for key := range keySet {
+orderedKeys = append(orderedKeys, key)
+}
+sort.Strings(orderedKeys)
+for _, key := range orderedKeys {
+oldEntry, oldOk := oldMap[key]
+newEntry, newOk := newMap[key]
+label := oldLabels[key]
+if label == "" {
+label = newLabels[key]
+}
+switch {
+case !oldOk:
+changes = append(changes, fmt.Sprintf("provider added: %s (api-keys=%d, models=%d)", label, countAPIKeys(newEntry), countOpenAIModels(newEntry.Models)))
+case !newOk:
+changes = append(changes, fmt.Sprintf("provider removed: %s (api-keys=%d, models=%d)", label, countAPIKeys(oldEntry), countOpenAIModels(oldEntry.Models)))
+default:
+if detail := describeOpenAICompatibilityUpdate(oldEntry, newEntry); detail != "" {
+changes = append(changes, fmt.Sprintf("provider updated: %s %s", label, detail))
+}
+}
+}
+return changes
+}
+
+func describeOpenAICompatibilityUpdate(oldEntry, newEntry config.OpenAICompatibility) string {
+oldKeyCount := countAPIKeys(oldEntry)
+newKeyCount := countAPIKeys(newEntry)
+oldModelCount := countOpenAIModels(oldEntry.Models)
+newModelCount := countOpenAIModels(newEntry.Models)
+details := make([]string, 0, 2)
+if oldKeyCount != newKeyCount {
+details = append(details, fmt.Sprintf("api-keys %d -> %d", oldKeyCount, newKeyCount))
+}
+if oldModelCount != newModelCount {
+details = append(details, fmt.Sprintf("models %d -> %d", oldModelCount, newModelCount))
+}
+if len(details) == 0 {
+return ""
+}
+return "(" + strings.Join(details, ", ") + ")"
+}
+
+func countAPIKeys(entry config.OpenAICompatibility) int {
+// Prefer new APIKeyEntries format
+if len(entry.APIKeyEntries) > 0 {
+count := 0
+for _, keyEntry := range entry.APIKeyEntries {
+if strings.TrimSpace(keyEntry.APIKey) != "" {
+count++
+}
+}
+return count
+}
+// Fall back to legacy APIKeys format
+return countNonEmptyStrings(entry.APIKeys)
+}
+
+func countNonEmptyStrings(values []string) int {
+count := 0
+for _, value := range values {
+if strings.TrimSpace(value) != "" {
+count++
+}
+}
+return count
+}
+
+func countOpenAIModels(models []config.OpenAICompatibilityModel) int {
+count := 0
+for _, model := range models {
+name := strings.TrimSpace(model.Name)
+alias := strings.TrimSpace(model.Alias)
+if name == "" && alias == "" {
+continue
+}
+count++
+}
+return count
+}
+
+func openAICompatKey(entry config.OpenAICompatibility, index int) (string, string) {
+name := strings.TrimSpace(entry.Name)
+if name != "" {
+return "name:" + name, name
+}
+base := strings.TrimSpace(entry.BaseURL)
+if base != "" {
+return "base:" + base, base
+}
+for _, model := range entry.Models {
+alias := strings.TrimSpace(model.Alias)
+if alias == "" {
+alias = strings.TrimSpace(model.Name)
+}
+if alias != "" {
+return "alias:" + alias, alias
+}
+}
+return fmt.Sprintf("index:%d", index), fmt.Sprintf("entry-%d", index+1)
+}
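Note: `diffOpenAICompatibility` above keys providers by name, then base URL, then a model alias, and walks the sorted union of old and new keys to classify each entry as added, removed, or updated. A generic sketch of that classification with simplified types (not the repository's):

```go
package main

import (
	"fmt"
	"sort"
)

type provider struct{ keys, models int }

func diffProviders(oldM, newM map[string]provider) []string {
	// Union of keys from both snapshots.
	union := map[string]struct{}{}
	for k := range oldM {
		union[k] = struct{}{}
	}
	for k := range newM {
		union[k] = struct{}{}
	}
	keys := make([]string, 0, len(union))
	for k := range union {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic log output

	var changes []string
	for _, k := range keys {
		o, oldOK := oldM[k]
		n, newOK := newM[k]
		switch {
		case !oldOK:
			changes = append(changes, fmt.Sprintf("provider added: %s", k))
		case !newOK:
			changes = append(changes, fmt.Sprintf("provider removed: %s", k))
		case o != n:
			changes = append(changes, fmt.Sprintf("provider updated: %s (api-keys %d -> %d, models %d -> %d)", k, o.keys, n.keys, o.models, n.models))
		}
	}
	return changes
}

func main() {
	oldM := map[string]provider{"openrouter": {1, 1}, "iflow": {1, 3}}
	newM := map[string]provider{"openrouter": {2, 1}, "groq": {1, 5}}
	for _, c := range diffProviders(oldM, newM) {
		fmt.Println(c)
	}
}
```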
@@ -6,7 +6,7 @@ import (
 "net/http"
 "sync"

-"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
+"github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
 )

 // Provider validates credentials for incoming requests.

@@ -23,7 +23,7 @@ type Result struct {
 }

 // ProviderFactory builds a provider from configuration data.
-type ProviderFactory func(cfg *config.AccessProvider, root *config.Config) (Provider, error)
+type ProviderFactory func(cfg *config.AccessProvider, root *config.SDKConfig) (Provider, error)

 var (
 registryMu sync.RWMutex

@@ -40,7 +40,7 @@ func RegisterProvider(typ string, factory ProviderFactory) {
 registryMu.Unlock()
 }

-func buildProvider(cfg *config.AccessProvider, root *config.Config) (Provider, error) {
+func BuildProvider(cfg *config.AccessProvider, root *config.SDKConfig) (Provider, error) {
 if cfg == nil {
 return nil, fmt.Errorf("access: nil provider config")
 }

@@ -58,7 +58,7 @@ func buildProvider(cfg *config.AccessProvider, root *config.Config) (Provider, e
 }

 // BuildProviders constructs providers declared in configuration.
-func BuildProviders(root *config.Config) ([]Provider, error) {
+func BuildProviders(root *config.SDKConfig) ([]Provider, error) {
 if root == nil {
 return nil, nil
 }

@@ -68,16 +68,15 @@ func BuildProviders(root *config.Config) ([]Provider, error) {
 if providerCfg.Type == "" {
 continue
 }
-provider, err := buildProvider(providerCfg, root)
+provider, err := BuildProvider(providerCfg, root)
 if err != nil {
 return nil, err
 }
 providers = append(providers, provider)
 }
-if len(providers) == 0 && len(root.APIKeys) > 0 {
-config.SyncInlineAPIKeys(root, root.APIKeys)
-if providerCfg := root.ConfigAPIKeyProvider(); providerCfg != nil {
-provider, err := buildProvider(providerCfg, root)
+if len(providers) == 0 {
+if inline := config.MakeInlineAPIKeyProvider(root.APIKeys); inline != nil {
+provider, err := BuildProvider(inline, root)
 if err != nil {
 return nil, err
 }
@@ -14,10 +14,10 @@ import (
 "time"

 "github.com/gin-gonic/gin"
-"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers"
 . "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
+"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
 "github.com/tidwall/gjson"
 )

@@ -14,10 +14,10 @@ import (
 "time"

 "github.com/gin-gonic/gin"
-"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers"
 . "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/util"
+"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
 log "github.com/sirupsen/logrus"
 "github.com/tidwall/gjson"
 )

@@ -13,10 +13,10 @@ import (
 "time"

 "github.com/gin-gonic/gin"
-"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers"
 . "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
+"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
 )

 // GeminiAPIHandler contains the handlers for Gemini API endpoints.

@@ -8,11 +8,12 @@ import (
 "net/http"

 "github.com/gin-gonic/gin"
-"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
+conversation "github.com/router-for-me/CLIProxyAPI/v6/internal/provider/gemini-web/conversation"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/util"
 coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
 coreexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
+"github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
 sdktranslator "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
 "golang.org/x/net/context"
 )
@@ -45,9 +46,11 @@ type BaseAPIHandler struct {
 AuthManager *coreauth.Manager

 // Cfg holds the current application configuration.
-Cfg *config.Config
+Cfg *config.SDKConfig
 }

+const geminiWebProvider = "gemini-web"
+
 // NewBaseAPIHandlers creates a new API handlers instance.
 // It takes a slice of clients and configuration as input.
 //

@@ -57,7 +60,7 @@ type BaseAPIHandler struct {
 //
 // Returns:
 // - *BaseAPIHandler: A new API handlers instance
-func NewBaseAPIHandlers(cfg *config.Config, authManager *coreauth.Manager) *BaseAPIHandler {
+func NewBaseAPIHandlers(cfg *config.SDKConfig, authManager *coreauth.Manager) *BaseAPIHandler {
 return &BaseAPIHandler{
 Cfg: cfg,
 AuthManager: authManager,

@@ -70,7 +73,7 @@ func NewBaseAPIHandlers(cfg *config.Config, authManager *coreauth.Manager) *Base
 // Parameters:
 // - clients: The new slice of AI service clients
 // - cfg: The new application configuration
-func (h *BaseAPIHandler) UpdateClients(cfg *config.Config) { h.Cfg = cfg }
+func (h *BaseAPIHandler) UpdateClients(cfg *config.SDKConfig) { h.Cfg = cfg }

 // GetAlt extracts the 'alt' parameter from the request query string.
 // It checks both 'alt' and '$alt' parameters and returns the appropriate value.

@@ -133,10 +136,11 @@ func (h *BaseAPIHandler) GetContextWithCancel(handler interfaces.APIHandler, c *
 // ExecuteWithAuthManager executes a non-streaming request via the core auth manager.
 // This path is the only supported execution route.
 func (h *BaseAPIHandler) ExecuteWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) ([]byte, *interfaces.ErrorMessage) {
-providers := util.GetProviderName(modelName, h.Cfg)
+providers := util.GetProviderName(modelName)
 if len(providers) == 0 {
 return nil, &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
 }
+metadata := h.buildGeminiWebMetadata(handlerType, providers, rawJSON)
 req := coreexecutor.Request{
 Model: modelName,
 Payload: cloneBytes(rawJSON),

@@ -146,6 +150,7 @@ func (h *BaseAPIHandler) ExecuteWithAuthManager(ctx context.Context, handlerType
 Alt: alt,
 OriginalRequest: cloneBytes(rawJSON),
 SourceFormat: sdktranslator.FromString(handlerType),
+Metadata: metadata,
 }
 resp, err := h.AuthManager.Execute(ctx, providers, req, opts)
 if err != nil {

@@ -157,10 +162,11 @@ func (h *BaseAPIHandler) ExecuteCountWithAuthManager(ctx context.Context, handlerType
 // ExecuteCountWithAuthManager executes a non-streaming request via the core auth manager.
 // This path is the only supported execution route.
 func (h *BaseAPIHandler) ExecuteCountWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) ([]byte, *interfaces.ErrorMessage) {
-providers := util.GetProviderName(modelName, h.Cfg)
|
providers := util.GetProviderName(modelName)
|
||||||
if len(providers) == 0 {
|
if len(providers) == 0 {
|
||||||
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
|
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
|
||||||
}
|
}
|
||||||
|
metadata := h.buildGeminiWebMetadata(handlerType, providers, rawJSON)
|
||||||
req := coreexecutor.Request{
|
req := coreexecutor.Request{
|
||||||
Model: modelName,
|
Model: modelName,
|
||||||
Payload: cloneBytes(rawJSON),
|
Payload: cloneBytes(rawJSON),
|
||||||
@@ -170,6 +176,7 @@ func (h *BaseAPIHandler) ExecuteCountWithAuthManager(ctx context.Context, handle
|
|||||||
Alt: alt,
|
Alt: alt,
|
||||||
OriginalRequest: cloneBytes(rawJSON),
|
OriginalRequest: cloneBytes(rawJSON),
|
||||||
SourceFormat: sdktranslator.FromString(handlerType),
|
SourceFormat: sdktranslator.FromString(handlerType),
|
||||||
|
Metadata: metadata,
|
||||||
}
|
}
|
||||||
resp, err := h.AuthManager.ExecuteCount(ctx, providers, req, opts)
|
resp, err := h.AuthManager.ExecuteCount(ctx, providers, req, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -181,13 +188,14 @@ func (h *BaseAPIHandler) ExecuteCountWithAuthManager(ctx context.Context, handle
|
|||||||
// ExecuteStreamWithAuthManager executes a streaming request via the core auth manager.
|
// ExecuteStreamWithAuthManager executes a streaming request via the core auth manager.
|
||||||
// This path is the only supported execution route.
|
// This path is the only supported execution route.
|
||||||
func (h *BaseAPIHandler) ExecuteStreamWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) (<-chan []byte, <-chan *interfaces.ErrorMessage) {
|
func (h *BaseAPIHandler) ExecuteStreamWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) (<-chan []byte, <-chan *interfaces.ErrorMessage) {
|
||||||
providers := util.GetProviderName(modelName, h.Cfg)
|
providers := util.GetProviderName(modelName)
|
||||||
if len(providers) == 0 {
|
if len(providers) == 0 {
|
||||||
errChan := make(chan *interfaces.ErrorMessage, 1)
|
errChan := make(chan *interfaces.ErrorMessage, 1)
|
||||||
errChan <- &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
|
errChan <- &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
|
||||||
close(errChan)
|
close(errChan)
|
||||||
return nil, errChan
|
return nil, errChan
|
||||||
}
|
}
|
||||||
|
metadata := h.buildGeminiWebMetadata(handlerType, providers, rawJSON)
|
||||||
req := coreexecutor.Request{
|
req := coreexecutor.Request{
|
||||||
Model: modelName,
|
Model: modelName,
|
||||||
Payload: cloneBytes(rawJSON),
|
Payload: cloneBytes(rawJSON),
|
||||||
@@ -197,6 +205,7 @@ func (h *BaseAPIHandler) ExecuteStreamWithAuthManager(ctx context.Context, handl
|
|||||||
Alt: alt,
|
Alt: alt,
|
||||||
OriginalRequest: cloneBytes(rawJSON),
|
OriginalRequest: cloneBytes(rawJSON),
|
||||||
SourceFormat: sdktranslator.FromString(handlerType),
|
SourceFormat: sdktranslator.FromString(handlerType),
|
||||||
|
Metadata: metadata,
|
||||||
}
|
}
|
||||||
chunks, err := h.AuthManager.ExecuteStream(ctx, providers, req, opts)
|
chunks, err := h.AuthManager.ExecuteStream(ctx, providers, req, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -232,6 +241,18 @@ func cloneBytes(src []byte) []byte {
|
|||||||
return dst
|
return dst
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (h *BaseAPIHandler) buildGeminiWebMetadata(handlerType string, providers []string, rawJSON []byte) map[string]any {
|
||||||
|
if !util.InArray(providers, geminiWebProvider) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
meta := make(map[string]any)
|
||||||
|
msgs := conversation.ExtractMessages(handlerType, rawJSON)
|
||||||
|
if len(msgs) > 0 {
|
||||||
|
meta[conversation.MetadataMessagesKey] = msgs
|
||||||
|
}
|
||||||
|
return meta
|
||||||
|
}
|
||||||
|
|
||||||
// WriteErrorResponse writes an error message to the response writer using the HTTP status embedded in the message.
|
// WriteErrorResponse writes an error message to the response writer using the HTTP status embedded in the message.
|
||||||
func (h *BaseAPIHandler) WriteErrorResponse(c *gin.Context, msg *interfaces.ErrorMessage) {
|
func (h *BaseAPIHandler) WriteErrorResponse(c *gin.Context, msg *interfaces.ErrorMessage) {
|
||||||
status := http.StatusInternalServerError
|
status := http.StatusInternalServerError
|
||||||
@@ -14,10 +14,10 @@ import (
 "time"

 "github.com/gin-gonic/gin"
-"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers"
 . "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
+"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
 "github.com/tidwall/gjson"
 "github.com/tidwall/sjson"
 )
@@ -14,10 +14,10 @@ import (
 "time"

 "github.com/gin-gonic/gin"
-"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers"
 . "github.com/router-for-me/CLIProxyAPI/v6/internal/constant"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
+"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
 "github.com/tidwall/gjson"
 )

@@ -13,6 +13,7 @@ import (
 "github.com/router-for-me/CLIProxyAPI/v6/internal/config"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/util"
+coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
 log "github.com/sirupsen/logrus"
 )

@@ -35,7 +36,7 @@ func (a *ClaudeAuthenticator) RefreshLead() *time.Duration {
 return &d
 }

-func (a *ClaudeAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*TokenRecord, error) {
+func (a *ClaudeAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, error) {
 if cfg == nil {
 return nil, fmt.Errorf("cliproxy auth: configuration is required")
 }
@@ -80,22 +81,22 @@ func (a *ClaudeAuthenticator) Login(ctx context.Context, cfg *config.Config, opt
 state = returnedState

 if !opts.NoBrowser {
-log.Info("Opening browser for Claude authentication")
+fmt.Println("Opening browser for Claude authentication")
 if !browser.IsAvailable() {
 log.Warn("No browser available; please open the URL manually")
 util.PrintSSHTunnelInstructions(a.CallbackPort)
-log.Infof("Visit the following URL to continue authentication:\n%s", authURL)
+fmt.Printf("Visit the following URL to continue authentication:\n%s\n", authURL)
 } else if err = browser.OpenURL(authURL); err != nil {
 log.Warnf("Failed to open browser automatically: %v", err)
 util.PrintSSHTunnelInstructions(a.CallbackPort)
-log.Infof("Visit the following URL to continue authentication:\n%s", authURL)
+fmt.Printf("Visit the following URL to continue authentication:\n%s\n", authURL)
 }
 } else {
 util.PrintSSHTunnelInstructions(a.CallbackPort)
-log.Infof("Visit the following URL to continue authentication:\n%s", authURL)
+fmt.Printf("Visit the following URL to continue authentication:\n%s\n", authURL)
 }

-log.Info("Waiting for Claude authentication callback...")
+fmt.Println("Waiting for Claude authentication callback...")

 result, err := oauthServer.WaitForCallback(5 * time.Minute)
 if err != nil {
@@ -127,16 +128,17 @@ func (a *ClaudeAuthenticator) Login(ctx context.Context, cfg *config.Config, opt
 }

 fileName := fmt.Sprintf("claude-%s.json", tokenStorage.Email)
-metadata := map[string]string{
+metadata := map[string]any{
 "email": tokenStorage.Email,
 }

-log.Info("Claude authentication successful")
+fmt.Println("Claude authentication successful")
 if authBundle.APIKey != "" {
-log.Info("Claude API key obtained and stored")
+fmt.Println("Claude API key obtained and stored")
 }

-return &TokenRecord{
+return &coreauth.Auth{
+ID: fileName,
 Provider: a.Provider(),
 FileName: fileName,
 Storage: tokenStorage,
@@ -13,6 +13,7 @@ import (
 "github.com/router-for-me/CLIProxyAPI/v6/internal/config"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/util"
+coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
 log "github.com/sirupsen/logrus"
 )

@@ -35,7 +36,7 @@ func (a *CodexAuthenticator) RefreshLead() *time.Duration {
 return &d
 }

-func (a *CodexAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*TokenRecord, error) {
+func (a *CodexAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, error) {
 if cfg == nil {
 return nil, fmt.Errorf("cliproxy auth: configuration is required")
 }
@@ -79,22 +80,22 @@ func (a *CodexAuthenticator) Login(ctx context.Context, cfg *config.Config, opts
 }

 if !opts.NoBrowser {
-log.Info("Opening browser for Codex authentication")
+fmt.Println("Opening browser for Codex authentication")
 if !browser.IsAvailable() {
 log.Warn("No browser available; please open the URL manually")
 util.PrintSSHTunnelInstructions(a.CallbackPort)
-log.Infof("Visit the following URL to continue authentication:\n%s", authURL)
+fmt.Printf("Visit the following URL to continue authentication:\n%s\n", authURL)
 } else if err = browser.OpenURL(authURL); err != nil {
 log.Warnf("Failed to open browser automatically: %v", err)
 util.PrintSSHTunnelInstructions(a.CallbackPort)
-log.Infof("Visit the following URL to continue authentication:\n%s", authURL)
+fmt.Printf("Visit the following URL to continue authentication:\n%s\n", authURL)
 }
 } else {
 util.PrintSSHTunnelInstructions(a.CallbackPort)
-log.Infof("Visit the following URL to continue authentication:\n%s", authURL)
+fmt.Printf("Visit the following URL to continue authentication:\n%s\n", authURL)
 }

-log.Info("Waiting for Codex authentication callback...")
+fmt.Println("Waiting for Codex authentication callback...")

 result, err := oauthServer.WaitForCallback(5 * time.Minute)
 if err != nil {
@@ -126,16 +127,17 @@ func (a *CodexAuthenticator) Login(ctx context.Context, cfg *config.Config, opts
 }

 fileName := fmt.Sprintf("codex-%s.json", tokenStorage.Email)
-metadata := map[string]string{
+metadata := map[string]any{
 "email": tokenStorage.Email,
 }

-log.Info("Codex authentication successful")
+fmt.Println("Codex authentication successful")
 if authBundle.APIKey != "" {
-log.Info("Codex API key obtained and stored")
+fmt.Println("Codex API key obtained and stored")
 }

-return &TokenRecord{
+return &coreauth.Auth{
+ID: fileName,
 Provider: a.Provider(),
 FileName: fileName,
 Storage: tokenStorage,
@@ -11,7 +11,6 @@ import (
 "sync"
 "time"

-"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
 cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
 )

@@ -35,27 +34,71 @@ func (s *FileTokenStore) SetBaseDir(dir string) {
 s.dirLock.Unlock()
 }

-// Save writes the token storage to the resolved file path.
+// Save persists token storage and metadata to the resolved auth file path.
-func (s *FileTokenStore) Save(ctx context.Context, cfg *config.Config, record *TokenRecord) (string, error) {
+func (s *FileTokenStore) Save(ctx context.Context, auth *cliproxyauth.Auth) (string, error) {
-if record == nil || record.Storage == nil {
+if auth == nil {
-return "", fmt.Errorf("cliproxy auth: token record is incomplete")
+return "", fmt.Errorf("auth filestore: auth is nil")
 }
-target := strings.TrimSpace(record.FileName)
-if target == "" {
+path, err := s.resolveAuthPath(auth)
-return "", fmt.Errorf("cliproxy auth: missing file name for provider %s", record.Provider)
+if err != nil {
-}
-if !filepath.IsAbs(target) {
-baseDir := s.baseDirFromConfig(cfg)
-if baseDir != "" {
-target = filepath.Join(baseDir, target)
-}
-}
-s.mu.Lock()
-defer s.mu.Unlock()
-if err := record.Storage.SaveTokenToFile(target); err != nil {
 return "", err
 }
-return target, nil
+if path == "" {
+return "", fmt.Errorf("auth filestore: missing file path attribute for %s", auth.ID)
+}
+
+if auth.Disabled {
+if _, statErr := os.Stat(path); os.IsNotExist(statErr) {
+return "", nil
+}
+}
+
+s.mu.Lock()
+defer s.mu.Unlock()
+
+if err = os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
+return "", fmt.Errorf("auth filestore: create dir failed: %w", err)
+}
+
+switch {
+case auth.Storage != nil:
+if err = auth.Storage.SaveTokenToFile(path); err != nil {
+return "", err
+}
+case auth.Metadata != nil:
+raw, errMarshal := json.Marshal(auth.Metadata)
+if errMarshal != nil {
+return "", fmt.Errorf("auth filestore: marshal metadata failed: %w", errMarshal)
+}
+if existing, errRead := os.ReadFile(path); errRead == nil {
+if jsonEqual(existing, raw) {
+return path, nil
+}
+} else if errRead != nil && !os.IsNotExist(errRead) {
+return "", fmt.Errorf("auth filestore: read existing failed: %w", errRead)
+}
+tmp := path + ".tmp"
+if errWrite := os.WriteFile(tmp, raw, 0o600); errWrite != nil {
+return "", fmt.Errorf("auth filestore: write temp failed: %w", errWrite)
+}
+if errRename := os.Rename(tmp, path); errRename != nil {
+return "", fmt.Errorf("auth filestore: rename failed: %w", errRename)
+}
+default:
+return "", fmt.Errorf("auth filestore: nothing to persist for %s", auth.ID)
+}
+
+if auth.Attributes == nil {
+auth.Attributes = make(map[string]string)
+}
+auth.Attributes["path"] = path
+
+if strings.TrimSpace(auth.FileName) == "" {
+auth.FileName = auth.ID
+}
+
+return path, nil
 }

 // List enumerates all auth JSON files under the configured directory.
@@ -90,50 +133,6 @@ func (s *FileTokenStore) List(ctx context.Context) ([]*cliproxyauth.Auth, error)
 return entries, nil
 }

-// SaveAuth writes the auth metadata back to its source file location.
-func (s *FileTokenStore) SaveAuth(ctx context.Context, auth *cliproxyauth.Auth) error {
-if auth == nil {
-return fmt.Errorf("auth filestore: auth is nil")
-}
-path, err := s.resolveAuthPath(auth)
-if err != nil {
-return err
-}
-if path == "" {
-return fmt.Errorf("auth filestore: missing file path attribute for %s", auth.ID)
-}
-// If the auth has been disabled and the original file was removed, avoid recreating it on disk.
-if auth.Disabled {
-if _, statErr := os.Stat(path); statErr != nil {
-if os.IsNotExist(statErr) {
-return nil
-}
-}
-}
-s.mu.Lock()
-defer s.mu.Unlock()
-if err = os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
-return fmt.Errorf("auth filestore: create dir failed: %w", err)
-}
-raw, err := json.Marshal(auth.Metadata)
-if err != nil {
-return fmt.Errorf("auth filestore: marshal metadata failed: %w", err)
-}
-if existing, errRead := os.ReadFile(path); errRead == nil {
-if jsonEqual(existing, raw) {
-return nil
-}
-}
-tmp := path + ".tmp"
-if err = os.WriteFile(tmp, raw, 0o600); err != nil {
-return fmt.Errorf("auth filestore: write temp failed: %w", err)
-}
-if err = os.Rename(tmp, path); err != nil {
-return fmt.Errorf("auth filestore: rename failed: %w", err)
-}
-return nil
-}
-
 // Delete removes the auth file.
 func (s *FileTokenStore) Delete(ctx context.Context, id string) error {
 id = strings.TrimSpace(id)
@@ -185,6 +184,7 @@ func (s *FileTokenStore) readAuthFile(path, baseDir string) (*cliproxyauth.Auth,
 auth := &cliproxyauth.Auth{
 ID: id,
 Provider: provider,
+FileName: id,
 Label: s.labelFor(metadata),
 Status: cliproxyauth.StatusActive,
 Attributes: map[string]string{"path": path},
@@ -220,6 +220,15 @@ func (s *FileTokenStore) resolveAuthPath(auth *cliproxyauth.Auth) (string, error
 return p, nil
 }
 }
+if fileName := strings.TrimSpace(auth.FileName); fileName != "" {
+if filepath.IsAbs(fileName) {
+return fileName, nil
+}
+if dir := s.baseDirSnapshot(); dir != "" {
+return filepath.Join(dir, fileName), nil
+}
+return fileName, nil
+}
 if auth.ID == "" {
 return "", fmt.Errorf("auth filestore: missing id")
 }
@@ -249,13 +258,6 @@ func (s *FileTokenStore) labelFor(metadata map[string]any) string {
 return ""
 }

-func (s *FileTokenStore) baseDirFromConfig(cfg *config.Config) string {
-if cfg != nil && strings.TrimSpace(cfg.AuthDir) != "" {
-return strings.TrimSpace(cfg.AuthDir)
-}
-return s.baseDirSnapshot()
-}
-
 func (s *FileTokenStore) baseDirSnapshot() string {
 s.dirLock.RLock()
 defer s.dirLock.RUnlock()
@@ -6,6 +6,7 @@ import (
 "time"

 "github.com/router-for-me/CLIProxyAPI/v6/internal/config"
+coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
 )

 // GeminiWebAuthenticator provides a minimal wrapper so core components can treat
@@ -16,7 +17,7 @@ func NewGeminiWebAuthenticator() *GeminiWebAuthenticator { return &GeminiWebAuth

 func (a *GeminiWebAuthenticator) Provider() string { return "gemini-web" }

-func (a *GeminiWebAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*TokenRecord, error) {
+func (a *GeminiWebAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, error) {
 _ = ctx
 _ = cfg
 _ = opts
@@ -24,6 +25,6 @@ func (a *GeminiWebAuthenticator) Login(ctx context.Context, cfg *config.Config,
 }

 func (a *GeminiWebAuthenticator) RefreshLead() *time.Duration {
-d := 15 * time.Minute
+d := time.Hour
 return &d
 }
@@ -8,7 +8,7 @@ import (
 "github.com/router-for-me/CLIProxyAPI/v6/internal/auth/gemini"
 // legacy client removed
 "github.com/router-for-me/CLIProxyAPI/v6/internal/config"
-log "github.com/sirupsen/logrus"
+coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
 )

 // GeminiAuthenticator implements the login flow for Google Gemini CLI accounts.
@@ -27,7 +27,7 @@ func (a *GeminiAuthenticator) RefreshLead() *time.Duration {
 return nil
 }

-func (a *GeminiAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*TokenRecord, error) {
+func (a *GeminiAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, error) {
 if cfg == nil {
 return nil, fmt.Errorf("cliproxy auth: configuration is required")
 }
@@ -52,14 +52,15 @@ func (a *GeminiAuthenticator) Login(ctx context.Context, cfg *config.Config, opt
 // Skip onboarding here; rely on upstream configuration

 fileName := fmt.Sprintf("%s-%s.json", ts.Email, ts.ProjectID)
-metadata := map[string]string{
+metadata := map[string]any{
 "email": ts.Email,
 "project_id": ts.ProjectID,
 }

-log.Info("Gemini authentication successful")
+fmt.Println("Gemini authentication successful")

-return &TokenRecord{
+return &coreauth.Auth{
+ID: fileName,
 Provider: a.Provider(),
 FileName: fileName,
 Storage: &ts,
sdk/auth/iflow.go (new file, 131 lines)
@@ -0,0 +1,131 @@
+package auth
+
+import (
+"context"
+"fmt"
+"strings"
+"time"
+
+"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/iflow"
+"github.com/router-for-me/CLIProxyAPI/v6/internal/browser"
+"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
+"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
+"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
+coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
+log "github.com/sirupsen/logrus"
+)
+
+// IFlowAuthenticator implements the OAuth login flow for iFlow accounts.
+type IFlowAuthenticator struct{}
+
+// NewIFlowAuthenticator constructs a new authenticator instance.
+func NewIFlowAuthenticator() *IFlowAuthenticator { return &IFlowAuthenticator{} }
+
+// Provider returns the provider key for the authenticator.
+func (a *IFlowAuthenticator) Provider() string { return "iflow" }
+
+// RefreshLead indicates how soon before expiry a refresh should be attempted.
+func (a *IFlowAuthenticator) RefreshLead() *time.Duration {
+d := 3 * time.Hour
+return &d
+}
+
+// Login performs the OAuth code flow using a local callback server.
+func (a *IFlowAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, error) {
+if cfg == nil {
+return nil, fmt.Errorf("cliproxy auth: configuration is required")
+}
+if ctx == nil {
+ctx = context.Background()
+}
+if opts == nil {
+opts = &LoginOptions{}
+}
+
+authSvc := iflow.NewIFlowAuth(cfg)
+
+oauthServer := iflow.NewOAuthServer(iflow.CallbackPort)
+if err := oauthServer.Start(); err != nil {
+if strings.Contains(err.Error(), "already in use") {
+return nil, fmt.Errorf("iflow authentication server port in use: %w", err)
+}
+return nil, fmt.Errorf("iflow authentication server failed: %w", err)
+}
+defer func() {
+stopCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+defer cancel()
+if stopErr := oauthServer.Stop(stopCtx); stopErr != nil {
+log.Warnf("iflow oauth server stop error: %v", stopErr)
+}
+}()
+
+state, err := misc.GenerateRandomState()
+if err != nil {
+return nil, fmt.Errorf("iflow auth: failed to generate state: %w", err)
+}
+
+authURL, redirectURI := authSvc.AuthorizationURL(state, iflow.CallbackPort)
+
+if !opts.NoBrowser {
+fmt.Println("Opening browser for iFlow authentication")
+if !browser.IsAvailable() {
+log.Warn("No browser available; please open the URL manually")
+util.PrintSSHTunnelInstructions(iflow.CallbackPort)
+fmt.Printf("Visit the following URL to continue authentication:\n%s\n", authURL)
+} else if err = browser.OpenURL(authURL); err != nil {
+log.Warnf("Failed to open browser automatically: %v", err)
+util.PrintSSHTunnelInstructions(iflow.CallbackPort)
+fmt.Printf("Visit the following URL to continue authentication:\n%s\n", authURL)
+}
+} else {
+util.PrintSSHTunnelInstructions(iflow.CallbackPort)
+fmt.Printf("Visit the following URL to continue authentication:\n%s\n", authURL)
+}
+
+fmt.Println("Waiting for iFlow authentication callback...")
+
+result, err := oauthServer.WaitForCallback(5 * time.Minute)
+if err != nil {
+return nil, fmt.Errorf("iflow auth: callback wait failed: %w", err)
+}
+if result.Error != "" {
+return nil, fmt.Errorf("iflow auth: provider returned error %s", result.Error)
+}
+if result.State != state {
+return nil, fmt.Errorf("iflow auth: state mismatch")
+}
+
+tokenData, err := authSvc.ExchangeCodeForTokens(ctx, result.Code, redirectURI)
+if err != nil {
+return nil, fmt.Errorf("iflow authentication failed: %w", err)
+}
+
+tokenStorage := authSvc.CreateTokenStorage(tokenData)
+
+email := strings.TrimSpace(tokenStorage.Email)
+if email == "" {
+return nil, fmt.Errorf("iflow authentication failed: missing account identifier")
+}
+
+fileName := fmt.Sprintf("iflow-%s.json", email)
+metadata := map[string]any{
+"email": email,
+"api_key": tokenStorage.APIKey,
+"access_token": tokenStorage.AccessToken,
+"refresh_token": tokenStorage.RefreshToken,
+"expired": tokenStorage.Expire,
+}
+
+fmt.Println("iFlow authentication successful")
+
+return &coreauth.Auth{
+ID: fileName,
+Provider: a.Provider(),
+FileName: fileName,
+Storage: tokenStorage,
+Metadata: metadata,
+Attributes: map[string]string{
+"api_key": tokenStorage.APIKey,
+},
+}, nil
+}
@@ -5,8 +5,8 @@ import (
 "errors"
 "time"

-baseauth "github.com/router-for-me/CLIProxyAPI/v6/internal/auth"
 "github.com/router-for-me/CLIProxyAPI/v6/internal/config"
+coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
 )

 var ErrRefreshNotSupported = errors.New("cliproxy auth: refresh not supported")
@@ -20,22 +20,9 @@ type LoginOptions struct {
 Prompt func(prompt string) (string, error)
 }

-// TokenRecord represents credential material produced by an authenticator.
-type TokenRecord struct {
-Provider string
-FileName string
-Storage baseauth.TokenStorage
-Metadata map[string]string
-}
-
-// TokenStore persists token records.
-type TokenStore interface {
-Save(ctx context.Context, cfg *config.Config, record *TokenRecord) (string, error)
-}
-
 // Authenticator manages login and optional refresh flows for a provider.
 type Authenticator interface {
 Provider() string
-Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*TokenRecord, error)
+Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, error)
 RefreshLead() *time.Duration
 }
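For illustration only (not from the repository): a minimal sketch of what a provider now has to supply under the reshaped contract above, where `Login` hands back a `*coreauth.Auth` directly instead of the removed `*TokenRecord`. The `staticAuthenticator` type and its values are invented for the example.

```go
package auth

import (
	"context"
	"time"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)

// staticAuthenticator is a hypothetical provider used only to illustrate the
// updated Authenticator interface: Login returns *coreauth.Auth, not *TokenRecord.
type staticAuthenticator struct{}

func (staticAuthenticator) Provider() string { return "static-example" }

// RefreshLead returns nil because this example never needs a token refresh.
func (staticAuthenticator) RefreshLead() *time.Duration { return nil }

func (staticAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, error) {
	// A real authenticator would run an OAuth/device flow here; this sketch
	// just fabricates a record with the fields the diff shows being populated.
	fileName := "static-example.json"
	return &coreauth.Auth{
		ID:       fileName,
		Provider: "static-example",
		FileName: fileName,
		Metadata: map[string]any{"email": "user@example.com"}, // illustrative value
	}, nil
}
```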
@@ -5,17 +5,18 @@ import (
 "fmt"

 "github.com/router-for-me/CLIProxyAPI/v6/internal/config"
+coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
 )

 // Manager aggregates authenticators and coordinates persistence via a token store.
 type Manager struct {
 authenticators map[string]Authenticator
-store TokenStore
+store coreauth.Store
 }

 // NewManager constructs a manager with the provided token store and authenticators.
 // If store is nil, the caller must set it later using SetStore.
-func NewManager(store TokenStore, authenticators ...Authenticator) *Manager {
+func NewManager(store coreauth.Store, authenticators ...Authenticator) *Manager {
 mgr := &Manager{
 authenticators: make(map[string]Authenticator),
 store: store,
@@ -38,12 +39,12 @@ func (m *Manager) Register(a Authenticator) {
 }

 // SetStore updates the token store used for persistence.
-func (m *Manager) SetStore(store TokenStore) {
+func (m *Manager) SetStore(store coreauth.Store) {
 m.store = store
 }

-// Login executes the provider login flow and persists the resulting token record.
+// Login executes the provider login flow and persists the resulting auth record.
-func (m *Manager) Login(ctx context.Context, provider string, cfg *config.Config, opts *LoginOptions) (*TokenRecord, string, error) {
+func (m *Manager) Login(ctx context.Context, provider string, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, string, error) {
 auth, ok := m.authenticators[provider]
 if !ok {
 return nil, "", fmt.Errorf("cliproxy auth: authenticator %s not registered", provider)
@@ -61,7 +62,13 @@ func (m *Manager) Login(ctx context.Context, provider string, cfg *config.Config
 return record, "", nil
 }

-savedPath, err := m.store.Save(ctx, cfg, record)
+if cfg != nil {
+if dirSetter, ok := m.store.(interface{ SetBaseDir(string) }); ok {
+dirSetter.SetBaseDir(cfg.AuthDir)
+}
+}
+
+savedPath, err := m.store.Save(ctx, record)
 if err != nil {
 return record, "", err
 }
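For illustration only (not from the repository): a rough caller-side sketch of the updated `Manager.Login`, which now yields the `*coreauth.Auth` record plus the path the store saved it to. The `examples` package name and `loginIFlow` helper are invented, and the sketch assumes it lives inside the CLIProxyAPI module so the `internal/config` import is legal.

```go
package examples // hypothetical package inside the CLIProxyAPI module

import (
	"context"
	"fmt"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	sdkauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
)

// loginIFlow runs the iFlow login flow headlessly and reports where the
// resulting credential file was persisted by the registered store.
func loginIFlow(ctx context.Context, cfg *config.Config) error {
	mgr := sdkauth.NewManager(sdkauth.GetTokenStore(), sdkauth.NewIFlowAuthenticator())

	record, savedPath, err := mgr.Login(ctx, "iflow", cfg, &sdkauth.LoginOptions{NoBrowser: true})
	if err != nil {
		return err
	}
	fmt.Printf("authenticated %s, auth saved to %s\n", record.ID, savedPath)
	return nil
}
```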
@@ -10,6 +10,7 @@ import (
 "github.com/router-for-me/CLIProxyAPI/v6/internal/browser"
 // legacy client removed
 "github.com/router-for-me/CLIProxyAPI/v6/internal/config"
+coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
 log "github.com/sirupsen/logrus"
 )

@@ -30,7 +31,7 @@ func (a *QwenAuthenticator) RefreshLead() *time.Duration {
 return &d
 }

-func (a *QwenAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*TokenRecord, error) {
+func (a *QwenAuthenticator) Login(ctx context.Context, cfg *config.Config, opts *LoginOptions) (*coreauth.Auth, error) {
 if cfg == nil {
 return nil, fmt.Errorf("cliproxy auth: configuration is required")
 }
@@ -51,19 +52,19 @@ func (a *QwenAuthenticator) Login(ctx context.Context, cfg *config.Config, opts
 authURL := deviceFlow.VerificationURIComplete

 if !opts.NoBrowser {
-log.Info("Opening browser for Qwen authentication")
+fmt.Println("Opening browser for Qwen authentication")
 if !browser.IsAvailable() {
 log.Warn("No browser available; please open the URL manually")
-log.Infof("Visit the following URL to continue authentication:\n%s", authURL)
+fmt.Printf("Visit the following URL to continue authentication:\n%s\n", authURL)
 } else if err = browser.OpenURL(authURL); err != nil {
 log.Warnf("Failed to open browser automatically: %v", err)
-log.Infof("Visit the following URL to continue authentication:\n%s", authURL)
+fmt.Printf("Visit the following URL to continue authentication:\n%s\n", authURL)
 }
 } else {
-log.Infof("Visit the following URL to continue authentication:\n%s", authURL)
+fmt.Printf("Visit the following URL to continue authentication:\n%s\n", authURL)
 }

-log.Info("Waiting for Qwen authentication...")
+fmt.Println("Waiting for Qwen authentication...")

 tokenData, err := authSvc.PollForToken(deviceFlow.DeviceCode, deviceFlow.CodeVerifier)
 if err != nil {
@@ -97,13 +98,14 @@ func (a *QwenAuthenticator) Login(ctx context.Context, cfg *config.Config, opts
 // no legacy client construction

 fileName := fmt.Sprintf("qwen-%s.json", tokenStorage.Email)
-metadata := map[string]string{
+metadata := map[string]any{
 "email": tokenStorage.Email,
 }

-log.Info("Qwen authentication successful")
+fmt.Println("Qwen authentication successful")

-return &TokenRecord{
+return &coreauth.Auth{
+ID: fileName,
 Provider: a.Provider(),
 FileName: fileName,
 Storage: tokenStorage,
@@ -10,6 +10,7 @@ func init() {
 registerRefreshLead("codex", func() Authenticator { return NewCodexAuthenticator() })
 registerRefreshLead("claude", func() Authenticator { return NewClaudeAuthenticator() })
 registerRefreshLead("qwen", func() Authenticator { return NewQwenAuthenticator() })
+registerRefreshLead("iflow", func() Authenticator { return NewIFlowAuthenticator() })
 registerRefreshLead("gemini", func() Authenticator { return NewGeminiAuthenticator() })
 registerRefreshLead("gemini-cli", func() Authenticator { return NewGeminiAuthenticator() })
 registerRefreshLead("gemini-web", func() Authenticator { return NewGeminiWebAuthenticator() })
@@ -1,31 +1,35 @@
 package auth

-import "sync"
+import (
+"sync"
+
+coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
+)

 var (
 storeMu sync.RWMutex
-registeredTokenStore TokenStore
+registeredStore coreauth.Store
 )

 // RegisterTokenStore sets the global token store used by the authentication helpers.
-func RegisterTokenStore(store TokenStore) {
+func RegisterTokenStore(store coreauth.Store) {
 storeMu.Lock()
-registeredTokenStore = store
+registeredStore = store
 storeMu.Unlock()
 }

 // GetTokenStore returns the globally registered token store.
-func GetTokenStore() TokenStore {
+func GetTokenStore() coreauth.Store {
 storeMu.RLock()
-s := registeredTokenStore
+s := registeredStore
 storeMu.RUnlock()
 if s != nil {
 return s
 }
 storeMu.Lock()
 defer storeMu.Unlock()
-if registeredTokenStore == nil {
+if registeredStore == nil {
-registeredTokenStore = NewFileTokenStore()
+registeredStore = NewFileTokenStore()
 }
-return registeredTokenStore
+return registeredStore
 }
@@ -818,7 +818,8 @@ func (m *Manager) persist(ctx context.Context, auth *Auth) error {
 if auth.Metadata == nil {
 return nil
 }
-return m.store.SaveAuth(ctx, auth)
+_, err := m.store.Save(ctx, auth)
+return err
 }

 // StartAutoRefresh launches a background loop that evaluates auth freshness
@@ -2,6 +2,7 @@ package auth

 import (
 "context"
+"sort"
 "sync"
 "time"

@@ -36,6 +37,10 @@ func (s *RoundRobinSelector) Pick(ctx context.Context, provider, model string, o
 if len(available) == 0 {
 return nil, &Error{Code: "auth_unavailable", Message: "no auth available"}
 }
+// Make round-robin deterministic even if caller's candidate order is unstable.
+if len(available) > 1 {
+sort.Slice(available, func(i, j int) bool { return available[i].ID < available[j].ID })
+}
 key := provider + ":" + model
 s.mu.Lock()
 index := s.cursors[key]
@@ -57,7 +62,11 @@ func isAuthBlockedForModel(auth *Auth, model string, now time.Time) bool {
 if auth.Disabled || auth.Status == StatusDisabled {
 return true
 }
-if model != "" && len(auth.ModelStates) > 0 {
+// If a specific model is requested, prefer its per-model state over any aggregated
+// auth-level unavailable flag. This prevents a failure on one model (e.g., 429 quota)
+// from blocking other models of the same provider that have no errors.
+if model != "" {
+if len(auth.ModelStates) > 0 {
 if state, ok := auth.ModelStates[model]; ok && state != nil {
 if state.Status == StatusDisabled {
 return true
@@ -70,8 +79,15 @@ func isAuthBlockedForModel(auth *Auth, model string, now time.Time) bool {
 return true
 }
 }
+// Explicit state exists and is not blocking.
+return false
 }
 }
+// No explicit state for this model; do not block based on aggregated
+// auth-level unavailable status. Allow trying this model.
+return false
+}
+// No specific model context: fall back to auth-level unavailable window.
 if auth.Unavailable && auth.NextRetryAfter.After(now) {
 return true
 }
sdk/cliproxy/auth/selector_rr.go (new file, 125 lines)
@@ -0,0 +1,125 @@
+package auth
+
+import (
+"context"
+"strings"
+
+conversation "github.com/router-for-me/CLIProxyAPI/v6/internal/provider/gemini-web/conversation"
+cliproxyexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
+log "github.com/sirupsen/logrus"
+)
+
+const (
+geminiWebProviderKey = "gemini-web"
+)
+
+type geminiWebStickySelector struct {
+base Selector
+}
+
+func NewGeminiWebStickySelector(base Selector) Selector {
+if selector, ok := base.(*geminiWebStickySelector); ok {
+return selector
+}
+if base == nil {
+base = &RoundRobinSelector{}
+}
+return &geminiWebStickySelector{base: base}
+}
+
+func (m *Manager) EnableGeminiWebStickySelector() {
+if m == nil {
+return
+}
+m.mu.Lock()
+defer m.mu.Unlock()
+if _, ok := m.selector.(*geminiWebStickySelector); ok {
+return
+}
+m.selector = NewGeminiWebStickySelector(m.selector)
+}
+
+func (s *geminiWebStickySelector) Pick(ctx context.Context, provider, model string, opts cliproxyexecutor.Options, auths []*Auth) (*Auth, error) {
+if !strings.EqualFold(provider, geminiWebProviderKey) {
+if opts.Metadata != nil {
+delete(opts.Metadata, conversation.MetadataMatchKey)
+}
+return s.base.Pick(ctx, provider, model, opts, auths)
+}
+
+messages := extractGeminiWebMessages(opts.Metadata)
+if len(messages) >= 2 {
+normalizedModel := conversation.NormalizeModel(model)
+candidates := conversation.BuildLookupHashes(normalizedModel, messages)
+for _, candidate := range candidates {
+record, ok, err := conversation.LookupMatch(candidate.Hash)
+if err != nil {
+log.Warnf("gemini-web selector: lookup failed for hash %s: %v", candidate.Hash, err)
+continue
+}
+if !ok {
+continue
+}
+label := strings.TrimSpace(record.AccountLabel)
+if label == "" {
+continue
+}
+auth := findAuthByLabel(auths, label)
+if auth != nil {
+if opts.Metadata != nil {
+opts.Metadata[conversation.MetadataMatchKey] = &conversation.MatchResult{
+Hash: candidate.Hash,
+Record: record,
+Model: normalizedModel,
+}
+}
+return auth, nil
+}
+_ = conversation.RemoveMatchForLabel(candidate.Hash, label)
+}
+}
+
+return s.base.Pick(ctx, provider, model, opts, auths)
+}
+
+func extractGeminiWebMessages(metadata map[string]any) []conversation.Message {
+if metadata == nil {
+return nil
+}
+raw, ok := metadata[conversation.MetadataMessagesKey]
+if !ok {
+return nil
+}
+switch v := raw.(type) {
+case []conversation.Message:
+return v
+case *[]conversation.Message:
+if v == nil {
+return nil
+}
+return *v
+default:
+return nil
+}
+}
+
+func findAuthByLabel(auths []*Auth, label string) *Auth {
+if len(auths) == 0 {
+return nil
+}
+normalized := strings.ToLower(strings.TrimSpace(label))
+for _, auth := range auths {
+if auth == nil {
+continue
+}
+if strings.ToLower(strings.TrimSpace(auth.Label)) == normalized {
+return auth
+}
+if auth.Metadata != nil {
+if v, ok := auth.Metadata["label"].(string); ok && strings.ToLower(strings.TrimSpace(v)) == normalized {
+return auth
+}
+}
+}
+return nil
+}
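For illustration only (not from the repository): switching the sticky selector on for an already-constructed manager. Per the file above the call is nil-safe and idempotent, and only `gemini-web` lookups change behaviour; other providers keep falling through to the wrapped base selector. How the manager itself is built is outside this diff and assumed here.

```go
package examples // hypothetical

import (
	coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)

// enableStickyGeminiWeb wraps the manager's current selector so gemini-web
// requests stick to the credential that owns the conversation hash. Calling it
// more than once keeps a single wrapper in place.
func enableStickyGeminiWeb(mgr *coreauth.Manager) {
	mgr.EnableGeminiWebStickySelector()
}
```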
@@ -6,8 +6,8 @@ import "context"
 type Store interface {
 // List returns all auth records stored in the backend.
 List(ctx context.Context) ([]*Auth, error)
-// SaveAuth persists the provided auth record, replacing any existing one with same ID.
+// Save persists the provided auth record, replacing any existing one with same ID.
-SaveAuth(ctx context.Context, auth *Auth) error
+Save(ctx context.Context, auth *Auth) (string, error)
 // Delete removes the auth record identified by id.
 Delete(ctx context.Context, id string) error
 }
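For illustration only (not from the repository): a toy in-memory implementation of the reshaped `Store` interface, to make the new `Save` signature concrete — it now returns the location the record was persisted to, which this sketch fakes with a `memory://` pseudo-path. The real implementation is the `FileTokenStore` shown earlier in the diff.

```go
package auth // i.e. sdk/cliproxy/auth; sketch only

import (
	"context"
	"sync"
)

// memoryStore is a hypothetical Store used to illustrate the updated interface:
// Save replaces SaveAuth and reports the "path" it persisted the record under.
type memoryStore struct {
	mu    sync.RWMutex
	items map[string]*Auth
}

func newMemoryStore() *memoryStore {
	return &memoryStore{items: make(map[string]*Auth)}
}

func (s *memoryStore) List(ctx context.Context) ([]*Auth, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	out := make([]*Auth, 0, len(s.items))
	for _, a := range s.items {
		out = append(out, a)
	}
	return out, nil
}

func (s *memoryStore) Save(ctx context.Context, auth *Auth) (string, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.items[auth.ID] = auth
	return "memory://" + auth.ID, nil // stand-in for the file path a real store returns
}

func (s *memoryStore) Delete(ctx context.Context, id string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.items, id)
	return nil
}
```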
@@ -6,6 +6,8 @@ import (
 "strings"
 "sync"
 "time"
+
+baseauth "github.com/router-for-me/CLIProxyAPI/v6/internal/auth"
 )

 // Auth encapsulates the runtime state and metadata associated with a single credential.
@@ -14,6 +16,10 @@ type Auth struct {
 ID string `json:"id"`
 // Provider is the upstream provider key (e.g. "gemini", "claude").
 Provider string `json:"provider"`
+// FileName stores the relative or absolute path of the backing auth file.
+FileName string `json:"-"`
+// Storage holds the token persistence implementation used during login flows.
+Storage baseauth.TokenStorage `json:"-"`
 // Label is an optional human readable label for logging.
 Label string `json:"label,omitempty"`
 // Status is the lifecycle status managed by the AuthManager.
@@ -128,6 +134,7 @@ func (a *Auth) AccountInfo() (string, string) {
 if a == nil {
 return "", ""
 }
+// For Gemini Web, prefer explicit cookie label for stability.
 if strings.ToLower(a.Provider) == "gemini-web" {
 // Prefer explicit label written into auth file (e.g., gemini-web-<hash>)
 if a.Metadata != nil {
@@ -145,6 +152,22 @@ func (a *Auth) AccountInfo() (string, string) {
 }
 }
 }
+// For Gemini CLI, include project ID in the OAuth account info if present.
+if strings.ToLower(a.Provider) == "gemini-cli" {
+if a.Metadata != nil {
+email, _ := a.Metadata["email"].(string)
+email = strings.TrimSpace(email)
+if email != "" {
+if p, ok := a.Metadata["project_id"].(string); ok {
+p = strings.TrimSpace(p)
+if p != "" {
+return "oauth", email + " (" + p + ")"
+}
+}
+return "oauth", email
+}
+}
+}
 if a.Metadata != nil {
 if v, ok := a.Metadata["email"].(string); ok {
 return "oauth", v
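For illustration only (not from the repository): a small worked example of the new `gemini-cli` branch of `AccountInfo`, assuming the exported fields shown in this diff and hypothetical metadata values.

```go
package main

import (
	"fmt"

	coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)

func main() {
	// Hypothetical record: with both email and project_id present, AccountInfo
	// now reports the project alongside the account for gemini-cli auths.
	a := &coreauth.Auth{
		ID:       "user@example.com-my-project.json",
		Provider: "gemini-cli",
		Metadata: map[string]any{
			"email":      "user@example.com",
			"project_id": "my-project",
		},
	}
	kind, info := a.AccountInfo()
	fmt.Println(kind, info) // expected: oauth user@example.com (my-project)
}
```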
Some files were not shown because too many files have changed in this diff.