Mirror of https://github.com/router-for-me/CLIProxyAPI.git (synced 2026-02-03 04:50:52 +08:00)

Compare commits: v6.0.0 ... 9299897e04 (1288 commits)
| Author | SHA1 | Date |
|---|---|---|

[Commit list: 1288 commits, 9299897e04 (newest) through 0d4cb9e9fb (oldest shown); the mirror view rendered only the abbreviated SHA1 column, with no author, message, or date information.]
@@ -13,13 +13,8 @@ Dockerfile
docs/*
README.md
README_CN.md
MANAGEMENT_API.md
MANAGEMENT_API_CN.md
LICENSE

# Example configuration
config.example.yaml

# Runtime data folders (should be mounted as volumes)
auths/*
logs/*
@@ -28,6 +23,14 @@ config.yaml

# Development/editor
bin/*
.claude/*
.vscode/*
.claude/*
.codex/*
.gemini/*
.serena/*
.agent/*
.agents/*
.opencode/*
.bmad/*
_bmad/*
_bmad-output/*
34  .env.example  Normal file
@@ -0,0 +1,34 @@
# Example environment configuration for CLIProxyAPI.
# Copy this file to `.env` and uncomment the variables you need.
#
# NOTE: Environment variables are only required when using remote storage options.
# For local file-based storage (default), no environment variables need to be set.

# ------------------------------------------------------------------------------
# Management Web UI
# ------------------------------------------------------------------------------
# MANAGEMENT_PASSWORD=change-me-to-a-strong-password

# ------------------------------------------------------------------------------
# Postgres Token Store (optional)
# ------------------------------------------------------------------------------
# PGSTORE_DSN=postgresql://user:pass@localhost:5432/cliproxy
# PGSTORE_SCHEMA=public
# PGSTORE_LOCAL_PATH=/var/lib/cliproxy

# ------------------------------------------------------------------------------
# Git-Backed Config Store (optional)
# ------------------------------------------------------------------------------
# GITSTORE_GIT_URL=https://github.com/your-org/cli-proxy-config.git
# GITSTORE_GIT_USERNAME=git-user
# GITSTORE_GIT_TOKEN=ghp_your_personal_access_token
# GITSTORE_LOCAL_PATH=/data/cliproxy/gitstore

# ------------------------------------------------------------------------------
# Object Store Token Store (optional)
# ------------------------------------------------------------------------------
# OBJECTSTORE_ENDPOINT=https://s3.your-cloud.example.com
# OBJECTSTORE_BUCKET=cli-proxy-config
# OBJECTSTORE_ACCESS_KEY=your_access_key
# OBJECTSTORE_SECRET_KEY=your_secret_key
# OBJECTSTORE_LOCAL_PATH=/data/cliproxy/objectstore
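As a usage sketch (not part of the diff itself), the variables above can be supplied to the container with Docker's standard `--env-file` flag. The image name and port come from the workflow and Dockerfile in this compare; the volume mount points are illustrative assumptions.

```bash
# Hypothetical local run: load .env, expose port 8317, and persist runtime data.
# The config and auths mount points are assumptions for illustration.
cp .env.example .env
docker run -d --name cli-proxy-api \
  --env-file .env \
  -p 8317:8317 \
  -v "$(pwd)/config.yaml:/CLIProxyAPI/config.yaml" \
  -v "$(pwd)/auths:/CLIProxyAPI/auths" \
  eceasy/cli-proxy-api:latest
```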
1  .github/FUNDING.yml  vendored  Normal file
@@ -0,0 +1 @@
github: [router-for-me]
7  .github/ISSUE_TEMPLATE/bug_report.md  vendored
@@ -7,6 +7,13 @@ assignees: ''

---

**Is it a request payload issue?**
[ ] Yes, this is a request payload issue. I am using a client/cURL to send a request payload, but I received an unexpected error.
[ ] No, it's another issue.

**If it's a request payload issue, you MUST know**
Our team doesn't have any GODs or ORACLEs or MIND READERs. Please make sure to attach the request log or curl payload.

**Describe the bug**
A clear and concise description of what the bug is.
111  .github/workflows/docker-image.yml  vendored
@@ -10,13 +10,11 @@ env:
  DOCKERHUB_REPO: eceasy/cli-proxy-api

jobs:
  docker:
  docker_amd64:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to DockerHub
@@ -29,18 +27,113 @@ jobs:
          echo VERSION=`git describe --tags --always --dirty` >> $GITHUB_ENV
          echo COMMIT=`git rev-parse --short HEAD` >> $GITHUB_ENV
          echo BUILD_DATE=`date -u +%Y-%m-%dT%H:%M:%SZ` >> $GITHUB_ENV
      - name: Build and push
      - name: Build and push (amd64)
        uses: docker/build-push-action@v6
        with:
          context: .
          platforms: |
            linux/amd64
            linux/arm64
          platforms: linux/amd64
          push: true
          build-args: |
            VERSION=${{ env.VERSION }}
            COMMIT=${{ env.COMMIT }}
            BUILD_DATE=${{ env.BUILD_DATE }}
          tags: |
            ${{ env.DOCKERHUB_REPO }}:latest
            ${{ env.DOCKERHUB_REPO }}:${{ env.VERSION }}
            ${{ env.DOCKERHUB_REPO }}:latest-amd64
            ${{ env.DOCKERHUB_REPO }}:${{ env.VERSION }}-amd64

  docker_arm64:
    runs-on: ubuntu-24.04-arm
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Generate Build Metadata
        run: |
          echo VERSION=`git describe --tags --always --dirty` >> $GITHUB_ENV
          echo COMMIT=`git rev-parse --short HEAD` >> $GITHUB_ENV
          echo BUILD_DATE=`date -u +%Y-%m-%dT%H:%M:%SZ` >> $GITHUB_ENV
      - name: Build and push (arm64)
        uses: docker/build-push-action@v6
        with:
          context: .
          platforms: linux/arm64
          push: true
          build-args: |
            VERSION=${{ env.VERSION }}
            COMMIT=${{ env.COMMIT }}
            BUILD_DATE=${{ env.BUILD_DATE }}
          tags: |
            ${{ env.DOCKERHUB_REPO }}:latest-arm64
            ${{ env.DOCKERHUB_REPO }}:${{ env.VERSION }}-arm64

  docker_manifest:
    runs-on: ubuntu-latest
    needs:
      - docker_amd64
      - docker_arm64
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Generate Build Metadata
        run: |
          echo VERSION=`git describe --tags --always --dirty` >> $GITHUB_ENV
          echo COMMIT=`git rev-parse --short HEAD` >> $GITHUB_ENV
          echo BUILD_DATE=`date -u +%Y-%m-%dT%H:%M:%SZ` >> $GITHUB_ENV
      - name: Create and push multi-arch manifests
        run: |
          docker buildx imagetools create \
            --tag "${DOCKERHUB_REPO}:latest" \
            "${DOCKERHUB_REPO}:latest-amd64" \
            "${DOCKERHUB_REPO}:latest-arm64"
          docker buildx imagetools create \
            --tag "${DOCKERHUB_REPO}:${VERSION}" \
            "${DOCKERHUB_REPO}:${VERSION}-amd64" \
            "${DOCKERHUB_REPO}:${VERSION}-arm64"
      - name: Cleanup temporary tags
        continue-on-error: true
        env:
          DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
          DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
        run: |
          set -euo pipefail
          namespace="${DOCKERHUB_REPO%%/*}"
          repo_name="${DOCKERHUB_REPO#*/}"

          token="$(
            curl -fsSL \
              -H 'Content-Type: application/json' \
              -d "{\"username\":\"${DOCKERHUB_USERNAME}\",\"password\":\"${DOCKERHUB_TOKEN}\"}" \
              'https://hub.docker.com/v2/users/login/' \
              | python3 -c 'import json,sys; print(json.load(sys.stdin)["token"])'
          )"

          delete_tag() {
            local tag="$1"
            local url="https://hub.docker.com/v2/repositories/${namespace}/${repo_name}/tags/${tag}/"
            local http_code
            http_code="$(curl -sS -o /dev/null -w "%{http_code}" -X DELETE -H "Authorization: JWT ${token}" "${url}" || true)"
            if [ "${http_code}" = "204" ] || [ "${http_code}" = "404" ]; then
              echo "Docker Hub tag removed (or missing): ${DOCKERHUB_REPO}:${tag} (HTTP ${http_code})"
              return 0
            fi
            echo "Docker Hub tag delete failed: ${DOCKERHUB_REPO}:${tag} (HTTP ${http_code})"
            return 0
          }

          delete_tag "latest-amd64"
          delete_tag "latest-arm64"
          delete_tag "${VERSION}-amd64"
          delete_tag "${VERSION}-arm64"
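The manifest job above stitches the per-architecture images into one multi-arch tag and then removes the temporary `-amd64`/`-arm64` tags. A quick way to confirm that a published tag really carries both platforms (a local sketch, not part of the workflow) is `docker buildx imagetools inspect`:

```bash
# Inspect the multi-arch manifest that docker_manifest publishes.
# Expect entries for linux/amd64 and linux/arm64 in the output.
docker buildx imagetools inspect eceasy/cli-proxy-api:latest

# The same check for a specific version tag (substitute a real tag name).
docker buildx imagetools inspect eceasy/cli-proxy-api:v6.0.0
```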
28  .github/workflows/pr-path-guard.yml  vendored  Normal file
@@ -0,0 +1,28 @@
name: translator-path-guard

on:
  pull_request:
    types:
      - opened
      - synchronize
      - reopened

jobs:
  ensure-no-translator-changes:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Detect internal/translator changes
        id: changed-files
        uses: tj-actions/changed-files@v45
        with:
          files: |
            internal/translator/**
      - name: Fail when restricted paths change
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          echo "Changes under internal/translator are not allowed in pull requests."
          echo "You need to create an issue for our maintenance team to make the necessary changes."
          exit 1
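The guard above fails any pull request that touches `internal/translator/**`. Contributors can mirror the check locally before opening a PR with plain git (a sketch; it assumes the upstream default branch is `main`):

```bash
# List files changed relative to upstream main and fail if any fall under internal/translator/.
git fetch origin main
if git diff --name-only origin/main...HEAD | grep -q '^internal/translator/'; then
  echo "internal/translator changes must go through an issue for the maintenance team." >&2
  exit 1
fi
```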
23  .github/workflows/pr-test-build.yml  vendored  Normal file
@@ -0,0 +1,23 @@
name: pr-test-build

on:
  pull_request:

permissions:
  contents: read

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
          cache: true
      - name: Build
        run: |
          go build -o test-output ./cmd/server
          rm -f test-output
48  .gitignore  vendored
@@ -1,14 +1,50 @@
# Binaries
cli-proxy-api
*.exe

# Configuration
config.yaml
.env

# Generated content
bin/*
docs/*
logs/*
conv/*
temp/*
refs/*

# Storage backends
pgstore/*
gitstore/*
objectstore/*

# Static assets
static/*

# Authentication data
auths/*
!auths/.gitkeep
.vscode/*
.claude/*
.serena/*

# Documentation
docs/*
AGENTS.md
CLAUDE.md
*.exe
temp/*
GEMINI.md

# Tooling metadata
.vscode/*
.codex/*
.claude/*
.gemini/*
.serena/*
.agent/*
.agents/*
.agents/*
.opencode/*
.bmad/*
_bmad/*
_bmad-output/*

# macOS
.DS_Store
._*
@@ -1,5 +1,7 @@
builds:
  - id: "cli-proxy-api"
    env:
      - CGO_ENABLED=0
    goos:
      - linux
      - windows
@@ -22,6 +22,8 @@ RUN mkdir /CLIProxyAPI

COPY --from=builder ./app/CLIProxyAPI /CLIProxyAPI/CLIProxyAPI

COPY config.example.yaml /CLIProxyAPI/config.example.yaml

WORKDIR /CLIProxyAPI

EXPOSE 8317
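Since the image now bundles `config.example.yaml` at `/CLIProxyAPI/config.example.yaml`, a starting config can be copied straight out of a published image before the first run (a local sketch using standard Docker commands; the temporary container name is arbitrary):

```bash
# Extract the bundled example config from the image and use it as a template.
docker create --name cliproxy-tmp eceasy/cli-proxy-api:latest
docker cp cliproxy-tmp:/CLIProxyAPI/config.example.yaml ./config.yaml
docker rm cliproxy-tmp
```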
3  LICENSE
@@ -1,6 +1,7 @@
MIT License

Copyright (c) 2025 Luis Pater
Copyright (c) 2025-2005.9 Luis Pater
Copyright (c) 2025.9-present Router-For.ME

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -1,711 +0,0 @@
# Management API

Base path: `http://localhost:8317/v0/management`

This API manages the CLI Proxy API’s runtime configuration and authentication files. All changes are persisted to the YAML config file and hot‑reloaded by the service.

Note: The following options cannot be modified via API and must be set in the config file (restart if needed):
- `allow-remote-management`
- `remote-management-key` (if plaintext is detected at startup, it is automatically bcrypt‑hashed and written back to the config)

## Authentication

- All requests (including localhost) must provide a valid management key.
- Remote access requires enabling remote management in the config: `allow-remote-management: true`.
- Provide the management key (in plaintext) via either:
  - `Authorization: Bearer <plaintext-key>`
  - `X-Management-Key: <plaintext-key>`

Additional notes:
- If `remote-management.secret-key` is empty, the entire Management API is disabled (all `/v0/management` routes return 404).
- For remote IPs, 5 consecutive authentication failures trigger a temporary ban (~30 minutes) before further attempts are allowed.

If a plaintext key is detected in the config at startup, it will be bcrypt‑hashed and written back to the config file automatically.
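As a quick check of the two header forms listed above (a sketch following the document's own conventions; `<MANAGEMENT_KEY>` stands for the plaintext key):

```bash
# Bearer form
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/debug

# X-Management-Key form (equivalent)
curl -H 'X-Management-Key: <MANAGEMENT_KEY>' http://localhost:8317/v0/management/debug
```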
## Request/Response Conventions

- Content-Type: `application/json` (unless otherwise noted).
- Boolean/int/string updates: request body is `{ "value": <type> }`.
- Array PUT: either a raw array (e.g. `["a","b"]`) or `{ "items": [ ... ] }`.
- Array PATCH: supports `{ "old": "k1", "new": "k2" }` or `{ "index": 0, "value": "k2" }`.
- Object-array PATCH: supports matching by index or by key field (specified per endpoint).
## Endpoints

### Usage Statistics
- GET `/usage` — Retrieve aggregated in-memory request metrics
  - Response:
    ```json
    {
      "usage": {
        "total_requests": 24,
        "success_count": 22,
        "failure_count": 2,
        "total_tokens": 13890,
        "requests_by_day": {
          "2024-05-20": 12
        },
        "requests_by_hour": {
          "09": 4,
          "18": 8
        },
        "tokens_by_day": {
          "2024-05-20": 9876
        },
        "tokens_by_hour": {
          "09": 1234,
          "18": 865
        },
        "apis": {
          "POST /v1/chat/completions": {
            "total_requests": 12,
            "total_tokens": 9021,
            "models": {
              "gpt-4o-mini": {
                "total_requests": 8,
                "total_tokens": 7123,
                "details": [
                  {
                    "timestamp": "2024-05-20T09:15:04.123456Z",
                    "tokens": {
                      "input_tokens": 523,
                      "output_tokens": 308,
                      "reasoning_tokens": 0,
                      "cached_tokens": 0,
                      "total_tokens": 831
                    }
                  }
                ]
              }
            }
          }
        }
      }
    }
    ```
  - Notes:
    - Statistics are recalculated for every request that reports token usage; data resets when the server restarts.
    - Hourly counters fold all days into the same hour bucket (`00`–`23`).
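The `/usage` payload nests per-API and per-model buckets, so routine monitoring usually only needs the top-level counters. A small sketch with `jq` (assuming it is installed; not part of the API itself):

```bash
# Pull the headline counters out of the usage report.
curl -s -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
  http://localhost:8317/v0/management/usage \
  | jq '{requests: .usage.total_requests, failures: .usage.failure_count, tokens: .usage.total_tokens}'
```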
### Config
- GET `/config` — Get the full config
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/config
    ```
  - Response:
    ```json
    {"debug":true,"proxy-url":"","api-keys":["1...5","JS...W"],"quota-exceeded":{"switch-project":true,"switch-preview-model":true},"generative-language-api-key":["AI...01", "AI...02", "AI...03"],"request-log":true,"request-retry":3,"claude-api-key":[{"api-key":"cr...56","base-url":"https://example.com/api"},{"api-key":"cr...e3","base-url":"http://example.com:3000/api"},{"api-key":"sk-...q2","base-url":"https://example.com"}],"codex-api-key":[{"api-key":"sk...01","base-url":"https://example/v1"}],"openai-compatibility":[{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":["sk...01"],"models":[{"name":"moonshotai/kimi-k2:free","alias":"kimi-k2"}]},{"name":"iflow","base-url":"https://apis.iflow.cn/v1","api-keys":["sk...7e"],"models":[{"name":"deepseek-v3.1","alias":"deepseek-v3.1"},{"name":"glm-4.5","alias":"glm-4.5"},{"name":"kimi-k2","alias":"kimi-k2"}]}],"allow-localhost-unauthenticated":true}
    ```

### Debug
- GET `/debug` — Get the current debug state
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/debug
    ```
  - Response:
    ```json
    { "debug": false }
    ```
- PUT/PATCH `/debug` — Set debug (boolean)
  - Request:
    ```bash
    curl -X PUT -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"value":true}' \
      http://localhost:8317/v0/management/debug
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
### Force GPT-5 Codex
- GET `/force-gpt-5-codex` — Get current flag
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/force-gpt-5-codex
    ```
  - Response:
    ```json
    { "gpt-5-codex": false }
    ```
- PUT/PATCH `/force-gpt-5-codex` — Set boolean
  - Request:
    ```bash
    curl -X PUT -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"value":true}' \
      http://localhost:8317/v0/management/force-gpt-5-codex
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
### Proxy Server URL
- GET `/proxy-url` — Get the proxy URL string
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/proxy-url
    ```
  - Response:
    ```json
    { "proxy-url": "socks5://user:pass@127.0.0.1:1080/" }
    ```
- PUT/PATCH `/proxy-url` — Set the proxy URL string
  - Request (PUT):
    ```bash
    curl -X PUT -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"value":"socks5://user:pass@127.0.0.1:1080/"}' \
      http://localhost:8317/v0/management/proxy-url
    ```
  - Request (PATCH):
    ```bash
    curl -X PATCH -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"value":"http://127.0.0.1:8080"}' \
      http://localhost:8317/v0/management/proxy-url
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
- DELETE `/proxy-url` — Clear the proxy URL
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' -X DELETE http://localhost:8317/v0/management/proxy-url
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
### Quota Exceeded Behavior
- GET `/quota-exceeded/switch-project`
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/quota-exceeded/switch-project
    ```
  - Response:
    ```json
    { "switch-project": true }
    ```
- PUT/PATCH `/quota-exceeded/switch-project` — Boolean
  - Request:
    ```bash
    curl -X PUT -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"value":false}' \
      http://localhost:8317/v0/management/quota-exceeded/switch-project
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
- GET `/quota-exceeded/switch-preview-model`
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/quota-exceeded/switch-preview-model
    ```
  - Response:
    ```json
    { "switch-preview-model": true }
    ```
- PUT/PATCH `/quota-exceeded/switch-preview-model` — Boolean
  - Request:
    ```bash
    curl -X PATCH -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"value":true}' \
      http://localhost:8317/v0/management/quota-exceeded/switch-preview-model
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
### API Keys (proxy service auth)
These endpoints update the inline `config-api-key` provider inside the `auth.providers` section of the configuration. Legacy top-level `api-keys` remain in sync automatically.
- GET `/api-keys` — Return the full list
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/api-keys
    ```
  - Response:
    ```json
    { "api-keys": ["k1","k2","k3"] }
    ```
- PUT `/api-keys` — Replace the full list
  - Request:
    ```bash
    curl -X PUT -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '["k1","k2","k3"]' \
      http://localhost:8317/v0/management/api-keys
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
- PATCH `/api-keys` — Modify one item (`old/new` or `index/value`)
  - Request (by old/new):
    ```bash
    curl -X PATCH -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"old":"k2","new":"k2b"}' \
      http://localhost:8317/v0/management/api-keys
    ```
  - Request (by index/value):
    ```bash
    curl -X PATCH -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"index":0,"value":"k1b"}' \
      http://localhost:8317/v0/management/api-keys
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
- DELETE `/api-keys` — Delete one (`?value=` or `?index=`)
  - Request (by value):
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' -X DELETE 'http://localhost:8317/v0/management/api-keys?value=k1'
    ```
  - Request (by index):
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' -X DELETE 'http://localhost:8317/v0/management/api-keys?index=0'
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
### Gemini API Key (Generative Language)
- GET `/generative-language-api-key`
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/generative-language-api-key
    ```
  - Response:
    ```json
    { "generative-language-api-key": ["AIzaSy...01","AIzaSy...02"] }
    ```
- PUT `/generative-language-api-key`
  - Request:
    ```bash
    curl -X PUT -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '["AIzaSy-1","AIzaSy-2"]' \
      http://localhost:8317/v0/management/generative-language-api-key
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
- PATCH `/generative-language-api-key`
  - Request:
    ```bash
    curl -X PATCH -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"old":"AIzaSy-1","new":"AIzaSy-1b"}' \
      http://localhost:8317/v0/management/generative-language-api-key
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
- DELETE `/generative-language-api-key`
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' -X DELETE 'http://localhost:8317/v0/management/generative-language-api-key?value=AIzaSy-2'
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
### Codex API KEY (object array)
- GET `/codex-api-key` — List all
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/codex-api-key
    ```
  - Response:
    ```json
    { "codex-api-key": [ { "api-key": "sk-a", "base-url": "" } ] }
    ```
- PUT `/codex-api-key` — Replace the list
  - Request:
    ```bash
    curl -X PUT -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '[{"api-key":"sk-a"},{"api-key":"sk-b","base-url":"https://c.example.com"}]' \
      http://localhost:8317/v0/management/codex-api-key
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
- PATCH `/codex-api-key` — Modify one (by `index` or `match`)
  - Request (by index):
    ```bash
    curl -X PATCH -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"index":1,"value":{"api-key":"sk-b2","base-url":"https://c.example.com"}}' \
      http://localhost:8317/v0/management/codex-api-key
    ```
  - Request (by match):
    ```bash
    curl -X PATCH -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"match":"sk-a","value":{"api-key":"sk-a","base-url":""}}' \
      http://localhost:8317/v0/management/codex-api-key
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
- DELETE `/codex-api-key` — Delete one (`?api-key=` or `?index=`)
  - Request (by api-key):
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' -X DELETE 'http://localhost:8317/v0/management/codex-api-key?api-key=sk-b2'
    ```
  - Request (by index):
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' -X DELETE 'http://localhost:8317/v0/management/codex-api-key?index=0'
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
### Request Retry Count
- GET `/request-retry` — Get integer
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/request-retry
    ```
  - Response:
    ```json
    { "request-retry": 3 }
    ```
- PUT/PATCH `/request-retry` — Set integer
  - Request:
    ```bash
    curl -X PATCH -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"value":5}' \
      http://localhost:8317/v0/management/request-retry
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
### Request Log
- GET `/request-log` — Get boolean
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/request-log
    ```
  - Response:
    ```json
    { "request-log": false }
    ```
- PUT/PATCH `/request-log` — Set boolean
  - Request:
    ```bash
    curl -X PATCH -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"value":true}' \
      http://localhost:8317/v0/management/request-log
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
### Allow Localhost Unauthenticated
- GET `/allow-localhost-unauthenticated` — Get boolean
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/allow-localhost-unauthenticated
    ```
  - Response:
    ```json
    { "allow-localhost-unauthenticated": false }
    ```
- PUT/PATCH `/allow-localhost-unauthenticated` — Set boolean
  - Request:
    ```bash
    curl -X PUT -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"value":true}' \
      http://localhost:8317/v0/management/allow-localhost-unauthenticated
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
### Claude API KEY (object array)
- GET `/claude-api-key` — List all
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/claude-api-key
    ```
  - Response:
    ```json
    { "claude-api-key": [ { "api-key": "sk-a", "base-url": "" } ] }
    ```
- PUT `/claude-api-key` — Replace the list
  - Request:
    ```bash
    curl -X PUT -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '[{"api-key":"sk-a"},{"api-key":"sk-b","base-url":"https://c.example.com"}]' \
      http://localhost:8317/v0/management/claude-api-key
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
- PATCH `/claude-api-key` — Modify one (by `index` or `match`)
  - Request (by index):
    ```bash
    curl -X PATCH -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"index":1,"value":{"api-key":"sk-b2","base-url":"https://c.example.com"}}' \
      http://localhost:8317/v0/management/claude-api-key
    ```
  - Request (by match):
    ```bash
    curl -X PATCH -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"match":"sk-a","value":{"api-key":"sk-a","base-url":""}}' \
      http://localhost:8317/v0/management/claude-api-key
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
- DELETE `/claude-api-key` — Delete one (`?api-key=` or `?index=`)
  - Request (by api-key):
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' -X DELETE 'http://localhost:8317/v0/management/claude-api-key?api-key=sk-b2'
    ```
  - Request (by index):
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' -X DELETE 'http://localhost:8317/v0/management/claude-api-key?index=0'
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
### OpenAI Compatibility Providers (object array)
- GET `/openai-compatibility` — List all
  - Request:
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/openai-compatibility
    ```
  - Response:
    ```json
    { "openai-compatibility": [ { "name": "openrouter", "base-url": "https://openrouter.ai/api/v1", "api-keys": [], "models": [] } ] }
    ```
- PUT `/openai-compatibility` — Replace the list
  - Request:
    ```bash
    curl -X PUT -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '[{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":["sk"],"models":[{"name":"m","alias":"a"}]}]' \
      http://localhost:8317/v0/management/openai-compatibility
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
- PATCH `/openai-compatibility` — Modify one (by `index` or `name`)
  - Request (by name):
    ```bash
    curl -X PATCH -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"name":"openrouter","value":{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":[],"models":[]}}' \
      http://localhost:8317/v0/management/openai-compatibility
    ```
  - Request (by index):
    ```bash
    curl -X PATCH -H 'Content-Type: application/json' \
      -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
      -d '{"index":0,"value":{"name":"openrouter","base-url":"https://openrouter.ai/api/v1","api-keys":[],"models":[]}}' \
      http://localhost:8317/v0/management/openai-compatibility
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
- DELETE `/openai-compatibility` — Delete (`?name=` or `?index=`)
  - Request (by name):
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' -X DELETE 'http://localhost:8317/v0/management/openai-compatibility?name=openrouter'
    ```
  - Request (by index):
    ```bash
    curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' -X DELETE 'http://localhost:8317/v0/management/openai-compatibility?index=0'
    ```
  - Response:
    ```json
    { "status": "ok" }
    ```
### Auth File Management
|
||||
|
||||
Manage JSON token files under `auth-dir`: list, download, upload, delete.
|
||||
|
||||
- GET `/auth-files` — List
|
||||
- Request:
|
||||
```bash
|
||||
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' http://localhost:8317/v0/management/auth-files
|
||||
```
|
||||
- Response:
|
||||
```json
|
||||
{ "files": [ { "name": "acc1.json", "size": 1234, "modtime": "2025-08-30T12:34:56Z", "type": "google" } ] }
|
||||
```
|
||||
|
||||
- GET `/auth-files/download?name=<file.json>` — Download a single file
|
||||
- Request:
|
||||
```bash
|
||||
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' -OJ 'http://localhost:8317/v0/management/auth-files/download?name=acc1.json'
|
||||
```
|
||||
|
||||
- POST `/auth-files` — Upload
|
||||
- Request (multipart):
|
||||
```bash
|
||||
curl -X POST -F 'file=@/path/to/acc1.json' \
|
||||
-H 'Authorization: Bearer <MANAGEMENT_KEY>' \
|
||||
http://localhost:8317/v0/management/auth-files
|
||||
```
|
||||
- Request (raw JSON):
|
||||
```bash
|
||||
curl -X POST -H 'Content-Type: application/json' \
|
||||
-H 'Authorization: Bearer <MANAGEMENT_KEY>' \
|
||||
-d @/path/to/acc1.json \
|
||||
'http://localhost:8317/v0/management/auth-files?name=acc1.json'
|
||||
```
|
||||
- Response:
|
||||
```json
|
||||
{ "status": "ok" }
|
||||
```
|
||||
|
||||
- DELETE `/auth-files?name=<file.json>` — Delete a single file
|
||||
- Request:
|
||||
```bash
|
||||
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' -X DELETE 'http://localhost:8317/v0/management/auth-files?name=acc1.json'
|
||||
```
|
||||
- Response:
|
||||
```json
|
||||
{ "status": "ok" }
|
||||
```
|
||||
|
||||
- DELETE `/auth-files?all=true` — Delete all `.json` files under `auth-dir`
|
||||
- Request:
|
||||
```bash
|
||||
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' -X DELETE 'http://localhost:8317/v0/management/auth-files?all=true'
|
||||
```
|
||||
- Response:
|
||||
```json
|
||||
{ "status": "ok", "deleted": 3 }
|
||||
```
|
||||
|
||||
### Login/OAuth URLs
|
||||
|
||||
These endpoints initiate provider login flows and return a URL to open in a browser. Tokens are saved under `auths/` once the flow completes.
|
||||
|
||||
- GET `/anthropic-auth-url` — Start Anthropic (Claude) login
|
||||
- Request:
|
||||
```bash
|
||||
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
|
||||
http://localhost:8317/v0/management/anthropic-auth-url
|
||||
```
|
||||
- Response:
|
||||
```json
|
||||
{ "status": "ok", "url": "https://..." }
|
||||
```
|
||||
|
||||
- GET `/codex-auth-url` — Start Codex login
|
||||
- Request:
|
||||
```bash
|
||||
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
|
||||
http://localhost:8317/v0/management/codex-auth-url
|
||||
```
|
||||
- Response:
|
||||
```json
|
||||
{ "status": "ok", "url": "https://..." }
|
||||
```
|
||||
|
||||
- GET `/gemini-cli-auth-url` — Start Google (Gemini CLI) login
|
||||
- Query params:
|
||||
- `project_id` (optional): Google Cloud project ID.
|
||||
- Request:
|
||||
```bash
|
||||
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
|
||||
'http://localhost:8317/v0/management/gemini-cli-auth-url?project_id=<PROJECT_ID>'
|
||||
```
|
||||
- Response:
|
||||
```json
|
||||
{ "status": "ok", "url": "https://..." }
|
||||
```
|
||||
|
||||
- POST `/gemini-web-token` — Save Gemini Web cookies directly
|
||||
- Request:
|
||||
```bash
|
||||
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"secure_1psid": "<__Secure-1PSID>", "secure_1psidts": "<__Secure-1PSIDTS>"}' \
|
||||
http://localhost:8317/v0/management/gemini-web-token
|
||||
```
|
||||
- Response:
|
||||
```json
|
||||
{ "status": "ok", "file": "gemini-web-<hash>.json" }
|
||||
```
|
||||
|
||||
- GET `/qwen-auth-url` — Start Qwen login (device flow)
|
||||
- Request:
|
||||
```bash
|
||||
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
|
||||
http://localhost:8317/v0/management/qwen-auth-url
|
||||
```
|
||||
- Response:
|
||||
```json
|
||||
{ "status": "ok", "url": "https://..." }
|
||||
```
|
||||
|
||||
- GET `/get-auth-status?state=<state>` — Poll OAuth flow status
|
||||
- Request:
|
||||
```bash
|
||||
curl -H 'Authorization: Bearer <MANAGEMENT_KEY>' \
|
||||
'http://localhost:8317/v0/management/get-auth-status?state=<STATE_FROM_AUTH_URL>'
|
||||
```
|
||||
- Response examples:
|
||||
```json
|
||||
{ "status": "wait" }
|
||||
{ "status": "ok" }
|
||||
{ "status": "error", "error": "Authentication failed" }
|
||||
```
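
Taken together, a login driven entirely through the management API looks roughly like the sketch below. This is a minimal example, not the official flow: it assumes the returned auth URL carries the `state` value as a query parameter and that `jq` is installed; swap in any of the `*-auth-url` endpoints above.

```bash
#!/usr/bin/env bash
# Minimal sketch: drive a provider login through the management API.
# Assumptions: the auth URL contains a `state` query parameter and `jq` is available.
KEY="<MANAGEMENT_KEY>"
BASE="http://localhost:8317/v0/management"

# 1. Ask the proxy for a login URL (Codex here; other providers work the same way).
URL=$(curl -s -H "Authorization: Bearer $KEY" "$BASE/codex-auth-url" | jq -r '.url')
echo "Open this URL in a browser: $URL"

# 2. Extract the state token from the URL (assumption: it is passed as ?state=...).
STATE=$(echo "$URL" | sed -n 's/.*[?&]state=\([^&]*\).*/\1/p')

# 3. Poll until the flow finishes.
while true; do
  STATUS=$(curl -s -H "Authorization: Bearer $KEY" "$BASE/get-auth-status?state=$STATE" | jq -r '.status')
  [ "$STATUS" != "wait" ] && break
  sleep 2
done
echo "Final status: $STATUS"
```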

## Error Responses

Generic error format:
- 400 Bad Request: `{ "error": "invalid body" }`
- 401 Unauthorized: `{ "error": "missing management key" }` or `{ "error": "invalid management key" }`
- 403 Forbidden: `{ "error": "remote management disabled" }`
- 404 Not Found: `{ "error": "item not found" }` or `{ "error": "file not found" }`
- 500 Internal Server Error: `{ "error": "failed to save config: ..." }`
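
For example, calling any management endpoint without credentials should produce the generic 401 body from the list above (a sketch; `/debug` is used here only as an illustrative endpoint):

```bash
# Request without a management key; the proxy rejects it before reaching the handler.
curl -i http://localhost:8317/v0/management/debug
# HTTP/1.1 401 Unauthorized
# { "error": "missing management key" }
```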

## Notes

- Changes are written back to the YAML config file; the file watcher hot-reloads the configuration and clients automatically.
- `allow-remote-management` and `remote-management-key` cannot be changed via the API; configure them in the config file.
README.md

It now also supports OpenAI Codex (GPT models) and Claude Code via OAuth.

So you can use local or multi-account CLI access with OpenAI (including Responses)/Gemini/Claude-compatible clients and SDKs.

The first Chinese provider has now been added: [Qwen Code](https://github.com/QwenLM/qwen-code).

## Sponsor

[Z.ai](https://z.ai/subscribe?ic=8JVLJQFSKB)

This project is sponsored by Z.ai, supporting us with their GLM CODING PLAN.

GLM CODING PLAN is a subscription service designed for AI coding, starting at just $3/month. It provides access to their flagship GLM-4.7 model across 10+ popular AI coding tools (Claude Code, Cline, Roo Code, etc.), offering developers top-tier, fast, and stable coding experiences.

Get 10% OFF GLM CODING PLAN: https://z.ai/subscribe?ic=8JVLJQFSKB

---

<table>
<tbody>
<tr>
<td width="180"><a href="https://www.packyapi.com/register?aff=cliproxyapi"><img src="./assets/packycode.png" alt="PackyCode" width="150"></a></td>
<td>Thanks to PackyCode for sponsoring this project! PackyCode is a reliable and efficient API relay service provider, offering relay services for Claude Code, Codex, Gemini, and more. PackyCode provides special discounts for our software users: register using <a href="https://www.packyapi.com/register?aff=cliproxyapi">this link</a> and enter the "cliproxyapi" promo code during recharge to get 10% off.</td>
</tr>
<tr>
<td width="180"><a href="https://cubence.com/signup?code=CLIPROXYAPI&source=cpa"><img src="./assets/cubence.png" alt="Cubence" width="150"></a></td>
<td>Thanks to Cubence for sponsoring this project! Cubence is a reliable and efficient API relay service provider, offering relay services for Claude Code, Codex, Gemini, and more. Cubence provides special discounts for our software users: register using <a href="https://cubence.com/signup?code=CLIPROXYAPI&source=cpa">this link</a> and enter the "CLIPROXYAPI" promo code during recharge to get 10% off.</td>
</tr>
</tbody>
</table>

## Overview

- OpenAI/Gemini/Claude compatible API endpoints for CLI models
- OpenAI Codex support (GPT models) via OAuth login
- Claude Code support via OAuth login
- Qwen Code support via OAuth login
- Gemini Web support via cookie-based login
- iFlow support via OAuth login
- Amp CLI and IDE extensions support with provider routing
- Streaming and non-streaming responses
- Function calling/tools support
- Multimodal input support (text and images)
- Multiple accounts with round-robin load balancing (Gemini, OpenAI, Claude, Qwen and iFlow)
- Simple CLI authentication flows (Gemini, OpenAI, Claude, Qwen and iFlow)
- Generative Language API Key support
- AI Studio Build multi-account load balancing
- Gemini CLI multi-account load balancing
- Claude Code multi-account load balancing
- Qwen Code multi-account load balancing
- iFlow multi-account load balancing
- OpenAI Codex multi-account load balancing
- OpenAI-compatible upstream providers via config (e.g., OpenRouter)
- Reusable Go SDK for embedding the proxy (see `docs/sdk-usage.md`)

## Getting Started

### Prerequisites

- Go 1.24 or higher
- A Google account with access to Gemini CLI models (optional)
- An OpenAI account for Codex/GPT access (optional)
- An Anthropic account for Claude Code access (optional)
- A Qwen Chat account for Qwen Code access (optional)

### Building from Source

1. Clone the repository:
   ```bash
   git clone https://github.com/luispater/CLIProxyAPI.git
   cd CLIProxyAPI
   ```

2. Build the application:

   Linux, macOS:
   ```bash
   go build -o cli-proxy-api ./cmd/server
   ```
   Windows:
   ```bash
   go build -o cli-proxy-api.exe ./cmd/server
   ```

## Usage

### Authentication

You can authenticate for Gemini, OpenAI, Claude, and/or Qwen. All can coexist in the same `auth-dir` and will be load balanced.

- Gemini (Google):
  ```bash
  ./cli-proxy-api --login
  ```
  If you are an existing Gemini Code user, you may need to specify a project ID:
  ```bash
  ./cli-proxy-api --login --project_id <your_project_id>
  ```
  Options: add `--no-browser` to print the login URL instead of opening a browser. The local OAuth callback uses port `8085`.

- Gemini Web (via Cookies):
  This method authenticates by simulating a browser, using cookies obtained from the Gemini website.
  ```bash
  ./cli-proxy-api --gemini-web-auth
  ```
  You will be prompted to enter your `__Secure-1PSID` and `__Secure-1PSIDTS` values. Please retrieve these cookies from your browser's developer tools.

- OpenAI (Codex/GPT via OAuth):
  ```bash
  ./cli-proxy-api --codex-login
  ```
  Options: add `--no-browser` to print the login URL instead of opening a browser. The local OAuth callback uses port `1455`.

- Claude (Anthropic via OAuth):
  ```bash
  ./cli-proxy-api --claude-login
  ```
  Options: add `--no-browser` to print the login URL instead of opening a browser. The local OAuth callback uses port `54545`.

- Qwen (Qwen Chat via OAuth):
  ```bash
  ./cli-proxy-api --qwen-login
  ```
  Options: add `--no-browser` to print the login URL instead of opening a browser. Uses Qwen Chat's OAuth device flow.

### Starting the Server

Once authenticated, start the server:

```bash
./cli-proxy-api
```

By default, the server runs on port 8317.
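
A quick way to confirm the proxy is up is to list the models it exposes (a sketch; add an `Authorization` header if you have configured inbound API keys):

```bash
# List the models the proxy currently serves; a JSON model list means the server is running.
curl http://localhost:8317/v1/models
```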

### API Endpoints

#### List Models

```
GET http://localhost:8317/v1/models
```

#### Chat Completions

```
POST http://localhost:8317/v1/chat/completions
```

Request body example:

```json
{
  "model": "gemini-2.5-pro",
  "messages": [
    {
      "role": "user",
      "content": "Hello, how are you?"
    }
  ],
  "stream": true
}
```

Notes:
- Use a `gemini-*` model for Gemini (e.g., "gemini-2.5-pro"), a `gpt-*` model for OpenAI (e.g., "gpt-5"), a `claude-*` model for Claude (e.g., "claude-3-5-sonnet-20241022"), or a `qwen-*` model for Qwen (e.g., "qwen3-coder-plus"). The proxy will route to the correct provider automatically.

#### Claude Messages (SSE-compatible)

```
POST http://localhost:8317/v1/messages
```
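
For instance, the messages endpoint can be exercised directly with curl (a sketch; the model name and `max_tokens` value are only examples):

```bash
# Example call to the Anthropic-compatible messages endpoint served by the proxy.
curl -X POST http://localhost:8317/v1/messages \
  -H 'Content-Type: application/json' \
  -d '{
    "model": "claude-3-5-sonnet-20241022",
    "messages": [{"role": "user", "content": "Hello, how are you?"}],
    "max_tokens": 1000
  }'
```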

### Using with OpenAI Libraries

You can use this proxy with any OpenAI-compatible library by setting the base URL to your local server:

#### Python (with OpenAI library)

```python
from openai import OpenAI

client = OpenAI(
    api_key="dummy",  # Not used but required
    base_url="http://localhost:8317/v1"
)

# Gemini example
gemini = client.chat.completions.create(
    model="gemini-2.5-pro",
    messages=[{"role": "user", "content": "Hello, how are you?"}]
)

# Codex/GPT example
gpt = client.chat.completions.create(
    model="gpt-5",
    messages=[{"role": "user", "content": "Summarize this project in one sentence."}]
)

# Claude example (using messages endpoint)
import requests
claude_response = requests.post(
    "http://localhost:8317/v1/messages",
    json={
        "model": "claude-3-5-sonnet-20241022",
        "messages": [{"role": "user", "content": "Summarize this project in one sentence."}],
        "max_tokens": 1000
    }
)

print(gemini.choices[0].message.content)
print(gpt.choices[0].message.content)
print(claude_response.json())
```

#### JavaScript/TypeScript

```javascript
import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: 'dummy', // Not used but required
  baseURL: 'http://localhost:8317/v1',
});

// Gemini
const gemini = await openai.chat.completions.create({
  model: 'gemini-2.5-pro',
  messages: [{ role: 'user', content: 'Hello, how are you?' }],
});

// Codex/GPT
const gpt = await openai.chat.completions.create({
  model: 'gpt-5',
  messages: [{ role: 'user', content: 'Summarize this project in one sentence.' }],
});

// Claude example (using messages endpoint)
const claudeResponse = await fetch('http://localhost:8317/v1/messages', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    model: 'claude-3-5-sonnet-20241022',
    messages: [{ role: 'user', content: 'Summarize this project in one sentence.' }],
    max_tokens: 1000
  })
});

console.log(gemini.choices[0].message.content);
console.log(gpt.choices[0].message.content);
console.log(await claudeResponse.json());
```

## Supported Models

- gemini-2.5-pro
- gemini-2.5-flash
- gemini-2.5-flash-lite
- gpt-5
- gpt-5-codex
- claude-opus-4-1-20250805
- claude-opus-4-20250514
- claude-sonnet-4-20250514
- claude-3-7-sonnet-20250219
- claude-3-5-haiku-20241022
- qwen3-coder-plus
- qwen3-coder-flash
- Gemini models auto-switch to preview variants when needed

## Configuration

The server uses a YAML configuration file (`config.yaml`) located in the project root directory by default. You can specify a different configuration file path using the `--config` flag:

```bash
./cli-proxy-api --config /path/to/your/config.yaml
```

### Configuration Options

| Parameter | Type | Default | Description |
|---|---|---|---|
| `port` | integer | 8317 | The port number on which the server will listen. |
| `auth-dir` | string | "~/.cli-proxy-api" | Directory where authentication tokens are stored. Supports using `~` for the home directory. If you use Windows, please set the directory like this: `C:/cli-proxy-api/` |
| `proxy-url` | string | "" | Proxy URL. Supports socks5/http/https protocols. Example: socks5://user:pass@192.168.1.1:1080/ |
| `request-retry` | integer | 0 | Number of times to retry a request. Retries will occur if the HTTP response code is 403, 408, 500, 502, 503, or 504. |
| `remote-management.allow-remote` | boolean | false | Whether to allow remote (non-localhost) access to the management API. If false, only localhost can access. A management key is still required for localhost. |
| `remote-management.secret-key` | string | "" | Management key. If a plaintext value is provided, it will be hashed on startup using bcrypt and persisted back to the config file. If empty, the entire management API is disabled (404). |
| `quota-exceeded` | object | {} | Configuration for handling quota exceeded. |
| `quota-exceeded.switch-project` | boolean | true | Whether to automatically switch to another project when a quota is exceeded. |
| `quota-exceeded.switch-preview-model` | boolean | true | Whether to automatically switch to a preview model when a quota is exceeded. |
| `debug` | boolean | false | Enable debug mode for verbose logging. |
| `auth` | object | {} | Request authentication configuration. |
| `auth.providers` | object[] | [] | Authentication providers. Includes built-in `config-api-key` for inline keys. |
| `auth.providers.*.name` | string | "" | Provider instance name. |
| `auth.providers.*.type` | string | "" | Provider implementation identifier (for example `config-api-key`). |
| `auth.providers.*.api-keys` | string[] | [] | Inline API keys consumed by the `config-api-key` provider. |
| `api-keys` | string[] | [] | Legacy shorthand for inline API keys. Values are mirrored into the `config-api-key` provider for backwards compatibility. |
| `generative-language-api-key` | string[] | [] | List of Generative Language API keys. |
| `codex-api-key` | object[] | [] | List of Codex API keys. |
| `codex-api-key.api-key` | string | "" | Codex API key. |
| `codex-api-key.base-url` | string | "" | Custom Codex API endpoint, if you use a third-party API endpoint. |
| `claude-api-key` | object[] | [] | List of Claude API keys. |
| `claude-api-key.api-key` | string | "" | Claude API key. |
| `claude-api-key.base-url` | string | "" | Custom Claude API endpoint, if you use a third-party API endpoint. |
| `openai-compatibility` | object[] | [] | Upstream OpenAI-compatible providers configuration (name, base-url, api-keys, models). |
| `openai-compatibility.*.name` | string | "" | The name of the provider. It will be used in the user agent and other places. |
| `openai-compatibility.*.base-url` | string | "" | The base URL of the provider. |
| `openai-compatibility.*.api-keys` | string[] | [] | The API keys for the provider. Add multiple keys if needed. Omit if unauthenticated access is allowed. |
| `openai-compatibility.*.models` | object[] | [] | The models supported by the provider. |
| `openai-compatibility.*.models.*.name` | string | "" | The actual model name. |
| `openai-compatibility.*.models.*.alias` | string | "" | The alias used in the API. |
| `gemini-web` | object | {} | Configuration specific to the Gemini Web client. |
| `gemini-web.context` | boolean | true | Enables conversation context reuse for continuous dialogue. |
| `gemini-web.code-mode` | boolean | false | Enables code mode for optimized responses in coding-related tasks. |
| `gemini-web.max-chars-per-request` | integer | 1,000,000 | The maximum number of characters to send to Gemini Web in a single request. |
| `gemini-web.disable-continuation-hint` | boolean | false | Disables the continuation hint for split prompts. |

### Example Configuration File

```yaml
# Server port
port: 8317

# Management API settings
remote-management:
  # Whether to allow remote (non-localhost) management access.
  # When false, only localhost can access management endpoints (a key is still required).
  allow-remote: false

  # Management key. If a plaintext value is provided here, it will be hashed on startup.
  # All management requests (even from localhost) require this key.
  # Leave empty to disable the Management API entirely (404 for all /v0/management routes).
  secret-key: ""

# Authentication directory (supports ~ for home directory). If you use Windows, please set the directory like this: `C:/cli-proxy-api/`
auth-dir: "~/.cli-proxy-api"

# Enable debug logging
debug: false

# Proxy URL. Supports socks5/http/https protocols. Example: socks5://user:pass@192.168.1.1:1080/
proxy-url: ""

# Number of times to retry a request. Retries will occur if the HTTP response code is 403, 408, 500, 502, 503, or 504.
request-retry: 3

# Quota exceeded behavior
quota-exceeded:
  switch-project: true # Whether to automatically switch to another project when a quota is exceeded
  switch-preview-model: true # Whether to automatically switch to a preview model when a quota is exceeded

# Gemini Web client configuration
gemini-web:
  context: true # Enable conversation context reuse
  code-mode: false # Enable code mode
  max-chars-per-request: 1000000 # Max characters per request

# Request authentication providers
auth:
  providers:
    - name: "default"
      type: "config-api-key"
      api-keys:
        - "your-api-key-1"
        - "your-api-key-2"

# API keys for official Generative Language API
generative-language-api-key:
  - "AIzaSy...01"
  - "AIzaSy...02"
  - "AIzaSy...03"
  - "AIzaSy...04"

# Codex API keys
codex-api-key:
  - api-key: "sk-atSM..."
    base-url: "https://www.example.com" # use the custom codex API endpoint

# Claude API keys
claude-api-key:
  - api-key: "sk-atSM..." # use the official claude API key, no need to set the base url
  - api-key: "sk-atSM..."
    base-url: "https://www.example.com" # use the custom claude API endpoint

# OpenAI compatibility providers
openai-compatibility:
  - name: "openrouter" # The name of the provider; it will be used in the user agent and other places.
    base-url: "https://openrouter.ai/api/v1" # The base URL of the provider.
    api-keys: # The API keys for the provider. Add multiple keys if needed. Omit if unauthenticated access is allowed.
      - "sk-or-v1-...b780"
      - "sk-or-v1-...b781"
    models: # The models supported by the provider.
      - name: "moonshotai/kimi-k2:free" # The actual model name.
        alias: "kimi-k2" # The alias used in the API.
```

### OpenAI Compatibility Providers

Configure upstream OpenAI-compatible providers (e.g., OpenRouter) via `openai-compatibility`.

- name: provider identifier used internally
- base-url: provider base URL
- api-keys: optional list of API keys (omit if provider allows unauthenticated requests)
- models: list of mappings from upstream model `name` to local `alias`

Example:

```yaml
openai-compatibility:
  - name: "openrouter"
    base-url: "https://openrouter.ai/api/v1"
    api-keys:
      - "sk-or-v1-...b780"
      - "sk-or-v1-...b781"
    models:
      - name: "moonshotai/kimi-k2:free"
        alias: "kimi-k2"
```

Usage:

Call OpenAI's endpoint `/v1/chat/completions` with `model` set to the alias (e.g., `kimi-k2`). The proxy routes to the configured provider/model automatically.
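
For example, a request against the alias defined above might look like this (a sketch; add an `Authorization` header if inbound API keys are configured):

```bash
# Call the aliased OpenRouter model through the proxy's OpenAI-compatible endpoint.
curl -X POST http://localhost:8317/v1/chat/completions \
  -H 'Content-Type: application/json' \
  -d '{
    "model": "kimi-k2",
    "messages": [{"role": "user", "content": "Hello, how are you?"}]
  }'
```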

Also, you may call Claude's endpoint `/v1/messages`, Gemini's `/v1beta/models/model-name:streamGenerateContent` or `/v1beta/models/model-name:generateContent`.

And you can always use Gemini CLI with `CODE_ASSIST_ENDPOINT` set to `http://127.0.0.1:8317` for these OpenAI-compatible providers' models.

### Authentication Directory

The `auth-dir` parameter specifies where authentication tokens are stored. When you run the login command, the application will create JSON files in this directory containing the authentication tokens for your Google accounts. Multiple accounts can be used for load balancing.

### Request Authentication Providers

Configure inbound authentication through the `auth.providers` section. The built-in `config-api-key` provider works with inline keys:

```yaml
auth:
  providers:
    - name: default
      type: config-api-key
      api-keys:
        - your-api-key-1
```

Clients should send requests with an `Authorization: Bearer your-api-key-1` header (or `X-Goog-Api-Key`, `X-Api-Key`, or `?key=` as before). The legacy top-level `api-keys` array is still accepted and automatically synced to the default provider for backwards compatibility.
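
A minimal authenticated request then looks like this (a sketch; `your-api-key-1` is the placeholder key from the config above):

```bash
# Any of the documented header styles works; Authorization: Bearer is shown here.
curl -H 'Authorization: Bearer your-api-key-1' http://localhost:8317/v1/models
```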

### Official Generative Language API

The `generative-language-api-key` parameter allows you to define a list of API keys that can be used to authenticate requests to the official Generative Language API.

## Hot Reloading

The server watches the config file and the `auth-dir` for changes and reloads clients and settings automatically. You can add or remove Gemini/OpenAI token JSON files while the server is running; no restart is required.
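
For instance, dropping a freshly generated token file into the auth directory is picked up without a restart (a sketch using the default `auth-dir`; `acc2.json` is a hypothetical file name):

```bash
# Copy a new account token into the watched directory; the proxy loads it automatically.
cp ./acc2.json ~/.cli-proxy-api/
```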
|
||||
|
||||
## Gemini CLI with multiple account load balancing
|
||||
|
||||
Start CLI Proxy API server, and then set the `CODE_ASSIST_ENDPOINT` environment variable to the URL of the CLI Proxy API server.
|
||||
|
||||
```bash
|
||||
export CODE_ASSIST_ENDPOINT="http://127.0.0.1:8317"
|
||||
```
|
||||
|
||||
The server will relay the `loadCodeAssist`, `onboardUser`, and `countTokens` requests. And automatically load balance the text generation requests between the multiple accounts.
|
||||
|
||||
> [!NOTE]
|
||||
> This feature only allows local access because there is currently no way to authenticate the requests.
|
||||
> 127.0.0.1 is hardcoded for load balancing.
|
||||
|
||||
## Claude Code with multiple account load balancing
|
||||
|
||||
Start CLI Proxy API server, and then set the `ANTHROPIC_BASE_URL`, `ANTHROPIC_AUTH_TOKEN`, `ANTHROPIC_MODEL`, `ANTHROPIC_SMALL_FAST_MODEL` environment variables.
|
||||
|
||||
Using Gemini models:
|
||||
```bash
|
||||
export ANTHROPIC_BASE_URL=http://127.0.0.1:8317
|
||||
export ANTHROPIC_AUTH_TOKEN=sk-dummy
|
||||
export ANTHROPIC_MODEL=gemini-2.5-pro
|
||||
export ANTHROPIC_SMALL_FAST_MODEL=gemini-2.5-flash
|
||||
```
|
||||
|
||||
Using OpenAI GPT 5 models:
|
||||
```bash
|
||||
export ANTHROPIC_BASE_URL=http://127.0.0.1:8317
|
||||
export ANTHROPIC_AUTH_TOKEN=sk-dummy
|
||||
export ANTHROPIC_MODEL=gpt-5
|
||||
export ANTHROPIC_SMALL_FAST_MODEL=gpt-5-minimal
|
||||
```
|
||||
|
||||
Using OpenAI GPT 5 Codex models:
|
||||
```bash
|
||||
export ANTHROPIC_BASE_URL=http://127.0.0.1:8317
|
||||
export ANTHROPIC_AUTH_TOKEN=sk-dummy
|
||||
export ANTHROPIC_MODEL=gpt-5-codex
|
||||
export ANTHROPIC_SMALL_FAST_MODEL=gpt-5-codex-low
|
||||
```
|
||||
|
||||
Using Claude models:
|
||||
```bash
|
||||
export ANTHROPIC_BASE_URL=http://127.0.0.1:8317
|
||||
export ANTHROPIC_AUTH_TOKEN=sk-dummy
|
||||
export ANTHROPIC_MODEL=claude-sonnet-4-20250514
|
||||
export ANTHROPIC_SMALL_FAST_MODEL=claude-3-5-haiku-20241022
|
||||
```
|
||||
|
||||
Using Qwen models:
|
||||
```bash
|
||||
export ANTHROPIC_BASE_URL=http://127.0.0.1:8317
|
||||
export ANTHROPIC_AUTH_TOKEN=sk-dummy
|
||||
export ANTHROPIC_MODEL=qwen3-coder-plus
|
||||
export ANTHROPIC_SMALL_FAST_MODEL=qwen3-coder-flash
|
||||
```
|
||||
|
||||
## Codex with multiple account load balancing
|
||||
|
||||
Start CLI Proxy API server, and then edit the `~/.codex/config.toml` and `~/.codex/auth.json` files.
|
||||
|
||||
config.toml:
|
||||
```toml
|
||||
model_provider = "cliproxyapi"
|
||||
model = "gpt-5-codex" # Or gpt-5, you can also use any of the models that we support.
|
||||
model_reasoning_effort = "high"
|
||||
|
||||
[model_providers.cliproxyapi]
|
||||
name = "cliproxyapi"
|
||||
base_url = "http://127.0.0.1:8317/v1"
|
||||
wire_api = "responses"
|
||||
```
|
||||
|
||||
auth.json:
|
||||
```json
|
||||
{
|
||||
"OPENAI_API_KEY": "sk-dummy"
|
||||
}
|
||||
```
|
||||
|
||||
## Run with Docker
|
||||
|
||||
Run the following command to login (Gemini OAuth on port 8085):
|
||||
|
||||
```bash
|
||||
docker run --rm -p 8085:8085 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --login
|
||||
```
|
||||
|
||||
Run the following command to login (Gemini Web Cookies):
|
||||
|
||||
```bash
|
||||
docker run -it --rm -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --gemini-web-auth
|
||||
```
|
||||
|
||||
Run the following command to login (OpenAI OAuth on port 1455):
|
||||
|
||||
```bash
|
||||
docker run --rm -p 1455:1455 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --codex-login
|
||||
```
|
||||
|
||||
Run the following command to logi (Claude OAuth on port 54545):
|
||||
|
||||
```bash
|
||||
docker run -rm -p 54545:54545 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --claude-login
|
||||
```
|
||||
|
||||
Run the following command to login (Qwen OAuth):
|
||||
|
||||
```bash
|
||||
docker run -it -rm -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --qwen-login
|
||||
```
|
||||
|
||||
Run the following command to start the server:
|
||||
|
||||
```bash
|
||||
docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest
|
||||
```
|
||||
|
||||
## Run with Docker Compose
|
||||
|
||||
1. Clone the repository and navigate into the directory:
|
||||
```bash
|
||||
git clone https://github.com/luispater/CLIProxyAPI.git
|
||||
cd CLIProxyAPI
|
||||
```
|
||||
|
||||
2. Prepare the configuration file:
|
||||
Create a `config.yaml` file by copying the example and customize it to your needs.
|
||||
```bash
|
||||
cp config.example.yaml config.yaml
|
||||
```
|
||||
*(Note for Windows users: You can use `copy config.example.yaml config.yaml` in CMD or PowerShell.)*
|
||||
|
||||
3. Start the service:
|
||||
- **For most users (recommended):**
|
||||
Run the following command to start the service using the pre-built image from Docker Hub. The service will run in the background.
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
- **For advanced users:**
|
||||
If you have modified the source code and need to build a new image, use the interactive helper scripts:
|
||||
- For Windows (PowerShell):
|
||||
```powershell
|
||||
.\docker-build.ps1
|
||||
```
|
||||
- For Linux/macOS:
|
||||
```bash
|
||||
bash docker-build.sh
|
||||
```
|
||||
The script will prompt you to choose how to run the application:
|
||||
- **Option 1: Run using Pre-built Image (Recommended)**: Pulls the latest official image from the registry and starts the container. This is the easiest way to get started.
|
||||
- **Option 2: Build from Source and Run (For Developers)**: Builds the image from the local source code, tags it as `cli-proxy-api:local`, and then starts the container. This is useful if you are making changes to the source code.
|
||||
|
||||
4. To authenticate with providers, run the login command inside the container:
|
||||
- **Gemini**:
|
||||
```bash
|
||||
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --login
|
||||
```
|
||||
- **Gemini Web**:
|
||||
```bash
|
||||
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI --gemini-web-auth
|
||||
```
|
||||
- **OpenAI (Codex)**:
|
||||
```bash
|
||||
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --codex-login
|
||||
```
|
||||
- **Claude**:
|
||||
```bash
|
||||
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --claude-login
|
||||
```
|
||||
- **Qwen**:
|
||||
```bash
|
||||
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --qwen-login
|
||||
```
|
||||
|
||||
5. To view the server logs:
|
||||
```bash
|
||||
docker compose logs -f
|
||||
```
|
||||
|
||||
6. To stop the application:
|
||||
```bash
|
||||
docker compose down
|
||||
```
|
||||
CLIProxyAPI Guides: [https://help.router-for.me/](https://help.router-for.me/)
|
||||
|
||||
## Management API
|
||||
|
||||
see [MANAGEMENT_API.md](MANAGEMENT_API.md)
|
||||
see [MANAGEMENT_API.md](https://help.router-for.me/management/api)
|
||||
|
||||
## Amp CLI Support
|
||||
|
||||
CLIProxyAPI includes integrated support for [Amp CLI](https://ampcode.com) and Amp IDE extensions, enabling you to use your Google/ChatGPT/Claude OAuth subscriptions with Amp's coding tools:
|
||||
|
||||
- Provider route aliases for Amp's API patterns (`/api/provider/{provider}/v1...`)
|
||||
- Management proxy for OAuth authentication and account features
|
||||
- Smart model fallback with automatic routing
|
||||
- **Model mapping** to route unavailable models to alternatives (e.g., `claude-opus-4.5` → `claude-sonnet-4`)
|
||||
- Security-first design with localhost-only management endpoints
|
||||
|
||||
**→ [Complete Amp CLI Integration Guide](https://help.router-for.me/agent-client/amp-cli.html)**
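
As a rough illustration of the provider route aliases mentioned above, a client that normally talks to a provider directly can point at the proxy instead. The provider segment and payload below are assumptions made for this sketch; see the integration guide linked above for the authoritative paths.

```bash
# Hypothetical example: call the Claude messages API through an Amp-style provider alias route.
curl -s http://127.0.0.1:8317/api/provider/anthropic/v1/messages \
  -H "Content-Type: application/json" \
  -d '{"model": "claude-sonnet-4-20250514", "max_tokens": 256, "messages": [{"role": "user", "content": "hello"}]}'
```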
|
||||
|
||||
## SDK Docs
|
||||
|
||||
- Usage: `docs/sdk-usage.md` (中文: `docs/sdk-usage_CN.md`)
|
||||
- Advanced (executors & translators): `docs/sdk-advanced.md` (中文: `docs/sdk-advanced_CN.md`)
|
||||
- Usage: [docs/sdk-usage.md](docs/sdk-usage.md)
|
||||
- Advanced (executors & translators): [docs/sdk-advanced.md](docs/sdk-advanced.md)
|
||||
- Access: [docs/sdk-access.md](docs/sdk-access.md)
|
||||
- Watcher: [docs/sdk-watcher.md](docs/sdk-watcher.md)
|
||||
- Custom Provider Example: `examples/custom-provider`
|
||||
|
||||
## Contributing
|
||||
|
||||
@@ -639,6 +94,68 @@ Contributions are welcome! Please feel free to submit a Pull Request.
|
||||
4. Push to the branch (`git push origin feature/amazing-feature`)
|
||||
5. Open a Pull Request
|
||||
|
||||
## Who is with us?
|
||||
|
||||
Those projects are based on CLIProxyAPI:
|
||||
|
||||
### [vibeproxy](https://github.com/automazeio/vibeproxy)
|
||||
|
||||
Native macOS menu bar app to use your Claude Code & ChatGPT subscriptions with AI coding tools - no API keys needed
|
||||
|
||||
### [Subtitle Translator](https://github.com/VjayC/SRT-Subtitle-Translator-Validator)
|
||||
|
||||
Browser-based tool to translate SRT subtitles using your Gemini subscription via CLIProxyAPI with automatic validation/error correction - no API keys needed
|
||||
|
||||
### [CCS (Claude Code Switch)](https://github.com/kaitranntt/ccs)
|
||||
|
||||
CLI wrapper for instant switching between multiple Claude accounts and alternative models (Gemini, Codex, Antigravity) via CLIProxyAPI OAuth - no API keys needed
|
||||
|
||||
### [ProxyPal](https://github.com/heyhuynhgiabuu/proxypal)
|
||||
|
||||
Native macOS GUI for managing CLIProxyAPI: configure providers, model mappings, and endpoints via OAuth - no API keys needed.
|
||||
|
||||
### [Quotio](https://github.com/nguyenphutrong/quotio)
|
||||
|
||||
Native macOS menu bar app that unifies Claude, Gemini, OpenAI, Qwen, and Antigravity subscriptions with real-time quota tracking and smart auto-failover for AI coding tools like Claude Code, OpenCode, and Droid - no API keys needed.
|
||||
|
||||
### [CodMate](https://github.com/loocor/CodMate)
|
||||
|
||||
Native macOS SwiftUI app for managing CLI AI sessions (Codex, Claude Code, Gemini CLI) with unified provider management, Git review, project organization, global search, and terminal integration. Integrates CLIProxyAPI to provide OAuth authentication for Codex, Claude, Gemini, Antigravity, and Qwen Code, with built-in and third-party provider rerouting through a single proxy endpoint - no API keys needed for OAuth providers.
|
||||
|
||||
### [ProxyPilot](https://github.com/Finesssee/ProxyPilot)
|
||||
|
||||
Windows-native CLIProxyAPI fork with TUI, system tray, and multi-provider OAuth for AI coding tools - no API keys needed.
|
||||
|
||||
### [Claude Proxy VSCode](https://github.com/uzhao/claude-proxy-vscode)
|
||||
|
||||
VSCode extension for quick switching between Claude Code models, featuring integrated CLIProxyAPI as its backend with automatic background lifecycle management.
|
||||
|
||||
### [ZeroLimit](https://github.com/0xtbug/zero-limit)
|
||||
|
||||
Windows desktop app built with Tauri + React for monitoring AI coding assistant quotas via CLIProxyAPI. Track usage across Gemini, Claude, OpenAI Codex, and Antigravity accounts with real-time dashboard, system tray integration, and one-click proxy control - no API keys needed.
|
||||
|
||||
### [CPA-XXX Panel](https://github.com/ferretgeek/CPA-X)
|
||||
|
||||
A lightweight web admin panel for CLIProxyAPI with health checks, resource monitoring, real-time logs, auto-update, request statistics and pricing display. Supports one-click installation and systemd service.
|
||||
|
||||
### [CLIProxyAPI Tray](https://github.com/kitephp/CLIProxyAPI_Tray)
|
||||
|
||||
A Windows tray application implemented using PowerShell scripts, without relying on any third-party libraries. The main features include: automatic creation of shortcuts, silent running, password management, channel switching (Main / Plus), and automatic downloading and updating.
|
||||
|
||||
> [!NOTE]
|
||||
> If you developed a project based on CLIProxyAPI, please open a PR to add it to this list.
|
||||
|
||||
## More choices
|
||||
|
||||
Those projects are ports of CLIProxyAPI or inspired by it:
|
||||
|
||||
### [9Router](https://github.com/decolua/9router)
|
||||
|
||||
A Next.js implementation inspired by CLIProxyAPI, easy to install and use, built from scratch with format translation (OpenAI/Claude/Gemini/Ollama), combo system with auto-fallback, multi-account management with exponential backoff, a Next.js web dashboard, and support for CLI tools (Cursor, Claude Code, Cline, RooCode) - no API keys needed.
|
||||
|
||||
> [!NOTE]
|
||||
> If you have developed a port of CLIProxyAPI or a project inspired by it, please open a PR to add it to this list.
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
|
||||
|
||||
724
README_CN.md
@@ -1,23 +1,3 @@
|
||||
# 写给所有中国网友的
|
||||
|
||||
项目前期的确有很多用户在使用中遇到各种各样的奇怪问题,大部分是因为配置问题或我的说明文档不全导致的。
|
||||
|
||||
对说明文档我已经尽可能的修补,有些重要的地方我甚至已经写到了打包的配置文件里。
|
||||
|
||||
已经写在 README 中的功能,都是**可用**的,经过**验证**的,并且我自己**每天**都在使用的。
|
||||
|
||||
可能在某些场景中使用效果并不是很出色,但那基本上是模型和工具的原因。比如用 Claude Code 的时候,有的模型就无法正确使用工具;再比如 Gemini,在 Claude Code 和 Codex 下使用得相当扭捏,有时能完成大部分工作,有时候却只说不做。
|
||||
|
||||
目前来说,Claude 和 GPT-5 是在各种第三方 CLI 工具中表现最好的模型,我自己也是用多个账号做负载均衡使用的。
|
||||
|
||||
实事求是地说,最初的几个版本我根本就没有中文文档,至今所有文档也都是先用英文更新,然后让 Gemini 翻译成中文的。但无论如何都不会出现中文文档无法理解的问题,因为所有的中英文文档我都再三校对过,发现未及时更新的地方也都会尽快修正。
|
||||
|
||||
最后,烦请在发 Issue 之前请认真阅读这篇文档。
|
||||
|
||||
另外中文需要交流的用户可以加 QQ 群:188637136
|
||||
|
||||
或 Telegram 群:https://t.me/CLIProxyAPI
|
||||
|
||||
# CLI 代理 API
|
||||
|
||||
[English](README.md) | 中文
|
||||
@@ -28,7 +8,31 @@
|
||||
|
||||
您可以使用本地或多账户的CLI方式,通过任何与 OpenAI(包括Responses)/Gemini/Claude 兼容的客户端和SDK进行访问。
|
||||
|
||||
现已新增首个中国提供商:[Qwen Code](https://github.com/QwenLM/qwen-code)。
|
||||
## 赞助商
|
||||
|
||||
[](https://www.bigmodel.cn/claude-code?ic=RRVJPB5SII)
|
||||
|
||||
本项目由 Z智谱 提供赞助, 他们通过 GLM CODING PLAN 对本项目提供技术支持。
|
||||
|
||||
GLM CODING PLAN 是专为AI编码打造的订阅套餐,每月最低仅需20元,即可在十余款主流AI编码工具如 Claude Code、Cline、Roo Code 中畅享智谱旗舰模型GLM-4.7,为开发者提供顶尖的编码体验。
|
||||
|
||||
智谱AI为本软件提供了特别优惠,使用以下链接购买可以享受九折优惠:https://www.bigmodel.cn/claude-code?ic=RRVJPB5SII
|
||||
|
||||
---
|
||||
|
||||
<table>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td width="180"><a href="https://www.packyapi.com/register?aff=cliproxyapi"><img src="./assets/packycode.png" alt="PackyCode" width="150"></a></td>
|
||||
<td>感谢 PackyCode 对本项目的赞助!PackyCode 是一家可靠高效的 API 中转服务商,提供 Claude Code、Codex、Gemini 等多种服务的中转。PackyCode 为本软件用户提供了特别优惠:使用<a href="https://www.packyapi.com/register?aff=cliproxyapi">此链接</a>注册,并在充值时输入 "cliproxyapi" 优惠码即可享受九折优惠。</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td width="180"><a href="https://cubence.com/signup?code=CLIPROXYAPI&source=cpa"><img src="./assets/cubence.png" alt="Cubence" width="150"></a></td>
|
||||
<td>感谢 Cubence 对本项目的赞助!Cubence 是一家可靠高效的 API 中转服务商,提供 Claude Code、Codex、Gemini 等多种服务的中转。Cubence 为本软件用户提供了特别优惠:使用<a href="https://cubence.com/signup?code=CLIPROXYAPI&source=cpa">此链接</a>注册,并在充值时输入 "CLIPROXYAPI" 优惠码即可享受九折优惠。</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
|
||||
## 功能特性
|
||||
|
||||
@@ -36,607 +40,47 @@
|
||||
- 新增 OpenAI Codex(GPT 系列)支持(OAuth 登录)
|
||||
- 新增 Claude Code 支持(OAuth 登录)
|
||||
- 新增 Qwen Code 支持(OAuth 登录)
|
||||
- 新增 Gemini Web 支持(通过 Cookie 登录)
|
||||
- 新增 iFlow 支持(OAuth 登录)
|
||||
- 支持流式与非流式响应
|
||||
- 函数调用/工具支持
|
||||
- 多模态输入(文本、图片)
|
||||
- 多账户支持与轮询负载均衡(Gemini、OpenAI、Claude 与 Qwen)
|
||||
- 简单的 CLI 身份验证流程(Gemini、OpenAI、Claude 与 Qwen)
|
||||
- 多账户支持与轮询负载均衡(Gemini、OpenAI、Claude、Qwen 与 iFlow)
|
||||
- 简单的 CLI 身份验证流程(Gemini、OpenAI、Claude、Qwen 与 iFlow)
|
||||
- 支持 Gemini AIStudio API 密钥
|
||||
- 支持 AI Studio Build 多账户轮询
|
||||
- 支持 Gemini CLI 多账户轮询
|
||||
- 支持 Claude Code 多账户轮询
|
||||
- 支持 Qwen Code 多账户轮询
|
||||
- 支持 iFlow 多账户轮询
|
||||
- 支持 OpenAI Codex 多账户轮询
|
||||
- 通过配置接入上游 OpenAI 兼容提供商(例如 OpenRouter)
|
||||
- 可复用的 Go SDK(见 `docs/sdk-usage.md`)
|
||||
- 可复用的 Go SDK(见 `docs/sdk-usage_CN.md`)
|
||||
|
||||
## 安装
|
||||
## 新手入门
|
||||
|
||||
### 前置要求
|
||||
|
||||
- Go 1.24 或更高版本
|
||||
- 有权访问 Gemini CLI 模型的 Google 账户(可选)
|
||||
- 有权访问 OpenAI Codex/GPT 的 OpenAI 账户(可选)
|
||||
- 有权访问 Claude Code 的 Anthropic 账户(可选)
|
||||
- 有权访问 Qwen Code 的 Qwen Chat 账户(可选)
|
||||
|
||||
### 从源码构建
|
||||
|
||||
1. 克隆仓库:
|
||||
```bash
|
||||
git clone https://github.com/luispater/CLIProxyAPI.git
|
||||
cd CLIProxyAPI
|
||||
```
|
||||
|
||||
2. 构建应用程序:
|
||||
```bash
|
||||
go build -o cli-proxy-api ./cmd/server
|
||||
```
|
||||
|
||||
## 使用方法
|
||||
|
||||
### 身份验证
|
||||
|
||||
您可以分别为 Gemini、OpenAI 和 Claude 进行身份验证,三者可同时存在于同一个 `auth-dir` 中并参与负载均衡。
|
||||
|
||||
- Gemini(Google):
|
||||
```bash
|
||||
./cli-proxy-api --login
|
||||
```
|
||||
如果您是现有的 Gemini Code 用户,可能需要指定一个项目ID:
|
||||
```bash
|
||||
./cli-proxy-api --login --project_id <your_project_id>
|
||||
```
|
||||
本地 OAuth 回调端口为 `8085`。
|
||||
|
||||
选项:加上 `--no-browser` 可打印登录地址而不自动打开浏览器。本地 OAuth 回调端口为 `8085`。
|
||||
|
||||
- Gemini Web (通过 Cookie):
|
||||
此方法通过模拟浏览器行为,使用从 Gemini 网站获取的 Cookie 进行身份验证。
|
||||
```bash
|
||||
./cli-proxy-api --gemini-web-auth
|
||||
```
|
||||
程序将提示您输入 `__Secure-1PSID` 和 `__Secure-1PSIDTS` 的值。请从您的浏览器开发者工具中获取这些 Cookie。
|
||||
|
||||
- OpenAI(Codex/GPT,OAuth):
|
||||
```bash
|
||||
./cli-proxy-api --codex-login
|
||||
```
|
||||
选项:加上 `--no-browser` 可打印登录地址而不自动打开浏览器。本地 OAuth 回调端口为 `1455`。
|
||||
|
||||
- Claude(Anthropic,OAuth):
|
||||
```bash
|
||||
./cli-proxy-api --claude-login
|
||||
```
|
||||
选项:加上 `--no-browser` 可打印登录地址而不自动打开浏览器。本地 OAuth 回调端口为 `54545`。
|
||||
|
||||
- Qwen(Qwen Chat,OAuth):
|
||||
```bash
|
||||
./cli-proxy-api --qwen-login
|
||||
```
|
||||
选项:加上 `--no-browser` 可打印登录地址而不自动打开浏览器。使用 Qwen Chat 的 OAuth 设备登录流程。
|
||||
|
||||
### 启动服务器
|
||||
|
||||
身份验证完成后,启动服务器:
|
||||
|
||||
```bash
|
||||
./cli-proxy-api
|
||||
```
|
||||
|
||||
默认情况下,服务器在端口 8317 上运行。
|
||||
|
||||
### API 端点
|
||||
|
||||
#### 列出模型
|
||||
|
||||
```
|
||||
GET http://localhost:8317/v1/models
|
||||
```
|
||||
|
||||
#### 聊天补全
|
||||
|
||||
```
|
||||
POST http://localhost:8317/v1/chat/completions
|
||||
```
|
||||
|
||||
请求体示例:
|
||||
|
||||
```json
|
||||
{
|
||||
"model": "gemini-2.5-pro",
|
||||
"messages": [
|
||||
{
|
||||
"role": "user",
|
||||
"content": "你好,你好吗?"
|
||||
}
|
||||
],
|
||||
"stream": true
|
||||
}
|
||||
```
|
||||
|
||||
说明:
|
||||
- 使用 "gemini-*" 模型(例如 "gemini-2.5-pro")来调用 Gemini,使用 "gpt-*" 模型(例如 "gpt-5")来调用 OpenAI,使用 "claude-*" 模型(例如 "claude-3-5-sonnet-20241022")来调用 Claude,或者使用 "qwen-*" 模型(例如 "qwen3-coder-plus")来调用 Qwen。代理服务会自动将请求路由到相应的提供商。
|
||||
|
||||
#### Claude 消息(SSE 兼容)
|
||||
|
||||
```
|
||||
POST http://localhost:8317/v1/messages
|
||||
```
|
||||
|
||||
### 与 OpenAI 库一起使用
|
||||
|
||||
您可以通过将基础 URL 设置为本地服务器来将此代理与任何 OpenAI 兼容的库一起使用:
|
||||
|
||||
#### Python(使用 OpenAI 库)
|
||||
|
||||
```python
|
||||
from openai import OpenAI
|
||||
|
||||
client = OpenAI(
|
||||
api_key="dummy", # 不使用但必需
|
||||
base_url="http://localhost:8317/v1"
|
||||
)
|
||||
|
||||
# Gemini 示例
|
||||
gemini = client.chat.completions.create(
|
||||
model="gemini-2.5-pro",
|
||||
messages=[{"role": "user", "content": "你好,你好吗?"}]
|
||||
)
|
||||
|
||||
# Codex/GPT 示例
|
||||
gpt = client.chat.completions.create(
|
||||
model="gpt-5",
|
||||
messages=[{"role": "user", "content": "用一句话总结这个项目"}]
|
||||
)
|
||||
|
||||
# Claude 示例(使用 messages 端点)
|
||||
import requests
|
||||
claude_response = requests.post(
|
||||
"http://localhost:8317/v1/messages",
|
||||
json={
|
||||
"model": "claude-3-5-sonnet-20241022",
|
||||
"messages": [{"role": "user", "content": "用一句话总结这个项目"}],
|
||||
"max_tokens": 1000
|
||||
}
|
||||
)
|
||||
|
||||
print(gemini.choices[0].message.content)
|
||||
print(gpt.choices[0].message.content)
|
||||
print(claude_response.json())
|
||||
```
|
||||
|
||||
#### JavaScript/TypeScript
|
||||
|
||||
```javascript
|
||||
import OpenAI from 'openai';
|
||||
|
||||
const openai = new OpenAI({
|
||||
apiKey: 'dummy', // 不使用但必需
|
||||
baseURL: 'http://localhost:8317/v1',
|
||||
});
|
||||
|
||||
// Gemini
|
||||
const gemini = await openai.chat.completions.create({
|
||||
model: 'gemini-2.5-pro',
|
||||
messages: [{ role: 'user', content: '你好,你好吗?' }],
|
||||
});
|
||||
|
||||
// Codex/GPT
|
||||
const gpt = await openai.chat.completions.create({
|
||||
model: 'gpt-5',
|
||||
messages: [{ role: 'user', content: '用一句话总结这个项目' }],
|
||||
});
|
||||
|
||||
// Claude 示例(使用 messages 端点)
|
||||
const claudeResponse = await fetch('http://localhost:8317/v1/messages', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
model: 'claude-3-5-sonnet-20241022',
|
||||
messages: [{ role: 'user', content: '用一句话总结这个项目' }],
|
||||
max_tokens: 1000
|
||||
})
|
||||
});
|
||||
|
||||
console.log(gemini.choices[0].message.content);
|
||||
console.log(gpt.choices[0].message.content);
|
||||
console.log(await claudeResponse.json());
|
||||
```
|
||||
|
||||
## 支持的模型
|
||||
|
||||
- gemini-2.5-pro
|
||||
- gemini-2.5-flash
|
||||
- gemini-2.5-flash-lite
|
||||
- gpt-5
|
||||
- gpt-5-codex
|
||||
- claude-opus-4-1-20250805
|
||||
- claude-opus-4-20250514
|
||||
- claude-sonnet-4-20250514
|
||||
- claude-3-7-sonnet-20250219
|
||||
- claude-3-5-haiku-20241022
|
||||
- qwen3-coder-plus
|
||||
- qwen3-coder-flash
|
||||
- Gemini 模型在需要时自动切换到对应的 preview 版本
|
||||
|
||||
## 配置
|
||||
|
||||
服务器默认使用位于项目根目录的 YAML 配置文件(`config.yaml`)。您可以使用 `--config` 标志指定不同的配置文件路径:
|
||||
|
||||
```bash
|
||||
./cli-proxy-api --config /path/to/your/config.yaml
|
||||
```
|
||||
|
||||
### 配置选项
|
||||
|
||||
| 参数 | 类型 | 默认值 | 描述 |
|
||||
|-----------------------------------------|----------|--------------------|---------------------------------------------------------------------|
|
||||
| `port` | integer | 8317 | 服务器将监听的端口号。 |
|
||||
| `auth-dir` | string | "~/.cli-proxy-api" | 存储身份验证令牌的目录。支持使用 `~` 来表示主目录。如果你使用Windows,建议设置成`C:/cli-proxy-api/`。 |
|
||||
| `proxy-url` | string | "" | 代理URL。支持socks5/http/https协议。例如:socks5://user:pass@192.168.1.1:1080/ |
|
||||
| `request-retry` | integer | 0 | 请求重试次数。如果HTTP响应码为403、408、500、502、503或504,将会触发重试。 |
|
||||
| `remote-management.allow-remote` | boolean | false | 是否允许远程(非localhost)访问管理接口。为false时仅允许本地访问;本地访问同样需要管理密钥。 |
|
||||
| `remote-management.secret-key` | string | "" | 管理密钥。若配置为明文,启动时会自动进行bcrypt加密并写回配置文件。若为空,管理接口整体不可用(404)。 |
|
||||
| `quota-exceeded` | object | {} | 用于处理配额超限的配置。 |
|
||||
| `quota-exceeded.switch-project` | boolean | true | 当配额超限时,是否自动切换到另一个项目。 |
|
||||
| `quota-exceeded.switch-preview-model` | boolean | true | 当配额超限时,是否自动切换到预览模型。 |
|
||||
| `debug` | boolean | false | 启用调试模式以获取详细日志。 |
|
||||
| `auth` | object | {} | 请求鉴权配置。 |
|
||||
| `auth.providers` | object[] | [] | 鉴权提供方列表,内置 `config-api-key` 支持内联密钥。 |
|
||||
| `auth.providers.*.name` | string | "" | 提供方实例名称。 |
|
||||
| `auth.providers.*.type` | string | "" | 提供方实现标识(例如 `config-api-key`)。 |
|
||||
| `auth.providers.*.api-keys` | string[] | [] | `config-api-key` 提供方使用的内联密钥。 |
|
||||
| `api-keys` | string[] | [] | 兼容旧配置的简写,会自动同步到默认 `config-api-key` 提供方。 |
|
||||
| `generative-language-api-key` | string[] | [] | 生成式语言API密钥列表。 |
|
||||
| `codex-api-key` | object | {} | Codex API密钥列表。 |
|
||||
| `codex-api-key.api-key` | string | "" | Codex API密钥。 |
|
||||
| `codex-api-key.base-url` | string | "" | 自定义的Codex API端点 |
|
||||
| `claude-api-key` | object | {} | Claude API密钥列表。 |
|
||||
| `claude-api-key.api-key` | string | "" | Claude API密钥。 |
|
||||
| `claude-api-key.base-url` | string | "" | 自定义的Claude API端点,如果您使用第三方的API端点。 |
|
||||
| `openai-compatibility` | object[] | [] | 上游OpenAI兼容提供商的配置(名称、基础URL、API密钥、模型)。 |
|
||||
| `openai-compatibility.*.name` | string | "" | 提供商的名称。它将被用于用户代理(User Agent)和其他地方。 |
|
||||
| `openai-compatibility.*.base-url` | string | "" | 提供商的基础URL。 |
|
||||
| `openai-compatibility.*.api-keys` | string[] | [] | 提供商的API密钥。如果需要,可以添加多个密钥。如果允许未经身份验证的访问,则可以省略。 |
|
||||
| `openai-compatibility.*.models` | object[] | [] | 实际的模型名称。 |
|
||||
| `openai-compatibility.*.models.*.name` | string | "" | 提供商支持的模型。 |
|
||||
| `openai-compatibility.*.models.*.alias` | string | "" | 在API中使用的别名。 |
|
||||
| `gemini-web` | object | {} | Gemini Web 客户端的特定配置。 |
|
||||
| `gemini-web.context` | boolean | true | 是否启用会话上下文重用,以实现连续对话。 |
|
||||
| `gemini-web.code-mode` | boolean | false | 是否启用代码模式,优化代码相关任务的响应。 |
|
||||
| `gemini-web.max-chars-per-request` | integer | 1,000,000 | 单次请求发送给 Gemini Web 的最大字符数。 |
|
||||
| `gemini-web.disable-continuation-hint` | boolean | false | 当提示被拆分时,是否禁用连续提示的暗示。 |
|
||||
|
||||
### 配置文件示例
|
||||
|
||||
```yaml
|
||||
# 服务器端口
|
||||
port: 8317
|
||||
|
||||
# 管理 API 设置
|
||||
remote-management:
|
||||
# 是否允许远程(非localhost)访问管理接口。为false时仅允许本地访问(但本地访问同样需要管理密钥)。
|
||||
allow-remote: false
|
||||
|
||||
# 管理密钥。若配置为明文,启动时会自动进行bcrypt加密并写回配置文件。
|
||||
# 所有管理请求(包括本地)都需要该密钥。
|
||||
# 若为空,/v0/management 整体处于 404(禁用)。
|
||||
secret-key: ""
|
||||
|
||||
# 身份验证目录(支持 ~ 表示主目录)。如果你使用Windows,建议设置成`C:/cli-proxy-api/`。
|
||||
auth-dir: "~/.cli-proxy-api"
|
||||
|
||||
# 启用调试日志
|
||||
debug: false
|
||||
|
||||
# 代理URL。支持socks5/http/https协议。例如:socks5://user:pass@192.168.1.1:1080/
|
||||
proxy-url: ""
|
||||
|
||||
# 请求重试次数。如果HTTP响应码为403、408、500、502、503或504,将会触发重试。
|
||||
request-retry: 3
|
||||
|
||||
|
||||
# 配额超限行为
|
||||
quota-exceeded:
|
||||
switch-project: true # 当配额超限时是否自动切换到另一个项目
|
||||
switch-preview-model: true # 当配额超限时是否自动切换到预览模型
|
||||
|
||||
# Gemini Web 客户端配置
|
||||
gemini-web:
|
||||
context: true # 启用会话上下文重用
|
||||
code-mode: false # 启用代码模式
|
||||
max-chars-per-request: 1000000 # 单次请求最大字符数
|
||||
|
||||
# 请求鉴权提供方
|
||||
auth:
|
||||
providers:
|
||||
- name: "default"
|
||||
type: "config-api-key"
|
||||
api-keys:
|
||||
- "your-api-key-1"
|
||||
- "your-api-key-2"
|
||||
|
||||
# AIStudio Gemini API 的 API 密钥
|
||||
generative-language-api-key:
|
||||
- "AIzaSy...01"
|
||||
- "AIzaSy...02"
|
||||
- "AIzaSy...03"
|
||||
- "AIzaSy...04"
|
||||
|
||||
# Codex API 密钥
|
||||
codex-api-key:
|
||||
- api-key: "sk-atSM..."
|
||||
base-url: "https://www.example.com" # 第三方 Codex API 中转服务端点
|
||||
|
||||
# Claude API 密钥
|
||||
claude-api-key:
|
||||
- api-key: "sk-atSM..." # 如果使用官方 Claude API,无需设置 base-url
|
||||
- api-key: "sk-atSM..."
|
||||
base-url: "https://www.example.com" # 第三方 Claude API 中转服务端点
|
||||
|
||||
# OpenAI 兼容提供商
|
||||
openai-compatibility:
|
||||
- name: "openrouter" # 提供商的名称;它将被用于用户代理和其它地方。
|
||||
base-url: "https://openrouter.ai/api/v1" # 提供商的基础URL。
|
||||
api-keys: # 提供商的API密钥。如果需要,可以添加多个密钥。如果允许未经身份验证的访问,则可以省略。
|
||||
- "sk-or-v1-...b780"
|
||||
- "sk-or-v1-...b781"
|
||||
models: # 提供商支持的模型。
|
||||
- name: "moonshotai/kimi-k2:free" # 实际的模型名称。
|
||||
alias: "kimi-k2" # 在API中使用的别名。
|
||||
```
|
||||
|
||||
### OpenAI 兼容上游提供商
|
||||
|
||||
通过 `openai-compatibility` 配置上游 OpenAI 兼容提供商(例如 OpenRouter)。
|
||||
|
||||
- name:内部识别名
|
||||
- base-url:提供商基础地址
|
||||
- api-keys:可选,多密钥轮询(若提供商支持无鉴权可省略)
|
||||
- models:将上游模型 `name` 映射为本地可用 `alias`
|
||||
|
||||
示例:
|
||||
|
||||
```yaml
|
||||
openai-compatibility:
|
||||
- name: "openrouter"
|
||||
base-url: "https://openrouter.ai/api/v1"
|
||||
api-keys:
|
||||
- "sk-or-v1-...b780"
|
||||
- "sk-or-v1-...b781"
|
||||
models:
|
||||
- name: "moonshotai/kimi-k2:free"
|
||||
alias: "kimi-k2"
|
||||
```
|
||||
|
||||
使用方式:在 `/v1/chat/completions` 中将 `model` 设为别名(如 `kimi-k2`),代理将自动路由到对应提供商与模型。
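
下面是一个最小示例(假设代理已在默认端口 8317 运行;如已配置请求鉴权密钥,请自行补充 `Authorization` 标头),演示如何通过别名调用上游模型:

```bash
# 通过别名 kimi-k2 调用 OpenRouter 上游模型
curl -s http://127.0.0.1:8317/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "kimi-k2", "messages": [{"role": "user", "content": "你好"}]}'
```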
|
||||
|
||||
并且,对于这些与OpenAI兼容的提供商模型,您始终可以通过将CODE_ASSIST_ENDPOINT设置为 http://127.0.0.1:8317 来使用Gemini CLI。
|
||||
|
||||
### 身份验证目录
|
||||
|
||||
`auth-dir` 参数指定身份验证令牌的存储位置。当您运行登录命令时,应用程序将在此目录中创建包含 Google 账户身份验证令牌的 JSON 文件。多个账户可用于轮询。
|
||||
|
||||
### 请求鉴权提供方
|
||||
|
||||
通过 `auth.providers` 配置接入请求鉴权。内置的 `config-api-key` 提供方支持内联密钥:
|
||||
|
||||
```
|
||||
auth:
|
||||
providers:
|
||||
- name: default
|
||||
type: config-api-key
|
||||
api-keys:
|
||||
- your-api-key-1
|
||||
```
|
||||
|
||||
调用时可在 `Authorization` 标头中携带密钥(或继续使用 `X-Goog-Api-Key`、`X-Api-Key`、查询参数 `key`)。为了兼容旧版本,顶层的 `api-keys` 字段仍然可用,并会自动同步到默认的 `config-api-key` 提供方。
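
例如,下面的示例(密钥仅为示意,Bearer 方案为假设,与 OpenAI 兼容客户端的默认行为一致)分别演示了通过 `Authorization` 标头和查询参数 `key` 携带密钥:

```bash
# 使用 Authorization 标头携带密钥
curl -s http://127.0.0.1:8317/v1/models \
  -H "Authorization: Bearer your-api-key-1"

# 使用查询参数 key 携带密钥
curl -s "http://127.0.0.1:8317/v1/models?key=your-api-key-1"
```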
|
||||
|
||||
### 官方生成式语言 API
|
||||
|
||||
`generative-language-api-key` 参数允许您定义可用于验证对官方 AIStudio Gemini API 请求的 API 密钥列表。
|
||||
|
||||
## 热更新
|
||||
|
||||
服务会监听配置文件与 `auth-dir` 目录的变化并自动重新加载客户端与配置。您可以在运行中新增/移除 Gemini/OpenAI 的令牌 JSON 文件,无需重启服务。
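
例如,可以在服务运行期间直接向 `auth-dir` 中添加新的令牌文件(以下文件名仅为示意),服务会自动检测并加载,无需重启:

```bash
# 将新的令牌 JSON 复制到认证目录,服务会自动加载该账户
cp new-account.json ~/.cli-proxy-api/
```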
|
||||
|
||||
## Gemini CLI 多账户负载均衡
|
||||
|
||||
启动 CLI 代理 API 服务器,然后将 `CODE_ASSIST_ENDPOINT` 环境变量设置为 CLI 代理 API 服务器的 URL。
|
||||
|
||||
```bash
|
||||
export CODE_ASSIST_ENDPOINT="http://127.0.0.1:8317"
|
||||
```
|
||||
|
||||
服务器将中继 `loadCodeAssist`、`onboardUser` 和 `countTokens` 请求。并自动在多个账户之间轮询文本生成请求。
|
||||
|
||||
> [!NOTE]
|
||||
> 此功能仅允许本地访问,因为找不到一个可以验证请求的方法。
|
||||
> 所以只能强制只有 `127.0.0.1` 可以访问。
|
||||
|
||||
## Claude Code 的使用方法
|
||||
|
||||
启动 CLI Proxy API 服务器, 设置如下系统环境变量 `ANTHROPIC_BASE_URL`, `ANTHROPIC_AUTH_TOKEN`, `ANTHROPIC_MODEL`, `ANTHROPIC_SMALL_FAST_MODEL`
|
||||
|
||||
使用 Gemini 模型:
|
||||
```bash
|
||||
export ANTHROPIC_BASE_URL=http://127.0.0.1:8317
|
||||
export ANTHROPIC_AUTH_TOKEN=sk-dummy
|
||||
export ANTHROPIC_MODEL=gemini-2.5-pro
|
||||
export ANTHROPIC_SMALL_FAST_MODEL=gemini-2.5-flash
|
||||
```
|
||||
|
||||
使用 OpenAI GPT 5 模型:
|
||||
```bash
|
||||
export ANTHROPIC_BASE_URL=http://127.0.0.1:8317
|
||||
export ANTHROPIC_AUTH_TOKEN=sk-dummy
|
||||
export ANTHROPIC_MODEL=gpt-5
|
||||
export ANTHROPIC_SMALL_FAST_MODEL=gpt-5-minimal
|
||||
```
|
||||
|
||||
使用 OpenAI GPT 5 Codex 模型:
|
||||
```bash
|
||||
export ANTHROPIC_BASE_URL=http://127.0.0.1:8317
|
||||
export ANTHROPIC_AUTH_TOKEN=sk-dummy
|
||||
export ANTHROPIC_MODEL=gpt-5-codex
|
||||
export ANTHROPIC_SMALL_FAST_MODEL=gpt-5-codex-low
|
||||
```
|
||||
|
||||
|
||||
使用 Claude 模型:
|
||||
```bash
|
||||
export ANTHROPIC_BASE_URL=http://127.0.0.1:8317
|
||||
export ANTHROPIC_AUTH_TOKEN=sk-dummy
|
||||
export ANTHROPIC_MODEL=claude-sonnet-4-20250514
|
||||
export ANTHROPIC_SMALL_FAST_MODEL=claude-3-5-haiku-20241022
|
||||
```
|
||||
|
||||
使用 Qwen 模型:
|
||||
```bash
|
||||
export ANTHROPIC_BASE_URL=http://127.0.0.1:8317
|
||||
export ANTHROPIC_AUTH_TOKEN=sk-dummy
|
||||
export ANTHROPIC_MODEL=qwen3-coder-plus
|
||||
export ANTHROPIC_SMALL_FAST_MODEL=qwen3-coder-flash
|
||||
```
|
||||
|
||||
## Codex 多账户负载均衡
|
||||
|
||||
启动 CLI Proxy API 服务器, 修改 `~/.codex/config.toml` 和 `~/.codex/auth.json` 文件。
|
||||
|
||||
config.toml:
|
||||
```toml
|
||||
model_provider = "cliproxyapi"
|
||||
model = "gpt-5-codex" # 或者是gpt-5,你也可以使用任何我们支持的模型
|
||||
model_reasoning_effort = "high"
|
||||
|
||||
[model_providers.cliproxyapi]
|
||||
name = "cliproxyapi"
|
||||
base_url = "http://127.0.0.1:8317/v1"
|
||||
wire_api = "responses"
|
||||
```
|
||||
|
||||
auth.json:
|
||||
```json
|
||||
{
|
||||
"OPENAI_API_KEY": "sk-dummy"
|
||||
}
|
||||
```
|
||||
|
||||
## 使用 Docker 运行
|
||||
|
||||
运行以下命令进行登录(Gemini OAuth,端口 8085):
|
||||
|
||||
```bash
|
||||
docker run --rm -p 8085:8085 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --login
|
||||
```
|
||||
|
||||
运行以下命令进行登录(Gemini Web Cookie):
|
||||
|
||||
```bash
|
||||
docker run -it --rm -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --gemini-web-auth
|
||||
```
|
||||
|
||||
运行以下命令进行登录(OpenAI OAuth,端口 1455):
|
||||
|
||||
```bash
|
||||
docker run --rm -p 1455:1455 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --codex-login
|
||||
```
|
||||
|
||||
运行以下命令进行登录(Claude OAuth,端口 54545):
|
||||
|
||||
```bash
|
||||
docker run --rm -p 54545:54545 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --claude-login
|
||||
```
|
||||
|
||||
运行以下命令进行登录(Qwen OAuth):
|
||||
|
||||
```bash
|
||||
docker run -it --rm -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest /CLIProxyAPI/CLIProxyAPI --qwen-login
|
||||
```
|
||||
|
||||
|
||||
运行以下命令启动服务器:
|
||||
|
||||
```bash
|
||||
docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.yaml -v /path/to/your/auth-dir:/root/.cli-proxy-api eceasy/cli-proxy-api:latest
|
||||
```
|
||||
|
||||
## 使用 Docker Compose 运行
|
||||
|
||||
1. 克隆仓库并进入目录:
|
||||
```bash
|
||||
git clone https://github.com/luispater/CLIProxyAPI.git
|
||||
cd CLIProxyAPI
|
||||
```
|
||||
|
||||
2. 准备配置文件:
|
||||
通过复制示例文件来创建 `config.yaml` 文件,并根据您的需求进行自定义。
|
||||
```bash
|
||||
cp config.example.yaml config.yaml
|
||||
```
|
||||
*(Windows 用户请注意:您可以在 CMD 或 PowerShell 中使用 `copy config.example.yaml config.yaml`。)*
|
||||
|
||||
3. 启动服务:
|
||||
- **适用于大多数用户(推荐):**
|
||||
运行以下命令,使用 Docker Hub 上的预构建镜像启动服务。服务将在后台运行。
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
- **适用于进阶用户:**
|
||||
如果您修改了源代码并需要构建新镜像,请使用交互式辅助脚本:
|
||||
- 对于 Windows (PowerShell):
|
||||
```powershell
|
||||
.\docker-build.ps1
|
||||
```
|
||||
- 对于 Linux/macOS:
|
||||
```bash
|
||||
bash docker-build.sh
|
||||
```
|
||||
脚本将提示您选择运行方式:
|
||||
- **选项 1:使用预构建的镜像运行 (推荐)**:从镜像仓库拉取最新的官方镜像并启动容器。这是最简单的开始方式。
|
||||
- **选项 2:从源码构建并运行 (适用于开发者)**:从本地源代码构建镜像,将其标记为 `cli-proxy-api:local`,然后启动容器。如果您需要修改源代码,此选项很有用。
|
||||
|
||||
4. 要在容器内运行登录命令进行身份验证:
|
||||
- **Gemini**:
|
||||
```bash
|
||||
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --login
|
||||
```
|
||||
- **Gemini Web**:
|
||||
```bash
|
||||
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI --gemini-web-auth
|
||||
```
|
||||
- **OpenAI (Codex)**:
|
||||
```bash
|
||||
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --codex-login
|
||||
```
|
||||
- **Claude**:
|
||||
```bash
|
||||
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --claude-login
|
||||
```
|
||||
- **Qwen**:
|
||||
```bash
|
||||
docker compose exec cli-proxy-api /CLIProxyAPI/CLIProxyAPI -no-browser --qwen-login
|
||||
```
|
||||
|
||||
5. 查看服务器日志:
|
||||
```bash
|
||||
docker compose logs -f
|
||||
```
|
||||
|
||||
6. 停止应用程序:
|
||||
```bash
|
||||
docker compose down
|
||||
```
|
||||
CLIProxyAPI 用户手册: [https://help.router-for.me/cn/](https://help.router-for.me/cn/)
|
||||
|
||||
## 管理 API 文档
|
||||
|
||||
请参见 [MANAGEMENT_API_CN.md](MANAGEMENT_API_CN.md)
|
||||
请参见 [MANAGEMENT_API_CN.md](https://help.router-for.me/cn/management/api)
|
||||
|
||||
## Amp CLI 支持
|
||||
|
||||
CLIProxyAPI 已内置对 [Amp CLI](https://ampcode.com) 和 Amp IDE 扩展的支持,可让你使用自己的 Google/ChatGPT/Claude OAuth 订阅来配合 Amp 编码工具:
|
||||
|
||||
- 提供商路由别名,兼容 Amp 的 API 路径模式(`/api/provider/{provider}/v1...`)
|
||||
- 管理代理,处理 OAuth 认证和账号功能
|
||||
- 智能模型回退与自动路由
|
||||
- 以安全为先的设计,管理端点仅限 localhost
|
||||
|
||||
**→ [Amp CLI 完整集成指南](https://help.router-for.me/cn/agent-client/amp-cli.html)**
|
||||
|
||||
## SDK 文档
|
||||
|
||||
- 使用文档:`docs/sdk-usage_CN.md`(English: `docs/sdk-usage.md`)
|
||||
- 高级(执行器与翻译器):`docs/sdk-advanced_CN.md`(English: `docs/sdk-advanced.md`)
|
||||
- 使用文档:[docs/sdk-usage_CN.md](docs/sdk-usage_CN.md)
|
||||
- 高级(执行器与翻译器):[docs/sdk-advanced_CN.md](docs/sdk-advanced_CN.md)
|
||||
- 认证: [docs/sdk-access_CN.md](docs/sdk-access_CN.md)
|
||||
- 凭据加载/更新: [docs/sdk-watcher_CN.md](docs/sdk-watcher_CN.md)
|
||||
- 自定义 Provider 示例:`examples/custom-provider`
|
||||
|
||||
## 贡献
|
||||
@@ -649,6 +93,76 @@ docker run --rm -p 8317:8317 -v /path/to/your/config.yaml:/CLIProxyAPI/config.ya
|
||||
4. 推送到分支(`git push origin feature/amazing-feature`)
|
||||
5. 打开 Pull Request
|
||||
|
||||
## 谁与我们在一起?
|
||||
|
||||
这些项目基于 CLIProxyAPI:
|
||||
|
||||
### [vibeproxy](https://github.com/automazeio/vibeproxy)
|
||||
|
||||
一个原生 macOS 菜单栏应用,让您可以使用 Claude Code & ChatGPT 订阅服务和 AI 编程工具,无需 API 密钥。
|
||||
|
||||
### [Subtitle Translator](https://github.com/VjayC/SRT-Subtitle-Translator-Validator)
|
||||
|
||||
一款基于浏览器的 SRT 字幕翻译工具,可通过 CLI 代理 API 使用您的 Gemini 订阅。内置自动验证与错误修正功能,无需 API 密钥。
|
||||
|
||||
### [CCS (Claude Code Switch)](https://github.com/kaitranntt/ccs)
|
||||
|
||||
CLI 封装器,用于通过 CLIProxyAPI OAuth 即时切换多个 Claude 账户和替代模型(Gemini, Codex, Antigravity),无需 API 密钥。
|
||||
|
||||
### [ProxyPal](https://github.com/heyhuynhgiabuu/proxypal)
|
||||
|
||||
基于 macOS 平台的原生 CLIProxyAPI GUI:配置供应商、模型映射以及OAuth端点,无需 API 密钥。
|
||||
|
||||
### [Quotio](https://github.com/nguyenphutrong/quotio)
|
||||
|
||||
原生 macOS 菜单栏应用,统一管理 Claude、Gemini、OpenAI、Qwen 和 Antigravity 订阅,提供实时配额追踪和智能自动故障转移,支持 Claude Code、OpenCode 和 Droid 等 AI 编程工具,无需 API 密钥。
|
||||
|
||||
### [CodMate](https://github.com/loocor/CodMate)
|
||||
|
||||
原生 macOS SwiftUI 应用,用于管理 CLI AI 会话(Claude Code、Codex、Gemini CLI),提供统一的提供商管理、Git 审查、项目组织、全局搜索和终端集成。集成 CLIProxyAPI 为 Codex、Claude、Gemini、Antigravity 和 Qwen Code 提供统一的 OAuth 认证,支持内置和第三方提供商通过单一代理端点重路由 - OAuth 提供商无需 API 密钥。
|
||||
|
||||
### [ProxyPilot](https://github.com/Finesssee/ProxyPilot)
|
||||
|
||||
原生 Windows CLIProxyAPI 分支,集成 TUI、系统托盘及多服务商 OAuth 认证,专为 AI 编程工具打造,无需 API 密钥。
|
||||
|
||||
### [Claude Proxy VSCode](https://github.com/uzhao/claude-proxy-vscode)
|
||||
|
||||
一款 VSCode 扩展,提供了在 VSCode 中快速切换 Claude Code 模型的功能,内置 CLIProxyAPI 作为其后端,支持后台自动启动和关闭。
|
||||
|
||||
### [ZeroLimit](https://github.com/0xtbug/zero-limit)
|
||||
|
||||
Windows 桌面应用,基于 Tauri + React 构建,用于通过 CLIProxyAPI 监控 AI 编程助手配额。支持跨 Gemini、Claude、OpenAI Codex 和 Antigravity 账户的使用量追踪,提供实时仪表盘、系统托盘集成和一键代理控制,无需 API 密钥。
|
||||
|
||||
### [CPA-XXX Panel](https://github.com/ferretgeek/CPA-X)
|
||||
|
||||
面向 CLIProxyAPI 的 Web 管理面板,提供健康检查、资源监控、日志查看、自动更新、请求统计与定价展示,支持一键安装与 systemd 服务。
|
||||
|
||||
> [!NOTE]
|
||||
> 如果你开发了基于 CLIProxyAPI 的项目,请提交一个 PR(拉取请求)将其添加到此列表中。
|
||||
|
||||
## 更多选择
|
||||
|
||||
以下项目是 CLIProxyAPI 的移植版或受其启发:
|
||||
|
||||
### [9Router](https://github.com/decolua/9router)
|
||||
|
||||
基于 Next.js 的实现,灵感来自 CLIProxyAPI,易于安装使用;自研格式转换(OpenAI/Claude/Gemini/Ollama)、组合系统与自动回退、多账户管理(指数退避)、Next.js Web 控制台,并支持 Cursor、Claude Code、Cline、RooCode 等 CLI 工具,无需 API 密钥。
|
||||
|
||||
### [CLIProxyAPI Tray](https://github.com/kitephp/CLIProxyAPI_Tray)
|
||||
|
||||
Windows 托盘应用,基于 PowerShell 脚本实现,不依赖任何第三方库。主要功能包括:自动创建快捷方式、静默运行、密码管理、通道切换(Main / Plus)以及自动下载与更新。
|
||||
|
||||
> [!NOTE]
|
||||
> 如果你开发了 CLIProxyAPI 的移植或衍生项目,请提交 PR 将其添加到此列表中。
|
||||
|
||||
## 许可证
|
||||
|
||||
此项目根据 MIT 许可证授权 - 有关详细信息,请参阅 [LICENSE](LICENSE) 文件。
|
||||
|
||||
## 写给所有中国网友的
|
||||
|
||||
QQ 群:188637136
|
||||
|
||||
或
|
||||
|
||||
Telegram 群:https://t.me/CLIProxyAPI
|
||||
|
||||
BIN
assets/cubence.png
Normal file
Binary file not shown. (After: 51 KiB)
BIN
assets/packycode.png
Normal file
Binary file not shown. (After: 8.1 KiB)
@@ -4,134 +4,111 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/joho/godotenv"
|
||||
configaccess "github.com/router-for-me/CLIProxyAPI/v6/internal/access/config_access"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/buildinfo"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/cmd"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/logging"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/managementasset"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/store"
|
||||
_ "github.com/router-for-me/CLIProxyAPI/v6/internal/translator"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/usage"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
|
||||
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/natefinch/lumberjack.v2"
|
||||
)
|
||||
|
||||
var (
|
||||
Version = "dev"
|
||||
Commit = "none"
|
||||
BuildDate = "unknown"
|
||||
logWriter *lumberjack.Logger
|
||||
ginInfoWriter *io.PipeWriter
|
||||
ginErrorWriter *io.PipeWriter
|
||||
DefaultConfigPath = ""
|
||||
)
|
||||
|
||||
// LogFormatter defines a custom log format for logrus.
|
||||
// This formatter adds timestamp, log level, and source location information
|
||||
// to each log entry for better debugging and monitoring.
|
||||
type LogFormatter struct {
|
||||
}
|
||||
|
||||
// Format renders a single log entry with custom formatting.
|
||||
// It includes timestamp, log level, source file and line number, and the log message.
|
||||
func (m *LogFormatter) Format(entry *log.Entry) ([]byte, error) {
|
||||
var b *bytes.Buffer
|
||||
if entry.Buffer != nil {
|
||||
b = entry.Buffer
|
||||
} else {
|
||||
b = &bytes.Buffer{}
|
||||
}
|
||||
|
||||
timestamp := entry.Time.Format("2006-01-02 15:04:05")
|
||||
var newLog string
|
||||
// Ensure message doesn't carry trailing newlines; formatter appends one.
|
||||
msg := strings.TrimRight(entry.Message, "\r\n")
|
||||
// Customize the log format to include timestamp, level, caller file/line, and message.
|
||||
newLog = fmt.Sprintf("[%s] [%s] [%s:%d] %s\n", timestamp, entry.Level, filepath.Base(entry.Caller.File), entry.Caller.Line, msg)
|
||||
|
||||
b.WriteString(newLog)
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// init initializes the logger configuration.
|
||||
// It sets up the custom log formatter, enables caller reporting,
|
||||
// and configures the log output destination.
|
||||
// init initializes the shared logger setup.
|
||||
func init() {
|
||||
logDir := "logs"
|
||||
if err := os.MkdirAll(logDir, 0755); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to create log directory: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
logWriter = &lumberjack.Logger{
|
||||
Filename: filepath.Join(logDir, "main.log"),
|
||||
MaxSize: 10,
|
||||
MaxBackups: 0,
|
||||
MaxAge: 0,
|
||||
Compress: false,
|
||||
}
|
||||
|
||||
log.SetOutput(logWriter)
|
||||
// Enable reporting the caller function's file and line number.
|
||||
log.SetReportCaller(true)
|
||||
// Set the custom log formatter.
|
||||
log.SetFormatter(&LogFormatter{})
|
||||
|
||||
ginInfoWriter = log.StandardLogger().Writer()
|
||||
gin.DefaultWriter = ginInfoWriter
|
||||
ginErrorWriter = log.StandardLogger().WriterLevel(log.ErrorLevel)
|
||||
gin.DefaultErrorWriter = ginErrorWriter
|
||||
gin.DebugPrintFunc = func(format string, values ...interface{}) {
|
||||
// Trim trailing newlines from Gin's formatted messages to avoid blank lines.
|
||||
// Gin's debug prints usually include a trailing "\n"; our formatter also appends one.
|
||||
// Removing it here ensures a single newline per entry.
|
||||
format = strings.TrimRight(format, "\r\n")
|
||||
log.StandardLogger().Infof(format, values...)
|
||||
}
|
||||
log.RegisterExitHandler(func() {
|
||||
if logWriter != nil {
|
||||
_ = logWriter.Close()
|
||||
}
|
||||
if ginInfoWriter != nil {
|
||||
_ = ginInfoWriter.Close()
|
||||
}
|
||||
if ginErrorWriter != nil {
|
||||
_ = ginErrorWriter.Close()
|
||||
}
|
||||
})
|
||||
logging.SetupBaseLogger()
|
||||
buildinfo.Version = Version
|
||||
buildinfo.Commit = Commit
|
||||
buildinfo.BuildDate = BuildDate
|
||||
}
|
||||
|
||||
// main is the entry point of the application.
|
||||
// It parses command-line flags, loads configuration, and starts the appropriate
|
||||
// service based on the provided flags (login, codex-login, or server mode).
|
||||
func main() {
|
||||
fmt.Printf("CLIProxyAPI Version: %s, Commit: %s, BuiltAt: %s\n", Version, Commit, BuildDate)
|
||||
log.Infof("CLIProxyAPI Version: %s, Commit: %s, BuiltAt: %s", Version, Commit, BuildDate)
|
||||
fmt.Printf("CLIProxyAPI Version: %s, Commit: %s, BuiltAt: %s\n", buildinfo.Version, buildinfo.Commit, buildinfo.BuildDate)
|
||||
|
||||
// Command-line flags to control the application's behavior.
|
||||
var login bool
|
||||
var codexLogin bool
|
||||
var claudeLogin bool
|
||||
var qwenLogin bool
|
||||
var geminiWebAuth bool
|
||||
var iflowLogin bool
|
||||
var iflowCookie bool
|
||||
var noBrowser bool
|
||||
var oauthCallbackPort int
|
||||
var antigravityLogin bool
|
||||
var projectID string
|
||||
var vertexImport string
|
||||
var configPath string
|
||||
var password string
|
||||
|
||||
// Define command-line flags for different operation modes.
|
||||
flag.BoolVar(&login, "login", false, "Login Google Account")
|
||||
flag.BoolVar(&codexLogin, "codex-login", false, "Login to Codex using OAuth")
|
||||
flag.BoolVar(&claudeLogin, "claude-login", false, "Login to Claude using OAuth")
|
||||
flag.BoolVar(&qwenLogin, "qwen-login", false, "Login to Qwen using OAuth")
|
||||
flag.BoolVar(&geminiWebAuth, "gemini-web-auth", false, "Auth Gemini Web using cookies")
|
||||
flag.BoolVar(&iflowLogin, "iflow-login", false, "Login to iFlow using OAuth")
|
||||
flag.BoolVar(&iflowCookie, "iflow-cookie", false, "Login to iFlow using Cookie")
|
||||
flag.BoolVar(&noBrowser, "no-browser", false, "Don't open browser automatically for OAuth")
|
||||
flag.IntVar(&oauthCallbackPort, "oauth-callback-port", 0, "Override OAuth callback port (defaults to provider-specific port)")
|
||||
flag.BoolVar(&antigravityLogin, "antigravity-login", false, "Login to Antigravity using OAuth")
|
||||
flag.StringVar(&projectID, "project_id", "", "Project ID (Gemini only, not required)")
|
||||
flag.StringVar(&configPath, "config", "", "Configure File Path")
|
||||
flag.StringVar(&configPath, "config", DefaultConfigPath, "Configure File Path")
|
||||
flag.StringVar(&vertexImport, "vertex-import", "", "Import Vertex service account key JSON file")
|
||||
flag.StringVar(&password, "password", "", "")
|
||||
|
||||
flag.CommandLine.Usage = func() {
|
||||
out := flag.CommandLine.Output()
|
||||
_, _ = fmt.Fprintf(out, "Usage of %s\n", os.Args[0])
|
||||
flag.CommandLine.VisitAll(func(f *flag.Flag) {
|
||||
if f.Name == "password" {
|
||||
return
|
||||
}
|
||||
s := fmt.Sprintf(" -%s", f.Name)
|
||||
name, unquoteUsage := flag.UnquoteUsage(f)
|
||||
if name != "" {
|
||||
s += " " + name
|
||||
}
|
||||
if len(s) <= 4 {
|
||||
s += " "
|
||||
} else {
|
||||
s += "\n "
|
||||
}
|
||||
if unquoteUsage != "" {
|
||||
s += unquoteUsage
|
||||
}
|
||||
if f.DefValue != "" && f.DefValue != "false" && f.DefValue != "0" {
|
||||
s += fmt.Sprintf(" (default %s)", f.DefValue)
|
||||
}
|
||||
_, _ = fmt.Fprint(out, s+"\n")
|
||||
})
|
||||
}
|
||||
|
||||
// Parse the command-line flags.
|
||||
flag.Parse()
|
||||
@@ -139,61 +116,346 @@ func main() {
|
||||
// Core application variables.
|
||||
var err error
|
||||
var cfg *config.Config
|
||||
var wd string
|
||||
var isCloudDeploy bool
|
||||
var (
|
||||
usePostgresStore bool
|
||||
pgStoreDSN string
|
||||
pgStoreSchema string
|
||||
pgStoreLocalPath string
|
||||
pgStoreInst *store.PostgresStore
|
||||
useGitStore bool
|
||||
gitStoreRemoteURL string
|
||||
gitStoreUser string
|
||||
gitStorePassword string
|
||||
gitStoreLocalPath string
|
||||
gitStoreInst *store.GitTokenStore
|
||||
gitStoreRoot string
|
||||
useObjectStore bool
|
||||
objectStoreEndpoint string
|
||||
objectStoreAccess string
|
||||
objectStoreSecret string
|
||||
objectStoreBucket string
|
||||
objectStoreLocalPath string
|
||||
objectStoreInst *store.ObjectTokenStore
|
||||
)
|
||||
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Errorf("failed to get working directory: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Load environment variables from .env if present.
|
||||
if errLoad := godotenv.Load(filepath.Join(wd, ".env")); errLoad != nil {
|
||||
if !errors.Is(errLoad, os.ErrNotExist) {
|
||||
log.WithError(errLoad).Warn("failed to load .env file")
|
||||
}
|
||||
}
|
||||
|
||||
lookupEnv := func(keys ...string) (string, bool) {
|
||||
for _, key := range keys {
|
||||
if value, ok := os.LookupEnv(key); ok {
|
||||
if trimmed := strings.TrimSpace(value); trimmed != "" {
|
||||
return trimmed, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
writableBase := util.WritablePath()
|
||||
if value, ok := lookupEnv("PGSTORE_DSN", "pgstore_dsn"); ok {
|
||||
usePostgresStore = true
|
||||
pgStoreDSN = value
|
||||
}
|
||||
if usePostgresStore {
|
||||
if value, ok := lookupEnv("PGSTORE_SCHEMA", "pgstore_schema"); ok {
|
||||
pgStoreSchema = value
|
||||
}
|
||||
if value, ok := lookupEnv("PGSTORE_LOCAL_PATH", "pgstore_local_path"); ok {
|
||||
pgStoreLocalPath = value
|
||||
}
|
||||
if pgStoreLocalPath == "" {
|
||||
if writableBase != "" {
|
||||
pgStoreLocalPath = writableBase
|
||||
} else {
|
||||
pgStoreLocalPath = wd
|
||||
}
|
||||
}
|
||||
useGitStore = false
|
||||
}
|
||||
if value, ok := lookupEnv("GITSTORE_GIT_URL", "gitstore_git_url"); ok {
|
||||
useGitStore = true
|
||||
gitStoreRemoteURL = value
|
||||
}
|
||||
if value, ok := lookupEnv("GITSTORE_GIT_USERNAME", "gitstore_git_username"); ok {
|
||||
gitStoreUser = value
|
||||
}
|
||||
if value, ok := lookupEnv("GITSTORE_GIT_TOKEN", "gitstore_git_token"); ok {
|
||||
gitStorePassword = value
|
||||
}
|
||||
if value, ok := lookupEnv("GITSTORE_LOCAL_PATH", "gitstore_local_path"); ok {
|
||||
gitStoreLocalPath = value
|
||||
}
|
||||
if value, ok := lookupEnv("OBJECTSTORE_ENDPOINT", "objectstore_endpoint"); ok {
|
||||
useObjectStore = true
|
||||
objectStoreEndpoint = value
|
||||
}
|
||||
if value, ok := lookupEnv("OBJECTSTORE_ACCESS_KEY", "objectstore_access_key"); ok {
|
||||
objectStoreAccess = value
|
||||
}
|
||||
if value, ok := lookupEnv("OBJECTSTORE_SECRET_KEY", "objectstore_secret_key"); ok {
|
||||
objectStoreSecret = value
|
||||
}
|
||||
if value, ok := lookupEnv("OBJECTSTORE_BUCKET", "objectstore_bucket"); ok {
|
||||
objectStoreBucket = value
|
||||
}
|
||||
if value, ok := lookupEnv("OBJECTSTORE_LOCAL_PATH", "objectstore_local_path"); ok {
|
||||
objectStoreLocalPath = value
|
||||
}
|
||||
|
||||
// Check for cloud deploy mode only on first execution
|
||||
// Read env var name in uppercase: DEPLOY
|
||||
deployEnv := os.Getenv("DEPLOY")
|
||||
if deployEnv == "cloud" {
|
||||
isCloudDeploy = true
|
||||
}
|
||||
|
||||
// Determine and load the configuration file.
|
||||
// If a config path is provided via flags, it is used directly.
|
||||
// Otherwise, it defaults to "config.yaml" in the current working directory.
|
||||
// Prefer the Postgres store when configured, otherwise fallback to git or local files.
|
||||
var configFilePath string
|
||||
if configPath != "" {
|
||||
if usePostgresStore {
|
||||
if pgStoreLocalPath == "" {
|
||||
pgStoreLocalPath = wd
|
||||
}
|
||||
pgStoreLocalPath = filepath.Join(pgStoreLocalPath, "pgstore")
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
pgStoreInst, err = store.NewPostgresStore(ctx, store.PostgresStoreConfig{
|
||||
DSN: pgStoreDSN,
|
||||
Schema: pgStoreSchema,
|
||||
SpoolDir: pgStoreLocalPath,
|
||||
})
|
||||
cancel()
|
||||
if err != nil {
|
||||
log.Errorf("failed to initialize postgres token store: %v", err)
|
||||
return
|
||||
}
|
||||
examplePath := filepath.Join(wd, "config.example.yaml")
|
||||
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second)
|
||||
if errBootstrap := pgStoreInst.Bootstrap(ctx, examplePath); errBootstrap != nil {
|
||||
cancel()
|
||||
log.Errorf("failed to bootstrap postgres-backed config: %v", errBootstrap)
|
||||
return
|
||||
}
|
||||
cancel()
|
||||
configFilePath = pgStoreInst.ConfigPath()
|
||||
cfg, err = config.LoadConfigOptional(configFilePath, isCloudDeploy)
|
||||
if err == nil {
|
||||
cfg.AuthDir = pgStoreInst.AuthDir()
|
||||
log.Infof("postgres-backed token store enabled, workspace path: %s", pgStoreInst.WorkDir())
|
||||
}
|
||||
} else if useObjectStore {
|
||||
if objectStoreLocalPath == "" {
|
||||
if writableBase != "" {
|
||||
objectStoreLocalPath = writableBase
|
||||
} else {
|
||||
objectStoreLocalPath = wd
|
||||
}
|
||||
}
|
||||
objectStoreRoot := filepath.Join(objectStoreLocalPath, "objectstore")
|
||||
resolvedEndpoint := strings.TrimSpace(objectStoreEndpoint)
|
||||
useSSL := true
|
||||
if strings.Contains(resolvedEndpoint, "://") {
|
||||
parsed, errParse := url.Parse(resolvedEndpoint)
|
||||
if errParse != nil {
|
||||
log.Errorf("failed to parse object store endpoint %q: %v", objectStoreEndpoint, errParse)
|
||||
return
|
||||
}
|
||||
switch strings.ToLower(parsed.Scheme) {
|
||||
case "http":
|
||||
useSSL = false
|
||||
case "https":
|
||||
useSSL = true
|
||||
default:
|
||||
log.Errorf("unsupported object store scheme %q (only http and https are allowed)", parsed.Scheme)
|
||||
return
|
||||
}
|
||||
if parsed.Host == "" {
|
||||
log.Errorf("object store endpoint %q is missing host information", objectStoreEndpoint)
|
||||
return
|
||||
}
|
||||
resolvedEndpoint = parsed.Host
|
||||
if parsed.Path != "" && parsed.Path != "/" {
|
||||
resolvedEndpoint = strings.TrimSuffix(parsed.Host+parsed.Path, "/")
|
||||
}
|
||||
}
|
||||
resolvedEndpoint = strings.TrimRight(resolvedEndpoint, "/")
|
||||
objCfg := store.ObjectStoreConfig{
|
||||
Endpoint: resolvedEndpoint,
|
||||
Bucket: objectStoreBucket,
|
||||
AccessKey: objectStoreAccess,
|
||||
SecretKey: objectStoreSecret,
|
||||
LocalRoot: objectStoreRoot,
|
||||
UseSSL: useSSL,
|
||||
PathStyle: true,
|
||||
}
|
||||
objectStoreInst, err = store.NewObjectTokenStore(objCfg)
|
||||
if err != nil {
|
||||
log.Errorf("failed to initialize object token store: %v", err)
|
||||
return
|
||||
}
|
||||
examplePath := filepath.Join(wd, "config.example.yaml")
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
if errBootstrap := objectStoreInst.Bootstrap(ctx, examplePath); errBootstrap != nil {
|
||||
cancel()
|
||||
log.Errorf("failed to bootstrap object-backed config: %v", errBootstrap)
|
||||
return
|
||||
}
|
||||
cancel()
|
||||
configFilePath = objectStoreInst.ConfigPath()
|
||||
cfg, err = config.LoadConfigOptional(configFilePath, isCloudDeploy)
|
||||
if err == nil {
|
||||
if cfg == nil {
|
||||
cfg = &config.Config{}
|
||||
}
|
||||
cfg.AuthDir = objectStoreInst.AuthDir()
|
||||
log.Infof("object-backed token store enabled, bucket: %s", objectStoreBucket)
|
||||
}
|
||||
} else if useGitStore {
|
||||
if gitStoreLocalPath == "" {
|
||||
if writableBase != "" {
|
||||
gitStoreLocalPath = writableBase
|
||||
} else {
|
||||
gitStoreLocalPath = wd
|
||||
}
|
||||
}
|
||||
gitStoreRoot = filepath.Join(gitStoreLocalPath, "gitstore")
|
||||
authDir := filepath.Join(gitStoreRoot, "auths")
|
||||
gitStoreInst = store.NewGitTokenStore(gitStoreRemoteURL, gitStoreUser, gitStorePassword)
|
||||
gitStoreInst.SetBaseDir(authDir)
|
||||
if errRepo := gitStoreInst.EnsureRepository(); errRepo != nil {
|
||||
log.Errorf("failed to prepare git token store: %v", errRepo)
|
||||
return
|
||||
}
|
||||
configFilePath = gitStoreInst.ConfigPath()
|
||||
if configFilePath == "" {
|
||||
configFilePath = filepath.Join(gitStoreRoot, "config", "config.yaml")
|
||||
}
|
||||
if _, statErr := os.Stat(configFilePath); errors.Is(statErr, fs.ErrNotExist) {
|
||||
examplePath := filepath.Join(wd, "config.example.yaml")
|
||||
if _, errExample := os.Stat(examplePath); errExample != nil {
|
||||
log.Errorf("failed to find template config file: %v", errExample)
|
||||
return
|
||||
}
|
||||
if errCopy := misc.CopyConfigTemplate(examplePath, configFilePath); errCopy != nil {
|
||||
log.Errorf("failed to bootstrap git-backed config: %v", errCopy)
|
||||
return
|
||||
}
|
||||
if errCommit := gitStoreInst.PersistConfig(context.Background()); errCommit != nil {
|
||||
log.Errorf("failed to commit initial git-backed config: %v", errCommit)
|
||||
return
|
||||
}
|
||||
log.Infof("git-backed config initialized from template: %s", configFilePath)
|
||||
} else if statErr != nil {
|
||||
log.Errorf("failed to inspect git-backed config: %v", statErr)
|
||||
return
|
||||
}
|
||||
cfg, err = config.LoadConfigOptional(configFilePath, isCloudDeploy)
|
||||
if err == nil {
|
||||
cfg.AuthDir = gitStoreInst.AuthDir()
|
||||
log.Infof("git-backed token store enabled, repository path: %s", gitStoreRoot)
|
||||
}
|
||||
} else if configPath != "" {
|
||||
configFilePath = configPath
|
||||
cfg, err = config.LoadConfig(configPath)
|
||||
cfg, err = config.LoadConfigOptional(configPath, isCloudDeploy)
|
||||
} else {
|
||||
wd, err = os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to get working directory: %v", err)
|
||||
log.Errorf("failed to get working directory: %v", err)
|
||||
return
|
||||
}
|
||||
configFilePath = filepath.Join(wd, "config.yaml")
|
||||
cfg, err = config.LoadConfig(configFilePath)
|
||||
cfg, err = config.LoadConfigOptional(configFilePath, isCloudDeploy)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("failed to load config: %v", err)
|
||||
log.Errorf("failed to load config: %v", err)
|
||||
return
|
||||
}
|
||||
if cfg == nil {
|
||||
cfg = &config.Config{}
|
||||
}
|
||||
|
||||
// In cloud deploy mode, check if we have a valid configuration
|
||||
var configFileExists bool
|
||||
if isCloudDeploy {
|
||||
if info, errStat := os.Stat(configFilePath); errStat != nil {
|
||||
// Don't mislead: API server will not start until configuration is provided.
|
||||
log.Info("Cloud deploy mode: No configuration file detected; standing by for configuration")
|
||||
configFileExists = false
|
||||
} else if info.IsDir() {
|
||||
log.Info("Cloud deploy mode: Config path is a directory; standing by for configuration")
|
||||
configFileExists = false
|
||||
} else if cfg.Port == 0 {
|
||||
// LoadConfigOptional returns empty config when file is empty or invalid.
|
||||
// Config file exists but is empty or invalid; treat as missing config
|
||||
log.Info("Cloud deploy mode: Configuration file is empty or invalid; standing by for valid configuration")
|
||||
configFileExists = false
|
||||
} else {
|
||||
log.Info("Cloud deploy mode: Configuration file detected; starting service")
|
||||
configFileExists = true
|
||||
}
|
||||
}
|
||||
usage.SetStatisticsEnabled(cfg.UsageStatisticsEnabled)
|
||||
coreauth.SetQuotaCooldownDisabled(cfg.DisableCooling)
|
||||
|
||||
if err = logging.ConfigureLogOutput(cfg); err != nil {
|
||||
log.Errorf("failed to configure log output: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("CLIProxyAPI Version: %s, Commit: %s, BuiltAt: %s", buildinfo.Version, buildinfo.Commit, buildinfo.BuildDate)
|
||||
|
||||
// Set the log level based on the configuration.
|
||||
util.SetLogLevel(cfg)
|
||||
|
||||
// Expand the tilde (~) in the auth directory path to the user's home directory.
|
||||
if strings.HasPrefix(cfg.AuthDir, "~") {
|
||||
home, errUserHomeDir := os.UserHomeDir()
|
||||
if errUserHomeDir != nil {
|
||||
log.Fatalf("failed to get home directory: %v", errUserHomeDir)
|
||||
}
|
||||
// Reconstruct the path by replacing the tilde with the user's home directory.
|
||||
remainder := strings.TrimPrefix(cfg.AuthDir, "~")
|
||||
remainder = strings.TrimLeft(remainder, "/\\")
|
||||
if remainder == "" {
|
||||
cfg.AuthDir = home
|
||||
if resolvedAuthDir, errResolveAuthDir := util.ResolveAuthDir(cfg.AuthDir); errResolveAuthDir != nil {
|
||||
log.Errorf("failed to resolve auth directory: %v", errResolveAuthDir)
|
||||
return
|
||||
} else {
|
||||
// Normalize any slash style in the remainder so Windows paths keep nested directories.
|
||||
normalized := strings.ReplaceAll(remainder, "\\", "/")
|
||||
cfg.AuthDir = filepath.Join(home, filepath.FromSlash(normalized))
|
||||
}
|
||||
cfg.AuthDir = resolvedAuthDir
|
||||
}
|
||||
managementasset.SetCurrentConfig(cfg)
|
||||
|
||||
// Create login options to be used in authentication flows.
|
||||
options := &cmd.LoginOptions{
|
||||
NoBrowser: noBrowser,
|
||||
CallbackPort: oauthCallbackPort,
|
||||
}
|
||||
|
||||
// Register the shared token store once so all components use the same persistence backend.
|
||||
if usePostgresStore {
|
||||
sdkAuth.RegisterTokenStore(pgStoreInst)
|
||||
} else if useObjectStore {
|
||||
sdkAuth.RegisterTokenStore(objectStoreInst)
|
||||
} else if useGitStore {
|
||||
sdkAuth.RegisterTokenStore(gitStoreInst)
|
||||
} else {
|
||||
sdkAuth.RegisterTokenStore(sdkAuth.NewFileTokenStore())
|
||||
}
|
||||
|
||||
// Register built-in access providers before constructing services.
|
||||
configaccess.Register()
|
||||
|
||||
// Handle different command modes based on the provided flags.
|
||||
|
||||
if login {
|
||||
if vertexImport != "" {
|
||||
// Handle Vertex service account import
|
||||
cmd.DoVertexImport(cfg, vertexImport)
|
||||
} else if login {
|
||||
// Handle Google/Gemini login
|
||||
cmd.DoLogin(cfg, projectID, options)
|
||||
} else if antigravityLogin {
|
||||
// Handle Antigravity login
|
||||
cmd.DoAntigravityLogin(cfg, options)
|
||||
} else if codexLogin {
|
||||
// Handle Codex login
|
||||
cmd.DoCodexLogin(cfg, options)
|
||||
@@ -202,10 +464,19 @@ func main() {
|
||||
cmd.DoClaudeLogin(cfg, options)
|
||||
} else if qwenLogin {
|
||||
cmd.DoQwenLogin(cfg, options)
|
||||
} else if geminiWebAuth {
|
||||
cmd.DoGeminiWebAuth(cfg)
|
||||
} else if iflowLogin {
|
||||
cmd.DoIFlowLogin(cfg, options)
|
||||
} else if iflowCookie {
|
||||
cmd.DoIFlowCookieAuth(cfg, options)
|
||||
} else {
|
||||
// In cloud deploy mode without config file, just wait for shutdown signals
|
||||
if isCloudDeploy && !configFileExists {
|
||||
// No config file available, just wait for shutdown
|
||||
cmd.WaitForCloudDeploy()
|
||||
return
|
||||
}
|
||||
// Start the main proxy service
|
||||
cmd.StartService(cfg, configFilePath)
|
||||
managementasset.StartAutoUpdater(context.Background(), configFilePath)
|
||||
cmd.StartService(cfg, configFilePath, password)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,16 @@
|
||||
# Server host/interface to bind to. Default is empty ("") to bind all interfaces (IPv4 + IPv6).
|
||||
# Use "127.0.0.1" or "localhost" to restrict access to local machine only.
|
||||
host: ""
|
||||
|
||||
# Server port
|
||||
port: 8317
|
||||
|
||||
# TLS settings for HTTPS. When enabled, the server listens with the provided certificate and key.
|
||||
tls:
|
||||
enable: false
|
||||
cert: ""
|
||||
key: ""
|
||||
|
||||
# Management API settings
|
||||
remote-management:
|
||||
# Whether to allow remote (non-localhost) management access.
|
||||
@@ -12,75 +22,298 @@ remote-management:
|
||||
# Leave empty to disable the Management API entirely (404 for all /v0/management routes).
|
||||
secret-key: ""
|
||||
|
||||
# Disable the bundled management control panel asset download and HTTP route when true.
|
||||
disable-control-panel: false
|
||||
|
||||
# GitHub repository for the management control panel. Accepts a repository URL or releases API URL.
|
||||
panel-github-repository: "https://github.com/router-for-me/Cli-Proxy-API-Management-Center"
|
||||
|
||||
# Authentication directory (supports ~ for home directory)
|
||||
auth-dir: "~/.cli-proxy-api"
|
||||
|
||||
# API keys for authentication
|
||||
api-keys:
|
||||
- "your-api-key-1"
|
||||
- "your-api-key-2"
|
||||
- "your-api-key-3"
|
||||
|
||||
# Enable debug logging
|
||||
debug: false
|
||||
|
||||
# When true, disable high-overhead HTTP middleware features to reduce per-request memory usage under high concurrency.
|
||||
commercial-mode: false
|
||||
|
||||
# When true, write application logs to rotating files instead of stdout
|
||||
logging-to-file: false
|
||||
|
||||
# Maximum total size (MB) of log files under the logs directory. When exceeded, the oldest log
|
||||
# files are deleted until within the limit. Set to 0 to disable.
|
||||
logs-max-total-size-mb: 0
|
||||
|
||||
# Maximum number of error log files retained when request logging is disabled.
|
||||
# When exceeded, the oldest error log files are deleted. Default is 10. Set to 0 to disable cleanup.
|
||||
error-logs-max-files: 10
|
||||
|
||||
# When false, disable in-memory usage statistics aggregation
|
||||
usage-statistics-enabled: false
|
||||
|
||||
# Proxy URL. Supports socks5/http/https protocols. Example: socks5://user:pass@192.168.1.1:1080/
|
||||
proxy-url: ""
|
||||
|
||||
# When true, unprefixed model requests only use credentials without a prefix (except when prefix == model name).
|
||||
force-model-prefix: false
|
||||
|
||||
# Number of times to retry a request. Retries will occur if the HTTP response code is 403, 408, 500, 502, 503, or 504.
|
||||
request-retry: 3
|
||||
|
||||
# Maximum wait time in seconds for a cooled-down credential before triggering a retry.
|
||||
max-retry-interval: 30
|
||||
|
||||
# Quota exceeded behavior
|
||||
quota-exceeded:
|
||||
switch-project: true # Whether to automatically switch to another project when a quota is exceeded
|
||||
switch-preview-model: true # Whether to automatically switch to a preview model when a quota is exceeded
|
||||
|
||||
# Request authentication providers
|
||||
auth:
|
||||
providers:
|
||||
- name: "default"
|
||||
type: "config-api-key"
|
||||
api-keys:
|
||||
- "your-api-key-1"
|
||||
- "your-api-key-2"
|
||||
# Routing strategy for selecting credentials when multiple match.
|
||||
routing:
|
||||
strategy: "round-robin" # round-robin (default), fill-first
|
||||
|
||||
# API keys for official Generative Language API
|
||||
generative-language-api-key:
|
||||
- "AIzaSy...01"
|
||||
- "AIzaSy...02"
|
||||
- "AIzaSy...03"
|
||||
- "AIzaSy...04"
|
||||
# When true, enable authentication for the WebSocket API (/v1/ws).
|
||||
ws-auth: false
|
||||
|
||||
# When > 0, emit blank lines every N seconds for non-streaming responses to prevent idle timeouts.
|
||||
nonstream-keepalive-interval: 0
|
||||
|
||||
# Streaming behavior (SSE keep-alives + safe bootstrap retries).
|
||||
# streaming:
|
||||
# keepalive-seconds: 15 # Default: 0 (disabled). <= 0 disables keep-alives.
|
||||
# bootstrap-retries: 1 # Default: 0 (disabled). Retries before first byte is sent.
|
||||
|
||||
# When true, enable official Codex instructions injection for Codex API requests.
|
||||
# When false (default), CodexInstructionsForModel returns immediately without modification.
|
||||
codex-instructions-enabled: false
|
||||
|
||||
# Gemini API keys
|
||||
# gemini-api-key:
|
||||
# - api-key: "AIzaSy...01"
|
||||
# prefix: "test" # optional: require calls like "test/gemini-3-pro-preview" to target this credential
|
||||
# base-url: "https://generativelanguage.googleapis.com"
|
||||
# headers:
|
||||
# X-Custom-Header: "custom-value"
|
||||
# proxy-url: "socks5://proxy.example.com:1080"
|
||||
# models:
|
||||
# - name: "gemini-2.5-flash" # upstream model name
|
||||
# alias: "gemini-flash" # client alias mapped to the upstream model
|
||||
# excluded-models:
|
||||
# - "gemini-2.5-pro" # exclude specific models from this provider (exact match)
|
||||
# - "gemini-2.5-*" # wildcard matching prefix (e.g. gemini-2.5-flash, gemini-2.5-pro)
|
||||
# - "*-preview" # wildcard matching suffix (e.g. gemini-3-pro-preview)
|
||||
# - "*flash*" # wildcard matching substring (e.g. gemini-2.5-flash-lite)
|
||||
# - api-key: "AIzaSy...02"
|
||||
|
||||
# Codex API keys
|
||||
codex-api-key:
|
||||
- api-key: "sk-atSM..."
|
||||
base-url: "https://www.example.com" # use the custom codex API endpoint
|
||||
# codex-api-key:
|
||||
# - api-key: "sk-atSM..."
|
||||
# prefix: "test" # optional: require calls like "test/gpt-5-codex" to target this credential
|
||||
# base-url: "https://www.example.com" # use the custom codex API endpoint
|
||||
# headers:
|
||||
# X-Custom-Header: "custom-value"
|
||||
# proxy-url: "socks5://proxy.example.com:1080" # optional: per-key proxy override
|
||||
# models:
|
||||
# - name: "gpt-5-codex" # upstream model name
|
||||
# alias: "codex-latest" # client alias mapped to the upstream model
|
||||
# excluded-models:
|
||||
# - "gpt-5.1" # exclude specific models (exact match)
|
||||
# - "gpt-5-*" # wildcard matching prefix (e.g. gpt-5-medium, gpt-5-codex)
|
||||
# - "*-mini" # wildcard matching suffix (e.g. gpt-5-codex-mini)
|
||||
# - "*codex*" # wildcard matching substring (e.g. gpt-5-codex-low)
|
||||
|
||||
# Claude API keys
|
||||
claude-api-key:
|
||||
- api-key: "sk-atSM..." # use the official claude API key, no need to set the base url
|
||||
- api-key: "sk-atSM..."
|
||||
base-url: "https://www.example.com" # use the custom claude API endpoint
|
||||
# claude-api-key:
|
||||
# - api-key: "sk-atSM..." # use the official claude API key, no need to set the base url
|
||||
# - api-key: "sk-atSM..."
|
||||
# prefix: "test" # optional: require calls like "test/claude-sonnet-latest" to target this credential
|
||||
# base-url: "https://www.example.com" # use the custom claude API endpoint
|
||||
# headers:
|
||||
# X-Custom-Header: "custom-value"
|
||||
# proxy-url: "socks5://proxy.example.com:1080" # optional: per-key proxy override
|
||||
# models:
|
||||
# - name: "claude-3-5-sonnet-20241022" # upstream model name
|
||||
# alias: "claude-sonnet-latest" # client alias mapped to the upstream model
|
||||
# excluded-models:
|
||||
# - "claude-opus-4-5-20251101" # exclude specific models (exact match)
|
||||
# - "claude-3-*" # wildcard matching prefix (e.g. claude-3-7-sonnet-20250219)
|
||||
# - "*-thinking" # wildcard matching suffix (e.g. claude-opus-4-5-thinking)
|
||||
# - "*haiku*" # wildcard matching substring (e.g. claude-3-5-haiku-20241022)
|
||||
# cloak: # optional: request cloaking for non-Claude-Code clients
|
||||
# mode: "auto" # "auto" (default): cloak only when client is not Claude Code
|
||||
# # "always": always apply cloaking
|
||||
# # "never": never apply cloaking
|
||||
# strict-mode: false # false (default): prepend Claude Code prompt to user system messages
|
||||
# # true: strip all user system messages, keep only Claude Code prompt
|
||||
# sensitive-words: # optional: words to obfuscate with zero-width characters
|
||||
# - "API"
|
||||
# - "proxy"
|
||||
|
||||
# OpenAI compatibility providers
|
||||
openai-compatibility:
|
||||
- name: "openrouter" # The name of the provider; it will be used in the user agent and other places.
|
||||
base-url: "https://openrouter.ai/api/v1" # The base URL of the provider.
|
||||
api-keys: # The API keys for the provider. Add multiple keys if needed. Omit if unauthenticated access is allowed.
|
||||
- "sk-or-v1-...b780"
|
||||
- "sk-or-v1-...b781"
|
||||
models: # The models supported by the provider.
|
||||
- name: "moonshotai/kimi-k2:free" # The actual model name.
|
||||
alias: "kimi-k2" # The alias used in the API.
|
||||
# openai-compatibility:
|
||||
# - name: "openrouter" # The name of the provider; it will be used in the user agent and other places.
|
||||
# prefix: "test" # optional: require calls like "test/kimi-k2" to target this provider's credentials
|
||||
# base-url: "https://openrouter.ai/api/v1" # The base URL of the provider.
|
||||
# headers:
|
||||
# X-Custom-Header: "custom-value"
|
||||
# api-key-entries:
|
||||
# - api-key: "sk-or-v1-...b780"
|
||||
# proxy-url: "socks5://proxy.example.com:1080" # optional: per-key proxy override
|
||||
# - api-key: "sk-or-v1-...b781" # without proxy-url
|
||||
# models: # The models supported by the provider.
|
||||
# - name: "moonshotai/kimi-k2:free" # The actual model name.
|
||||
# alias: "kimi-k2" # The alias used in the API.
|
||||
|
||||
# Gemini Web settings
|
||||
gemini-web:
|
||||
# Conversation reuse: set to true to enable (default), false to disable.
|
||||
context: true
|
||||
# Maximum characters per single request to Gemini Web. Requests exceeding this
|
||||
# size are split into chunks. Only the last chunk carries files and yields the final answer.
|
||||
max-chars-per-request: 1000000
|
||||
# Disable the short continuation hint appended to intermediate chunks
|
||||
# when splitting long prompts. Default is false (hint enabled by default).
|
||||
disable-continuation-hint: false
|
||||
# Code mode:
|
||||
# - true: enable XML wrapping hint and attach the coding-partner Gem.
|
||||
# Thought merging (<think> into visible content) applies to STREAMING only;
|
||||
# non-stream responses keep reasoning/thought parts separate for clients
|
||||
# that expect explicit reasoning fields.
|
||||
# - false: disable XML hint and keep <think> separate
|
||||
code-mode: false
|
||||
# Vertex API keys (Vertex-compatible endpoints, use API key + base URL)
|
||||
# vertex-api-key:
|
||||
# - api-key: "vk-123..." # x-goog-api-key header
|
||||
# prefix: "test" # optional: require calls like "test/vertex-pro" to target this credential
|
||||
# base-url: "https://example.com/api" # e.g. https://zenmux.ai/api
|
||||
# proxy-url: "socks5://proxy.example.com:1080" # optional per-key proxy override
|
||||
# headers:
|
||||
# X-Custom-Header: "custom-value"
|
||||
# models: # optional: map aliases to upstream model names
|
||||
# - name: "gemini-2.5-flash" # upstream model name
|
||||
# alias: "vertex-flash" # client-visible alias
|
||||
# - name: "gemini-2.5-pro"
|
||||
# alias: "vertex-pro"
|
||||
|
||||
# Amp Integration
|
||||
# ampcode:
|
||||
# # Configure upstream URL for Amp CLI OAuth and management features
|
||||
# upstream-url: "https://ampcode.com"
|
||||
# # Optional: Override API key for Amp upstream (otherwise uses env or file)
|
||||
# upstream-api-key: ""
|
||||
# # Per-client upstream API key mapping
|
||||
# # Maps client API keys (from top-level api-keys) to different Amp upstream API keys.
|
||||
# # Useful when different clients need to use different Amp accounts/quotas.
|
||||
# # If a client key isn't mapped, falls back to upstream-api-key (default behavior).
|
||||
# upstream-api-keys:
|
||||
# - upstream-api-key: "amp_key_for_team_a" # Upstream key to use for these clients
|
||||
# api-keys: # Client keys that use this upstream key
|
||||
# - "your-api-key-1"
|
||||
# - "your-api-key-2"
|
||||
# - upstream-api-key: "amp_key_for_team_b"
|
||||
# api-keys:
|
||||
# - "your-api-key-3"
|
||||
# # Restrict Amp management routes (/api/auth, /api/user, etc.) to localhost only (default: false)
|
||||
# restrict-management-to-localhost: false
|
||||
# # Force model mappings to run before checking local API keys (default: false)
|
||||
# force-model-mappings: false
|
||||
# # Amp Model Mappings
|
||||
# # Route unavailable Amp models to alternative models available in your local proxy.
|
||||
# # Useful when Amp CLI requests models you don't have access to (e.g., Claude Opus 4.5)
|
||||
# # but you have a similar model available (e.g., Claude Sonnet 4).
|
||||
# model-mappings:
|
||||
# - from: "claude-opus-4-5-20251101" # Model requested by Amp CLI
|
||||
# to: "gemini-claude-opus-4-5-thinking" # Route to this available model instead
|
||||
# - from: "claude-sonnet-4-5-20250929"
|
||||
# to: "gemini-claude-sonnet-4-5-thinking"
|
||||
# - from: "claude-haiku-4-5-20251001"
|
||||
# to: "gemini-2.5-flash"
|
||||
|
||||
# Global OAuth model name aliases (per channel)
|
||||
# These aliases rename model IDs for both model listing and request routing.
|
||||
# Supported channels: gemini-cli, vertex, aistudio, antigravity, claude, codex, qwen, iflow.
|
||||
# NOTE: Aliases do not apply to gemini-api-key, codex-api-key, claude-api-key, openai-compatibility, vertex-api-key, or ampcode.
|
||||
# You can repeat the same name with different aliases to expose multiple client model names.
|
||||
oauth-model-alias:
|
||||
antigravity:
|
||||
- name: "rev19-uic3-1p"
|
||||
alias: "gemini-2.5-computer-use-preview-10-2025"
|
||||
- name: "gemini-3-pro-image"
|
||||
alias: "gemini-3-pro-image-preview"
|
||||
- name: "gemini-3-pro-high"
|
||||
alias: "gemini-3-pro-preview"
|
||||
- name: "gemini-3-flash"
|
||||
alias: "gemini-3-flash-preview"
|
||||
- name: "claude-sonnet-4-5"
|
||||
alias: "gemini-claude-sonnet-4-5"
|
||||
- name: "claude-sonnet-4-5-thinking"
|
||||
alias: "gemini-claude-sonnet-4-5-thinking"
|
||||
- name: "claude-opus-4-5-thinking"
|
||||
alias: "gemini-claude-opus-4-5-thinking"
|
||||
# gemini-cli:
|
||||
# - name: "gemini-2.5-pro" # original model name under this channel
|
||||
# alias: "g2.5p" # client-visible alias
|
||||
# fork: true # when true, keep original and also add the alias as an extra model (default: false)
|
||||
# vertex:
|
||||
# - name: "gemini-2.5-pro"
|
||||
# alias: "g2.5p"
|
||||
# aistudio:
|
||||
# - name: "gemini-2.5-pro"
|
||||
# alias: "g2.5p"
|
||||
# claude:
|
||||
# - name: "claude-sonnet-4-5-20250929"
|
||||
# alias: "cs4.5"
|
||||
# codex:
|
||||
# - name: "gpt-5"
|
||||
# alias: "g5"
|
||||
# qwen:
|
||||
# - name: "qwen3-coder-plus"
|
||||
# alias: "qwen-plus"
|
||||
# iflow:
|
||||
# - name: "glm-4.7"
|
||||
# alias: "glm-god"
|
||||
|
||||
# OAuth provider excluded models
|
||||
# oauth-excluded-models:
|
||||
# gemini-cli:
|
||||
# - "gemini-2.5-pro" # exclude specific models (exact match)
|
||||
# - "gemini-2.5-*" # wildcard matching prefix (e.g. gemini-2.5-flash, gemini-2.5-pro)
|
||||
# - "*-preview" # wildcard matching suffix (e.g. gemini-3-pro-preview)
|
||||
# - "*flash*" # wildcard matching substring (e.g. gemini-2.5-flash-lite)
|
||||
# vertex:
|
||||
# - "gemini-3-pro-preview"
|
||||
# aistudio:
|
||||
# - "gemini-3-pro-preview"
|
||||
# antigravity:
|
||||
# - "gemini-3-pro-preview"
|
||||
# claude:
|
||||
# - "claude-3-5-haiku-20241022"
|
||||
# codex:
|
||||
# - "gpt-5-codex-mini"
|
||||
# qwen:
|
||||
# - "vision-model"
|
||||
# iflow:
|
||||
# - "tstars2.0"
|
||||
|
||||
# Optional payload configuration
|
||||
# payload:
|
||||
# default: # Default rules only set parameters when they are missing in the payload.
|
||||
# - models:
|
||||
# - name: "gemini-2.5-pro" # Supports wildcards (e.g., "gemini-*")
|
||||
# protocol: "gemini" # restricts the rule to a specific protocol, options: openai, gemini, claude, codex, antigravity
|
||||
# params: # JSON path (gjson/sjson syntax) -> value
|
||||
# "generationConfig.thinkingConfig.thinkingBudget": 32768
|
||||
# default-raw: # Default raw rules set parameters using raw JSON when missing (must be valid JSON).
|
||||
# - models:
|
||||
# - name: "gemini-2.5-pro" # Supports wildcards (e.g., "gemini-*")
|
||||
# protocol: "gemini" # restricts the rule to a specific protocol, options: openai, gemini, claude, codex, antigravity
|
||||
# params: # JSON path (gjson/sjson syntax) -> raw JSON value (strings are used as-is, must be valid JSON)
|
||||
# "generationConfig.responseJsonSchema": "{\"type\":\"object\",\"properties\":{\"answer\":{\"type\":\"string\"}}}"
|
||||
# override: # Override rules always set parameters, overwriting any existing values.
|
||||
# - models:
|
||||
# - name: "gpt-*" # Supports wildcards (e.g., "gpt-*")
|
||||
# protocol: "codex" # restricts the rule to a specific protocol, options: openai, gemini, claude, codex, antigravity
|
||||
# params: # JSON path (gjson/sjson syntax) -> value
|
||||
# "reasoning.effort": "high"
|
||||
# override-raw: # Override raw rules always set parameters using raw JSON (must be valid JSON).
|
||||
# - models:
|
||||
# - name: "gpt-*" # Supports wildcards (e.g., "gpt-*")
|
||||
# protocol: "codex" # restricts the rule to a specific protocol, options: openai, gemini, claude, codex, antigravity
|
||||
# params: # JSON path (gjson/sjson syntax) -> raw JSON value (strings are used as-is, must be valid JSON)
|
||||
# "response_format": "{\"type\":\"json_schema\",\"json_schema\":{\"name\":\"answer\",\"schema\":{\"type\":\"object\"}}}"
|
||||
# filter: # Filter rules remove specified parameters from the payload.
|
||||
# - models:
|
||||
# - name: "gemini-2.5-pro" # Supports wildcards (e.g., "gemini-*")
|
||||
# protocol: "gemini" # restricts the rule to a specific protocol, options: openai, gemini, claude, codex, antigravity
|
||||
# params: # JSON paths (gjson/sjson syntax) to remove from the payload
|
||||
# - "generationConfig.thinkingConfig.thinkingBudget"
|
||||
# - "generationConfig.responseJsonSchema"
|
||||
|
||||
124 docker-build.sh
@@ -5,9 +5,115 @@
|
||||
# This script automates the process of building and running the Docker container
|
||||
# with version information dynamically injected at build time.
|
||||
|
||||
# Exit immediately if a command exits with a non-zero status.
|
||||
# Hidden feature: Preserve usage statistics across rebuilds
|
||||
# Usage: ./docker-build.sh --with-usage
|
||||
# First run prompts for management API key, saved to temp/stats/.api_secret
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
STATS_DIR="temp/stats"
|
||||
STATS_FILE="${STATS_DIR}/.usage_backup.json"
|
||||
SECRET_FILE="${STATS_DIR}/.api_secret"
|
||||
WITH_USAGE=false
|
||||
|
||||
get_port() {
|
||||
if [[ -f "config.yaml" ]]; then
|
||||
grep -E "^port:" config.yaml | sed -E 's/^port: *["'"'"']?([0-9]+)["'"'"']?.*$/\1/'
|
||||
else
|
||||
echo "8317"
|
||||
fi
|
||||
}
|
||||
|
||||
export_stats_api_secret() {
|
||||
if [[ -f "${SECRET_FILE}" ]]; then
|
||||
API_SECRET=$(cat "${SECRET_FILE}")
|
||||
else
|
||||
if [[ ! -d "${STATS_DIR}" ]]; then
|
||||
mkdir -p "${STATS_DIR}"
|
||||
fi
|
||||
echo "First time using --with-usage. Management API key required."
|
||||
read -r -p "Enter management key: " -s API_SECRET
|
||||
echo
|
||||
echo "${API_SECRET}" > "${SECRET_FILE}"
|
||||
chmod 600 "${SECRET_FILE}"
|
||||
fi
|
||||
}
|
||||
|
||||
check_container_running() {
|
||||
local port
|
||||
port=$(get_port)
|
||||
|
||||
if ! curl -s -o /dev/null -w "%{http_code}" "http://localhost:${port}/" | grep -q "200"; then
|
||||
echo "Error: cli-proxy-api service is not responding at localhost:${port}"
|
||||
echo "Please start the container first or use without --with-usage flag."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
export_stats() {
|
||||
local port
|
||||
port=$(get_port)
|
||||
|
||||
if [[ ! -d "${STATS_DIR}" ]]; then
|
||||
mkdir -p "${STATS_DIR}"
|
||||
fi
|
||||
check_container_running
|
||||
echo "Exporting usage statistics..."
|
||||
EXPORT_RESPONSE=$(curl -s -w "\n%{http_code}" -H "X-Management-Key: ${API_SECRET}" \
|
||||
"http://localhost:${port}/v0/management/usage/export")
|
||||
HTTP_CODE=$(echo "${EXPORT_RESPONSE}" | tail -n1)
|
||||
RESPONSE_BODY=$(echo "${EXPORT_RESPONSE}" | sed '$d')
|
||||
|
||||
if [[ "${HTTP_CODE}" != "200" ]]; then
|
||||
echo "Export failed (HTTP ${HTTP_CODE}): ${RESPONSE_BODY}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "${RESPONSE_BODY}" > "${STATS_FILE}"
|
||||
echo "Statistics exported to ${STATS_FILE}"
|
||||
}
|
||||
|
||||
import_stats() {
|
||||
local port
|
||||
port=$(get_port)
|
||||
|
||||
echo "Importing usage statistics..."
|
||||
IMPORT_RESPONSE=$(curl -s -w "\n%{http_code}" -X POST \
|
||||
-H "X-Management-Key: ${API_SECRET}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d @"${STATS_FILE}" \
|
||||
"http://localhost:${port}/v0/management/usage/import")
|
||||
IMPORT_CODE=$(echo "${IMPORT_RESPONSE}" | tail -n1)
|
||||
IMPORT_BODY=$(echo "${IMPORT_RESPONSE}" | sed '$d')
|
||||
|
||||
if [[ "${IMPORT_CODE}" == "200" ]]; then
|
||||
echo "Statistics imported successfully"
|
||||
else
|
||||
echo "Import failed (HTTP ${IMPORT_CODE}): ${IMPORT_BODY}"
|
||||
fi
|
||||
|
||||
rm -f "${STATS_FILE}"
|
||||
}
|
||||
|
||||
wait_for_service() {
|
||||
local port
|
||||
port=$(get_port)
|
||||
|
||||
echo "Waiting for service to be ready..."
|
||||
for i in {1..30}; do
|
||||
if curl -s -o /dev/null -w "%{http_code}" "http://localhost:${port}/" | grep -q "200"; then
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
sleep 2
|
||||
}
|
||||
|
||||
if [[ "${1:-}" == "--with-usage" ]]; then
|
||||
WITH_USAGE=true
|
||||
export_stats_api_secret
|
||||
fi
|
||||
|
||||
# --- Step 1: Choose Environment ---
|
||||
echo "Please select an option:"
|
||||
echo "1) Run using Pre-built Image (Recommended)"
|
||||
@@ -18,7 +124,14 @@ read -r -p "Enter choice [1-2]: " choice
|
||||
case "$choice" in
|
||||
1)
|
||||
echo "--- Running with Pre-built Image ---"
|
||||
if [[ "${WITH_USAGE}" == "true" ]]; then
|
||||
export_stats
|
||||
fi
|
||||
docker compose up -d --remove-orphans --no-build
|
||||
if [[ "${WITH_USAGE}" == "true" ]]; then
|
||||
wait_for_service
|
||||
import_stats
|
||||
fi
|
||||
echo "Services are starting from remote image."
|
||||
echo "Run 'docker compose logs -f' to see the logs."
|
||||
;;
|
||||
@@ -45,9 +158,18 @@ case "$choice" in
|
||||
--build-arg COMMIT="${COMMIT}" \
|
||||
--build-arg BUILD_DATE="${BUILD_DATE}"
|
||||
|
||||
if [[ "${WITH_USAGE}" == "true" ]]; then
|
||||
export_stats
|
||||
fi
|
||||
|
||||
echo "Starting the services..."
|
||||
docker compose up -d --remove-orphans --pull never
|
||||
|
||||
if [[ "${WITH_USAGE}" == "true" ]]; then
|
||||
wait_for_service
|
||||
import_stats
|
||||
fi
|
||||
|
||||
echo "Build complete. Services are starting."
|
||||
echo "Run 'docker compose logs -f' to see the logs."
|
||||
;;
|
||||
|
||||
@@ -10,14 +10,19 @@ services:
|
||||
COMMIT: ${COMMIT:-none}
|
||||
BUILD_DATE: ${BUILD_DATE:-unknown}
|
||||
container_name: cli-proxy-api
|
||||
# env_file:
|
||||
# - .env
|
||||
environment:
|
||||
DEPLOY: ${DEPLOY:-}
|
||||
ports:
|
||||
- "8317:8317"
|
||||
- "8085:8085"
|
||||
- "1455:1455"
|
||||
- "54545:54545"
|
||||
- "51121:51121"
|
||||
- "11451:11451"
|
||||
volumes:
|
||||
- ${CLI_PROXY_CONFIG_PATH:-./config.yaml}:/CLIProxyAPI/config.yaml
- ${CLI_PROXY_AUTH_PATH:-./auths}:/root/.cli-proxy-api
- ${CLI_PROXY_LOG_PATH:-./logs}:/CLIProxyAPI/logs
- ./conv:/CLIProxyAPI/conv
|
||||
restart: unless-stopped
|
||||
176 docs/sdk-access.md (Normal file)
@@ -0,0 +1,176 @@
|
||||
# @sdk/access SDK Reference
|
||||
|
||||
The `github.com/router-for-me/CLIProxyAPI/v6/sdk/access` package centralizes inbound request authentication for the proxy. It offers a lightweight manager that chains credential providers, so servers can reuse the same access control logic inside or outside the CLI runtime.
|
||||
|
||||
## Importing
|
||||
|
||||
```go
|
||||
import (
|
||||
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
)
|
||||
```
|
||||
|
||||
Add the module with `go get github.com/router-for-me/CLIProxyAPI/v6/sdk/access`.
|
||||
|
||||
## Manager Lifecycle
|
||||
|
||||
```go
|
||||
manager := sdkaccess.NewManager()
|
||||
providers, err := sdkaccess.BuildProviders(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
manager.SetProviders(providers)
|
||||
```
|
||||
|
||||
* `NewManager` constructs an empty manager.
|
||||
* `SetProviders` replaces the provider slice using a defensive copy.
|
||||
* `Providers` retrieves a snapshot that can be iterated safely from other goroutines.
|
||||
* `BuildProviders` translates `config.Config` access declarations into runnable providers. When the config omits explicit providers but defines inline API keys, the helper auto-installs the built-in `config-api-key` provider.
|
||||
|
||||
## Authenticating Requests
|
||||
|
||||
```go
|
||||
result, err := manager.Authenticate(ctx, req)
|
||||
switch {
|
||||
case err == nil:
|
||||
// Authentication succeeded; result describes the provider and principal.
|
||||
case errors.Is(err, sdkaccess.ErrNoCredentials):
|
||||
// No recognizable credentials were supplied.
|
||||
case errors.Is(err, sdkaccess.ErrInvalidCredential):
|
||||
// Supplied credentials were present but rejected.
|
||||
default:
|
||||
// Transport-level failure was returned by a provider.
|
||||
}
|
||||
```
|
||||
|
||||
`Manager.Authenticate` walks the configured providers in order. It returns on the first success, skips providers that surface `ErrNotHandled`, and tracks whether any provider reported `ErrNoCredentials` or `ErrInvalidCredential` for downstream error reporting.
|
||||
|
||||
If the manager itself is `nil` or no providers are registered, the call returns `nil, nil`, allowing callers to treat access control as disabled without branching on errors.
|
||||
|
||||
Each `Result` includes the provider identifier, the resolved principal, and optional metadata (for example, which header carried the credential).
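
For example, an embedding server can surface those fields in its own audit log. The snippet below is a minimal sketch; the log wording is illustrative, and the `source` metadata key mirrors the one used by the built-in provider.

```go
result, err := manager.Authenticate(ctx, req)
if err == nil && result != nil {
	// Record who was admitted and which credential location was used.
	log.Printf("access granted provider=%s principal=%s source=%s",
		result.Provider, result.Principal, result.Metadata["source"])
}
```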
|
||||
|
||||
## Configuration Layout
|
||||
|
||||
The manager expects access providers under the `auth.providers` key inside `config.yaml`:
|
||||
|
||||
```yaml
|
||||
auth:
|
||||
providers:
|
||||
- name: inline-api
|
||||
type: config-api-key
|
||||
api-keys:
|
||||
- sk-test-123
|
||||
- sk-prod-456
|
||||
```
|
||||
|
||||
Fields map directly to `config.AccessProvider`: `name` labels the provider, `type` selects the registered factory, `sdk` can name an external module, `api-keys` seeds inline credentials, and `config` passes provider-specific options.
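
As a rough mental model, the struct has roughly the shape sketched below; the field names and YAML tags are inferred from the keys above, so treat the `internal/config` package as the source of truth.

```go
// Illustrative approximation of config.AccessProvider; not the authoritative definition.
type AccessProvider struct {
	Name    string         `yaml:"name"`
	Type    string         `yaml:"type"`
	SDK     string         `yaml:"sdk,omitempty"`
	APIKeys []string       `yaml:"api-keys,omitempty"`
	Config  map[string]any `yaml:"config,omitempty"`
}
```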
|
||||
|
||||
### Loading providers from external SDK modules
|
||||
|
||||
To consume a provider shipped in another Go module, point the `sdk` field at the module path and import it for its registration side effect:
|
||||
|
||||
```yaml
|
||||
auth:
|
||||
providers:
|
||||
- name: partner-auth
|
||||
type: partner-token
|
||||
sdk: github.com/acme/xplatform/sdk/access/providers/partner
|
||||
config:
|
||||
region: us-west-2
|
||||
audience: cli-proxy
|
||||
```
|
||||
|
||||
```go
|
||||
import (
|
||||
_ "github.com/acme/xplatform/sdk/access/providers/partner" // registers partner-token
|
||||
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
||||
)
|
||||
```
|
||||
|
||||
The blank identifier import ensures `init` runs so `sdkaccess.RegisterProvider` executes before `BuildProviders` is called.
|
||||
|
||||
## Built-in Providers
|
||||
|
||||
The SDK ships with one provider out of the box:
|
||||
|
||||
- `config-api-key`: Validates API keys declared inline or under top-level `api-keys`. It accepts the key from `Authorization: Bearer`, `X-Goog-Api-Key`, `X-Api-Key`, or the `?key=` query string and reports `ErrInvalidCredential` when no match is found.
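
For reference, a client can present the key in any of these locations. The sketch below uses only the standard library; the URL, `body`, and `apiKey` values are placeholders.

```go
req, _ := http.NewRequest(http.MethodPost, "http://localhost:8317/v1/chat/completions", body)
// Any one of the following satisfies the config-api-key provider:
req.Header.Set("Authorization", "Bearer "+apiKey)
// req.Header.Set("X-Api-Key", apiKey)
// req.Header.Set("X-Goog-Api-Key", apiKey)
// req.URL.RawQuery = "key=" + url.QueryEscape(apiKey)
```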
|
||||
|
||||
Additional providers can be delivered by third-party packages. When a provider package is imported, it registers itself with `sdkaccess.RegisterProvider`.
|
||||
|
||||
### Metadata and auditing
|
||||
|
||||
`Result.Metadata` carries provider-specific context. The built-in `config-api-key` provider, for example, stores the credential source (`authorization`, `x-goog-api-key`, `x-api-key`, or `query-key`). Populate this map in custom providers to enrich logs and downstream auditing.
|
||||
|
||||
## Writing Custom Providers
|
||||
|
||||
```go
|
||||
type customProvider struct{}
|
||||
|
||||
func (p *customProvider) Identifier() string { return "my-provider" }
|
||||
|
||||
func (p *customProvider) Authenticate(ctx context.Context, r *http.Request) (*sdkaccess.Result, error) {
|
||||
token := r.Header.Get("X-Custom")
|
||||
if token == "" {
|
||||
return nil, sdkaccess.ErrNoCredentials
|
||||
}
|
||||
if token != "expected" {
|
||||
return nil, sdkaccess.ErrInvalidCredential
|
||||
}
|
||||
return &sdkaccess.Result{
|
||||
Provider: p.Identifier(),
|
||||
Principal: "service-user",
|
||||
Metadata: map[string]string{"source": "x-custom"},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
sdkaccess.RegisterProvider("custom", func(cfg *config.AccessProvider, root *config.Config) (sdkaccess.Provider, error) {
|
||||
return &customProvider{}, nil
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
A provider must implement `Identifier()` and `Authenticate()`. To expose it to configuration, call `RegisterProvider` inside `init`. Provider factories receive the specific `AccessProvider` block plus the full root configuration for contextual needs.
|
||||
|
||||
## Error Semantics
|
||||
|
||||
- `ErrNoCredentials`: no credentials were present or recognized by any provider.
|
||||
- `ErrInvalidCredential`: at least one provider processed the credentials but rejected them.
|
||||
- `ErrNotHandled`: instructs the manager to fall through to the next provider without affecting aggregate error reporting.
|
||||
|
||||
Return custom errors to surface transport failures; they propagate immediately to the caller instead of being masked.
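
For instance, a provider that validates tokens against a remote service might distinguish the two cases like this (a sketch; `verifyRemotely` is a hypothetical helper):

```go
ok, err := verifyRemotely(ctx, token) // hypothetical remote check
if err != nil {
	// Transport failure: wrap and return it so the caller sees the real cause.
	return nil, fmt.Errorf("token verification unreachable: %w", err)
}
if !ok {
	return nil, sdkaccess.ErrInvalidCredential
}
```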
|
||||
|
||||
## Integration with cliproxy Service
|
||||
|
||||
`sdk/cliproxy` wires `@sdk/access` automatically when you build a CLI service via `cliproxy.NewBuilder`. Supplying a preconfigured manager allows you to extend or override the default providers:
|
||||
|
||||
```go
|
||||
coreCfg, _ := config.LoadConfig("config.yaml")
|
||||
providers, _ := sdkaccess.BuildProviders(coreCfg)
|
||||
manager := sdkaccess.NewManager()
|
||||
manager.SetProviders(providers)
|
||||
|
||||
svc, _ := cliproxy.NewBuilder().
|
||||
WithConfig(coreCfg).
|
||||
WithAccessManager(manager).
|
||||
Build()
|
||||
```
|
||||
|
||||
The service reuses the manager for every inbound request, ensuring consistent authentication across embedded deployments and the canonical CLI binary.
|
||||
|
||||
### Hot reloading providers
|
||||
|
||||
When configuration changes, rebuild providers and swap them into the manager:
|
||||
|
||||
```go
|
||||
providers, err := sdkaccess.BuildProviders(newCfg)
|
||||
if err != nil {
|
||||
log.Errorf("reload auth providers failed: %v", err)
|
||||
return
|
||||
}
|
||||
accessManager.SetProviders(providers)
|
||||
```
|
||||
|
||||
This mirrors the behaviour in `cliproxy.Service.refreshAccessProviders` and `api.Server.applyAccessConfig`, enabling runtime updates without restarting the process.
|
||||
176 docs/sdk-access_CN.md (Normal file)
@@ -0,0 +1,176 @@
|
||||
# @sdk/access 开发指引
|
||||
|
||||
`github.com/router-for-me/CLIProxyAPI/v6/sdk/access` 包负责代理的入站访问认证。它提供一个轻量的管理器,用于按顺序链接多种凭证校验实现,让服务器在 CLI 运行时内外都能复用相同的访问控制逻辑。
|
||||
|
||||
## 引用方式
|
||||
|
||||
```go
|
||||
import (
|
||||
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
)
|
||||
```
|
||||
|
||||
通过 `go get github.com/router-for-me/CLIProxyAPI/v6/sdk/access` 添加依赖。
|
||||
|
||||
## 管理器生命周期
|
||||
|
||||
```go
|
||||
manager := sdkaccess.NewManager()
|
||||
providers, err := sdkaccess.BuildProviders(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
manager.SetProviders(providers)
|
||||
```
|
||||
|
||||
- `NewManager` 创建空管理器。
|
||||
- `SetProviders` 替换提供者切片并做防御性拷贝。
|
||||
- `Providers` 返回适合并发读取的快照。
|
||||
- `BuildProviders` 将 `config.Config` 中的访问配置转换成可运行的提供者。当配置没有显式声明但包含顶层 `api-keys` 时,会自动挂载内建的 `config-api-key` 提供者。
|
||||
|
||||
## 认证请求
|
||||
|
||||
```go
|
||||
result, err := manager.Authenticate(ctx, req)
|
||||
switch {
|
||||
case err == nil:
|
||||
// Authentication succeeded; result carries provider and principal.
|
||||
case errors.Is(err, sdkaccess.ErrNoCredentials):
|
||||
// No recognizable credentials were supplied.
|
||||
case errors.Is(err, sdkaccess.ErrInvalidCredential):
|
||||
// Credentials were present but rejected.
|
||||
default:
|
||||
// Provider surfaced a transport-level failure.
|
||||
}
|
||||
```
|
||||
|
||||
`Manager.Authenticate` 按配置顺序遍历提供者。遇到成功立即返回,`ErrNotHandled` 会继续尝试下一个;若发现 `ErrNoCredentials` 或 `ErrInvalidCredential`,会在遍历结束后汇总给调用方。
|
||||
|
||||
若管理器本身为 `nil` 或尚未注册提供者,调用会返回 `nil, nil`,让调用方无需针对错误做额外分支即可关闭访问控制。
|
||||
|
||||
`Result` 提供认证提供者标识、解析出的主体以及可选元数据(例如凭证来源)。
|
||||
|
||||
## 配置结构
|
||||
|
||||
在 `config.yaml` 的 `auth.providers` 下定义访问提供者:
|
||||
|
||||
```yaml
|
||||
auth:
|
||||
providers:
|
||||
- name: inline-api
|
||||
type: config-api-key
|
||||
api-keys:
|
||||
- sk-test-123
|
||||
- sk-prod-456
|
||||
```
|
||||
|
||||
条目映射到 `config.AccessProvider`:`name` 指定实例名,`type` 选择注册的工厂,`sdk` 可引用第三方模块,`api-keys` 提供内联凭证,`config` 用于传递特定选项。
|
||||
|
||||
### 引入外部 SDK 提供者
|
||||
|
||||
若要消费其它 Go 模块输出的访问提供者,可在配置里填写 `sdk` 字段并在代码中引入该包,利用其 `init` 注册过程:
|
||||
|
||||
```yaml
|
||||
auth:
|
||||
providers:
|
||||
- name: partner-auth
|
||||
type: partner-token
|
||||
sdk: github.com/acme/xplatform/sdk/access/providers/partner
|
||||
config:
|
||||
region: us-west-2
|
||||
audience: cli-proxy
|
||||
```
|
||||
|
||||
```go
|
||||
import (
|
||||
_ "github.com/acme/xplatform/sdk/access/providers/partner" // registers partner-token
|
||||
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
||||
)
|
||||
```
|
||||
|
||||
通过空白标识符导入即可确保 `init` 调用,先于 `BuildProviders` 完成 `sdkaccess.RegisterProvider`。
|
||||
|
||||
## 内建提供者
|
||||
|
||||
当前 SDK 默认内置:
|
||||
|
||||
- `config-api-key`:校验配置中的 API Key。它从 `Authorization: Bearer`、`X-Goog-Api-Key`、`X-Api-Key` 以及查询参数 `?key=` 提取凭证,不匹配时抛出 `ErrInvalidCredential`。
|
||||
|
||||
导入第三方包即可通过 `sdkaccess.RegisterProvider` 注册更多类型。
|
||||
|
||||
### 元数据与审计
|
||||
|
||||
`Result.Metadata` 用于携带提供者特定的上下文信息。内建的 `config-api-key` 会记录凭证来源(`authorization`、`x-goog-api-key`、`x-api-key` 或 `query-key`)。自定义提供者同样可以填充该 Map,以便丰富日志与审计场景。
|
||||
|
||||
## 编写自定义提供者
|
||||
|
||||
```go
|
||||
type customProvider struct{}
|
||||
|
||||
func (p *customProvider) Identifier() string { return "my-provider" }
|
||||
|
||||
func (p *customProvider) Authenticate(ctx context.Context, r *http.Request) (*sdkaccess.Result, error) {
|
||||
token := r.Header.Get("X-Custom")
|
||||
if token == "" {
|
||||
return nil, sdkaccess.ErrNoCredentials
|
||||
}
|
||||
if token != "expected" {
|
||||
return nil, sdkaccess.ErrInvalidCredential
|
||||
}
|
||||
return &sdkaccess.Result{
|
||||
Provider: p.Identifier(),
|
||||
Principal: "service-user",
|
||||
Metadata: map[string]string{"source": "x-custom"},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
sdkaccess.RegisterProvider("custom", func(cfg *config.AccessProvider, root *config.Config) (sdkaccess.Provider, error) {
|
||||
return &customProvider{}, nil
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
自定义提供者需要实现 `Identifier()` 与 `Authenticate()`。在 `init` 中调用 `RegisterProvider` 暴露给配置层,工厂函数既能读取当前条目,也能访问完整根配置。
|
||||
|
||||
## 错误语义
|
||||
|
||||
- `ErrNoCredentials`:任何提供者都未识别到凭证。
|
||||
- `ErrInvalidCredential`:至少一个提供者处理了凭证但判定无效。
|
||||
- `ErrNotHandled`:告诉管理器跳到下一个提供者,不影响最终错误统计。
|
||||
|
||||
自定义错误(例如网络异常)会马上冒泡返回。
|
||||
|
||||
## 与 cliproxy 集成
|
||||
|
||||
使用 `sdk/cliproxy` 构建服务时会自动接入 `@sdk/access`。如果需要扩展内置行为,可传入自定义管理器:
|
||||
|
||||
```go
|
||||
coreCfg, _ := config.LoadConfig("config.yaml")
|
||||
providers, _ := sdkaccess.BuildProviders(coreCfg)
|
||||
manager := sdkaccess.NewManager()
|
||||
manager.SetProviders(providers)
|
||||
|
||||
svc, _ := cliproxy.NewBuilder().
|
||||
WithConfig(coreCfg).
|
||||
WithAccessManager(manager).
|
||||
Build()
|
||||
```
|
||||
|
||||
服务会复用该管理器处理每一个入站请求,实现与 CLI 二进制一致的访问控制体验。
|
||||
|
||||
### 动态热更新提供者
|
||||
|
||||
当配置发生变化时,可以重新构建提供者并替换当前列表:
|
||||
|
||||
```go
|
||||
providers, err := sdkaccess.BuildProviders(newCfg)
|
||||
if err != nil {
|
||||
log.Errorf("reload auth providers failed: %v", err)
|
||||
return
|
||||
}
|
||||
accessManager.SetProviders(providers)
|
||||
```
|
||||
|
||||
这一流程与 `cliproxy.Service.refreshAccessProviders` 和 `api.Server.applyAccessConfig` 保持一致,避免为更新访问策略而重启进程。
|
||||
138 docs/sdk-advanced.md (Normal file)
@@ -0,0 +1,138 @@
|
||||
# SDK Advanced: Executors & Translators
|
||||
|
||||
This guide explains how to extend the embedded proxy with custom providers and schemas using the SDK. You will:
|
||||
- Implement a provider executor that talks to your upstream API
|
||||
- Register request/response translators for schema conversion
|
||||
- Register models so they appear in `/v1/models`
|
||||
|
||||
The examples use Go 1.24+ and the v6 module path.
|
||||
|
||||
## Concepts
|
||||
|
||||
- Provider executor: a runtime component implementing `auth.ProviderExecutor` that performs outbound calls for a given provider key (e.g., `gemini`, `claude`, `codex`). Executors can also implement `RequestPreparer` to inject credentials on raw HTTP requests.
|
||||
- Translator registry: schema conversion functions routed by `sdk/translator`. The built‑in handlers translate between OpenAI/Gemini/Claude/Codex formats; you can register new ones.
|
||||
- Model registry: publishes the list of available models per client/provider to power `/v1/models` and routing hints.
|
||||
|
||||
## 1) Implement a Provider Executor
|
||||
|
||||
Create a type that satisfies `auth.ProviderExecutor`.
|
||||
|
||||
```go
|
||||
package myprov
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
clipexec "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
|
||||
)
|
||||
|
||||
type Executor struct{}
|
||||
|
||||
func (Executor) Identifier() string { return "myprov" }
|
||||
|
||||
// Optional: mutate outbound HTTP requests with credentials
|
||||
func (Executor) PrepareRequest(req *http.Request, a *coreauth.Auth) error {
|
||||
// Example: req.Header.Set("Authorization", "Bearer "+a.APIKey)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (Executor) Execute(ctx context.Context, a *coreauth.Auth, req clipexec.Request, opts clipexec.Options) (clipexec.Response, error) {
|
||||
// Build HTTP request based on req.Payload (already translated into provider format)
|
||||
// Use per‑auth transport if provided: transport := a.RoundTripper // via RoundTripperProvider
|
||||
// Perform call and return provider JSON payload
|
||||
return clipexec.Response{Payload: []byte(`{"ok":true}`)}, nil
|
||||
}
|
||||
|
||||
func (Executor) ExecuteStream(ctx context.Context, a *coreauth.Auth, req clipexec.Request, opts clipexec.Options) (<-chan clipexec.StreamChunk, error) {
|
||||
ch := make(chan clipexec.StreamChunk, 1)
|
||||
go func() { defer close(ch); ch <- clipexec.StreamChunk{Payload: []byte("data: {\"done\":true}\n\n")} }()
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
func (Executor) Refresh(ctx context.Context, a *coreauth.Auth) (*coreauth.Auth, error) {
|
||||
// Optionally refresh tokens and return updated auth
|
||||
return a, nil
|
||||
}
|
||||
```
|
||||
|
||||
Register the executor with the core manager before starting the service:
|
||||
|
||||
```go
|
||||
core := coreauth.NewManager(coreauth.NewFileStore(cfg.AuthDir), nil, nil)
|
||||
core.RegisterExecutor(myprov.Executor{})
|
||||
svc, _ := cliproxy.NewBuilder().WithConfig(cfg).WithConfigPath(cfgPath).WithCoreAuthManager(core).Build()
|
||||
```
|
||||
|
||||
If your auth entries use provider `"myprov"`, the manager routes requests to your executor.
|
||||
|
||||
## 2) Register Translators
|
||||
|
||||
The handlers accept OpenAI/Gemini/Claude/Codex inputs. To support a new provider format, register translation functions in `sdk/translator`’s default registry.
|
||||
|
||||
Direction matters:
|
||||
- Request: register from inbound schema to provider schema
|
||||
- Response: register from provider schema back to inbound schema
|
||||
|
||||
Example: Convert OpenAI Chat → MyProv Chat and back.
|
||||
|
||||
```go
|
||||
package myprov
|
||||
|
||||
import (
|
||||
"context"
|
||||
sdktr "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
|
||||
)
|
||||
|
||||
const (
|
||||
FOpenAI = sdktr.Format("openai.chat")
|
||||
FMyProv = sdktr.Format("myprov.chat")
|
||||
)
|
||||
|
||||
func init() {
|
||||
sdktr.Register(FOpenAI, FMyProv,
|
||||
// Request transform (model, rawJSON, stream)
|
||||
func(model string, raw []byte, stream bool) []byte { return convertOpenAIToMyProv(model, raw, stream) },
|
||||
// Response transform (stream & non‑stream)
|
||||
sdktr.ResponseTransform{
|
||||
Stream: func(ctx context.Context, model string, originalReq, translatedReq, raw []byte, param *any) []string {
|
||||
return convertStreamMyProvToOpenAI(model, originalReq, translatedReq, raw)
|
||||
},
|
||||
NonStream: func(ctx context.Context, model string, originalReq, translatedReq, raw []byte, param *any) string {
|
||||
return convertMyProvToOpenAI(model, originalReq, translatedReq, raw)
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
```
|
||||
|
||||
When the OpenAI handler receives a request that should route to `myprov`, the pipeline uses the registered transforms automatically.
|
||||
|
||||
## 3) Register Models
|
||||
|
||||
Expose models under `/v1/models` by registering them in the global model registry using the auth ID (client ID) and provider name.
|
||||
|
||||
```go
|
||||
models := []*cliproxy.ModelInfo{
|
||||
{ ID: "myprov-pro-1", Object: "model", Type: "myprov", DisplayName: "MyProv Pro 1" },
|
||||
}
|
||||
cliproxy.GlobalModelRegistry().RegisterClient(authID, "myprov", models)
|
||||
```
|
||||
|
||||
The embedded server does this automatically for built‑in providers; for custom providers, register the models during startup (e.g., after loading auths) or from an auth registration hook, as sketched below.
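
A minimal sketch, assuming you use the `Hooks` described in the SDK usage guide and that `authID` matches the auth entry wired to provider `myprov`:

```go
hooks := cliproxy.Hooks{
	OnAfterStart: func(s *cliproxy.Service) {
		models := []*cliproxy.ModelInfo{
			{ID: "myprov-pro-1", Object: "model", Type: "myprov", DisplayName: "MyProv Pro 1"},
		}
		// Register once auths are loaded so the models show up in /v1/models.
		cliproxy.GlobalModelRegistry().RegisterClient(authID, "myprov", models)
	},
}
svc, _ := cliproxy.NewBuilder().WithConfig(cfg).WithConfigPath(cfgPath).WithHooks(hooks).Build()
```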
|
||||
|
||||
## Credentials & Transports
|
||||
|
||||
- Use `Manager.SetRoundTripperProvider` to inject per‑auth `*http.Transport` (e.g., proxy):
|
||||
```go
|
||||
core.SetRoundTripperProvider(myProvider) // returns transport per auth
|
||||
```
|
||||
- For raw HTTP flows, implement `PrepareRequest` and/or call `Manager.InjectCredentials(req, authID)` to set headers.
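
A rough sketch of that raw-HTTP path follows; the upstream URL and `payload` are placeholders, and error handling is elided, so check the manager's actual signatures before relying on it.

```go
httpReq, _ := http.NewRequestWithContext(ctx, http.MethodPost, "https://upstream.example.com/v1/chat", bytes.NewReader(payload))
core.InjectCredentials(httpReq, authID) // attaches the credentials for this auth entry
resp, err := http.DefaultClient.Do(httpReq)
```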
|
||||
|
||||
## Testing Tips
|
||||
|
||||
- Enable request logging: Management API GET/PUT `/v0/management/request-log`
|
||||
- Toggle debug logs: Management API GET/PUT `/v0/management/debug`
|
||||
- Hot reload changes in `config.yaml` and `auths/` are picked up automatically by the watcher
|
||||
|
||||
131 docs/sdk-advanced_CN.md (Normal file)
@@ -0,0 +1,131 @@
|
||||
# SDK 高级指南:执行器与翻译器
|
||||
|
||||
本文介绍如何使用 SDK 扩展内嵌代理:
|
||||
- 实现自定义 Provider 执行器以调用你的上游 API
|
||||
- 注册请求/响应翻译器进行协议转换
|
||||
- 注册模型以出现在 `/v1/models`
|
||||
|
||||
示例基于 Go 1.24+ 与 v6 模块路径。
|
||||
|
||||
## 概念
|
||||
|
||||
- Provider 执行器:实现 `auth.ProviderExecutor` 的运行时组件,负责某个 provider key(如 `gemini`、`claude`、`codex`)的真正出站调用。若实现 `RequestPreparer` 接口,可在原始 HTTP 请求上注入凭据。
|
||||
- 翻译器注册表:由 `sdk/translator` 驱动的协议转换函数。内置了 OpenAI/Gemini/Claude/Codex 的互转;你也可以注册新的格式转换。
|
||||
- 模型注册表:对外发布可用模型列表,供 `/v1/models` 与路由参考。
|
||||
|
||||
## 1) 实现 Provider 执行器
|
||||
|
||||
创建类型满足 `auth.ProviderExecutor` 接口。
|
||||
|
||||
```go
|
||||
package myprov
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
clipexec "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
|
||||
)
|
||||
|
||||
type Executor struct{}
|
||||
|
||||
func (Executor) Identifier() string { return "myprov" }
|
||||
|
||||
// 可选:在原始 HTTP 请求上注入凭据
|
||||
func (Executor) PrepareRequest(req *http.Request, a *coreauth.Auth) error {
|
||||
// 例如:req.Header.Set("Authorization", "Bearer "+a.Attributes["api_key"])
|
||||
return nil
|
||||
}
|
||||
|
||||
func (Executor) Execute(ctx context.Context, a *coreauth.Auth, req clipexec.Request, opts clipexec.Options) (clipexec.Response, error) {
|
||||
// 基于 req.Payload 构造上游请求,返回上游 JSON 负载
|
||||
return clipexec.Response{Payload: []byte(`{"ok":true}`)}, nil
|
||||
}
|
||||
|
||||
func (Executor) ExecuteStream(ctx context.Context, a *coreauth.Auth, req clipexec.Request, opts clipexec.Options) (<-chan clipexec.StreamChunk, error) {
|
||||
ch := make(chan clipexec.StreamChunk, 1)
|
||||
go func() { defer close(ch); ch <- clipexec.StreamChunk{Payload: []byte("data: {\"done\":true}\n\n")} }()
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
func (Executor) Refresh(ctx context.Context, a *coreauth.Auth) (*coreauth.Auth, error) { return a, nil }
|
||||
```
|
||||
|
||||
在启动服务前将执行器注册到核心管理器:
|
||||
|
||||
```go
|
||||
core := coreauth.NewManager(coreauth.NewFileStore(cfg.AuthDir), nil, nil)
|
||||
core.RegisterExecutor(myprov.Executor{})
|
||||
svc, _ := cliproxy.NewBuilder().WithConfig(cfg).WithConfigPath(cfgPath).WithCoreAuthManager(core).Build()
|
||||
```
|
||||
|
||||
当凭据的 `Provider` 为 `"myprov"` 时,管理器会将请求路由到你的执行器。
|
||||
|
||||
## 2) 注册翻译器
|
||||
|
||||
内置处理器接受 OpenAI/Gemini/Claude/Codex 的入站格式。要支持新的 provider 协议,需要在 `sdk/translator` 的默认注册表中注册转换函数。
|
||||
|
||||
方向很重要:
|
||||
- 请求:从“入站格式”转换为“provider 格式”
|
||||
- 响应:从“provider 格式”转换回“入站格式”
|
||||
|
||||
示例:OpenAI Chat → MyProv Chat 及其反向。
|
||||
|
||||
```go
|
||||
package myprov
|
||||
|
||||
import (
|
||||
"context"
|
||||
sdktr "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
|
||||
)
|
||||
|
||||
const (
|
||||
FOpenAI = sdktr.Format("openai.chat")
|
||||
FMyProv = sdktr.Format("myprov.chat")
|
||||
)
|
||||
|
||||
func init() {
|
||||
sdktr.Register(FOpenAI, FMyProv,
|
||||
func(model string, raw []byte, stream bool) []byte { return convertOpenAIToMyProv(model, raw, stream) },
|
||||
sdktr.ResponseTransform{
|
||||
Stream: func(ctx context.Context, model string, originalReq, translatedReq, raw []byte, param *any) []string {
|
||||
return convertStreamMyProvToOpenAI(model, originalReq, translatedReq, raw)
|
||||
},
|
||||
NonStream: func(ctx context.Context, model string, originalReq, translatedReq, raw []byte, param *any) string {
|
||||
return convertMyProvToOpenAI(model, originalReq, translatedReq, raw)
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
```
|
||||
|
||||
当 OpenAI 处理器接到需要路由到 `myprov` 的请求时,流水线会自动应用已注册的转换。
|
||||
|
||||
## 3) 注册模型
|
||||
|
||||
通过全局模型注册表将模型暴露到 `/v1/models`:
|
||||
|
||||
```go
|
||||
models := []*cliproxy.ModelInfo{
|
||||
{ ID: "myprov-pro-1", Object: "model", Type: "myprov", DisplayName: "MyProv Pro 1" },
|
||||
}
|
||||
cliproxy.GlobalModelRegistry().RegisterClient(authID, "myprov", models)
|
||||
```
|
||||
|
||||
内置 Provider 会自动注册;自定义 Provider 建议在启动时(例如加载到 Auth 后)或在 Auth 注册钩子中调用。
|
||||
|
||||
## 凭据与传输
|
||||
|
||||
- 使用 `Manager.SetRoundTripperProvider` 注入按账户的 `*http.Transport`(例如代理):
|
||||
```go
|
||||
core.SetRoundTripperProvider(myProvider) // 按账户返回 transport
|
||||
```
|
||||
- 对于原始 HTTP 请求,若实现了 `PrepareRequest`,或通过 `Manager.InjectCredentials(req, authID)` 进行头部注入。
|
||||
|
||||
## 测试建议
|
||||
|
||||
- 启用请求日志:管理 API GET/PUT `/v0/management/request-log`
|
||||
- 切换调试日志:管理 API GET/PUT `/v0/management/debug`
|
||||
- 热更新:`config.yaml` 与 `auths/` 变化会自动被侦测并应用
|
||||
|
||||
163 docs/sdk-usage.md (Normal file)
@@ -0,0 +1,163 @@
|
||||
# CLI Proxy SDK Guide
|
||||
|
||||
The `sdk/cliproxy` module exposes the proxy as a reusable Go library so external programs can embed the routing, authentication, hot‑reload, and translation layers without depending on the CLI binary.
|
||||
|
||||
## Install & Import
|
||||
|
||||
```bash
|
||||
go get github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy
|
||||
```
|
||||
|
||||
```go
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy"
|
||||
)
|
||||
```
|
||||
|
||||
Note the `/v6` module path.
|
||||
|
||||
## Minimal Embed
|
||||
|
||||
```go
|
||||
cfg, err := config.LoadConfig("config.yaml")
|
||||
if err != nil { panic(err) }
|
||||
|
||||
svc, err := cliproxy.NewBuilder().
|
||||
WithConfig(cfg).
|
||||
WithConfigPath("config.yaml"). // absolute or working-dir relative
|
||||
Build()
|
||||
if err != nil { panic(err) }
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
if err := svc.Run(ctx); err != nil && !errors.Is(err, context.Canceled) {
|
||||
panic(err)
|
||||
}
|
||||
```
|
||||
|
||||
The service manages config/auth watching, background token refresh, and graceful shutdown. Cancel the context to stop it.
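
To stop on OS signals instead of a manual cancel, you can derive the context with `signal.NotifyContext`; this sketch uses only the standard library (`os/signal`, `syscall`).

```go
// Cancel the context on Ctrl+C or SIGTERM; Run shuts the service down gracefully.
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
defer stop()

if err := svc.Run(ctx); err != nil && !errors.Is(err, context.Canceled) {
	panic(err)
}
```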
|
||||
|
||||
## Server Options (middleware, routes, logs)
|
||||
|
||||
The server accepts options via `WithServerOptions`:
|
||||
|
||||
```go
|
||||
svc, _ := cliproxy.NewBuilder().
|
||||
WithConfig(cfg).
|
||||
WithConfigPath("config.yaml").
|
||||
WithServerOptions(
|
||||
// Add global middleware
|
||||
cliproxy.WithMiddleware(func(c *gin.Context) { c.Header("X-Embed", "1"); c.Next() }),
|
||||
// Tweak gin engine early (CORS, trusted proxies, etc.)
|
||||
cliproxy.WithEngineConfigurator(func(e *gin.Engine) { e.ForwardedByClientIP = true }),
|
||||
// Add your own routes after defaults
|
||||
cliproxy.WithRouterConfigurator(func(e *gin.Engine, _ *handlers.BaseAPIHandler, _ *config.Config) {
|
||||
e.GET("/healthz", func(c *gin.Context) { c.String(200, "ok") })
|
||||
}),
|
||||
// Override request log writer/dir
|
||||
cliproxy.WithRequestLoggerFactory(func(cfg *config.Config, cfgPath string) logging.RequestLogger {
|
||||
return logging.NewFileRequestLogger(true, "logs", filepath.Dir(cfgPath))
|
||||
}),
|
||||
).
|
||||
Build()
|
||||
```
|
||||
|
||||
These options mirror the internals used by the CLI server.
|
||||
|
||||
## Management API (when embedded)
|
||||
|
||||
- Management endpoints are mounted only when `remote-management.secret-key` is set in `config.yaml`.
|
||||
- Remote access additionally requires `remote-management.allow-remote: true`.
|
||||
- See MANAGEMENT_API.md for endpoints. Your embedded server exposes them under `/v0/management` on the configured port.
|
||||
|
||||
## Using the Core Auth Manager
|
||||
|
||||
The service uses a core `auth.Manager` for selection, execution, and auto‑refresh. When embedding, you can provide your own manager to customize transports or hooks:
|
||||
|
||||
```go
|
||||
core := coreauth.NewManager(coreauth.NewFileStore(cfg.AuthDir), nil, nil)
|
||||
core.SetRoundTripperProvider(myRTProvider) // per‑auth *http.Transport
|
||||
|
||||
svc, _ := cliproxy.NewBuilder().
|
||||
WithConfig(cfg).
|
||||
WithConfigPath("config.yaml").
|
||||
WithCoreAuthManager(core).
|
||||
Build()
|
||||
```
|
||||
|
||||
Implement a custom per‑auth transport:
|
||||
|
||||
```go
|
||||
type myRTProvider struct{}
|
||||
func (myRTProvider) RoundTripperFor(a *coreauth.Auth) http.RoundTripper {
|
||||
if a == nil || a.ProxyURL == "" { return nil }
|
||||
u, _ := url.Parse(a.ProxyURL)
|
||||
return &http.Transport{ Proxy: http.ProxyURL(u) }
|
||||
}
|
||||
```
|
||||
|
||||
Programmatic execution is available on the manager:
|
||||
|
||||
```go
|
||||
// Non‑streaming
|
||||
resp, err := core.Execute(ctx, []string{"gemini"}, req, opts)
|
||||
|
||||
// Streaming
|
||||
chunks, err := core.ExecuteStream(ctx, []string{"gemini"}, req, opts)
|
||||
for ch := range chunks { /* ... */ }
|
||||
```
|
||||
|
||||
Note: Built‑in provider executors are wired automatically when you run the `Service`. If you want to use `Manager` stand‑alone without the HTTP server, you must register your own executors that implement `auth.ProviderExecutor`.
|
||||
|
||||
## Custom Client Sources
|
||||
|
||||
Replace the default loaders if your creds live outside the local filesystem:
|
||||
|
||||
```go
|
||||
type memoryTokenProvider struct{}
|
||||
func (p *memoryTokenProvider) Load(ctx context.Context, cfg *config.Config) (*cliproxy.TokenClientResult, error) {
|
||||
// Populate from memory/remote store and return counts
|
||||
return &cliproxy.TokenClientResult{}, nil
|
||||
}
|
||||
|
||||
svc, _ := cliproxy.NewBuilder().
|
||||
WithConfig(cfg).
|
||||
WithConfigPath("config.yaml").
|
||||
WithTokenClientProvider(&memoryTokenProvider{}).
|
||||
WithAPIKeyClientProvider(cliproxy.NewAPIKeyClientProvider()).
|
||||
Build()
|
||||
```
|
||||
|
||||
## Hooks
|
||||
|
||||
Observe lifecycle without patching internals:
|
||||
|
||||
```go
|
||||
hooks := cliproxy.Hooks{
|
||||
OnBeforeStart: func(cfg *config.Config) { log.Infof("starting on :%d", cfg.Port) },
|
||||
OnAfterStart: func(s *cliproxy.Service) { log.Info("ready") },
|
||||
}
|
||||
svc, _ := cliproxy.NewBuilder().WithConfig(cfg).WithConfigPath("config.yaml").WithHooks(hooks).Build()
|
||||
```
|
||||
|
||||
## Shutdown
|
||||
|
||||
`Run` defers `Shutdown`, so cancelling the parent context is enough. To stop manually:
|
||||
|
||||
```go
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
_ = svc.Shutdown(ctx)
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- Hot reload: changes to `config.yaml` and `auths/` are picked up automatically.
|
||||
- Request logging can be toggled at runtime via the Management API.
|
||||
- Gemini Web features (`gemini-web.*`) are honored in the embedded server.
|
||||
164 docs/sdk-usage_CN.md (Normal file)
@@ -0,0 +1,164 @@
|
||||
# CLI Proxy SDK 使用指南
|
||||
|
||||
`sdk/cliproxy` 模块将代理能力以 Go 库的形式对外暴露,方便在其它服务中内嵌路由、鉴权、热更新与翻译层,而无需依赖可执行的 CLI 程序。
|
||||
|
||||
## 安装与导入
|
||||
|
||||
```bash
|
||||
go get github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy
|
||||
```
|
||||
|
||||
```go
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy"
|
||||
)
|
||||
```
|
||||
|
||||
注意模块路径包含 `/v6`。
|
||||
|
||||
## 最小可用示例
|
||||
|
||||
```go
|
||||
cfg, err := config.LoadConfig("config.yaml")
|
||||
if err != nil { panic(err) }
|
||||
|
||||
svc, err := cliproxy.NewBuilder().
|
||||
WithConfig(cfg).
|
||||
WithConfigPath("config.yaml"). // 绝对路径或工作目录相对路径
|
||||
Build()
|
||||
if err != nil { panic(err) }
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
if err := svc.Run(ctx); err != nil && !errors.Is(err, context.Canceled) {
|
||||
panic(err)
|
||||
}
|
||||
```
|
||||
|
||||
服务内部会管理配置与认证文件的监听、后台令牌刷新与优雅关闭。取消上下文即可停止服务。
|
||||
|
||||
## 服务器可选项(中间件、路由、日志)
|
||||
|
||||
通过 `WithServerOptions` 自定义:
|
||||
|
||||
```go
|
||||
svc, _ := cliproxy.NewBuilder().
|
||||
WithConfig(cfg).
|
||||
WithConfigPath("config.yaml").
|
||||
WithServerOptions(
|
||||
// 追加全局中间件
|
||||
cliproxy.WithMiddleware(func(c *gin.Context) { c.Header("X-Embed", "1"); c.Next() }),
|
||||
// 提前调整 gin 引擎(如 CORS、trusted proxies)
|
||||
cliproxy.WithEngineConfigurator(func(e *gin.Engine) { e.ForwardedByClientIP = true }),
|
||||
// 在默认路由之后追加自定义路由
|
||||
cliproxy.WithRouterConfigurator(func(e *gin.Engine, _ *handlers.BaseAPIHandler, _ *config.Config) {
|
||||
e.GET("/healthz", func(c *gin.Context) { c.String(200, "ok") })
|
||||
}),
|
||||
// 覆盖请求日志的创建(启用/目录)
|
||||
cliproxy.WithRequestLoggerFactory(func(cfg *config.Config, cfgPath string) logging.RequestLogger {
|
||||
return logging.NewFileRequestLogger(true, "logs", filepath.Dir(cfgPath))
|
||||
}),
|
||||
).
|
||||
Build()
|
||||
```
|
||||
|
||||
这些选项与 CLI 服务器内部用法保持一致。
|
||||
|
||||
## 管理 API(内嵌时)
|
||||
|
||||
- 仅当 `config.yaml` 中设置了 `remote-management.secret-key` 时才会挂载管理端点。
|
||||
- 远程访问还需要 `remote-management.allow-remote: true`。
|
||||
- 具体端点见 MANAGEMENT_API_CN.md。内嵌服务器会在配置端口下暴露 `/v0/management`。
|
||||
|
||||
## Using the Core Auth Manager

Internally the service uses the core `auth.Manager` for credential selection, execution, and automatic refresh. When embedding, you can customize its transports or hooks:

```go
core := coreauth.NewManager(coreauth.NewFileStore(cfg.AuthDir), nil, nil)
core.SetRoundTripperProvider(myRTProvider) // returns a *http.Transport per account

svc, _ := cliproxy.NewBuilder().
    WithConfig(cfg).
    WithConfigPath("config.yaml").
    WithCoreAuthManager(core).
    Build()
```

Implementing a per-account custom transport:

```go
type myRTProvider struct{}

func (myRTProvider) RoundTripperFor(a *coreauth.Auth) http.RoundTripper {
    if a == nil || a.ProxyURL == "" { return nil }
    u, _ := url.Parse(a.ProxyURL)
    return &http.Transport{Proxy: http.ProxyURL(u)}
}
```

The manager also offers a programmatic execution interface:

```go
// Non-streaming
resp, err := core.Execute(ctx, []string{"gemini"}, req, opts)

// Streaming
chunks, err := core.ExecuteStream(ctx, []string{"gemini"}, req, opts)
for ch := range chunks { /* ... */ }
```

Note: running the `Service` registers the built-in provider executors automatically; if you use the `Manager` on its own without starting the HTTP server, you must implement and register executors satisfying `auth.ProviderExecutor` yourself, as sketched below.

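For orientation, here is a minimal executor skeleton modelled on the EchoExecutor/MyExecutor examples elsewhere in this changeset. It is a sketch, not the SDK's own code: it assumes the method set shown in those examples is what `RegisterExecutor` expects, and every call is stubbed.

```go
package embedexample

import (
    "context"
    "errors"
    "net/http"

    coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
    clipexec "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
)

// stubExecutor is a do-nothing provider executor; replace the stubs with real upstream calls.
type stubExecutor struct{}

// Identifier names the provider this executor serves.
func (stubExecutor) Identifier() string { return "myprovider" }

// PrepareRequest is where credentials from auth would be injected into the request.
func (stubExecutor) PrepareRequest(*http.Request, *coreauth.Auth) error { return nil }

func (stubExecutor) Execute(context.Context, *coreauth.Auth, clipexec.Request, clipexec.Options) (clipexec.Response, error) {
    return clipexec.Response{}, errors.New("not implemented")
}

func (stubExecutor) ExecuteStream(context.Context, *coreauth.Auth, clipexec.Request, clipexec.Options) (<-chan clipexec.StreamChunk, error) {
    return nil, errors.New("not implemented")
}

func (stubExecutor) CountTokens(context.Context, *coreauth.Auth, clipexec.Request, clipexec.Options) (clipexec.Response, error) {
    return clipexec.Response{}, errors.New("not implemented")
}

// Refresh is a no-op here; a real executor would renew tokens and return the updated auth.
func (stubExecutor) Refresh(_ context.Context, a *coreauth.Auth) (*coreauth.Auth, error) { return a, nil }

// Registration (inside your setup code): core.RegisterExecutor(stubExecutor{})
```
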
## Custom Credential Sources

When credentials do not live on the local filesystem, replace the default loaders:

```go
type memoryTokenProvider struct{}

func (p *memoryTokenProvider) Load(ctx context.Context, cfg *config.Config) (*cliproxy.TokenClientResult, error) {
    // Load from memory / a remote store and return the counts.
    return &cliproxy.TokenClientResult{}, nil
}

svc, _ := cliproxy.NewBuilder().
    WithConfig(cfg).
    WithConfigPath("config.yaml").
    WithTokenClientProvider(&memoryTokenProvider{}).
    WithAPIKeyClientProvider(cliproxy.NewAPIKeyClientProvider()).
    Build()
```

## Startup Hooks

Observe the lifecycle without modifying internal code:

```go
hooks := cliproxy.Hooks{
    OnBeforeStart: func(cfg *config.Config) { log.Infof("starting on :%d", cfg.Port) },
    OnAfterStart:  func(s *cliproxy.Service) { log.Info("ready") },
}
svc, _ := cliproxy.NewBuilder().WithConfig(cfg).WithConfigPath("config.yaml").WithHooks(hooks).Build()
```

## Shutdown

`Run` defers `Shutdown` internally, so cancelling the parent context is enough. To stop manually:

```go
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
_ = svc.Shutdown(ctx)
```

## Notes

- Hot reload: changes to `config.yaml` and `auths/` are detected and applied automatically.
- Request logging can be toggled at runtime via the Management API.
- `gemini-web.*` settings are honored in the embedded server.

docs/sdk-watcher.md (new file, 32 lines)
@@ -0,0 +1,32 @@
# SDK Watcher Integration

The SDK service exposes a watcher integration that surfaces granular auth updates without forcing a full reload. This document explains the queue contract, how the service consumes updates, and how high-frequency change bursts are handled.

## Update Queue Contract

- `watcher.AuthUpdate` represents a single credential change. `Action` may be `add`, `modify`, or `delete`, and `ID` carries the credential identifier. For `add`/`modify` the `Auth` payload contains a fully populated clone of the credential; `delete` may omit `Auth`.
- `WatcherWrapper.SetAuthUpdateQueue(chan<- watcher.AuthUpdate)` wires the queue produced by the SDK service into the watcher. The queue must be created before the watcher starts.
- The service builds the queue via `ensureAuthUpdateQueue`, using a buffered channel (`capacity=256`) and a dedicated consumer goroutine (`consumeAuthUpdates`). The consumer drains bursts by looping through the backlog before reacquiring the select loop; a minimal consumer sketch follows this list.

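The sketch below illustrates that contract and is not taken from the SDK source. It assumes the `watcher.AuthUpdate` type and the `SetAuthUpdateQueue` signature quoted above; `apply` stands in for the service's `handleAuthUpdate`, and the interface is local so the sketch does not depend on where `WatcherWrapper` is defined.

```go
package embedexample

import (
    "context"

    "github.com/router-for-me/CLIProxyAPI/v6/internal/watcher"
)

// authQueueSetter matches the SetAuthUpdateQueue signature described above.
type authQueueSetter interface {
    SetAuthUpdateQueue(chan<- watcher.AuthUpdate)
}

// wireAuthQueue allocates the buffered queue, hands it to the watcher, and
// starts a consumer goroutine. Call this before starting the watcher.
func wireAuthQueue(ctx context.Context, w authQueueSetter, apply func(watcher.AuthUpdate)) {
    queue := make(chan watcher.AuthUpdate, 256) // same capacity the service uses
    w.SetAuthUpdateQueue(queue)
    go consumeAuthUpdates(ctx, queue, apply)
}

// consumeAuthUpdates applies one update, then drains any backlog that piled up
// during the apply so bursts are absorbed before blocking on the channel again.
func consumeAuthUpdates(ctx context.Context, queue <-chan watcher.AuthUpdate, apply func(watcher.AuthUpdate)) {
    for {
        select {
        case <-ctx.Done():
            return
        case update := <-queue:
            apply(update)
        drain:
            for {
                select {
                case next := <-queue:
                    apply(next)
                default:
                    break drain
                }
            }
        }
    }
}
```
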
## Watcher Behaviour

- `internal/watcher/watcher.go` keeps a shadow snapshot of auth state (`currentAuths`). Each filesystem or configuration event triggers a recomputation and a diff against the previous snapshot to produce minimal `AuthUpdate` entries that mirror adds, edits, and removals.
- Updates are coalesced per credential identifier. If multiple changes occur before dispatch (e.g., a write followed by a delete), only the final action is sent downstream.
- The watcher runs an internal dispatch loop that buffers pending updates in memory and forwards them asynchronously to the queue. Producers never block on channel capacity; they enqueue into the in-memory buffer and signal the dispatcher. Dispatch is cancelled when the watcher stops, guaranteeing goroutines exit cleanly.

## High-Frequency Change Handling

- The dispatch loop and service consumer run independently, preventing filesystem watchers from blocking even when many updates arrive at once.
- Back-pressure is absorbed in two places (see the coalescing sketch after this list):
  - The dispatch buffer (map + order slice) coalesces repeated updates for the same credential until the consumer catches up.
  - The service channel capacity (256) combined with the consumer drain loop ensures several bursts can be processed without oscillation.
- If the queue is saturated for an extended period, updates continue to be merged, so the latest state is eventually applied without replaying redundant intermediate states.

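This is a minimal sketch of the "map + order slice" coalescing idea described above, not the watcher's actual implementation; it assumes the `AuthUpdate` fields listed earlier, and real code would additionally need locking and a signal to wake the dispatcher.

```go
package embedexample

import "github.com/router-for-me/CLIProxyAPI/v6/internal/watcher"

// dispatchBuffer coalesces pending updates per credential ID while preserving
// first-seen order, mirroring the dispatch buffer described above.
type dispatchBuffer struct {
    pending map[string]watcher.AuthUpdate
    order   []string
}

func newDispatchBuffer() *dispatchBuffer {
    return &dispatchBuffer{pending: make(map[string]watcher.AuthUpdate)}
}

// enqueue records the update; a later change to the same ID overwrites the
// earlier one, so only the final action is ever dispatched downstream.
func (b *dispatchBuffer) enqueue(u watcher.AuthUpdate) {
    if _, ok := b.pending[u.ID]; !ok {
        b.order = append(b.order, u.ID)
    }
    b.pending[u.ID] = u
}

// drain returns the coalesced updates in arrival order and resets the buffer.
func (b *dispatchBuffer) drain() []watcher.AuthUpdate {
    out := make([]watcher.AuthUpdate, 0, len(b.order))
    for _, id := range b.order {
        out = append(out, b.pending[id])
    }
    b.pending = make(map[string]watcher.AuthUpdate)
    b.order = b.order[:0]
    return out
}
```
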
## Usage Checklist

1. Instantiate the SDK service (builder or manual construction).
2. Call `ensureAuthUpdateQueue` before starting the watcher to allocate the shared channel.
3. When the `WatcherWrapper` is created, call `SetAuthUpdateQueue` with the service queue, then start the watcher.
4. Provide a reload callback that handles configuration updates; auth deltas arrive via the queue and are applied by the service automatically through `handleAuthUpdate`.

Following this flow keeps auth changes responsive while avoiding full reloads for every edit.

docs/sdk-watcher_CN.md (new file, 32 lines)
@@ -0,0 +1,32 @@
# SDK Watcher Integration

This document describes the incremental update queue between the SDK service and the file watcher, covering the interface contract, how high-frequency changes are handled, and the integration steps.

## Update Queue Contract

- `watcher.AuthUpdate` describes a single credential change. `Action` may be `add`, `modify`, or `delete`, and `ID` is the credential identifier. `add`/`modify` carry a full clone of `Auth`; `delete` may omit `Auth`.
- `WatcherWrapper.SetAuthUpdateQueue(chan<- watcher.AuthUpdate)` injects the queue created on the service side into the watcher; this must happen before the watcher starts.
- The service creates a buffered channel with capacity 256 via `ensureAuthUpdateQueue` and consumes it with a dedicated goroutine in `consumeAuthUpdates`; the consumer actively drains the backlog to reduce switching overhead.

## Watcher Behaviour

- `internal/watcher/watcher.go` maintains the `currentAuths` snapshot. After a file or configuration event it rebuilds the snapshot, diffs it against the previous one, and produces a minimal list of `AuthUpdate` entries.
- Updates are merged per credential ID: multiple changes to the same credential within a short window keep only the latest state (for example, a write followed by a delete only emits `delete`).
- The watcher runs an asynchronous dispatch loop internally: producers only append events to an in-memory buffer and wake the dispatch goroutine, so the file-event thread never blocks even if the channel is momentarily full. Stopping the watcher cancels the dispatch loop, ensuring its goroutine exits cleanly.

## High-Frequency Change Handling

- The dispatch loop and the service's consumer goroutine run independently, so even a large burst of changes does not block the watcher's event handling.
- Back-pressure is absorbed by two levels of buffering:
  - The dispatch buffer (map + ordered slice) merges duplicate events for the same credential until the consumer catches up.
  - The 256-slot service channel, combined with the consumer's drain logic, handles several consecutive bursts smoothly.
- If the channel stays under pressure for a long time, the buffer keeps merging events, so once the consumer recovers it applies the latest state in one pass instead of replaying meaningless intermediate states.

## Integration Steps

1. Instantiate the SDK Service (via the builder or manual construction).
2. Call `ensureAuthUpdateQueue` before starting the watcher to create the shared channel.
3. Immediately after the watcher is created by its factory function, call `SetAuthUpdateQueue` to inject the channel, then start the watcher.
4. Keep the reload callback focused on configuration updates; auth deltas arrive through the queue and are applied automatically by `handleAuthUpdate`.

Following this flow keeps credential changes responsive while avoiding a full reload for every edit.

@@ -14,6 +14,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -23,13 +24,13 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/api"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/logging"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api"
|
||||
sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy"
|
||||
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
clipexec "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/logging"
|
||||
sdktr "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
|
||||
)
|
||||
|
||||
@@ -122,7 +123,9 @@ func (MyExecutor) Execute(ctx context.Context, a *coreauth.Auth, req clipexec.Re
|
||||
httpReq.Header.Set("Content-Type", "application/json")
|
||||
|
||||
// Inject credentials via PrepareRequest hook.
|
||||
_ = (MyExecutor{}).PrepareRequest(httpReq, a)
|
||||
if errPrep := (MyExecutor{}).PrepareRequest(httpReq, a); errPrep != nil {
|
||||
return clipexec.Response{}, errPrep
|
||||
}
|
||||
|
||||
resp, errDo := client.Do(httpReq)
|
||||
if errDo != nil {
|
||||
@@ -130,13 +133,32 @@ func (MyExecutor) Execute(ctx context.Context, a *coreauth.Auth, req clipexec.Re
|
||||
}
|
||||
defer func() {
|
||||
if errClose := resp.Body.Close(); errClose != nil {
|
||||
// Best-effort close; log if needed in real projects.
|
||||
fmt.Fprintf(os.Stderr, "close response body error: %v\n", errClose)
|
||||
}
|
||||
}()
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return clipexec.Response{Payload: body}, nil
|
||||
}
|
||||
|
||||
func (MyExecutor) HttpRequest(ctx context.Context, a *coreauth.Auth, req *http.Request) (*http.Response, error) {
|
||||
if req == nil {
|
||||
return nil, fmt.Errorf("myprov executor: request is nil")
|
||||
}
|
||||
if ctx == nil {
|
||||
ctx = req.Context()
|
||||
}
|
||||
httpReq := req.WithContext(ctx)
|
||||
if errPrep := (MyExecutor{}).PrepareRequest(httpReq, a); errPrep != nil {
|
||||
return nil, errPrep
|
||||
}
|
||||
client := buildHTTPClient(a)
|
||||
return client.Do(httpReq)
|
||||
}
|
||||
|
||||
func (MyExecutor) CountTokens(context.Context, *coreauth.Auth, clipexec.Request, clipexec.Options) (clipexec.Response, error) {
|
||||
return clipexec.Response{}, errors.New("count tokens not implemented")
|
||||
}
|
||||
|
||||
func (MyExecutor) ExecuteStream(ctx context.Context, a *coreauth.Auth, req clipexec.Request, opts clipexec.Options) (<-chan clipexec.StreamChunk, error) {
|
||||
ch := make(chan clipexec.StreamChunk, 1)
|
||||
go func() {
|
||||
@@ -160,11 +182,7 @@ func main() {
|
||||
if dirSetter, ok := tokenStore.(interface{ SetBaseDir(string) }); ok {
|
||||
dirSetter.SetBaseDir(cfg.AuthDir)
|
||||
}
|
||||
store, ok := tokenStore.(coreauth.Store)
|
||||
if !ok {
|
||||
panic("token store does not implement coreauth.Store")
|
||||
}
|
||||
core := coreauth.NewManager(store, nil, nil)
|
||||
core := coreauth.NewManager(tokenStore, nil, nil)
|
||||
core.RegisterExecutor(MyExecutor{})
|
||||
|
||||
hooks := cliproxy.Hooks{
|
||||
@@ -187,7 +205,7 @@ func main() {
|
||||
// Optional: add a simple middleware + custom request logger
|
||||
api.WithMiddleware(func(c *gin.Context) { c.Header("X-Example", "custom-provider"); c.Next() }),
|
||||
api.WithRequestLoggerFactory(func(cfg *config.Config, cfgPath string) logging.RequestLogger {
|
||||
return logging.NewFileRequestLogger(true, "logs", filepath.Dir(cfgPath))
|
||||
return logging.NewFileRequestLoggerWithOptions(true, "logs", filepath.Dir(cfgPath), cfg.ErrorLogsMaxFiles)
|
||||
}),
|
||||
).
|
||||
WithHooks(hooks).
|
||||
@@ -199,8 +217,8 @@ func main() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
if err := svc.Run(ctx); err != nil && !errors.Is(err, context.Canceled) {
|
||||
panic(err)
|
||||
if errRun := svc.Run(ctx); errRun != nil && !errors.Is(errRun, context.Canceled) {
|
||||
panic(errRun)
|
||||
}
|
||||
_ = os.Stderr // keep os import used (demo only)
|
||||
_ = time.Second
|
||||
|
||||
examples/http-request/main.go (new file, 140 lines)
@@ -0,0 +1,140 @@
|
||||
// Package main demonstrates how to use coreauth.Manager.HttpRequest/NewHttpRequest
|
||||
// to execute arbitrary HTTP requests with provider credentials injected.
|
||||
//
|
||||
// This example registers a minimal custom executor that injects an Authorization
|
||||
// header from auth.Attributes["api_key"], then performs two requests against
|
||||
// httpbin.org to show the injected headers.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
clipexec "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const providerKey = "echo"
|
||||
|
||||
// EchoExecutor is a minimal provider implementation for demonstration purposes.
|
||||
type EchoExecutor struct{}
|
||||
|
||||
func (EchoExecutor) Identifier() string { return providerKey }
|
||||
|
||||
func (EchoExecutor) PrepareRequest(req *http.Request, auth *coreauth.Auth) error {
|
||||
if req == nil || auth == nil {
|
||||
return nil
|
||||
}
|
||||
if auth.Attributes != nil {
|
||||
if apiKey := strings.TrimSpace(auth.Attributes["api_key"]); apiKey != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (EchoExecutor) HttpRequest(ctx context.Context, auth *coreauth.Auth, req *http.Request) (*http.Response, error) {
|
||||
if req == nil {
|
||||
return nil, fmt.Errorf("echo executor: request is nil")
|
||||
}
|
||||
if ctx == nil {
|
||||
ctx = req.Context()
|
||||
}
|
||||
httpReq := req.WithContext(ctx)
|
||||
if errPrep := (EchoExecutor{}).PrepareRequest(httpReq, auth); errPrep != nil {
|
||||
return nil, errPrep
|
||||
}
|
||||
return http.DefaultClient.Do(httpReq)
|
||||
}
|
||||
|
||||
func (EchoExecutor) Execute(context.Context, *coreauth.Auth, clipexec.Request, clipexec.Options) (clipexec.Response, error) {
|
||||
return clipexec.Response{}, errors.New("echo executor: Execute not implemented")
|
||||
}
|
||||
|
||||
func (EchoExecutor) ExecuteStream(context.Context, *coreauth.Auth, clipexec.Request, clipexec.Options) (<-chan clipexec.StreamChunk, error) {
|
||||
return nil, errors.New("echo executor: ExecuteStream not implemented")
|
||||
}
|
||||
|
||||
func (EchoExecutor) Refresh(context.Context, *coreauth.Auth) (*coreauth.Auth, error) {
|
||||
return nil, errors.New("echo executor: Refresh not implemented")
|
||||
}
|
||||
|
||||
func (EchoExecutor) CountTokens(context.Context, *coreauth.Auth, clipexec.Request, clipexec.Options) (clipexec.Response, error) {
|
||||
return clipexec.Response{}, errors.New("echo executor: CountTokens not implemented")
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.SetLevel(log.InfoLevel)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
core := coreauth.NewManager(nil, nil, nil)
|
||||
core.RegisterExecutor(EchoExecutor{})
|
||||
|
||||
auth := &coreauth.Auth{
|
||||
ID: "demo-echo",
|
||||
Provider: providerKey,
|
||||
Attributes: map[string]string{
|
||||
"api_key": "demo-api-key",
|
||||
},
|
||||
}
|
||||
|
||||
// Example 1: Build a prepared request and execute it using your own http.Client.
|
||||
reqPrepared, errReqPrepared := core.NewHttpRequest(
|
||||
ctx,
|
||||
auth,
|
||||
http.MethodGet,
|
||||
"https://httpbin.org/anything",
|
||||
nil,
|
||||
http.Header{"X-Example": []string{"prepared"}},
|
||||
)
|
||||
if errReqPrepared != nil {
|
||||
panic(errReqPrepared)
|
||||
}
|
||||
respPrepared, errDoPrepared := http.DefaultClient.Do(reqPrepared)
|
||||
if errDoPrepared != nil {
|
||||
panic(errDoPrepared)
|
||||
}
|
||||
defer func() {
|
||||
if errClose := respPrepared.Body.Close(); errClose != nil {
|
||||
log.Errorf("close response body error: %v", errClose)
|
||||
}
|
||||
}()
|
||||
bodyPrepared, errReadPrepared := io.ReadAll(respPrepared.Body)
|
||||
if errReadPrepared != nil {
|
||||
panic(errReadPrepared)
|
||||
}
|
||||
fmt.Printf("Prepared request status: %d\n%s\n\n", respPrepared.StatusCode, bodyPrepared)
|
||||
|
||||
// Example 2: Execute a raw request via core.HttpRequest (auto inject + do).
|
||||
rawBody := []byte(`{"hello":"world"}`)
|
||||
rawReq, errRawReq := http.NewRequestWithContext(ctx, http.MethodPost, "https://httpbin.org/anything", bytes.NewReader(rawBody))
|
||||
if errRawReq != nil {
|
||||
panic(errRawReq)
|
||||
}
|
||||
rawReq.Header.Set("Content-Type", "application/json")
|
||||
rawReq.Header.Set("X-Example", "executed")
|
||||
|
||||
respExec, errDoExec := core.HttpRequest(ctx, auth, rawReq)
|
||||
if errDoExec != nil {
|
||||
panic(errDoExec)
|
||||
}
|
||||
defer func() {
|
||||
if errClose := respExec.Body.Close(); errClose != nil {
|
||||
log.Errorf("close response body error: %v", errClose)
|
||||
}
|
||||
}()
|
||||
bodyExec, errReadExec := io.ReadAll(respExec.Body)
|
||||
if errReadExec != nil {
|
||||
panic(errReadExec)
|
||||
}
|
||||
fmt.Printf("Manager HttpRequest status: %d\n%s\n", respExec.StatusCode, bodyExec)
|
||||
}
|
||||
examples/translator/main.go (new file, 42 lines)
@@ -0,0 +1,42 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
|
||||
_ "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator/builtin"
|
||||
)
|
||||
|
||||
func main() {
|
||||
rawRequest := []byte(`{"messages":[{"content":[{"text":"Hello! Gemini","type":"text"}],"role":"user"}],"model":"gemini-2.5-pro","stream":false}`)
|
||||
fmt.Println("Has gemini->openai response translator:", translator.HasResponseTransformerByFormatName(
|
||||
translator.FormatGemini,
|
||||
translator.FormatOpenAI,
|
||||
))
|
||||
|
||||
translatedRequest := translator.TranslateRequestByFormatName(
|
||||
translator.FormatOpenAI,
|
||||
translator.FormatGemini,
|
||||
"gemini-2.5-pro",
|
||||
rawRequest,
|
||||
false,
|
||||
)
|
||||
|
||||
fmt.Printf("Translated request to Gemini format:\n%s\n\n", translatedRequest)
|
||||
|
||||
claudeResponse := []byte(`{"candidates":[{"content":{"role":"model","parts":[{"thought":true,"text":"Okay, here's what's going through my mind. I need to schedule a meeting"},{"thoughtSignature":"","functionCall":{"name":"schedule_meeting","args":{"topic":"Q3 planning","attendees":["Bob","Alice"],"time":"10:00","date":"2025-03-27"}}}]},"finishReason":"STOP","avgLogprobs":-0.50018133435930523}],"usageMetadata":{"promptTokenCount":117,"candidatesTokenCount":28,"totalTokenCount":474,"trafficType":"PROVISIONED_THROUGHPUT","promptTokensDetails":[{"modality":"TEXT","tokenCount":117}],"candidatesTokensDetails":[{"modality":"TEXT","tokenCount":28}],"thoughtsTokenCount":329},"modelVersion":"gemini-2.5-pro","createTime":"2025-08-15T04:12:55.249090Z","responseId":"x7OeaIKaD6CU48APvNXDyA4"}`)
|
||||
|
||||
convertedResponse := translator.TranslateNonStreamByFormatName(
|
||||
context.Background(),
|
||||
translator.FormatGemini,
|
||||
translator.FormatOpenAI,
|
||||
"gemini-2.5-pro",
|
||||
rawRequest,
|
||||
translatedRequest,
|
||||
claudeResponse,
|
||||
nil,
|
||||
)
|
||||
|
||||
fmt.Printf("Converted response for OpenAI clients:\n%s\n", convertedResponse)
|
||||
}
|
||||
go.mod (46 lines changed)
@@ -1,49 +1,77 @@
|
||||
module github.com/router-for-me/CLIProxyAPI/v6
|
||||
|
||||
go 1.24
|
||||
go 1.24.0
|
||||
|
||||
require (
|
||||
github.com/andybalholm/brotli v1.0.6
|
||||
github.com/fsnotify/fsnotify v1.9.0
|
||||
github.com/gin-gonic/gin v1.10.1
|
||||
github.com/go-git/go-git/v6 v6.0.0-20251009132922-75a182125145
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/jackc/pgx/v5 v5.7.6
|
||||
github.com/joho/godotenv v1.5.1
|
||||
github.com/klauspost/compress v1.17.4
|
||||
github.com/minio/minio-go/v7 v7.0.66
|
||||
github.com/refraction-networking/utls v1.8.2
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
|
||||
github.com/tidwall/gjson v1.18.0
|
||||
github.com/tidwall/sjson v1.2.5
|
||||
go.etcd.io/bbolt v1.3.8
|
||||
golang.org/x/crypto v0.36.0
|
||||
golang.org/x/net v0.37.1-0.20250305215238-2914f4677317
|
||||
github.com/tiktoken-go/tokenizer v0.7.0
|
||||
golang.org/x/crypto v0.45.0
|
||||
golang.org/x/net v0.47.0
|
||||
golang.org/x/oauth2 v0.30.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/compute/metadata v0.3.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.3.0 // indirect
|
||||
github.com/bytedance/sonic v1.11.6 // indirect
|
||||
github.com/bytedance/sonic/loader v0.1.1 // indirect
|
||||
github.com/cloudflare/circl v1.6.1 // indirect
|
||||
github.com/cloudwego/base64x v0.1.4 // indirect
|
||||
github.com/cloudwego/iasm v0.2.0 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
|
||||
github.com/dlclark/regexp2 v1.11.5 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/emirpasic/gods v1.18.1 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/go-git/gcfg/v2 v2.0.2 // indirect
|
||||
github.com/go-git/go-billy/v6 v6.0.0-20250627091229-31e2a16eef30 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.20.0 // indirect
|
||||
github.com/goccy/go-json v0.10.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.2 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.3 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
|
||||
github.com/kevinburke/ssh_config v1.4.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/minio/sha256-simd v1.0.1 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||
github.com/pjbgf/sha1cd v0.5.0 // indirect
|
||||
github.com/rs/xid v1.5.0 // indirect
|
||||
github.com/sergi/go-diff v1.4.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.0 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.12 // indirect
|
||||
golang.org/x/arch v0.8.0 // indirect
|
||||
golang.org/x/sys v0.31.0 // indirect
|
||||
golang.org/x/text v0.23.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
google.golang.org/protobuf v1.34.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
)
|
||||
|
||||
go.sum (116 lines changed)
@@ -1,16 +1,38 @@
|
||||
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
|
||||
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
|
||||
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
|
||||
github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
|
||||
github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
|
||||
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
|
||||
github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
|
||||
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
|
||||
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
|
||||
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
|
||||
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
|
||||
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
|
||||
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
|
||||
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
|
||||
github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
|
||||
github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=
|
||||
github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
|
||||
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
|
||||
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
|
||||
@@ -19,6 +41,16 @@ github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.10.1 h1:T0ujvqyCSqRopADpgPgiTT63DUQVSfojyME59Ei63pQ=
|
||||
github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
|
||||
github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
|
||||
github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU=
|
||||
github.com/go-git/gcfg/v2 v2.0.2 h1:MY5SIIfTGGEMhdA7d7JePuVVxtKL7Hp+ApGDJAJ7dpo=
|
||||
github.com/go-git/gcfg/v2 v2.0.2/go.mod h1:/lv2NsxvhepuMrldsFilrgct6pxzpGdSRC13ydTLSLs=
|
||||
github.com/go-git/go-billy/v6 v6.0.0-20250627091229-31e2a16eef30 h1:4KqVJTL5eanN8Sgg3BV6f2/QzfZEFbCd+rTak1fGRRA=
|
||||
github.com/go-git/go-billy/v6 v6.0.0-20250627091229-31e2a16eef30/go.mod h1:snwvGrbywVFy2d6KJdQ132zapq4aLyzLMgpo79XdEfM=
|
||||
github.com/go-git/go-git-fixtures/v5 v5.1.1 h1:OH8i1ojV9bWfr0ZfasfpgtUXQHQyVS8HXik/V1C099w=
|
||||
github.com/go-git/go-git-fixtures/v5 v5.1.1/go.mod h1:Altk43lx3b1ks+dVoAG2300o5WWUnktvfY3VI6bcaXU=
|
||||
github.com/go-git/go-git/v6 v6.0.0-20251009132922-75a182125145 h1:C/oVxHd6KkkuvthQ/StZfHzZK07gl6xjfCfT3derko0=
|
||||
github.com/go-git/go-git/v6 v6.0.0-20251009132922-75a182125145/go.mod h1:gR+xpbL+o1wuJJDwRN4pOkpNwDS0D24Eo4AD5Aau2DY=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
@@ -29,23 +61,52 @@ github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBEx
|
||||
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
|
||||
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
|
||||
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
||||
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
|
||||
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
|
||||
github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||
github.com/kevinburke/ssh_config v1.4.0 h1:6xxtP5bZ2E4NF5tuQulISpTO2z8XbtH8cg1PWkxoFkQ=
|
||||
github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M=
|
||||
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
|
||||
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw=
|
||||
github.com/minio/minio-go/v7 v7.0.66/go.mod h1:DHAgmyQEGdW3Cif0UooKOyrT3Vxs82zNdV6tkKhRtbs=
|
||||
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
||||
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@@ -53,8 +114,18 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||
github.com/pjbgf/sha1cd v0.5.0 h1:a+UkboSi1znleCDUNT3M5YxjOnN1fz2FhN48FlwCxs0=
|
||||
github.com/pjbgf/sha1cd v0.5.0/go.mod h1:lhpGlyHLpQZoxMv8HcgXvZEhcGs0PG/vsZnEJ7H0iCM=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/refraction-networking/utls v1.8.2 h1:j4Q1gJj0xngdeH+Ox/qND11aEfhpgoEvV+S9iJ2IdQo=
|
||||
github.com/refraction-networking/utls v1.8.2/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
|
||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
|
||||
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
|
||||
@@ -64,13 +135,15 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
|
||||
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
@@ -80,36 +153,45 @@ github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
|
||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
|
||||
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
|
||||
github.com/tiktoken-go/tokenizer v0.7.0 h1:VMu6MPT0bXFDHr7UPh9uii7CNItVt3X9K90omxL54vw=
|
||||
github.com/tiktoken-go/tokenizer v0.7.0/go.mod h1:6UCYI/DtOallbmL7sSy30p6YQv60qNyU/4aVigPOx6w=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
|
||||
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
|
||||
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
|
||||
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
|
||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/net v0.37.1-0.20250305215238-2914f4677317 h1:wneCP+2d9NUmndnyTmY7VwUNYiP26xiN/AtdcojQ1lI=
|
||||
golang.org/x/net v0.37.1-0.20250305215238-2914f4677317/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
|
||||
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
@@ -1,27 +1,33 @@
|
||||
package configapikey
|
||||
package configaccess
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
||||
sdkconfig "github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
|
||||
)
|
||||
|
||||
var registerOnce sync.Once
|
||||
|
||||
// Register ensures the config-access provider is available to the access manager.
|
||||
func Register() {
|
||||
registerOnce.Do(func() {
|
||||
sdkaccess.RegisterProvider(sdkconfig.AccessProviderTypeConfigAPIKey, newProvider)
|
||||
})
|
||||
}
|
||||
|
||||
type provider struct {
|
||||
name string
|
||||
keys map[string]struct{}
|
||||
}
|
||||
|
||||
func init() {
|
||||
sdkaccess.RegisterProvider(config.AccessProviderTypeConfigAPIKey, newProvider)
|
||||
}
|
||||
|
||||
func newProvider(cfg *config.AccessProvider, _ *config.Config) (sdkaccess.Provider, error) {
|
||||
func newProvider(cfg *sdkconfig.AccessProvider, _ *sdkconfig.SDKConfig) (sdkaccess.Provider, error) {
|
||||
name := cfg.Name
|
||||
if name == "" {
|
||||
name = config.DefaultAccessProviderName
|
||||
name = sdkconfig.DefaultAccessProviderName
|
||||
}
|
||||
keys := make(map[string]struct{}, len(cfg.APIKeys))
|
||||
for _, key := range cfg.APIKeys {
|
||||
@@ -35,7 +41,7 @@ func newProvider(cfg *config.AccessProvider, _ *config.Config) (sdkaccess.Provid
|
||||
|
||||
func (p *provider) Identifier() string {
|
||||
if p == nil || p.name == "" {
|
||||
return config.DefaultAccessProviderName
|
||||
return sdkconfig.DefaultAccessProviderName
|
||||
}
|
||||
return p.name
|
||||
}
|
||||
@@ -51,10 +57,12 @@ func (p *provider) Authenticate(_ context.Context, r *http.Request) (*sdkaccess.
|
||||
authHeaderGoogle := r.Header.Get("X-Goog-Api-Key")
|
||||
authHeaderAnthropic := r.Header.Get("X-Api-Key")
|
||||
queryKey := ""
|
||||
queryAuthToken := ""
|
||||
if r.URL != nil {
|
||||
queryKey = r.URL.Query().Get("key")
|
||||
queryAuthToken = r.URL.Query().Get("auth_token")
|
||||
}
|
||||
if authHeader == "" && authHeaderGoogle == "" && authHeaderAnthropic == "" && queryKey == "" {
|
||||
if authHeader == "" && authHeaderGoogle == "" && authHeaderAnthropic == "" && queryKey == "" && queryAuthToken == "" {
|
||||
return nil, sdkaccess.ErrNoCredentials
|
||||
}
|
||||
|
||||
@@ -68,6 +76,7 @@ func (p *provider) Authenticate(_ context.Context, r *http.Request) (*sdkaccess.
|
||||
{authHeaderGoogle, "x-goog-api-key"},
|
||||
{authHeaderAnthropic, "x-api-key"},
|
||||
{queryKey, "query-key"},
|
||||
{queryAuthToken, "query-auth-token"},
|
||||
}
|
||||
|
||||
for _, candidate := range candidates {
|
||||
internal/access/reconcile.go (new file, 270 lines)
@@ -0,0 +1,270 @@
|
||||
package access
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
||||
sdkConfig "github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ReconcileProviders builds the desired provider list by reusing existing providers when possible
|
||||
// and creating or removing providers only when their configuration changed. It returns the final
|
||||
// ordered provider slice along with the identifiers of providers that were added, updated, or
|
||||
// removed compared to the previous configuration.
|
||||
func ReconcileProviders(oldCfg, newCfg *config.Config, existing []sdkaccess.Provider) (result []sdkaccess.Provider, added, updated, removed []string, err error) {
|
||||
if newCfg == nil {
|
||||
return nil, nil, nil, nil, nil
|
||||
}
|
||||
|
||||
existingMap := make(map[string]sdkaccess.Provider, len(existing))
|
||||
for _, provider := range existing {
|
||||
if provider == nil {
|
||||
continue
|
||||
}
|
||||
existingMap[provider.Identifier()] = provider
|
||||
}
|
||||
|
||||
oldCfgMap := accessProviderMap(oldCfg)
|
||||
newEntries := collectProviderEntries(newCfg)
|
||||
|
||||
result = make([]sdkaccess.Provider, 0, len(newEntries))
|
||||
finalIDs := make(map[string]struct{}, len(newEntries))
|
||||
|
||||
isInlineProvider := func(id string) bool {
|
||||
return strings.EqualFold(id, sdkConfig.DefaultAccessProviderName)
|
||||
}
|
||||
appendChange := func(list *[]string, id string) {
|
||||
if isInlineProvider(id) {
|
||||
return
|
||||
}
|
||||
*list = append(*list, id)
|
||||
}
|
||||
|
||||
for _, providerCfg := range newEntries {
|
||||
key := providerIdentifier(providerCfg)
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
forceRebuild := strings.EqualFold(strings.TrimSpace(providerCfg.Type), sdkConfig.AccessProviderTypeConfigAPIKey)
|
||||
if oldCfgProvider, ok := oldCfgMap[key]; ok {
|
||||
isAliased := oldCfgProvider == providerCfg
|
||||
if !forceRebuild && !isAliased && providerConfigEqual(oldCfgProvider, providerCfg) {
|
||||
if existingProvider, okExisting := existingMap[key]; okExisting {
|
||||
result = append(result, existingProvider)
|
||||
finalIDs[key] = struct{}{}
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider, buildErr := sdkaccess.BuildProvider(providerCfg, &newCfg.SDKConfig)
|
||||
if buildErr != nil {
|
||||
return nil, nil, nil, nil, buildErr
|
||||
}
|
||||
if _, ok := oldCfgMap[key]; ok {
|
||||
if _, existed := existingMap[key]; existed {
|
||||
appendChange(&updated, key)
|
||||
} else {
|
||||
appendChange(&added, key)
|
||||
}
|
||||
} else {
|
||||
appendChange(&added, key)
|
||||
}
|
||||
result = append(result, provider)
|
||||
finalIDs[key] = struct{}{}
|
||||
}
|
||||
|
||||
if len(result) == 0 {
|
||||
if inline := sdkConfig.MakeInlineAPIKeyProvider(newCfg.APIKeys); inline != nil {
|
||||
key := providerIdentifier(inline)
|
||||
if key != "" {
|
||||
if oldCfgProvider, ok := oldCfgMap[key]; ok {
|
||||
if providerConfigEqual(oldCfgProvider, inline) {
|
||||
if existingProvider, okExisting := existingMap[key]; okExisting {
|
||||
result = append(result, existingProvider)
|
||||
finalIDs[key] = struct{}{}
|
||||
goto inlineDone
|
||||
}
|
||||
}
|
||||
}
|
||||
provider, buildErr := sdkaccess.BuildProvider(inline, &newCfg.SDKConfig)
|
||||
if buildErr != nil {
|
||||
return nil, nil, nil, nil, buildErr
|
||||
}
|
||||
if _, existed := existingMap[key]; existed {
|
||||
appendChange(&updated, key)
|
||||
} else if _, hadOld := oldCfgMap[key]; hadOld {
|
||||
appendChange(&updated, key)
|
||||
} else {
|
||||
appendChange(&added, key)
|
||||
}
|
||||
result = append(result, provider)
|
||||
finalIDs[key] = struct{}{}
|
||||
}
|
||||
}
|
||||
inlineDone:
|
||||
}
|
||||
|
||||
removedSet := make(map[string]struct{})
|
||||
for id := range existingMap {
|
||||
if _, ok := finalIDs[id]; !ok {
|
||||
if isInlineProvider(id) {
|
||||
continue
|
||||
}
|
||||
removedSet[id] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
removed = make([]string, 0, len(removedSet))
|
||||
for id := range removedSet {
|
||||
removed = append(removed, id)
|
||||
}
|
||||
|
||||
sort.Strings(added)
|
||||
sort.Strings(updated)
|
||||
sort.Strings(removed)
|
||||
|
||||
return result, added, updated, removed, nil
|
||||
}
|
||||
|
||||
// ApplyAccessProviders reconciles the configured access providers against the
|
||||
// currently registered providers and updates the manager. It logs a concise
|
||||
// summary of the detected changes and returns whether any provider changed.
|
||||
func ApplyAccessProviders(manager *sdkaccess.Manager, oldCfg, newCfg *config.Config) (bool, error) {
|
||||
if manager == nil || newCfg == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
existing := manager.Providers()
|
||||
providers, added, updated, removed, err := ReconcileProviders(oldCfg, newCfg, existing)
|
||||
if err != nil {
|
||||
log.Errorf("failed to reconcile request auth providers: %v", err)
|
||||
return false, fmt.Errorf("reconciling access providers: %w", err)
|
||||
}
|
||||
|
||||
manager.SetProviders(providers)
|
||||
|
||||
if len(added)+len(updated)+len(removed) > 0 {
|
||||
log.Debugf("auth providers reconciled (added=%d updated=%d removed=%d)", len(added), len(updated), len(removed))
|
||||
log.Debugf("auth providers changes details - added=%v updated=%v removed=%v", added, updated, removed)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
log.Debug("auth providers unchanged after config update")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func accessProviderMap(cfg *config.Config) map[string]*sdkConfig.AccessProvider {
|
||||
result := make(map[string]*sdkConfig.AccessProvider)
|
||||
if cfg == nil {
|
||||
return result
|
||||
}
|
||||
for i := range cfg.Access.Providers {
|
||||
providerCfg := &cfg.Access.Providers[i]
|
||||
if providerCfg.Type == "" {
|
||||
continue
|
||||
}
|
||||
key := providerIdentifier(providerCfg)
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
result[key] = providerCfg
|
||||
}
|
||||
if len(result) == 0 && len(cfg.APIKeys) > 0 {
|
||||
if provider := sdkConfig.MakeInlineAPIKeyProvider(cfg.APIKeys); provider != nil {
|
||||
if key := providerIdentifier(provider); key != "" {
|
||||
result[key] = provider
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func collectProviderEntries(cfg *config.Config) []*sdkConfig.AccessProvider {
|
||||
entries := make([]*sdkConfig.AccessProvider, 0, len(cfg.Access.Providers))
|
||||
for i := range cfg.Access.Providers {
|
||||
providerCfg := &cfg.Access.Providers[i]
|
||||
if providerCfg.Type == "" {
|
||||
continue
|
||||
}
|
||||
if key := providerIdentifier(providerCfg); key != "" {
|
||||
entries = append(entries, providerCfg)
|
||||
}
|
||||
}
|
||||
if len(entries) == 0 && len(cfg.APIKeys) > 0 {
|
||||
if inline := sdkConfig.MakeInlineAPIKeyProvider(cfg.APIKeys); inline != nil {
|
||||
entries = append(entries, inline)
|
||||
}
|
||||
}
|
||||
return entries
|
||||
}
|
||||
|
||||
func providerIdentifier(provider *sdkConfig.AccessProvider) string {
|
||||
if provider == nil {
|
||||
return ""
|
||||
}
|
||||
if name := strings.TrimSpace(provider.Name); name != "" {
|
||||
return name
|
||||
}
|
||||
typ := strings.TrimSpace(provider.Type)
|
||||
if typ == "" {
|
||||
return ""
|
||||
}
|
||||
if strings.EqualFold(typ, sdkConfig.AccessProviderTypeConfigAPIKey) {
|
||||
return sdkConfig.DefaultAccessProviderName
|
||||
}
|
||||
return typ
|
||||
}
|
||||
|
||||
func providerConfigEqual(a, b *sdkConfig.AccessProvider) bool {
|
||||
if a == nil || b == nil {
|
||||
return a == nil && b == nil
|
||||
}
|
||||
if !strings.EqualFold(strings.TrimSpace(a.Type), strings.TrimSpace(b.Type)) {
|
||||
return false
|
||||
}
|
||||
if strings.TrimSpace(a.SDK) != strings.TrimSpace(b.SDK) {
|
||||
return false
|
||||
}
|
||||
if !stringSetEqual(a.APIKeys, b.APIKeys) {
|
||||
return false
|
||||
}
|
||||
if len(a.Config) != len(b.Config) {
|
||||
return false
|
||||
}
|
||||
if len(a.Config) > 0 && !reflect.DeepEqual(a.Config, b.Config) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func stringSetEqual(a, b []string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
if len(a) == 0 {
|
||||
return true
|
||||
}
|
||||
seen := make(map[string]int, len(a))
|
||||
for _, val := range a {
|
||||
seen[val]++
|
||||
}
|
||||
for _, val := range b {
|
||||
count := seen[val]
|
||||
if count == 0 {
|
||||
return false
|
||||
}
|
||||
if count == 1 {
|
||||
delete(seen, val)
|
||||
} else {
|
||||
seen[val] = count - 1
|
||||
}
|
||||
}
|
||||
return len(seen) == 0
|
||||
}
|
||||
@@ -1,267 +0,0 @@
|
||||
// Package handlers provides core API handler functionality for the CLI Proxy API server.
|
||||
// It includes common types, client management, load balancing, and error handling
|
||||
// shared across all API endpoint handlers (OpenAI, Claude, Gemini).
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
coreexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
|
||||
sdktranslator "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// ErrorResponse represents a standard error response format for the API.
|
||||
// It contains a single ErrorDetail field.
|
||||
type ErrorResponse struct {
|
||||
// Error contains detailed information about the error that occurred.
|
||||
Error ErrorDetail `json:"error"`
|
||||
}
|
||||
|
||||
// ErrorDetail provides specific information about an error that occurred.
|
||||
// It includes a human-readable message, an error type, and an optional error code.
|
||||
type ErrorDetail struct {
|
||||
// Message is a human-readable message providing more details about the error.
|
||||
Message string `json:"message"`
|
||||
|
||||
// Type is the category of error that occurred (e.g., "invalid_request_error").
|
||||
Type string `json:"type"`
|
||||
|
||||
// Code is a short code identifying the error, if applicable.
|
||||
Code string `json:"code,omitempty"`
|
||||
}
|
||||
|
||||
// BaseAPIHandler contains the handlers for API endpoints.
|
||||
// It holds a pool of clients to interact with the backend service and manages
|
||||
// load balancing, client selection, and configuration.
|
||||
type BaseAPIHandler struct {
|
||||
// AuthManager manages auth lifecycle and execution in the new architecture.
|
||||
AuthManager *coreauth.Manager
|
||||
|
||||
// Cfg holds the current application configuration.
|
||||
Cfg *config.Config
|
||||
}
|
||||
|
||||
// NewBaseAPIHandlers creates a new API handlers instance.
|
||||
// It takes a slice of clients and configuration as input.
|
||||
//
|
||||
// Parameters:
|
||||
// - cliClients: A slice of AI service clients
|
||||
// - cfg: The application configuration
|
||||
//
|
||||
// Returns:
|
||||
// - *BaseAPIHandler: A new API handlers instance
|
||||
func NewBaseAPIHandlers(cfg *config.Config, authManager *coreauth.Manager) *BaseAPIHandler {
|
||||
return &BaseAPIHandler{
|
||||
Cfg: cfg,
|
||||
AuthManager: authManager,
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateClients updates the handlers' client list and configuration.
|
||||
// This method is called when the configuration or authentication tokens change.
|
||||
//
|
||||
// Parameters:
|
||||
// - clients: The new slice of AI service clients
|
||||
// - cfg: The new application configuration
|
||||
func (h *BaseAPIHandler) UpdateClients(cfg *config.Config) { h.Cfg = cfg }
|
||||
|
||||
// GetAlt extracts the 'alt' parameter from the request query string.
|
||||
// It checks both 'alt' and '$alt' parameters and returns the appropriate value.
|
||||
//
|
||||
// Parameters:
|
||||
// - c: The Gin context containing the HTTP request
|
||||
//
|
||||
// Returns:
|
||||
// - string: The alt parameter value, or empty string if it's "sse"
|
||||
func (h *BaseAPIHandler) GetAlt(c *gin.Context) string {
|
||||
var alt string
|
||||
var hasAlt bool
|
||||
alt, hasAlt = c.GetQuery("alt")
|
||||
if !hasAlt {
|
||||
alt, _ = c.GetQuery("$alt")
|
||||
}
|
||||
if alt == "sse" {
|
||||
return ""
|
||||
}
|
||||
return alt
|
||||
}
|
||||
|
||||
// GetContextWithCancel creates a new context with cancellation capabilities.
|
||||
// It embeds the Gin context and the API handler into the new context for later use.
|
||||
// The returned cancel function also handles logging the API response if request logging is enabled.
|
||||
//
|
||||
// Parameters:
|
||||
// - handler: The API handler associated with the request.
|
||||
// - c: The Gin context of the current request.
|
||||
// - ctx: The parent context.
|
||||
//
|
||||
// Returns:
|
||||
// - context.Context: The new context with cancellation and embedded values.
|
||||
// - APIHandlerCancelFunc: A function to cancel the context and log the response.
|
||||
func (h *BaseAPIHandler) GetContextWithCancel(handler interfaces.APIHandler, c *gin.Context, ctx context.Context) (context.Context, APIHandlerCancelFunc) {
|
||||
newCtx, cancel := context.WithCancel(ctx)
|
||||
newCtx = context.WithValue(newCtx, "gin", c)
|
||||
newCtx = context.WithValue(newCtx, "handler", handler)
|
||||
return newCtx, func(params ...interface{}) {
|
||||
if h.Cfg.RequestLog {
|
||||
if len(params) == 1 {
|
||||
data := params[0]
|
||||
switch data.(type) {
|
||||
case []byte:
|
||||
c.Set("API_RESPONSE", data.([]byte))
|
||||
case error:
|
||||
c.Set("API_RESPONSE", []byte(data.(error).Error()))
|
||||
case string:
|
||||
c.Set("API_RESPONSE", []byte(data.(string)))
|
||||
case bool:
|
||||
case nil:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
|
||||
// ExecuteWithAuthManager executes a non-streaming request via the core auth manager.
|
||||
// This path is the only supported execution route.
|
||||
func (h *BaseAPIHandler) ExecuteWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) ([]byte, *interfaces.ErrorMessage) {
|
||||
providers := util.GetProviderName(modelName, h.Cfg)
|
||||
if len(providers) == 0 {
|
||||
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
|
||||
}
|
||||
req := coreexecutor.Request{
|
||||
Model: modelName,
|
||||
Payload: cloneBytes(rawJSON),
|
||||
}
|
||||
opts := coreexecutor.Options{
|
||||
Stream: false,
|
||||
Alt: alt,
|
||||
OriginalRequest: cloneBytes(rawJSON),
|
||||
SourceFormat: sdktranslator.FromString(handlerType),
|
||||
}
|
||||
resp, err := h.AuthManager.Execute(ctx, providers, req, opts)
|
||||
if err != nil {
|
||||
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusInternalServerError, Error: err}
|
||||
}
|
||||
return cloneBytes(resp.Payload), nil
|
||||
}
|
||||
|
||||
// ExecuteCountWithAuthManager executes a non-streaming request via the core auth manager.
|
||||
// This path is the only supported execution route.
|
||||
func (h *BaseAPIHandler) ExecuteCountWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) ([]byte, *interfaces.ErrorMessage) {
|
||||
providers := util.GetProviderName(modelName, h.Cfg)
|
||||
if len(providers) == 0 {
|
||||
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
|
||||
}
|
||||
req := coreexecutor.Request{
|
||||
Model: modelName,
|
||||
Payload: cloneBytes(rawJSON),
|
||||
}
|
||||
opts := coreexecutor.Options{
|
||||
Stream: false,
|
||||
Alt: alt,
|
||||
OriginalRequest: cloneBytes(rawJSON),
|
||||
SourceFormat: sdktranslator.FromString(handlerType),
|
||||
}
|
||||
resp, err := h.AuthManager.ExecuteCount(ctx, providers, req, opts)
|
||||
if err != nil {
|
||||
return nil, &interfaces.ErrorMessage{StatusCode: http.StatusInternalServerError, Error: err}
|
||||
}
|
||||
return cloneBytes(resp.Payload), nil
|
||||
}
|
||||
|
||||
// ExecuteStreamWithAuthManager executes a streaming request via the core auth manager.
|
||||
// This path is the only supported execution route.
|
||||
func (h *BaseAPIHandler) ExecuteStreamWithAuthManager(ctx context.Context, handlerType, modelName string, rawJSON []byte, alt string) (<-chan []byte, <-chan *interfaces.ErrorMessage) {
|
||||
providers := util.GetProviderName(modelName, h.Cfg)
|
||||
if len(providers) == 0 {
|
||||
errChan := make(chan *interfaces.ErrorMessage, 1)
|
||||
errChan <- &interfaces.ErrorMessage{StatusCode: http.StatusBadRequest, Error: fmt.Errorf("unknown provider for model %s", modelName)}
|
||||
close(errChan)
|
||||
return nil, errChan
|
||||
}
|
||||
req := coreexecutor.Request{
|
||||
Model: modelName,
|
||||
Payload: cloneBytes(rawJSON),
|
||||
}
|
||||
opts := coreexecutor.Options{
|
||||
Stream: true,
|
||||
Alt: alt,
|
||||
OriginalRequest: cloneBytes(rawJSON),
|
||||
SourceFormat: sdktranslator.FromString(handlerType),
|
||||
}
|
||||
chunks, err := h.AuthManager.ExecuteStream(ctx, providers, req, opts)
|
||||
if err != nil {
|
||||
errChan := make(chan *interfaces.ErrorMessage, 1)
|
||||
errChan <- &interfaces.ErrorMessage{StatusCode: http.StatusInternalServerError, Error: err}
|
||||
close(errChan)
|
||||
return nil, errChan
|
||||
}
|
||||
dataChan := make(chan []byte)
|
||||
errChan := make(chan *interfaces.ErrorMessage, 1)
|
||||
go func() {
|
||||
defer close(dataChan)
|
||||
defer close(errChan)
|
||||
for chunk := range chunks {
|
||||
if chunk.Err != nil {
|
||||
errChan <- &interfaces.ErrorMessage{StatusCode: http.StatusInternalServerError, Error: chunk.Err}
|
||||
return
|
||||
}
|
||||
if len(chunk.Payload) > 0 {
|
||||
dataChan <- cloneBytes(chunk.Payload)
|
||||
}
|
||||
}
|
||||
}()
|
||||
return dataChan, errChan
|
||||
}
|
||||
|
||||
func cloneBytes(src []byte) []byte {
|
||||
if len(src) == 0 {
|
||||
return nil
|
||||
}
|
||||
dst := make([]byte, len(src))
|
||||
copy(dst, src)
|
||||
return dst
|
||||
}
|
||||
|
||||
// WriteErrorResponse writes an error message to the response writer using the HTTP status embedded in the message.
|
||||
func (h *BaseAPIHandler) WriteErrorResponse(c *gin.Context, msg *interfaces.ErrorMessage) {
|
||||
status := http.StatusInternalServerError
|
||||
if msg != nil && msg.StatusCode > 0 {
|
||||
status = msg.StatusCode
|
||||
}
|
||||
c.Status(status)
|
||||
if msg != nil && msg.Error != nil {
|
||||
_, _ = c.Writer.Write([]byte(msg.Error.Error()))
|
||||
} else {
|
||||
_, _ = c.Writer.Write([]byte(http.StatusText(status)))
|
||||
}
|
||||
}
|
||||
|
||||
func (h *BaseAPIHandler) LoggingAPIResponseError(ctx context.Context, err *interfaces.ErrorMessage) {
|
||||
if h.Cfg.RequestLog {
|
||||
if ginContext, ok := ctx.Value("gin").(*gin.Context); ok {
|
||||
if apiResponseErrors, isExist := ginContext.Get("API_RESPONSE_ERROR"); isExist {
|
||||
if slicesAPIResponseError, isOk := apiResponseErrors.([]*interfaces.ErrorMessage); isOk {
|
||||
slicesAPIResponseError = append(slicesAPIResponseError, err)
|
||||
ginContext.Set("API_RESPONSE_ERROR", slicesAPIResponseError)
|
||||
}
|
||||
} else {
|
||||
// Create new response data entry
|
||||
ginContext.Set("API_RESPONSE_ERROR", []*interfaces.ErrorMessage{err})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// APIHandlerCancelFunc is a function type for canceling an API handler's context.
|
||||
// It can optionally accept parameters, which are used for logging the response.
|
||||
type APIHandlerCancelFunc func(params ...interface{})
|
||||
704
internal/api/handlers/management/api_tools.go
Normal file
@@ -0,0 +1,704 @@
|
||||
package management
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/runtime/geminicli"
|
||||
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/proxy"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
)
|
||||
|
||||
const defaultAPICallTimeout = 60 * time.Second
|
||||
|
||||
const (
|
||||
geminiOAuthClientID = "681255809395-oo8ft2oprdrnp9e3aqf6av3hmdib135j.apps.googleusercontent.com"
|
||||
geminiOAuthClientSecret = "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"
|
||||
)
|
||||
|
||||
var geminiOAuthScopes = []string{
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/userinfo.email",
|
||||
"https://www.googleapis.com/auth/userinfo.profile",
|
||||
}
|
||||
|
||||
const (
|
||||
antigravityOAuthClientID = "1071006060591-tmhssin2h21lcre235vtolojh4g403ep.apps.googleusercontent.com"
|
||||
antigravityOAuthClientSecret = "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"
|
||||
)
|
||||
|
||||
var antigravityOAuthTokenURL = "https://oauth2.googleapis.com/token"
|
||||
|
||||
type apiCallRequest struct {
|
||||
AuthIndexSnake *string `json:"auth_index"`
|
||||
AuthIndexCamel *string `json:"authIndex"`
|
||||
AuthIndexPascal *string `json:"AuthIndex"`
|
||||
Method string `json:"method"`
|
||||
URL string `json:"url"`
|
||||
Header map[string]string `json:"header"`
|
||||
Data string `json:"data"`
|
||||
}
|
||||
|
||||
type apiCallResponse struct {
|
||||
StatusCode int `json:"status_code"`
|
||||
Header map[string][]string `json:"header"`
|
||||
Body string `json:"body"`
|
||||
}
|
||||
|
||||
// APICall makes a generic HTTP request on behalf of the management API caller.
|
||||
// It is protected by the management middleware.
|
||||
//
|
||||
// Endpoint:
|
||||
//
|
||||
// POST /v0/management/api-call
|
||||
//
|
||||
// Authentication:
|
||||
//
|
||||
// Same as other management APIs (requires a management key and remote-management rules).
|
||||
// You can provide the key via:
|
||||
// - Authorization: Bearer <key>
|
||||
// - X-Management-Key: <key>
|
||||
//
|
||||
// Request JSON:
|
||||
// - auth_index / authIndex / AuthIndex (optional):
|
||||
// The credential "auth_index" from GET /v0/management/auth-files (or other endpoints returning it).
|
||||
// If omitted or not found, credential-specific proxy/token substitution is skipped.
|
||||
// - method (required): HTTP method, e.g. GET, POST, PUT, PATCH, DELETE.
|
||||
// - url (required): Absolute URL including scheme and host, e.g. "https://api.example.com/v1/ping".
|
||||
// - header (optional): Request headers map.
|
||||
// Supports magic variable "$TOKEN$" which is replaced using the selected credential:
|
||||
// 1) metadata.access_token
|
||||
// 2) attributes.api_key
|
||||
// 3) metadata.token / metadata.id_token / metadata.cookie
|
||||
// Example: {"Authorization":"Bearer $TOKEN$"}.
|
||||
// Note: if you need to override the HTTP Host header, set header["Host"].
|
||||
// - data (optional): Raw request body as string (useful for POST/PUT/PATCH).
|
||||
//
|
||||
// Proxy selection (highest priority first):
|
||||
// 1. Selected credential proxy_url
|
||||
// 2. Global config proxy-url
|
||||
// 3. Direct connect (environment proxies are not used)
|
||||
//
|
||||
// Response JSON (returned with HTTP 200 when the APICall itself succeeds):
|
||||
// - status_code: Upstream HTTP status code.
|
||||
// - header: Upstream response headers.
|
||||
// - body: Upstream response body as string.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// curl -sS -X POST "http://127.0.0.1:8317/v0/management/api-call" \
|
||||
// -H "Authorization: Bearer <MANAGEMENT_KEY>" \
|
||||
// -H "Content-Type: application/json" \
|
||||
// -d '{"auth_index":"<AUTH_INDEX>","method":"GET","url":"https://api.example.com/v1/ping","header":{"Authorization":"Bearer $TOKEN$"}}'
|
||||
//
|
||||
// curl -sS -X POST "http://127.0.0.1:8317/v0/management/api-call" \
|
||||
// -H "Authorization: Bearer 831227" \
|
||||
// -H "Content-Type: application/json" \
|
||||
// -d '{"auth_index":"<AUTH_INDEX>","method":"POST","url":"https://api.example.com/v1/fetchAvailableModels","header":{"Authorization":"Bearer $TOKEN$","Content-Type":"application/json","User-Agent":"cliproxyapi"},"data":"{}"}'
|
||||
func (h *Handler) APICall(c *gin.Context) {
|
||||
var body apiCallRequest
|
||||
if errBindJSON := c.ShouldBindJSON(&body); errBindJSON != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid body"})
|
||||
return
|
||||
}
|
||||
|
||||
method := strings.ToUpper(strings.TrimSpace(body.Method))
|
||||
if method == "" {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "missing method"})
|
||||
return
|
||||
}
|
||||
|
||||
urlStr := strings.TrimSpace(body.URL)
|
||||
if urlStr == "" {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "missing url"})
|
||||
return
|
||||
}
|
||||
parsedURL, errParseURL := url.Parse(urlStr)
|
||||
if errParseURL != nil || parsedURL.Scheme == "" || parsedURL.Host == "" {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid url"})
|
||||
return
|
||||
}
|
||||
|
||||
authIndex := firstNonEmptyString(body.AuthIndexSnake, body.AuthIndexCamel, body.AuthIndexPascal)
|
||||
auth := h.authByIndex(authIndex)
|
||||
|
||||
reqHeaders := body.Header
|
||||
if reqHeaders == nil {
|
||||
reqHeaders = map[string]string{}
|
||||
}
|
||||
|
||||
var hostOverride string
|
||||
var token string
|
||||
var tokenResolved bool
|
||||
var tokenErr error
|
||||
for key, value := range reqHeaders {
|
||||
if !strings.Contains(value, "$TOKEN$") {
|
||||
continue
|
||||
}
|
||||
if !tokenResolved {
|
||||
token, tokenErr = h.resolveTokenForAuth(c.Request.Context(), auth)
|
||||
tokenResolved = true
|
||||
}
|
||||
if auth != nil && token == "" {
|
||||
if tokenErr != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "auth token refresh failed"})
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "auth token not found"})
|
||||
return
|
||||
}
|
||||
if token == "" {
|
||||
continue
|
||||
}
|
||||
reqHeaders[key] = strings.ReplaceAll(value, "$TOKEN$", token)
|
||||
}
|
||||
|
||||
var requestBody io.Reader
|
||||
if body.Data != "" {
|
||||
requestBody = strings.NewReader(body.Data)
|
||||
}
|
||||
|
||||
req, errNewRequest := http.NewRequestWithContext(c.Request.Context(), method, urlStr, requestBody)
|
||||
if errNewRequest != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "failed to build request"})
|
||||
return
|
||||
}
|
||||
|
||||
for key, value := range reqHeaders {
|
||||
if strings.EqualFold(key, "host") {
|
||||
hostOverride = strings.TrimSpace(value)
|
||||
continue
|
||||
}
|
||||
req.Header.Set(key, value)
|
||||
}
|
||||
if hostOverride != "" {
|
||||
req.Host = hostOverride
|
||||
}
|
||||
|
||||
httpClient := &http.Client{
|
||||
Timeout: defaultAPICallTimeout,
|
||||
}
|
||||
httpClient.Transport = h.apiCallTransport(auth)
|
||||
|
||||
resp, errDo := httpClient.Do(req)
|
||||
if errDo != nil {
|
||||
log.WithError(errDo).Debug("management APICall request failed")
|
||||
c.JSON(http.StatusBadGateway, gin.H{"error": "request failed"})
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if errClose := resp.Body.Close(); errClose != nil {
|
||||
log.Errorf("response body close error: %v", errClose)
|
||||
}
|
||||
}()
|
||||
|
||||
respBody, errReadAll := io.ReadAll(resp.Body)
|
||||
if errReadAll != nil {
|
||||
c.JSON(http.StatusBadGateway, gin.H{"error": "failed to read response"})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, apiCallResponse{
|
||||
StatusCode: resp.StatusCode,
|
||||
Header: resp.Header,
|
||||
Body: string(respBody),
|
||||
})
|
||||
}
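The curl examples in the comment above translate directly into a small Go client. The sketch below is illustrative only; the management key, auth_index, and target URL are placeholders, and the code is not taken from this repository.

package main

import (
	"bytes"
	"encoding/json"
	"net/http"
)

// callAPITool posts an api-call request mirroring the documented JSON shape.
func callAPITool() (*http.Response, error) {
	payload := map[string]any{
		"auth_index": "<AUTH_INDEX>",
		"method":     "GET",
		"url":        "https://api.example.com/v1/ping",
		"header":     map[string]string{"Authorization": "Bearer $TOKEN$"},
	}
	body, err := json.Marshal(payload)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:8317/v0/management/api-call", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer <MANAGEMENT_KEY>")
	req.Header.Set("Content-Type", "application/json")
	return http.DefaultClient.Do(req)
}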
|
||||
|
||||
func firstNonEmptyString(values ...*string) string {
|
||||
for _, v := range values {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
if out := strings.TrimSpace(*v); out != "" {
|
||||
return out
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func tokenValueForAuth(auth *coreauth.Auth) string {
|
||||
if auth == nil {
|
||||
return ""
|
||||
}
|
||||
if v := tokenValueFromMetadata(auth.Metadata); v != "" {
|
||||
return v
|
||||
}
|
||||
if auth.Attributes != nil {
|
||||
if v := strings.TrimSpace(auth.Attributes["api_key"]); v != "" {
|
||||
return v
|
||||
}
|
||||
}
|
||||
if shared := geminicli.ResolveSharedCredential(auth.Runtime); shared != nil {
|
||||
if v := tokenValueFromMetadata(shared.MetadataSnapshot()); v != "" {
|
||||
return v
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (h *Handler) resolveTokenForAuth(ctx context.Context, auth *coreauth.Auth) (string, error) {
|
||||
if auth == nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
provider := strings.ToLower(strings.TrimSpace(auth.Provider))
|
||||
if provider == "gemini-cli" {
|
||||
token, errToken := h.refreshGeminiOAuthAccessToken(ctx, auth)
|
||||
return token, errToken
|
||||
}
|
||||
if provider == "antigravity" {
|
||||
token, errToken := h.refreshAntigravityOAuthAccessToken(ctx, auth)
|
||||
return token, errToken
|
||||
}
|
||||
|
||||
return tokenValueForAuth(auth), nil
|
||||
}
|
||||
|
||||
func (h *Handler) refreshGeminiOAuthAccessToken(ctx context.Context, auth *coreauth.Auth) (string, error) {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
if auth == nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
metadata, updater := geminiOAuthMetadata(auth)
|
||||
if len(metadata) == 0 {
|
||||
return "", fmt.Errorf("gemini oauth metadata missing")
|
||||
}
|
||||
|
||||
base := make(map[string]any)
|
||||
if tokenRaw, ok := metadata["token"].(map[string]any); ok && tokenRaw != nil {
|
||||
base = cloneMap(tokenRaw)
|
||||
}
|
||||
|
||||
var token oauth2.Token
|
||||
if len(base) > 0 {
|
||||
if raw, errMarshal := json.Marshal(base); errMarshal == nil {
|
||||
_ = json.Unmarshal(raw, &token)
|
||||
}
|
||||
}
|
||||
|
||||
if token.AccessToken == "" {
|
||||
token.AccessToken = stringValue(metadata, "access_token")
|
||||
}
|
||||
if token.RefreshToken == "" {
|
||||
token.RefreshToken = stringValue(metadata, "refresh_token")
|
||||
}
|
||||
if token.TokenType == "" {
|
||||
token.TokenType = stringValue(metadata, "token_type")
|
||||
}
|
||||
if token.Expiry.IsZero() {
|
||||
if expiry := stringValue(metadata, "expiry"); expiry != "" {
|
||||
if ts, errParseTime := time.Parse(time.RFC3339, expiry); errParseTime == nil {
|
||||
token.Expiry = ts
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
conf := &oauth2.Config{
|
||||
ClientID: geminiOAuthClientID,
|
||||
ClientSecret: geminiOAuthClientSecret,
|
||||
Scopes: geminiOAuthScopes,
|
||||
Endpoint: google.Endpoint,
|
||||
}
|
||||
|
||||
ctxToken := ctx
|
||||
httpClient := &http.Client{
|
||||
Timeout: defaultAPICallTimeout,
|
||||
Transport: h.apiCallTransport(auth),
|
||||
}
|
||||
ctxToken = context.WithValue(ctxToken, oauth2.HTTPClient, httpClient)
|
||||
|
||||
src := conf.TokenSource(ctxToken, &token)
|
||||
currentToken, errToken := src.Token()
|
||||
if errToken != nil {
|
||||
return "", errToken
|
||||
}
|
||||
|
||||
merged := buildOAuthTokenMap(base, currentToken)
|
||||
fields := buildOAuthTokenFields(currentToken, merged)
|
||||
if updater != nil {
|
||||
updater(fields)
|
||||
}
|
||||
return strings.TrimSpace(currentToken.AccessToken), nil
|
||||
}
|
||||
|
||||
func (h *Handler) refreshAntigravityOAuthAccessToken(ctx context.Context, auth *coreauth.Auth) (string, error) {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
if auth == nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
metadata := auth.Metadata
|
||||
if len(metadata) == 0 {
|
||||
return "", fmt.Errorf("antigravity oauth metadata missing")
|
||||
}
|
||||
|
||||
current := strings.TrimSpace(tokenValueFromMetadata(metadata))
|
||||
if current != "" && !antigravityTokenNeedsRefresh(metadata) {
|
||||
return current, nil
|
||||
}
|
||||
|
||||
refreshToken := stringValue(metadata, "refresh_token")
|
||||
if refreshToken == "" {
|
||||
return "", fmt.Errorf("antigravity refresh token missing")
|
||||
}
|
||||
|
||||
tokenURL := strings.TrimSpace(antigravityOAuthTokenURL)
|
||||
if tokenURL == "" {
|
||||
tokenURL = "https://oauth2.googleapis.com/token"
|
||||
}
|
||||
form := url.Values{}
|
||||
form.Set("client_id", antigravityOAuthClientID)
|
||||
form.Set("client_secret", antigravityOAuthClientSecret)
|
||||
form.Set("grant_type", "refresh_token")
|
||||
form.Set("refresh_token", refreshToken)
|
||||
|
||||
req, errReq := http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, strings.NewReader(form.Encode()))
|
||||
if errReq != nil {
|
||||
return "", errReq
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
|
||||
httpClient := &http.Client{
|
||||
Timeout: defaultAPICallTimeout,
|
||||
Transport: h.apiCallTransport(auth),
|
||||
}
|
||||
resp, errDo := httpClient.Do(req)
|
||||
if errDo != nil {
|
||||
return "", errDo
|
||||
}
|
||||
defer func() {
|
||||
if errClose := resp.Body.Close(); errClose != nil {
|
||||
log.Errorf("response body close error: %v", errClose)
|
||||
}
|
||||
}()
|
||||
|
||||
bodyBytes, errRead := io.ReadAll(resp.Body)
|
||||
if errRead != nil {
|
||||
return "", errRead
|
||||
}
|
||||
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
|
||||
return "", fmt.Errorf("antigravity oauth token refresh failed: status %d: %s", resp.StatusCode, strings.TrimSpace(string(bodyBytes)))
|
||||
}
|
||||
|
||||
var tokenResp struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn int64 `json:"expires_in"`
|
||||
TokenType string `json:"token_type"`
|
||||
}
|
||||
if errUnmarshal := json.Unmarshal(bodyBytes, &tokenResp); errUnmarshal != nil {
|
||||
return "", errUnmarshal
|
||||
}
|
||||
|
||||
if strings.TrimSpace(tokenResp.AccessToken) == "" {
|
||||
return "", fmt.Errorf("antigravity oauth token refresh returned empty access_token")
|
||||
}
|
||||
|
||||
if auth.Metadata == nil {
|
||||
auth.Metadata = make(map[string]any)
|
||||
}
|
||||
now := time.Now()
|
||||
auth.Metadata["access_token"] = strings.TrimSpace(tokenResp.AccessToken)
|
||||
if strings.TrimSpace(tokenResp.RefreshToken) != "" {
|
||||
auth.Metadata["refresh_token"] = strings.TrimSpace(tokenResp.RefreshToken)
|
||||
}
|
||||
if tokenResp.ExpiresIn > 0 {
|
||||
auth.Metadata["expires_in"] = tokenResp.ExpiresIn
|
||||
auth.Metadata["timestamp"] = now.UnixMilli()
|
||||
auth.Metadata["expired"] = now.Add(time.Duration(tokenResp.ExpiresIn) * time.Second).Format(time.RFC3339)
|
||||
}
|
||||
auth.Metadata["type"] = "antigravity"
|
||||
|
||||
if h != nil && h.authManager != nil {
|
||||
auth.LastRefreshedAt = now
|
||||
auth.UpdatedAt = now
|
||||
_, _ = h.authManager.Update(ctx, auth)
|
||||
}
|
||||
|
||||
return strings.TrimSpace(tokenResp.AccessToken), nil
|
||||
}
|
||||
|
||||
func antigravityTokenNeedsRefresh(metadata map[string]any) bool {
|
||||
// Refresh a bit early to avoid requests racing token expiry.
|
||||
const skew = 30 * time.Second
|
||||
|
||||
if metadata == nil {
|
||||
return true
|
||||
}
|
||||
if expStr, ok := metadata["expired"].(string); ok {
|
||||
if ts, errParse := time.Parse(time.RFC3339, strings.TrimSpace(expStr)); errParse == nil {
|
||||
return !ts.After(time.Now().Add(skew))
|
||||
}
|
||||
}
|
||||
expiresIn := int64Value(metadata["expires_in"])
|
||||
timestampMs := int64Value(metadata["timestamp"])
|
||||
if expiresIn > 0 && timestampMs > 0 {
|
||||
exp := time.UnixMilli(timestampMs).Add(time.Duration(expiresIn) * time.Second)
|
||||
return !exp.After(time.Now().Add(skew))
|
||||
}
|
||||
return true
|
||||
}
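To make the 30-second skew rule concrete, a hypothetical example (illustrative only; assumes the same package and an imported fmt):

// Illustrative only: with a 30-second skew, a token expiring in 10 seconds
// is refreshed, while one expiring in 10 minutes is reused.
func exampleRefreshSkew() {
	soon := map[string]any{"expired": time.Now().Add(10 * time.Second).Format(time.RFC3339)}
	later := map[string]any{"expired": time.Now().Add(10 * time.Minute).Format(time.RFC3339)}
	fmt.Println(antigravityTokenNeedsRefresh(soon))  // true: inside the skew window
	fmt.Println(antigravityTokenNeedsRefresh(later)) // false: still comfortably valid
}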
|
||||
|
||||
func int64Value(raw any) int64 {
|
||||
switch typed := raw.(type) {
|
||||
case int:
|
||||
return int64(typed)
|
||||
case int32:
|
||||
return int64(typed)
|
||||
case int64:
|
||||
return typed
|
||||
case uint:
|
||||
return int64(typed)
|
||||
case uint32:
|
||||
return int64(typed)
|
||||
case uint64:
|
||||
if typed > uint64(^uint64(0)>>1) {
|
||||
return 0
|
||||
}
|
||||
return int64(typed)
|
||||
case float32:
|
||||
return int64(typed)
|
||||
case float64:
|
||||
return int64(typed)
|
||||
case json.Number:
|
||||
if i, errParse := typed.Int64(); errParse == nil {
|
||||
return i
|
||||
}
|
||||
case string:
|
||||
if s := strings.TrimSpace(typed); s != "" {
|
||||
if i, errParse := json.Number(s).Int64(); errParse == nil {
|
||||
return i
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
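int64Value tolerates the numeric shapes that typically appear after JSON decoding; a brief illustration (not from the repository, assumes fmt is imported):

// Illustrative only: common coercions handled by int64Value.
func exampleInt64Value() {
	fmt.Println(int64Value(float64(3600)))  // 3600: encoding/json decodes numbers as float64
	fmt.Println(int64Value("7200"))         // 7200: numeric strings are parsed
	fmt.Println(int64Value("not-a-number")) // 0: anything unparsable falls back to zero
}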
|
||||
|
||||
func geminiOAuthMetadata(auth *coreauth.Auth) (map[string]any, func(map[string]any)) {
|
||||
if auth == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if shared := geminicli.ResolveSharedCredential(auth.Runtime); shared != nil {
|
||||
snapshot := shared.MetadataSnapshot()
|
||||
return snapshot, func(fields map[string]any) { shared.MergeMetadata(fields) }
|
||||
}
|
||||
return auth.Metadata, func(fields map[string]any) {
|
||||
if auth.Metadata == nil {
|
||||
auth.Metadata = make(map[string]any)
|
||||
}
|
||||
for k, v := range fields {
|
||||
auth.Metadata[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func stringValue(metadata map[string]any, key string) string {
|
||||
if len(metadata) == 0 || key == "" {
|
||||
return ""
|
||||
}
|
||||
if v, ok := metadata[key].(string); ok {
|
||||
return strings.TrimSpace(v)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func cloneMap(in map[string]any) map[string]any {
|
||||
if len(in) == 0 {
|
||||
return nil
|
||||
}
|
||||
out := make(map[string]any, len(in))
|
||||
for k, v := range in {
|
||||
out[k] = v
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func buildOAuthTokenMap(base map[string]any, tok *oauth2.Token) map[string]any {
|
||||
merged := cloneMap(base)
|
||||
if merged == nil {
|
||||
merged = make(map[string]any)
|
||||
}
|
||||
if tok == nil {
|
||||
return merged
|
||||
}
|
||||
if raw, errMarshal := json.Marshal(tok); errMarshal == nil {
|
||||
var tokenMap map[string]any
|
||||
if errUnmarshal := json.Unmarshal(raw, &tokenMap); errUnmarshal == nil {
|
||||
for k, v := range tokenMap {
|
||||
merged[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
return merged
|
||||
}
|
||||
|
||||
func buildOAuthTokenFields(tok *oauth2.Token, merged map[string]any) map[string]any {
|
||||
fields := make(map[string]any, 5)
|
||||
if tok != nil && tok.AccessToken != "" {
|
||||
fields["access_token"] = tok.AccessToken
|
||||
}
|
||||
if tok != nil && tok.TokenType != "" {
|
||||
fields["token_type"] = tok.TokenType
|
||||
}
|
||||
if tok != nil && tok.RefreshToken != "" {
|
||||
fields["refresh_token"] = tok.RefreshToken
|
||||
}
|
||||
if tok != nil && !tok.Expiry.IsZero() {
|
||||
fields["expiry"] = tok.Expiry.Format(time.RFC3339)
|
||||
}
|
||||
if len(merged) > 0 {
|
||||
fields["token"] = cloneMap(merged)
|
||||
}
|
||||
return fields
|
||||
}
|
||||
|
||||
func tokenValueFromMetadata(metadata map[string]any) string {
|
||||
if len(metadata) == 0 {
|
||||
return ""
|
||||
}
|
||||
if v, ok := metadata["accessToken"].(string); ok && strings.TrimSpace(v) != "" {
|
||||
return strings.TrimSpace(v)
|
||||
}
|
||||
if v, ok := metadata["access_token"].(string); ok && strings.TrimSpace(v) != "" {
|
||||
return strings.TrimSpace(v)
|
||||
}
|
||||
if tokenRaw, ok := metadata["token"]; ok && tokenRaw != nil {
|
||||
switch typed := tokenRaw.(type) {
|
||||
case string:
|
||||
if v := strings.TrimSpace(typed); v != "" {
|
||||
return v
|
||||
}
|
||||
case map[string]any:
|
||||
if v, ok := typed["access_token"].(string); ok && strings.TrimSpace(v) != "" {
|
||||
return strings.TrimSpace(v)
|
||||
}
|
||||
if v, ok := typed["accessToken"].(string); ok && strings.TrimSpace(v) != "" {
|
||||
return strings.TrimSpace(v)
|
||||
}
|
||||
case map[string]string:
|
||||
if v := strings.TrimSpace(typed["access_token"]); v != "" {
|
||||
return v
|
||||
}
|
||||
if v := strings.TrimSpace(typed["accessToken"]); v != "" {
|
||||
return v
|
||||
}
|
||||
}
|
||||
}
|
||||
if v, ok := metadata["token"].(string); ok && strings.TrimSpace(v) != "" {
|
||||
return strings.TrimSpace(v)
|
||||
}
|
||||
if v, ok := metadata["id_token"].(string); ok && strings.TrimSpace(v) != "" {
|
||||
return strings.TrimSpace(v)
|
||||
}
|
||||
if v, ok := metadata["cookie"].(string); ok && strings.TrimSpace(v) != "" {
|
||||
return strings.TrimSpace(v)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (h *Handler) authByIndex(authIndex string) *coreauth.Auth {
|
||||
authIndex = strings.TrimSpace(authIndex)
|
||||
if authIndex == "" || h == nil || h.authManager == nil {
|
||||
return nil
|
||||
}
|
||||
auths := h.authManager.List()
|
||||
for _, auth := range auths {
|
||||
if auth == nil {
|
||||
continue
|
||||
}
|
||||
auth.EnsureIndex()
|
||||
if auth.Index == authIndex {
|
||||
return auth
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Handler) apiCallTransport(auth *coreauth.Auth) http.RoundTripper {
|
||||
var proxyCandidates []string
|
||||
if auth != nil {
|
||||
if proxyStr := strings.TrimSpace(auth.ProxyURL); proxyStr != "" {
|
||||
proxyCandidates = append(proxyCandidates, proxyStr)
|
||||
}
|
||||
}
|
||||
if h != nil && h.cfg != nil {
|
||||
if proxyStr := strings.TrimSpace(h.cfg.ProxyURL); proxyStr != "" {
|
||||
proxyCandidates = append(proxyCandidates, proxyStr)
|
||||
}
|
||||
}
|
||||
|
||||
for _, proxyStr := range proxyCandidates {
|
||||
if transport := buildProxyTransport(proxyStr); transport != nil {
|
||||
return transport
|
||||
}
|
||||
}
|
||||
|
||||
transport, ok := http.DefaultTransport.(*http.Transport)
|
||||
if !ok || transport == nil {
|
||||
return &http.Transport{Proxy: nil}
|
||||
}
|
||||
clone := transport.Clone()
|
||||
clone.Proxy = nil
|
||||
return clone
|
||||
}
|
||||
|
||||
func buildProxyTransport(proxyStr string) *http.Transport {
|
||||
proxyStr = strings.TrimSpace(proxyStr)
|
||||
if proxyStr == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
proxyURL, errParse := url.Parse(proxyStr)
|
||||
if errParse != nil {
|
||||
log.WithError(errParse).Debug("parse proxy URL failed")
|
||||
return nil
|
||||
}
|
||||
if proxyURL.Scheme == "" || proxyURL.Host == "" {
|
||||
log.Debug("proxy URL missing scheme/host")
|
||||
return nil
|
||||
}
|
||||
|
||||
if proxyURL.Scheme == "socks5" {
|
||||
var proxyAuth *proxy.Auth
|
||||
if proxyURL.User != nil {
|
||||
username := proxyURL.User.Username()
|
||||
password, _ := proxyURL.User.Password()
|
||||
proxyAuth = &proxy.Auth{User: username, Password: password}
|
||||
}
|
||||
dialer, errSOCKS5 := proxy.SOCKS5("tcp", proxyURL.Host, proxyAuth, proxy.Direct)
|
||||
if errSOCKS5 != nil {
|
||||
log.WithError(errSOCKS5).Debug("create SOCKS5 dialer failed")
|
||||
return nil
|
||||
}
|
||||
return &http.Transport{
|
||||
Proxy: nil,
|
||||
DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||
return dialer.Dial(network, addr)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if proxyURL.Scheme == "http" || proxyURL.Scheme == "https" {
|
||||
return &http.Transport{Proxy: http.ProxyURL(proxyURL)}
|
||||
}
|
||||
|
||||
log.Debugf("unsupported proxy scheme: %s", proxyURL.Scheme)
|
||||
return nil
|
||||
}
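The proxy fallback documented for APICall (credential proxy_url, then global proxy-url, then direct) boils down to the loop sketched below; this is illustrative only and the proxy URLs are placeholders, not values from the repository.

// Illustrative only: how apiCallTransport-style candidates resolve in order.
func exampleProxyFallback() http.RoundTripper {
	candidates := []string{
		"socks5://user:pass@127.0.0.1:1080", // credential proxy_url (assumed)
		"http://proxy.internal:3128",        // global proxy-url (assumed)
	}
	for _, p := range candidates {
		if t := buildProxyTransport(p); t != nil {
			return t
		}
	}
	return &http.Transport{Proxy: nil} // direct connection; environment proxies are ignored
}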
|
||||
173
internal/api/handlers/management/api_tools_test.go
Normal file
@@ -0,0 +1,173 @@
|
||||
package management
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
)
|
||||
|
||||
type memoryAuthStore struct {
|
||||
mu sync.Mutex
|
||||
items map[string]*coreauth.Auth
|
||||
}
|
||||
|
||||
func (s *memoryAuthStore) List(ctx context.Context) ([]*coreauth.Auth, error) {
|
||||
_ = ctx
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
out := make([]*coreauth.Auth, 0, len(s.items))
|
||||
for _, a := range s.items {
|
||||
out = append(out, a.Clone())
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (s *memoryAuthStore) Save(ctx context.Context, auth *coreauth.Auth) (string, error) {
|
||||
_ = ctx
|
||||
if auth == nil {
|
||||
return "", nil
|
||||
}
|
||||
s.mu.Lock()
|
||||
if s.items == nil {
|
||||
s.items = make(map[string]*coreauth.Auth)
|
||||
}
|
||||
s.items[auth.ID] = auth.Clone()
|
||||
s.mu.Unlock()
|
||||
return auth.ID, nil
|
||||
}
|
||||
|
||||
func (s *memoryAuthStore) Delete(ctx context.Context, id string) error {
|
||||
_ = ctx
|
||||
s.mu.Lock()
|
||||
delete(s.items, id)
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestResolveTokenForAuth_Antigravity_RefreshesExpiredToken(t *testing.T) {
|
||||
var callCount int
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
callCount++
|
||||
if r.Method != http.MethodPost {
|
||||
t.Fatalf("expected POST, got %s", r.Method)
|
||||
}
|
||||
if ct := r.Header.Get("Content-Type"); !strings.HasPrefix(ct, "application/x-www-form-urlencoded") {
|
||||
t.Fatalf("unexpected content-type: %s", ct)
|
||||
}
|
||||
bodyBytes, _ := io.ReadAll(r.Body)
|
||||
_ = r.Body.Close()
|
||||
values, err := url.ParseQuery(string(bodyBytes))
|
||||
if err != nil {
|
||||
t.Fatalf("parse form: %v", err)
|
||||
}
|
||||
if values.Get("grant_type") != "refresh_token" {
|
||||
t.Fatalf("unexpected grant_type: %s", values.Get("grant_type"))
|
||||
}
|
||||
if values.Get("refresh_token") != "rt" {
|
||||
t.Fatalf("unexpected refresh_token: %s", values.Get("refresh_token"))
|
||||
}
|
||||
if values.Get("client_id") != antigravityOAuthClientID {
|
||||
t.Fatalf("unexpected client_id: %s", values.Get("client_id"))
|
||||
}
|
||||
if values.Get("client_secret") != antigravityOAuthClientSecret {
|
||||
t.Fatalf("unexpected client_secret")
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"access_token": "new-token",
|
||||
"refresh_token": "rt2",
|
||||
"expires_in": int64(3600),
|
||||
"token_type": "Bearer",
|
||||
})
|
||||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
originalURL := antigravityOAuthTokenURL
|
||||
antigravityOAuthTokenURL = srv.URL
|
||||
t.Cleanup(func() { antigravityOAuthTokenURL = originalURL })
|
||||
|
||||
store := &memoryAuthStore{}
|
||||
manager := coreauth.NewManager(store, nil, nil)
|
||||
|
||||
auth := &coreauth.Auth{
|
||||
ID: "antigravity-test.json",
|
||||
FileName: "antigravity-test.json",
|
||||
Provider: "antigravity",
|
||||
Metadata: map[string]any{
|
||||
"type": "antigravity",
|
||||
"access_token": "old-token",
|
||||
"refresh_token": "rt",
|
||||
"expires_in": int64(3600),
|
||||
"timestamp": time.Now().Add(-2 * time.Hour).UnixMilli(),
|
||||
"expired": time.Now().Add(-1 * time.Hour).Format(time.RFC3339),
|
||||
},
|
||||
}
|
||||
if _, err := manager.Register(context.Background(), auth); err != nil {
|
||||
t.Fatalf("register auth: %v", err)
|
||||
}
|
||||
|
||||
h := &Handler{authManager: manager}
|
||||
token, err := h.resolveTokenForAuth(context.Background(), auth)
|
||||
if err != nil {
|
||||
t.Fatalf("resolveTokenForAuth: %v", err)
|
||||
}
|
||||
if token != "new-token" {
|
||||
t.Fatalf("expected refreshed token, got %q", token)
|
||||
}
|
||||
if callCount != 1 {
|
||||
t.Fatalf("expected 1 refresh call, got %d", callCount)
|
||||
}
|
||||
|
||||
updated, ok := manager.GetByID(auth.ID)
|
||||
if !ok || updated == nil {
|
||||
t.Fatalf("expected auth in manager after update")
|
||||
}
|
||||
if got := tokenValueFromMetadata(updated.Metadata); got != "new-token" {
|
||||
t.Fatalf("expected manager metadata updated, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveTokenForAuth_Antigravity_SkipsRefreshWhenTokenValid(t *testing.T) {
|
||||
var callCount int
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
callCount++
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
originalURL := antigravityOAuthTokenURL
|
||||
antigravityOAuthTokenURL = srv.URL
|
||||
t.Cleanup(func() { antigravityOAuthTokenURL = originalURL })
|
||||
|
||||
auth := &coreauth.Auth{
|
||||
ID: "antigravity-valid.json",
|
||||
FileName: "antigravity-valid.json",
|
||||
Provider: "antigravity",
|
||||
Metadata: map[string]any{
|
||||
"type": "antigravity",
|
||||
"access_token": "ok-token",
|
||||
"expired": time.Now().Add(30 * time.Minute).Format(time.RFC3339),
|
||||
},
|
||||
}
|
||||
h := &Handler{}
|
||||
token, err := h.resolveTokenForAuth(context.Background(), auth)
|
||||
if err != nil {
|
||||
t.Fatalf("resolveTokenForAuth: %v", err)
|
||||
}
|
||||
if token != "ok-token" {
|
||||
t.Fatalf("expected existing token, got %q", token)
|
||||
}
|
||||
if callCount != 0 {
|
||||
t.Fatalf("expected no refresh calls, got %d", callCount)
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -1,23 +1,261 @@
|
||||
package management
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
sdkconfig "github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
const (
|
||||
latestReleaseURL = "https://api.github.com/repos/router-for-me/CLIProxyAPI/releases/latest"
|
||||
latestReleaseUserAgent = "CLIProxyAPI"
|
||||
)
|
||||
|
||||
func (h *Handler) GetConfig(c *gin.Context) {
|
||||
c.JSON(200, h.cfg)
|
||||
if h == nil || h.cfg == nil {
|
||||
c.JSON(200, gin.H{})
|
||||
return
|
||||
}
|
||||
cfgCopy := *h.cfg
|
||||
c.JSON(200, &cfgCopy)
|
||||
}
|
||||
|
||||
type releaseInfo struct {
|
||||
TagName string `json:"tag_name"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
// GetLatestVersion returns the latest release version from GitHub without downloading assets.
|
||||
func (h *Handler) GetLatestVersion(c *gin.Context) {
|
||||
client := &http.Client{Timeout: 10 * time.Second}
|
||||
proxyURL := ""
|
||||
if h != nil && h.cfg != nil {
|
||||
proxyURL = strings.TrimSpace(h.cfg.ProxyURL)
|
||||
}
|
||||
if proxyURL != "" {
|
||||
sdkCfg := &sdkconfig.SDKConfig{ProxyURL: proxyURL}
|
||||
util.SetProxy(sdkCfg, client)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(c.Request.Context(), http.MethodGet, latestReleaseURL, nil)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "request_create_failed", "message": err.Error()})
|
||||
return
|
||||
}
|
||||
req.Header.Set("Accept", "application/vnd.github+json")
|
||||
req.Header.Set("User-Agent", latestReleaseUserAgent)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadGateway, gin.H{"error": "request_failed", "message": err.Error()})
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if errClose := resp.Body.Close(); errClose != nil {
|
||||
log.WithError(errClose).Debug("failed to close latest version response body")
|
||||
}
|
||||
}()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
|
||||
c.JSON(http.StatusBadGateway, gin.H{"error": "unexpected_status", "message": fmt.Sprintf("status %d: %s", resp.StatusCode, strings.TrimSpace(string(body)))})
|
||||
return
|
||||
}
|
||||
|
||||
var info releaseInfo
|
||||
if errDecode := json.NewDecoder(resp.Body).Decode(&info); errDecode != nil {
|
||||
c.JSON(http.StatusBadGateway, gin.H{"error": "decode_failed", "message": errDecode.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
version := strings.TrimSpace(info.TagName)
|
||||
if version == "" {
|
||||
version = strings.TrimSpace(info.Name)
|
||||
}
|
||||
if version == "" {
|
||||
c.JSON(http.StatusBadGateway, gin.H{"error": "invalid_response", "message": "missing release version"})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"latest-version": version})
|
||||
}
|
||||
|
||||
func WriteConfig(path string, data []byte) error {
|
||||
data = config.NormalizeCommentIndentation(data)
|
||||
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, errWrite := f.Write(data); errWrite != nil {
|
||||
_ = f.Close()
|
||||
return errWrite
|
||||
}
|
||||
if errSync := f.Sync(); errSync != nil {
|
||||
_ = f.Close()
|
||||
return errSync
|
||||
}
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
func (h *Handler) PutConfigYAML(c *gin.Context) {
|
||||
body, err := io.ReadAll(c.Request.Body)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid_yaml", "message": "cannot read request body"})
|
||||
return
|
||||
}
|
||||
var cfg config.Config
|
||||
if err = yaml.Unmarshal(body, &cfg); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid_yaml", "message": err.Error()})
|
||||
return
|
||||
}
|
||||
// Validate config using LoadConfigOptional with optional=false to enforce parsing
|
||||
tmpDir := filepath.Dir(h.configFilePath)
|
||||
tmpFile, err := os.CreateTemp(tmpDir, "config-validate-*.yaml")
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "write_failed", "message": err.Error()})
|
||||
return
|
||||
}
|
||||
tempFile := tmpFile.Name()
|
||||
if _, errWrite := tmpFile.Write(body); errWrite != nil {
|
||||
_ = tmpFile.Close()
|
||||
_ = os.Remove(tempFile)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "write_failed", "message": errWrite.Error()})
|
||||
return
|
||||
}
|
||||
if errClose := tmpFile.Close(); errClose != nil {
|
||||
_ = os.Remove(tempFile)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "write_failed", "message": errClose.Error()})
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
_ = os.Remove(tempFile)
|
||||
}()
|
||||
_, err = config.LoadConfigOptional(tempFile, false)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusUnprocessableEntity, gin.H{"error": "invalid_config", "message": err.Error()})
|
||||
return
|
||||
}
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
if WriteConfig(h.configFilePath, body) != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "write_failed", "message": "failed to write config"})
|
||||
return
|
||||
}
|
||||
// Reload into handler to keep memory in sync
|
||||
newCfg, err := config.LoadConfig(h.configFilePath)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "reload_failed", "message": err.Error()})
|
||||
return
|
||||
}
|
||||
h.cfg = newCfg
|
||||
c.JSON(http.StatusOK, gin.H{"ok": true, "changed": []string{"config"}})
|
||||
}
|
||||
|
||||
// GetConfigYAML returns the raw config.yaml file bytes without re-encoding.
|
||||
// It preserves comments and original formatting/styles.
|
||||
func (h *Handler) GetConfigYAML(c *gin.Context) {
|
||||
data, err := os.ReadFile(h.configFilePath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "not_found", "message": "config file not found"})
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "read_failed", "message": err.Error()})
|
||||
return
|
||||
}
|
||||
c.Header("Content-Type", "application/yaml; charset=utf-8")
|
||||
c.Header("Cache-Control", "no-store")
|
||||
c.Header("X-Content-Type-Options", "nosniff")
|
||||
// Write raw bytes as-is
|
||||
_, _ = c.Writer.Write(data)
|
||||
}
|
||||
|
||||
// Debug
func (h *Handler) GetDebug(c *gin.Context) { c.JSON(200, gin.H{"debug": h.cfg.Debug}) }
func (h *Handler) PutDebug(c *gin.Context) { h.updateBoolField(c, func(v bool) { h.cfg.Debug = v }) }

// UsageStatisticsEnabled
func (h *Handler) GetUsageStatisticsEnabled(c *gin.Context) {
	c.JSON(200, gin.H{"usage-statistics-enabled": h.cfg.UsageStatisticsEnabled})
}
func (h *Handler) PutUsageStatisticsEnabled(c *gin.Context) {
	h.updateBoolField(c, func(v bool) { h.cfg.UsageStatisticsEnabled = v })
}

// LoggingToFile
func (h *Handler) GetLoggingToFile(c *gin.Context) {
	c.JSON(200, gin.H{"logging-to-file": h.cfg.LoggingToFile})
}
func (h *Handler) PutLoggingToFile(c *gin.Context) {
	h.updateBoolField(c, func(v bool) { h.cfg.LoggingToFile = v })
}

// LogsMaxTotalSizeMB
|
||||
func (h *Handler) GetLogsMaxTotalSizeMB(c *gin.Context) {
|
||||
c.JSON(200, gin.H{"logs-max-total-size-mb": h.cfg.LogsMaxTotalSizeMB})
|
||||
}
|
||||
func (h *Handler) PutLogsMaxTotalSizeMB(c *gin.Context) {
|
||||
var body struct {
|
||||
Value *int `json:"value"`
|
||||
}
|
||||
if errBindJSON := c.ShouldBindJSON(&body); errBindJSON != nil || body.Value == nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid body"})
|
||||
return
|
||||
}
|
||||
value := *body.Value
|
||||
if value < 0 {
|
||||
value = 0
|
||||
}
|
||||
h.cfg.LogsMaxTotalSizeMB = value
|
||||
h.persist(c)
|
||||
}
|
||||
|
||||
// ErrorLogsMaxFiles
|
||||
func (h *Handler) GetErrorLogsMaxFiles(c *gin.Context) {
|
||||
c.JSON(200, gin.H{"error-logs-max-files": h.cfg.ErrorLogsMaxFiles})
|
||||
}
|
||||
func (h *Handler) PutErrorLogsMaxFiles(c *gin.Context) {
|
||||
var body struct {
|
||||
Value *int `json:"value"`
|
||||
}
|
||||
if errBindJSON := c.ShouldBindJSON(&body); errBindJSON != nil || body.Value == nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid body"})
|
||||
return
|
||||
}
|
||||
value := *body.Value
|
||||
if value < 0 {
|
||||
value = 10
|
||||
}
|
||||
h.cfg.ErrorLogsMaxFiles = value
|
||||
h.persist(c)
|
||||
}
|
||||
|
||||
// Request log
|
||||
func (h *Handler) GetRequestLog(c *gin.Context) { c.JSON(200, gin.H{"request-log": h.cfg.RequestLog}) }
|
||||
func (h *Handler) PutRequestLog(c *gin.Context) {
|
||||
h.updateBoolField(c, func(v bool) { h.cfg.RequestLog = v })
|
||||
}
|
||||
|
||||
// Websocket auth
|
||||
func (h *Handler) GetWebsocketAuth(c *gin.Context) {
|
||||
c.JSON(200, gin.H{"ws-auth": h.cfg.WebsocketAuth})
|
||||
}
|
||||
func (h *Handler) PutWebsocketAuth(c *gin.Context) {
|
||||
h.updateBoolField(c, func(v bool) { h.cfg.WebsocketAuth = v })
|
||||
}
|
||||
|
||||
// Request retry
|
||||
func (h *Handler) GetRequestRetry(c *gin.Context) {
|
||||
c.JSON(200, gin.H{"request-retry": h.cfg.RequestRetry})
|
||||
@@ -26,6 +264,60 @@ func (h *Handler) PutRequestRetry(c *gin.Context) {
|
||||
h.updateIntField(c, func(v int) { h.cfg.RequestRetry = v })
|
||||
}
|
||||
|
||||
// Max retry interval
|
||||
func (h *Handler) GetMaxRetryInterval(c *gin.Context) {
|
||||
c.JSON(200, gin.H{"max-retry-interval": h.cfg.MaxRetryInterval})
|
||||
}
|
||||
func (h *Handler) PutMaxRetryInterval(c *gin.Context) {
|
||||
h.updateIntField(c, func(v int) { h.cfg.MaxRetryInterval = v })
|
||||
}
|
||||
|
||||
// ForceModelPrefix
|
||||
func (h *Handler) GetForceModelPrefix(c *gin.Context) {
|
||||
c.JSON(200, gin.H{"force-model-prefix": h.cfg.ForceModelPrefix})
|
||||
}
|
||||
func (h *Handler) PutForceModelPrefix(c *gin.Context) {
|
||||
h.updateBoolField(c, func(v bool) { h.cfg.ForceModelPrefix = v })
|
||||
}
|
||||
|
||||
func normalizeRoutingStrategy(strategy string) (string, bool) {
|
||||
normalized := strings.ToLower(strings.TrimSpace(strategy))
|
||||
switch normalized {
|
||||
case "", "round-robin", "roundrobin", "rr":
|
||||
return "round-robin", true
|
||||
case "fill-first", "fillfirst", "ff":
|
||||
return "fill-first", true
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
}
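normalizeRoutingStrategy accepts a few aliases and rejects everything else; a short illustration (illustrative only, assumes fmt is imported):

// Illustrative only: alias handling in normalizeRoutingStrategy.
func exampleRoutingAliases() {
	s, ok := normalizeRoutingStrategy("RR")
	fmt.Println(s, ok) // round-robin true
	_, ok = normalizeRoutingStrategy("random")
	fmt.Println(ok) // false: unsupported strategies are rejected
}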
|
||||
|
||||
// RoutingStrategy
|
||||
func (h *Handler) GetRoutingStrategy(c *gin.Context) {
|
||||
strategy, ok := normalizeRoutingStrategy(h.cfg.Routing.Strategy)
|
||||
if !ok {
|
||||
c.JSON(200, gin.H{"strategy": strings.TrimSpace(h.cfg.Routing.Strategy)})
|
||||
return
|
||||
}
|
||||
c.JSON(200, gin.H{"strategy": strategy})
|
||||
}
|
||||
func (h *Handler) PutRoutingStrategy(c *gin.Context) {
|
||||
var body struct {
|
||||
Value *string `json:"value"`
|
||||
}
|
||||
if errBindJSON := c.ShouldBindJSON(&body); errBindJSON != nil || body.Value == nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid body"})
|
||||
return
|
||||
}
|
||||
normalized, ok := normalizeRoutingStrategy(*body.Value)
|
||||
if !ok {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid strategy"})
|
||||
return
|
||||
}
|
||||
h.cfg.Routing.Strategy = normalized
|
||||
h.persist(c)
|
||||
}
|
||||
|
||||
// Proxy URL
|
||||
func (h *Handler) GetProxyURL(c *gin.Context) { c.JSON(200, gin.H{"proxy-url": h.cfg.ProxyURL}) }
|
||||
func (h *Handler) PutProxyURL(c *gin.Context) {
|
||||
|
||||
File diff suppressed because it is too large
@@ -3,13 +3,17 @@
|
||||
package management
|
||||
|
||||
import (
|
||||
"crypto/subtle"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/buildinfo"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/usage"
|
||||
sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
|
||||
@@ -20,31 +24,83 @@ import (
|
||||
type attemptInfo struct {
|
||||
count int
|
||||
blockedUntil time.Time
|
||||
lastActivity time.Time // track last activity for cleanup
|
||||
}
|
||||
|
||||
// attemptCleanupInterval controls how often stale IP entries are purged
|
||||
const attemptCleanupInterval = 1 * time.Hour
|
||||
|
||||
// attemptMaxIdleTime controls how long an IP can be idle before cleanup
|
||||
const attemptMaxIdleTime = 2 * time.Hour
|
||||
|
||||
// Handler aggregates config reference, persistence path and helpers.
|
||||
type Handler struct {
|
||||
cfg *config.Config
|
||||
configFilePath string
|
||||
mu sync.Mutex
|
||||
|
||||
attemptsMu sync.Mutex
|
||||
failedAttempts map[string]*attemptInfo // keyed by client IP
|
||||
authManager *coreauth.Manager
|
||||
usageStats *usage.RequestStatistics
|
||||
tokenStore sdkAuth.TokenStore
|
||||
tokenStore coreauth.Store
|
||||
localPassword string
|
||||
allowRemoteOverride bool
|
||||
envSecret string
|
||||
logDir string
|
||||
}
|
||||
|
||||
// NewHandler creates a new management handler instance.
|
||||
func NewHandler(cfg *config.Config, configFilePath string, manager *coreauth.Manager) *Handler {
|
||||
return &Handler{
|
||||
envSecret, _ := os.LookupEnv("MANAGEMENT_PASSWORD")
|
||||
envSecret = strings.TrimSpace(envSecret)
|
||||
|
||||
h := &Handler{
|
||||
cfg: cfg,
|
||||
configFilePath: configFilePath,
|
||||
failedAttempts: make(map[string]*attemptInfo),
|
||||
authManager: manager,
|
||||
usageStats: usage.GetRequestStatistics(),
|
||||
tokenStore: sdkAuth.GetTokenStore(),
|
||||
allowRemoteOverride: envSecret != "",
|
||||
envSecret: envSecret,
|
||||
}
|
||||
h.startAttemptCleanup()
|
||||
return h
|
||||
}
|
||||
|
||||
// startAttemptCleanup launches a background goroutine that periodically
|
||||
// removes stale IP entries from failedAttempts to prevent memory leaks.
|
||||
func (h *Handler) startAttemptCleanup() {
|
||||
go func() {
|
||||
ticker := time.NewTicker(attemptCleanupInterval)
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
h.purgeStaleAttempts()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// purgeStaleAttempts removes IP entries that have been idle beyond attemptMaxIdleTime
|
||||
// and whose ban (if any) has expired.
|
||||
func (h *Handler) purgeStaleAttempts() {
|
||||
now := time.Now()
|
||||
h.attemptsMu.Lock()
|
||||
defer h.attemptsMu.Unlock()
|
||||
for ip, ai := range h.failedAttempts {
|
||||
// Skip if still banned
|
||||
if !ai.blockedUntil.IsZero() && now.Before(ai.blockedUntil) {
|
||||
continue
|
||||
}
|
||||
// Remove if idle too long
|
||||
if now.Sub(ai.lastActivity) > attemptMaxIdleTime {
|
||||
delete(h.failedAttempts, ip)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NewHandlerWithoutConfigFilePath creates a management handler that does not persist to a config file path.
func NewHandlerWithoutConfigFilePath(cfg *config.Config, manager *coreauth.Manager) *Handler {
	return NewHandler(cfg, "", manager)
}
|
||||
|
||||
// SetConfig updates the in-memory config reference when the server hot-reloads.
|
||||
@@ -56,6 +112,22 @@ func (h *Handler) SetAuthManager(manager *coreauth.Manager) { h.authManager = ma
|
||||
// SetUsageStatistics allows replacing the usage statistics reference.
|
||||
func (h *Handler) SetUsageStatistics(stats *usage.RequestStatistics) { h.usageStats = stats }
|
||||
|
||||
// SetLocalPassword configures the runtime-local password accepted for localhost requests.
|
||||
func (h *Handler) SetLocalPassword(password string) { h.localPassword = password }
|
||||
|
||||
// SetLogDirectory updates the directory where main.log should be looked up.
|
||||
func (h *Handler) SetLogDirectory(dir string) {
|
||||
if dir == "" {
|
||||
return
|
||||
}
|
||||
if !filepath.IsAbs(dir) {
|
||||
if abs, err := filepath.Abs(dir); err == nil {
|
||||
dir = abs
|
||||
}
|
||||
}
|
||||
h.logDir = dir
|
||||
}
|
||||
|
||||
// Middleware enforces access control for management endpoints.
|
||||
// All requests (local and remote) require a valid management key.
|
||||
// Additionally, remote access requires allow-remote-management=true.
|
||||
@@ -64,11 +136,28 @@ func (h *Handler) Middleware() gin.HandlerFunc {
|
||||
const banDuration = 30 * time.Minute
|
||||
|
||||
return func(c *gin.Context) {
|
||||
clientIP := c.ClientIP()
|
||||
c.Header("X-CPA-VERSION", buildinfo.Version)
|
||||
c.Header("X-CPA-COMMIT", buildinfo.Commit)
|
||||
c.Header("X-CPA-BUILD-DATE", buildinfo.BuildDate)
|
||||
|
||||
// For remote IPs, enforce allow-remote-management and ban checks
|
||||
if !(clientIP == "127.0.0.1" || clientIP == "::1") {
|
||||
// Check if IP is currently blocked
|
||||
clientIP := c.ClientIP()
|
||||
localClient := clientIP == "127.0.0.1" || clientIP == "::1"
|
||||
cfg := h.cfg
|
||||
var (
|
||||
allowRemote bool
|
||||
secretHash string
|
||||
)
|
||||
if cfg != nil {
|
||||
allowRemote = cfg.RemoteManagement.AllowRemote
|
||||
secretHash = cfg.RemoteManagement.SecretKey
|
||||
}
|
||||
if h.allowRemoteOverride {
|
||||
allowRemote = true
|
||||
}
|
||||
envSecret := h.envSecret
|
||||
|
||||
fail := func() {}
|
||||
if !localClient {
|
||||
h.attemptsMu.Lock()
|
||||
ai := h.failedAttempts[clientIP]
|
||||
if ai != nil {
|
||||
@@ -86,14 +175,28 @@ func (h *Handler) Middleware() gin.HandlerFunc {
|
||||
}
|
||||
h.attemptsMu.Unlock()
|
||||
|
||||
allowRemote := h.cfg.RemoteManagement.AllowRemote
|
||||
if !allowRemote {
|
||||
c.AbortWithStatusJSON(http.StatusForbidden, gin.H{"error": "remote management disabled"})
|
||||
return
|
||||
}
|
||||
|
||||
fail = func() {
|
||||
h.attemptsMu.Lock()
|
||||
aip := h.failedAttempts[clientIP]
|
||||
if aip == nil {
|
||||
aip = &attemptInfo{}
|
||||
h.failedAttempts[clientIP] = aip
|
||||
}
|
||||
secret := h.cfg.RemoteManagement.SecretKey
|
||||
if secret == "" {
|
||||
aip.count++
|
||||
aip.lastActivity = time.Now()
|
||||
if aip.count >= maxFailures {
|
||||
aip.blockedUntil = time.Now().Add(banDuration)
|
||||
aip.count = 0
|
||||
}
|
||||
h.attemptsMu.Unlock()
|
||||
}
|
||||
}
|
||||
if secretHash == "" && envSecret == "" {
|
||||
c.AbortWithStatusJSON(http.StatusForbidden, gin.H{"error": "remote management key not set"})
|
||||
return
|
||||
}
|
||||
@@ -112,36 +215,45 @@ func (h *Handler) Middleware() gin.HandlerFunc {
|
||||
provided = c.GetHeader("X-Management-Key")
|
||||
}
|
||||
|
||||
if !(clientIP == "127.0.0.1" || clientIP == "::1") {
|
||||
// For remote IPs, enforce key and track failures
|
||||
fail := func() {
|
||||
h.attemptsMu.Lock()
|
||||
ai := h.failedAttempts[clientIP]
|
||||
if ai == nil {
|
||||
ai = &attemptInfo{}
|
||||
h.failedAttempts[clientIP] = ai
|
||||
}
|
||||
ai.count++
|
||||
if ai.count >= maxFailures {
|
||||
ai.blockedUntil = time.Now().Add(banDuration)
|
||||
ai.count = 0
|
||||
}
|
||||
h.attemptsMu.Unlock()
|
||||
}
|
||||
|
||||
if provided == "" {
|
||||
if !localClient {
|
||||
fail()
|
||||
}
|
||||
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "missing management key"})
|
||||
return
|
||||
}
|
||||
|
||||
if err := bcrypt.CompareHashAndPassword([]byte(secret), []byte(provided)); err != nil {
|
||||
if localClient {
|
||||
if lp := h.localPassword; lp != "" {
|
||||
if subtle.ConstantTimeCompare([]byte(provided), []byte(lp)) == 1 {
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if envSecret != "" && subtle.ConstantTimeCompare([]byte(provided), []byte(envSecret)) == 1 {
|
||||
if !localClient {
|
||||
h.attemptsMu.Lock()
|
||||
if ai := h.failedAttempts[clientIP]; ai != nil {
|
||||
ai.count = 0
|
||||
ai.blockedUntil = time.Time{}
|
||||
}
|
||||
h.attemptsMu.Unlock()
|
||||
}
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
|
||||
if secretHash == "" || bcrypt.CompareHashAndPassword([]byte(secretHash), []byte(provided)) != nil {
|
||||
if !localClient {
|
||||
fail()
|
||||
}
|
||||
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "invalid management key"})
|
||||
return
|
||||
}
|
||||
|
||||
// Success: reset failed count for this IP
|
||||
if !localClient {
|
||||
h.attemptsMu.Lock()
|
||||
if ai := h.failedAttempts[clientIP]; ai != nil {
|
||||
ai.count = 0
|
||||
@@ -173,16 +285,6 @@ func (h *Handler) updateBoolField(c *gin.Context, set func(bool)) {
|
||||
Value *bool `json:"value"`
|
||||
}
|
||||
if err := c.ShouldBindJSON(&body); err != nil || body.Value == nil {
|
||||
var m map[string]any
|
||||
if err2 := c.ShouldBindJSON(&m); err2 == nil {
|
||||
for _, v := range m {
|
||||
if b, ok := v.(bool); ok {
|
||||
set(b)
|
||||
h.persist(c)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid body"})
|
||||
return
|
||||
}
|
||||
|
||||
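The middleware above counts failed key attempts per client IP, bans an address for 30 minutes once the threshold is reached, and clears the state after a successful check. A minimal, self-contained sketch of that bookkeeping pattern; the names tracker, maxFailures and the exact threshold value here are illustrative, not the handler's actual fields:

package main

import (
    "fmt"
    "sync"
    "time"
)

const (
    maxFailures = 5                // illustrative threshold; the handler defines its own
    banDuration = 30 * time.Minute // matches the banDuration constant shown in the diff
)

type attempt struct {
    count        int
    blockedUntil time.Time
}

type tracker struct {
    mu       sync.Mutex
    failures map[string]*attempt
}

func newTracker() *tracker { return &tracker{failures: make(map[string]*attempt)} }

// blocked reports whether an IP is currently banned.
func (t *tracker) blocked(ip string, now time.Time) bool {
    t.mu.Lock()
    defer t.mu.Unlock()
    a := t.failures[ip]
    return a != nil && !a.blockedUntil.IsZero() && now.Before(a.blockedUntil)
}

// fail records a failed attempt and starts a ban once the threshold is reached.
func (t *tracker) fail(ip string, now time.Time) {
    t.mu.Lock()
    defer t.mu.Unlock()
    a := t.failures[ip]
    if a == nil {
        a = &attempt{}
        t.failures[ip] = a
    }
    a.count++
    if a.count >= maxFailures {
        a.blockedUntil = now.Add(banDuration)
        a.count = 0
    }
}

// succeed clears the failure state after a valid key is presented.
func (t *tracker) succeed(ip string) {
    t.mu.Lock()
    defer t.mu.Unlock()
    if a := t.failures[ip]; a != nil {
        a.count = 0
        a.blockedUntil = time.Time{}
    }
}

func main() {
    tr := newTracker()
    now := time.Now()
    for i := 0; i < maxFailures; i++ {
        tr.fail("203.0.113.7", now)
    }
    fmt.Println(tr.blocked("203.0.113.7", now)) // true: banned after repeated failures
    tr.succeed("203.0.113.7")
    fmt.Println(tr.blocked("203.0.113.7", now)) // false: reset on success
}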
583 internal/api/handlers/management/logs.go (Normal file)
@@ -0,0 +1,583 @@
|
||||
package management
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/logging"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultLogFileName = "main.log"
|
||||
logScannerInitialBuffer = 64 * 1024
|
||||
logScannerMaxBuffer = 8 * 1024 * 1024
|
||||
)
|
||||
|
||||
// GetLogs returns log lines with optional incremental loading.
|
||||
func (h *Handler) GetLogs(c *gin.Context) {
|
||||
if h == nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "handler unavailable"})
|
||||
return
|
||||
}
|
||||
if h.cfg == nil {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{"error": "configuration unavailable"})
|
||||
return
|
||||
}
|
||||
if !h.cfg.LoggingToFile {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "logging to file disabled"})
|
||||
return
|
||||
}
|
||||
|
||||
logDir := h.logDirectory()
|
||||
if strings.TrimSpace(logDir) == "" {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "log directory not configured"})
|
||||
return
|
||||
}
|
||||
|
||||
files, err := h.collectLogFiles(logDir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
cutoff := parseCutoff(c.Query("after"))
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"lines": []string{},
|
||||
"line-count": 0,
|
||||
"latest-timestamp": cutoff,
|
||||
})
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to list log files: %v", err)})
|
||||
return
|
||||
}
|
||||
|
||||
limit, errLimit := parseLimit(c.Query("limit"))
|
||||
if errLimit != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("invalid limit: %v", errLimit)})
|
||||
return
|
||||
}
|
||||
|
||||
cutoff := parseCutoff(c.Query("after"))
|
||||
acc := newLogAccumulator(cutoff, limit)
|
||||
for i := range files {
|
||||
if errProcess := acc.consumeFile(files[i]); errProcess != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to read log file %s: %v", files[i], errProcess)})
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
lines, total, latest := acc.result()
|
||||
if latest == 0 || latest < cutoff {
|
||||
latest = cutoff
|
||||
}
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"lines": lines,
|
||||
"line-count": total,
|
||||
"latest-timestamp": latest,
|
||||
})
|
||||
}
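GetLogs supports incremental loading: the response carries lines, line-count and latest-timestamp, and a client can feed that timestamp back as the after query parameter on the next poll so only newer lines are returned. A rough client-side sketch of that loop; the base URL and the /v0/management/logs path are placeholders (only the /v0/management prefix appears elsewhere in this diff), and the required management-key header is omitted:

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
    "net/url"
    "strconv"
    "time"
)

// logsPage mirrors the JSON shape produced by GetLogs.
type logsPage struct {
    Lines           []string `json:"lines"`
    LineCount       int      `json:"line-count"`
    LatestTimestamp int64    `json:"latest-timestamp"`
}

// pollLogs fetches one page of log lines newer than `after`.
func pollLogs(endpoint string, after int64, limit int) (*logsPage, error) {
    q := url.Values{}
    if after > 0 {
        q.Set("after", strconv.FormatInt(after, 10))
    }
    if limit > 0 {
        q.Set("limit", strconv.Itoa(limit))
    }
    resp, err := http.Get(endpoint + "?" + q.Encode())
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    var page logsPage
    if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
        return nil, err
    }
    return &page, nil
}

func main() {
    const endpoint = "http://127.0.0.1:8080/v0/management/logs" // placeholder route
    var cursor int64
    for i := 0; i < 3; i++ {
        page, err := pollLogs(endpoint, cursor, 200)
        if err != nil {
            fmt.Println("poll failed:", err)
            return
        }
        fmt.Printf("got %d lines (scanned %d total)\n", len(page.Lines), page.LineCount)
        cursor = page.LatestTimestamp // feed back as `after` on the next poll
        time.Sleep(2 * time.Second)
    }
}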
|
||||
|
||||
// DeleteLogs removes all rotated log files and truncates the active log.
|
||||
func (h *Handler) DeleteLogs(c *gin.Context) {
|
||||
if h == nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "handler unavailable"})
|
||||
return
|
||||
}
|
||||
if h.cfg == nil {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{"error": "configuration unavailable"})
|
||||
return
|
||||
}
|
||||
if !h.cfg.LoggingToFile {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "logging to file disabled"})
|
||||
return
|
||||
}
|
||||
|
||||
dir := h.logDirectory()
|
||||
if strings.TrimSpace(dir) == "" {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "log directory not configured"})
|
||||
return
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "log directory not found"})
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to list log directory: %v", err)})
|
||||
return
|
||||
}
|
||||
|
||||
removed := 0
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
name := entry.Name()
|
||||
fullPath := filepath.Join(dir, name)
|
||||
if name == defaultLogFileName {
|
||||
if errTrunc := os.Truncate(fullPath, 0); errTrunc != nil && !os.IsNotExist(errTrunc) {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to truncate log file: %v", errTrunc)})
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
if isRotatedLogFile(name) {
|
||||
if errRemove := os.Remove(fullPath); errRemove != nil && !os.IsNotExist(errRemove) {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to remove %s: %v", name, errRemove)})
|
||||
return
|
||||
}
|
||||
removed++
|
||||
}
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": true,
|
||||
"message": "Logs cleared successfully",
|
||||
"removed": removed,
|
||||
})
|
||||
}
|
||||
|
||||
// GetRequestErrorLogs lists error request log files when RequestLog is disabled.
|
||||
// It returns an empty list when RequestLog is enabled.
|
||||
func (h *Handler) GetRequestErrorLogs(c *gin.Context) {
|
||||
if h == nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "handler unavailable"})
|
||||
return
|
||||
}
|
||||
if h.cfg == nil {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{"error": "configuration unavailable"})
|
||||
return
|
||||
}
|
||||
if h.cfg.RequestLog {
|
||||
c.JSON(http.StatusOK, gin.H{"files": []any{}})
|
||||
return
|
||||
}
|
||||
|
||||
dir := h.logDirectory()
|
||||
if strings.TrimSpace(dir) == "" {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "log directory not configured"})
|
||||
return
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
c.JSON(http.StatusOK, gin.H{"files": []any{}})
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to list request error logs: %v", err)})
|
||||
return
|
||||
}
|
||||
|
||||
type errorLog struct {
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Modified int64 `json:"modified"`
|
||||
}
|
||||
|
||||
files := make([]errorLog, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
name := entry.Name()
|
||||
if !strings.HasPrefix(name, "error-") || !strings.HasSuffix(name, ".log") {
|
||||
continue
|
||||
}
|
||||
info, errInfo := entry.Info()
|
||||
if errInfo != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to read log info for %s: %v", name, errInfo)})
|
||||
return
|
||||
}
|
||||
files = append(files, errorLog{
|
||||
Name: name,
|
||||
Size: info.Size(),
|
||||
Modified: info.ModTime().Unix(),
|
||||
})
|
||||
}
|
||||
|
||||
sort.Slice(files, func(i, j int) bool { return files[i].Modified > files[j].Modified })
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"files": files})
|
||||
}
|
||||
|
||||
// GetRequestLogByID finds and downloads a request log file by its request ID.
|
||||
// The ID is matched against the suffix of log file names (format: *-{requestID}.log).
|
||||
func (h *Handler) GetRequestLogByID(c *gin.Context) {
|
||||
if h == nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "handler unavailable"})
|
||||
return
|
||||
}
|
||||
if h.cfg == nil {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{"error": "configuration unavailable"})
|
||||
return
|
||||
}
|
||||
|
||||
dir := h.logDirectory()
|
||||
if strings.TrimSpace(dir) == "" {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "log directory not configured"})
|
||||
return
|
||||
}
|
||||
|
||||
requestID := strings.TrimSpace(c.Param("id"))
|
||||
if requestID == "" {
|
||||
requestID = strings.TrimSpace(c.Query("id"))
|
||||
}
|
||||
if requestID == "" {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "missing request ID"})
|
||||
return
|
||||
}
|
||||
if strings.ContainsAny(requestID, "/\\") {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request ID"})
|
||||
return
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "log directory not found"})
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to list log directory: %v", err)})
|
||||
return
|
||||
}
|
||||
|
||||
suffix := "-" + requestID + ".log"
|
||||
var matchedFile string
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
name := entry.Name()
|
||||
if strings.HasSuffix(name, suffix) {
|
||||
matchedFile = name
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if matchedFile == "" {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "log file not found for the given request ID"})
|
||||
return
|
||||
}
|
||||
|
||||
dirAbs, errAbs := filepath.Abs(dir)
|
||||
if errAbs != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to resolve log directory: %v", errAbs)})
|
||||
return
|
||||
}
|
||||
fullPath := filepath.Clean(filepath.Join(dirAbs, matchedFile))
|
||||
prefix := dirAbs + string(os.PathSeparator)
|
||||
if !strings.HasPrefix(fullPath, prefix) {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid log file path"})
|
||||
return
|
||||
}
|
||||
|
||||
info, errStat := os.Stat(fullPath)
|
||||
if errStat != nil {
|
||||
if os.IsNotExist(errStat) {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "log file not found"})
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to read log file: %v", errStat)})
|
||||
return
|
||||
}
|
||||
if info.IsDir() {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid log file"})
|
||||
return
|
||||
}
|
||||
|
||||
c.FileAttachment(fullPath, matchedFile)
|
||||
}
|
||||
|
||||
// DownloadRequestErrorLog downloads a specific error request log file by name.
|
||||
func (h *Handler) DownloadRequestErrorLog(c *gin.Context) {
|
||||
if h == nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "handler unavailable"})
|
||||
return
|
||||
}
|
||||
if h.cfg == nil {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{"error": "configuration unavailable"})
|
||||
return
|
||||
}
|
||||
|
||||
dir := h.logDirectory()
|
||||
if strings.TrimSpace(dir) == "" {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "log directory not configured"})
|
||||
return
|
||||
}
|
||||
|
||||
name := strings.TrimSpace(c.Param("name"))
|
||||
if name == "" || strings.Contains(name, "/") || strings.Contains(name, "\\") {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid log file name"})
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(name, "error-") || !strings.HasSuffix(name, ".log") {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "log file not found"})
|
||||
return
|
||||
}
|
||||
|
||||
dirAbs, errAbs := filepath.Abs(dir)
|
||||
if errAbs != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to resolve log directory: %v", errAbs)})
|
||||
return
|
||||
}
|
||||
fullPath := filepath.Clean(filepath.Join(dirAbs, name))
|
||||
prefix := dirAbs + string(os.PathSeparator)
|
||||
if !strings.HasPrefix(fullPath, prefix) {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid log file path"})
|
||||
return
|
||||
}
|
||||
|
||||
info, errStat := os.Stat(fullPath)
|
||||
if errStat != nil {
|
||||
if os.IsNotExist(errStat) {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "log file not found"})
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to read log file: %v", errStat)})
|
||||
return
|
||||
}
|
||||
if info.IsDir() {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid log file"})
|
||||
return
|
||||
}
|
||||
|
||||
c.FileAttachment(fullPath, name)
|
||||
}
|
||||
|
||||
func (h *Handler) logDirectory() string {
|
||||
if h == nil {
|
||||
return ""
|
||||
}
|
||||
if h.logDir != "" {
|
||||
return h.logDir
|
||||
}
|
||||
return logging.ResolveLogDirectory(h.cfg)
|
||||
}
|
||||
|
||||
func (h *Handler) collectLogFiles(dir string) ([]string, error) {
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
type candidate struct {
|
||||
path string
|
||||
order int64
|
||||
}
|
||||
cands := make([]candidate, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
name := entry.Name()
|
||||
if name == defaultLogFileName {
|
||||
cands = append(cands, candidate{path: filepath.Join(dir, name), order: 0})
|
||||
continue
|
||||
}
|
||||
if order, ok := rotationOrder(name); ok {
|
||||
cands = append(cands, candidate{path: filepath.Join(dir, name), order: order})
|
||||
}
|
||||
}
|
||||
if len(cands) == 0 {
|
||||
return []string{}, nil
|
||||
}
|
||||
sort.Slice(cands, func(i, j int) bool { return cands[i].order < cands[j].order })
|
||||
paths := make([]string, 0, len(cands))
|
||||
for i := len(cands) - 1; i >= 0; i-- {
|
||||
paths = append(paths, cands[i].path)
|
||||
}
|
||||
return paths, nil
|
||||
}
|
||||
|
||||
type logAccumulator struct {
|
||||
cutoff int64
|
||||
limit int
|
||||
lines []string
|
||||
total int
|
||||
latest int64
|
||||
include bool
|
||||
}
|
||||
|
||||
func newLogAccumulator(cutoff int64, limit int) *logAccumulator {
|
||||
capacity := 256
|
||||
if limit > 0 && limit < capacity {
|
||||
capacity = limit
|
||||
}
|
||||
return &logAccumulator{
|
||||
cutoff: cutoff,
|
||||
limit: limit,
|
||||
lines: make([]string, 0, capacity),
|
||||
}
|
||||
}
|
||||
|
||||
func (acc *logAccumulator) consumeFile(path string) error {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
_ = file.Close()
|
||||
}()
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
buf := make([]byte, 0, logScannerInitialBuffer)
|
||||
scanner.Buffer(buf, logScannerMaxBuffer)
|
||||
for scanner.Scan() {
|
||||
acc.addLine(scanner.Text())
|
||||
}
|
||||
if errScan := scanner.Err(); errScan != nil {
|
||||
return errScan
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (acc *logAccumulator) addLine(raw string) {
|
||||
line := strings.TrimRight(raw, "\r")
|
||||
acc.total++
|
||||
ts := parseTimestamp(line)
|
||||
if ts > acc.latest {
|
||||
acc.latest = ts
|
||||
}
|
||||
if ts > 0 {
|
||||
acc.include = acc.cutoff == 0 || ts > acc.cutoff
|
||||
if acc.cutoff == 0 || acc.include {
|
||||
acc.append(line)
|
||||
}
|
||||
return
|
||||
}
|
||||
if acc.cutoff == 0 || acc.include {
|
||||
acc.append(line)
|
||||
}
|
||||
}
|
||||
|
||||
func (acc *logAccumulator) append(line string) {
|
||||
acc.lines = append(acc.lines, line)
|
||||
if acc.limit > 0 && len(acc.lines) > acc.limit {
|
||||
acc.lines = acc.lines[len(acc.lines)-acc.limit:]
|
||||
}
|
||||
}
|
||||
|
||||
func (acc *logAccumulator) result() ([]string, int, int64) {
|
||||
if acc.lines == nil {
|
||||
acc.lines = []string{}
|
||||
}
|
||||
return acc.lines, acc.total, acc.latest
|
||||
}
|
||||
|
||||
func parseCutoff(raw string) int64 {
|
||||
value := strings.TrimSpace(raw)
|
||||
if value == "" {
|
||||
return 0
|
||||
}
|
||||
ts, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil || ts <= 0 {
|
||||
return 0
|
||||
}
|
||||
return ts
|
||||
}
|
||||
|
||||
func parseLimit(raw string) (int, error) {
|
||||
value := strings.TrimSpace(raw)
|
||||
if value == "" {
|
||||
return 0, nil
|
||||
}
|
||||
limit, err := strconv.Atoi(value)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("must be a positive integer")
|
||||
}
|
||||
if limit <= 0 {
|
||||
return 0, fmt.Errorf("must be greater than zero")
|
||||
}
|
||||
return limit, nil
|
||||
}
|
||||
|
||||
func parseTimestamp(line string) int64 {
|
||||
if strings.HasPrefix(line, "[") {
|
||||
line = line[1:]
|
||||
}
|
||||
if len(line) < 19 {
|
||||
return 0
|
||||
}
|
||||
candidate := line[:19]
|
||||
t, err := time.ParseInLocation("2006-01-02 15:04:05", candidate, time.Local)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return t.Unix()
|
||||
}
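parseTimestamp only inspects the first 19 characters of a line (optionally after a leading "["), expecting a local "2006-01-02 15:04:05" prefix; anything else yields 0, and addLine then reuses the include/exclude decision of the previous timestamped line. A small standalone sketch of that prefix parsing and cutoff filtering, re-implemented here for illustration:

package main

import (
    "fmt"
    "strings"
    "time"
)

// parsePrefixTimestamp mirrors the rule shown above: strip an optional leading
// '[' and parse the first 19 characters as a local timestamp.
func parsePrefixTimestamp(line string) int64 {
    if strings.HasPrefix(line, "[") {
        line = line[1:]
    }
    if len(line) < 19 {
        return 0
    }
    t, err := time.ParseInLocation("2006-01-02 15:04:05", line[:19], time.Local)
    if err != nil {
        return 0
    }
    return t.Unix()
}

func main() {
    lines := []string{
        "[2024-05-01 10:00:00] starting up",
        "    continuation without a timestamp", // inherits the previous line's decision
        "[2024-05-01 10:05:00] request served",
    }
    cutoff := parsePrefixTimestamp("[2024-05-01 10:01:00] cutoff marker")

    include := false
    for _, line := range lines {
        if ts := parsePrefixTimestamp(line); ts > 0 {
            include = cutoff == 0 || ts > cutoff
        }
        if cutoff == 0 || include {
            fmt.Println("kept:", line)
        }
    }
    // Only the 10:05:00 line is kept; the untimestamped continuation of the
    // 10:00:00 line is dropped along with it because it predates the cutoff.
}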
|
||||
|
||||
func isRotatedLogFile(name string) bool {
|
||||
if _, ok := rotationOrder(name); ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func rotationOrder(name string) (int64, bool) {
|
||||
if order, ok := numericRotationOrder(name); ok {
|
||||
return order, true
|
||||
}
|
||||
if order, ok := timestampRotationOrder(name); ok {
|
||||
return order, true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func numericRotationOrder(name string) (int64, bool) {
|
||||
if !strings.HasPrefix(name, defaultLogFileName+".") {
|
||||
return 0, false
|
||||
}
|
||||
suffix := strings.TrimPrefix(name, defaultLogFileName+".")
|
||||
if suffix == "" {
|
||||
return 0, false
|
||||
}
|
||||
n, err := strconv.Atoi(suffix)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return int64(n), true
|
||||
}
|
||||
|
||||
func timestampRotationOrder(name string) (int64, bool) {
|
||||
ext := filepath.Ext(defaultLogFileName)
|
||||
base := strings.TrimSuffix(defaultLogFileName, ext)
|
||||
if base == "" {
|
||||
return 0, false
|
||||
}
|
||||
prefix := base + "-"
|
||||
if !strings.HasPrefix(name, prefix) {
|
||||
return 0, false
|
||||
}
|
||||
clean := strings.TrimPrefix(name, prefix)
|
||||
if strings.HasSuffix(clean, ".gz") {
|
||||
clean = strings.TrimSuffix(clean, ".gz")
|
||||
}
|
||||
if ext != "" {
|
||||
if !strings.HasSuffix(clean, ext) {
|
||||
return 0, false
|
||||
}
|
||||
clean = strings.TrimSuffix(clean, ext)
|
||||
}
|
||||
if clean == "" {
|
||||
return 0, false
|
||||
}
|
||||
if idx := strings.IndexByte(clean, '.'); idx != -1 {
|
||||
clean = clean[:idx]
|
||||
}
|
||||
parsed, err := time.ParseInLocation("2006-01-02T15-04-05", clean, time.Local)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return math.MaxInt64 - parsed.Unix(), true
|
||||
}
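The rotation ranking gives the active main.log order 0, keeps N for numeric rotations (main.log.N), and maps timestamped rotations to math.MaxInt64 minus their Unix time, so an ascending sort followed by the reverse walk in collectLogFiles reads the oldest rotations first and the active file last. A compact, self-contained sketch of that ordering; the order function below hardcodes the example names instead of reparsing suffixes, purely for illustration:

package main

import (
    "fmt"
    "math"
    "sort"
    "time"
)

// order mirrors the ranking described above for a fixed set of example names.
func order(name string) int64 {
    switch name {
    case "main.log":
        return 0
    case "main.log.1":
        return 1
    case "main.log.2":
        return 2
    }
    // Timestamped rotation, e.g. main-2024-05-01T10-00-00.log.
    ts, _ := time.ParseInLocation("main-2006-01-02T15-04-05.log", name, time.Local)
    return math.MaxInt64 - ts.Unix()
}

func main() {
    files := []string{
        "main-2024-05-01T10-00-00.log",
        "main.log",
        "main.log.2",
        "main-2024-05-02T10-00-00.log",
        "main.log.1",
    }
    sort.Slice(files, func(i, j int) bool { return order(files[i]) < order(files[j]) })

    // collectLogFiles walks the sorted slice in reverse so the oldest content
    // is read first and the active main.log is consumed last.
    for i := len(files) - 1; i >= 0; i-- {
        fmt.Println(files[i])
    }
    // Prints the 2024-05-01 rotation, then 2024-05-02, then main.log.2,
    // main.log.1 and finally main.log.
}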
33 internal/api/handlers/management/model_definitions.go (Normal file)
@@ -0,0 +1,33 @@
package management

import (
    "net/http"
    "strings"

    "github.com/gin-gonic/gin"
    "github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
)

// GetStaticModelDefinitions returns static model metadata for a given channel.
// Channel is provided via path param (:channel) or query param (?channel=...).
func (h *Handler) GetStaticModelDefinitions(c *gin.Context) {
    channel := strings.TrimSpace(c.Param("channel"))
    if channel == "" {
        channel = strings.TrimSpace(c.Query("channel"))
    }
    if channel == "" {
        c.JSON(http.StatusBadRequest, gin.H{"error": "channel is required"})
        return
    }

    models := registry.GetStaticModelDefinitionsByChannel(channel)
    if models == nil {
        c.JSON(http.StatusBadRequest, gin.H{"error": "unknown channel", "channel": channel})
        return
    }

    c.JSON(http.StatusOK, gin.H{
        "channel": strings.ToLower(strings.TrimSpace(channel)),
        "models":  models,
    })
}
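The handler accepts the channel either as a :channel path segment or as a ?channel= query fallback. A minimal gin/httptest sketch of that lookup pattern; the route paths and the trivial handler body are illustrative, not the server's actual wiring:

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
    "strings"

    "github.com/gin-gonic/gin"
)

func main() {
    gin.SetMode(gin.TestMode)
    r := gin.New()

    // Channel comes from the :channel path segment, falling back to ?channel=.
    handler := func(c *gin.Context) {
        channel := strings.TrimSpace(c.Param("channel"))
        if channel == "" {
            channel = strings.TrimSpace(c.Query("channel"))
        }
        if channel == "" {
            c.JSON(http.StatusBadRequest, gin.H{"error": "channel is required"})
            return
        }
        c.JSON(http.StatusOK, gin.H{"channel": strings.ToLower(channel)})
    }
    r.GET("/models/:channel", handler)
    r.GET("/models", handler)

    for _, target := range []string{"/models/Gemini", "/models?channel=codex", "/models"} {
        w := httptest.NewRecorder()
        r.ServeHTTP(w, httptest.NewRequest(http.MethodGet, target, nil))
        fmt.Println(target, "->", w.Code, w.Body.String())
    }
    // The first two requests return 200 with the normalized channel name;
    // the bare /models request returns 400.
}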
100 internal/api/handlers/management/oauth_callback.go (Normal file)
@@ -0,0 +1,100 @@
package management

import (
    "errors"
    "net/http"
    "net/url"
    "strings"

    "github.com/gin-gonic/gin"
)

type oauthCallbackRequest struct {
    Provider    string `json:"provider"`
    RedirectURL string `json:"redirect_url"`
    Code        string `json:"code"`
    State       string `json:"state"`
    Error       string `json:"error"`
}

func (h *Handler) PostOAuthCallback(c *gin.Context) {
    if h == nil || h.cfg == nil {
        c.JSON(http.StatusInternalServerError, gin.H{"status": "error", "error": "handler not initialized"})
        return
    }

    var req oauthCallbackRequest
    if err := c.ShouldBindJSON(&req); err != nil {
        c.JSON(http.StatusBadRequest, gin.H{"status": "error", "error": "invalid body"})
        return
    }

    canonicalProvider, err := NormalizeOAuthProvider(req.Provider)
    if err != nil {
        c.JSON(http.StatusBadRequest, gin.H{"status": "error", "error": "unsupported provider"})
        return
    }

    state := strings.TrimSpace(req.State)
    code := strings.TrimSpace(req.Code)
    errMsg := strings.TrimSpace(req.Error)

    if rawRedirect := strings.TrimSpace(req.RedirectURL); rawRedirect != "" {
        u, errParse := url.Parse(rawRedirect)
        if errParse != nil {
            c.JSON(http.StatusBadRequest, gin.H{"status": "error", "error": "invalid redirect_url"})
            return
        }
        q := u.Query()
        if state == "" {
            state = strings.TrimSpace(q.Get("state"))
        }
        if code == "" {
            code = strings.TrimSpace(q.Get("code"))
        }
        if errMsg == "" {
            errMsg = strings.TrimSpace(q.Get("error"))
            if errMsg == "" {
                errMsg = strings.TrimSpace(q.Get("error_description"))
            }
        }
    }

    if state == "" {
        c.JSON(http.StatusBadRequest, gin.H{"status": "error", "error": "state is required"})
        return
    }
    if err := ValidateOAuthState(state); err != nil {
        c.JSON(http.StatusBadRequest, gin.H{"status": "error", "error": "invalid state"})
        return
    }
    if code == "" && errMsg == "" {
        c.JSON(http.StatusBadRequest, gin.H{"status": "error", "error": "code or error is required"})
        return
    }

    sessionProvider, sessionStatus, ok := GetOAuthSession(state)
    if !ok {
        c.JSON(http.StatusNotFound, gin.H{"status": "error", "error": "unknown or expired state"})
        return
    }
    if sessionStatus != "" {
        c.JSON(http.StatusConflict, gin.H{"status": "error", "error": "oauth flow is not pending"})
        return
    }
    if !strings.EqualFold(sessionProvider, canonicalProvider) {
        c.JSON(http.StatusBadRequest, gin.H{"status": "error", "error": "provider does not match state"})
        return
    }

    if _, errWrite := WriteOAuthCallbackFileForPendingSession(h.cfg.AuthDir, canonicalProvider, state, code, errMsg); errWrite != nil {
        if errors.Is(errWrite, errOAuthSessionNotPending) {
            c.JSON(http.StatusConflict, gin.H{"status": "error", "error": "oauth flow is not pending"})
            return
        }
        c.JSON(http.StatusInternalServerError, gin.H{"status": "error", "error": "failed to persist oauth callback"})
        return
    }

    c.JSON(http.StatusOK, gin.H{"status": "ok"})
}
283 internal/api/handlers/management/oauth_sessions.go (Normal file)
@@ -0,0 +1,283 @@
|
||||
package management
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
oauthSessionTTL = 10 * time.Minute
|
||||
maxOAuthStateLength = 128
|
||||
)
|
||||
|
||||
var (
|
||||
errInvalidOAuthState = errors.New("invalid oauth state")
|
||||
errUnsupportedOAuthFlow = errors.New("unsupported oauth provider")
|
||||
errOAuthSessionNotPending = errors.New("oauth session is not pending")
|
||||
)
|
||||
|
||||
type oauthSession struct {
|
||||
Provider string
|
||||
Status string
|
||||
CreatedAt time.Time
|
||||
ExpiresAt time.Time
|
||||
}
|
||||
|
||||
type oauthSessionStore struct {
|
||||
mu sync.RWMutex
|
||||
ttl time.Duration
|
||||
sessions map[string]oauthSession
|
||||
}
|
||||
|
||||
func newOAuthSessionStore(ttl time.Duration) *oauthSessionStore {
|
||||
if ttl <= 0 {
|
||||
ttl = oauthSessionTTL
|
||||
}
|
||||
return &oauthSessionStore{
|
||||
ttl: ttl,
|
||||
sessions: make(map[string]oauthSession),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *oauthSessionStore) purgeExpiredLocked(now time.Time) {
|
||||
for state, session := range s.sessions {
|
||||
if !session.ExpiresAt.IsZero() && now.After(session.ExpiresAt) {
|
||||
delete(s.sessions, state)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *oauthSessionStore) Register(state, provider string) {
|
||||
state = strings.TrimSpace(state)
|
||||
provider = strings.ToLower(strings.TrimSpace(provider))
|
||||
if state == "" || provider == "" {
|
||||
return
|
||||
}
|
||||
now := time.Now()
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.purgeExpiredLocked(now)
|
||||
s.sessions[state] = oauthSession{
|
||||
Provider: provider,
|
||||
Status: "",
|
||||
CreatedAt: now,
|
||||
ExpiresAt: now.Add(s.ttl),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *oauthSessionStore) SetError(state, message string) {
|
||||
state = strings.TrimSpace(state)
|
||||
message = strings.TrimSpace(message)
|
||||
if state == "" {
|
||||
return
|
||||
}
|
||||
if message == "" {
|
||||
message = "Authentication failed"
|
||||
}
|
||||
now := time.Now()
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.purgeExpiredLocked(now)
|
||||
session, ok := s.sessions[state]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
session.Status = message
|
||||
session.ExpiresAt = now.Add(s.ttl)
|
||||
s.sessions[state] = session
|
||||
}
|
||||
|
||||
func (s *oauthSessionStore) Complete(state string) {
|
||||
state = strings.TrimSpace(state)
|
||||
if state == "" {
|
||||
return
|
||||
}
|
||||
now := time.Now()
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.purgeExpiredLocked(now)
|
||||
delete(s.sessions, state)
|
||||
}
|
||||
|
||||
func (s *oauthSessionStore) CompleteProvider(provider string) int {
|
||||
provider = strings.ToLower(strings.TrimSpace(provider))
|
||||
if provider == "" {
|
||||
return 0
|
||||
}
|
||||
now := time.Now()
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.purgeExpiredLocked(now)
|
||||
removed := 0
|
||||
for state, session := range s.sessions {
|
||||
if strings.EqualFold(session.Provider, provider) {
|
||||
delete(s.sessions, state)
|
||||
removed++
|
||||
}
|
||||
}
|
||||
return removed
|
||||
}
|
||||
|
||||
func (s *oauthSessionStore) Get(state string) (oauthSession, bool) {
|
||||
state = strings.TrimSpace(state)
|
||||
now := time.Now()
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.purgeExpiredLocked(now)
|
||||
session, ok := s.sessions[state]
|
||||
return session, ok
|
||||
}
|
||||
|
||||
func (s *oauthSessionStore) IsPending(state, provider string) bool {
|
||||
state = strings.TrimSpace(state)
|
||||
provider = strings.ToLower(strings.TrimSpace(provider))
|
||||
now := time.Now()
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.purgeExpiredLocked(now)
|
||||
session, ok := s.sessions[state]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if session.Status != "" {
|
||||
return false
|
||||
}
|
||||
if provider == "" {
|
||||
return true
|
||||
}
|
||||
return strings.EqualFold(session.Provider, provider)
|
||||
}
|
||||
|
||||
var oauthSessions = newOAuthSessionStore(oauthSessionTTL)
|
||||
|
||||
func RegisterOAuthSession(state, provider string) { oauthSessions.Register(state, provider) }
|
||||
|
||||
func SetOAuthSessionError(state, message string) { oauthSessions.SetError(state, message) }
|
||||
|
||||
func CompleteOAuthSession(state string) { oauthSessions.Complete(state) }
|
||||
|
||||
func CompleteOAuthSessionsByProvider(provider string) int {
|
||||
return oauthSessions.CompleteProvider(provider)
|
||||
}
|
||||
|
||||
func GetOAuthSession(state string) (provider string, status string, ok bool) {
|
||||
session, ok := oauthSessions.Get(state)
|
||||
if !ok {
|
||||
return "", "", false
|
||||
}
|
||||
return session.Provider, session.Status, true
|
||||
}
|
||||
|
||||
func IsOAuthSessionPending(state, provider string) bool {
|
||||
return oauthSessions.IsPending(state, provider)
|
||||
}
|
||||
|
||||
func ValidateOAuthState(state string) error {
|
||||
trimmed := strings.TrimSpace(state)
|
||||
if trimmed == "" {
|
||||
return fmt.Errorf("%w: empty", errInvalidOAuthState)
|
||||
}
|
||||
if len(trimmed) > maxOAuthStateLength {
|
||||
return fmt.Errorf("%w: too long", errInvalidOAuthState)
|
||||
}
|
||||
if strings.Contains(trimmed, "/") || strings.Contains(trimmed, "\\") {
|
||||
return fmt.Errorf("%w: contains path separator", errInvalidOAuthState)
|
||||
}
|
||||
if strings.Contains(trimmed, "..") {
|
||||
return fmt.Errorf("%w: contains '..'", errInvalidOAuthState)
|
||||
}
|
||||
for _, r := range trimmed {
|
||||
switch {
|
||||
case r >= 'a' && r <= 'z':
|
||||
case r >= 'A' && r <= 'Z':
|
||||
case r >= '0' && r <= '9':
|
||||
case r == '-' || r == '_' || r == '.':
|
||||
default:
|
||||
return fmt.Errorf("%w: invalid character", errInvalidOAuthState)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
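ValidateOAuthState restricts the state to at most 128 characters drawn from [A-Za-z0-9._-], explicitly rejecting path separators and "..", so the value is safe to embed in a callback file name. A quick standalone sketch of that character-set check against a few sample states, re-implemented here for illustration:

package main

import (
    "fmt"
    "strings"
)

// validState mirrors the rules shown above: non-empty, at most 128 characters,
// no path separators or "..", and only [A-Za-z0-9._-].
func validState(state string) bool {
    s := strings.TrimSpace(state)
    if s == "" || len(s) > 128 {
        return false
    }
    if strings.ContainsAny(s, "/\\") || strings.Contains(s, "..") {
        return false
    }
    for _, r := range s {
        switch {
        case r >= 'a' && r <= 'z', r >= 'A' && r <= 'Z', r >= '0' && r <= '9':
        case r == '-' || r == '_' || r == '.':
        default:
            return false
        }
    }
    return true
}

func main() {
    for _, s := range []string{
        "abc123-XYZ_state",       // ok
        "../../etc/passwd",       // rejected: path traversal characters
        "state with spaces",      // rejected: space is not in the allowed set
        strings.Repeat("a", 200), // rejected: longer than 128 characters
    } {
        fmt.Printf("%q -> %v\n", s, validState(s))
    }
}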
|
||||
|
||||
func NormalizeOAuthProvider(provider string) (string, error) {
|
||||
switch strings.ToLower(strings.TrimSpace(provider)) {
|
||||
case "anthropic", "claude":
|
||||
return "anthropic", nil
|
||||
case "codex", "openai":
|
||||
return "codex", nil
|
||||
case "gemini", "google":
|
||||
return "gemini", nil
|
||||
case "iflow", "i-flow":
|
||||
return "iflow", nil
|
||||
case "antigravity", "anti-gravity":
|
||||
return "antigravity", nil
|
||||
case "qwen":
|
||||
return "qwen", nil
|
||||
default:
|
||||
return "", errUnsupportedOAuthFlow
|
||||
}
|
||||
}
|
||||
|
||||
type oauthCallbackFilePayload struct {
|
||||
Code string `json:"code"`
|
||||
State string `json:"state"`
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
func WriteOAuthCallbackFile(authDir, provider, state, code, errorMessage string) (string, error) {
|
||||
if strings.TrimSpace(authDir) == "" {
|
||||
return "", fmt.Errorf("auth dir is empty")
|
||||
}
|
||||
canonicalProvider, err := NormalizeOAuthProvider(provider)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := ValidateOAuthState(state); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
fileName := fmt.Sprintf(".oauth-%s-%s.oauth", canonicalProvider, state)
|
||||
filePath := filepath.Join(authDir, fileName)
|
||||
payload := oauthCallbackFilePayload{
|
||||
Code: strings.TrimSpace(code),
|
||||
State: strings.TrimSpace(state),
|
||||
Error: strings.TrimSpace(errorMessage),
|
||||
}
|
||||
data, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("marshal oauth callback payload: %w", err)
|
||||
}
|
||||
if err := os.WriteFile(filePath, data, 0o600); err != nil {
|
||||
return "", fmt.Errorf("write oauth callback file: %w", err)
|
||||
}
|
||||
return filePath, nil
|
||||
}
|
||||
|
||||
func WriteOAuthCallbackFileForPendingSession(authDir, provider, state, code, errorMessage string) (string, error) {
|
||||
canonicalProvider, err := NormalizeOAuthProvider(provider)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !IsOAuthSessionPending(state, canonicalProvider) {
|
||||
return "", errOAuthSessionNotPending
|
||||
}
|
||||
return WriteOAuthCallbackFile(authDir, canonicalProvider, state, code, errorMessage)
|
||||
}
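WriteOAuthCallbackFile persists the callback as a small JSON document named .oauth-{provider}-{state}.oauth inside the auth directory, written with 0600 permissions so only the proxy user can read the authorization code. A minimal sketch of that file layout, writing to a temporary directory; the code value is made up for the example:

package main

import (
    "encoding/json"
    "fmt"
    "os"
    "path/filepath"
)

// callbackPayload mirrors the {code, state, error} JSON written by the handler.
type callbackPayload struct {
    Code  string `json:"code"`
    State string `json:"state"`
    Error string `json:"error"`
}

func main() {
    authDir, err := os.MkdirTemp("", "authdir")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(authDir)

    provider, state := "codex", "abc123"
    payload := callbackPayload{Code: "auth-code-from-provider", State: state}

    data, err := json.Marshal(payload)
    if err != nil {
        panic(err)
    }
    // File name convention and the 0600 mode follow the diff above.
    path := filepath.Join(authDir, fmt.Sprintf(".oauth-%s-%s.oauth", provider, state))
    if err := os.WriteFile(path, data, 0o600); err != nil {
        panic(err)
    }
    fmt.Println("wrote", path, string(data))
}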
@@ -1,17 +1,79 @@
package management

import (
    "encoding/json"
    "net/http"
    "time"

    "github.com/gin-gonic/gin"
    "github.com/router-for-me/CLIProxyAPI/v6/internal/usage"
)

type usageExportPayload struct {
    Version    int                      `json:"version"`
    ExportedAt time.Time                `json:"exported_at"`
    Usage      usage.StatisticsSnapshot `json:"usage"`
}

type usageImportPayload struct {
    Version int                      `json:"version"`
    Usage   usage.StatisticsSnapshot `json:"usage"`
}

// GetUsageStatistics returns the in-memory request statistics snapshot.
func (h *Handler) GetUsageStatistics(c *gin.Context) {
    var snapshot usage.StatisticsSnapshot
    if h != nil && h.usageStats != nil {
        snapshot = h.usageStats.Snapshot()
    }
    c.JSON(http.StatusOK, gin.H{"usage": snapshot})
    c.JSON(http.StatusOK, gin.H{
        "usage":           snapshot,
        "failed_requests": snapshot.FailureCount,
    })
}

// ExportUsageStatistics returns a complete usage snapshot for backup/migration.
func (h *Handler) ExportUsageStatistics(c *gin.Context) {
    var snapshot usage.StatisticsSnapshot
    if h != nil && h.usageStats != nil {
        snapshot = h.usageStats.Snapshot()
    }
    c.JSON(http.StatusOK, usageExportPayload{
        Version:    1,
        ExportedAt: time.Now().UTC(),
        Usage:      snapshot,
    })
}

// ImportUsageStatistics merges a previously exported usage snapshot into memory.
func (h *Handler) ImportUsageStatistics(c *gin.Context) {
    if h == nil || h.usageStats == nil {
        c.JSON(http.StatusBadRequest, gin.H{"error": "usage statistics unavailable"})
        return
    }

    data, err := c.GetRawData()
    if err != nil {
        c.JSON(http.StatusBadRequest, gin.H{"error": "failed to read request body"})
        return
    }

    var payload usageImportPayload
    if err := json.Unmarshal(data, &payload); err != nil {
        c.JSON(http.StatusBadRequest, gin.H{"error": "invalid json"})
        return
    }
    if payload.Version != 0 && payload.Version != 1 {
        c.JSON(http.StatusBadRequest, gin.H{"error": "unsupported version"})
        return
    }

    result := h.usageStats.MergeSnapshot(payload.Usage)
    snapshot := h.usageStats.Snapshot()
    c.JSON(http.StatusOK, gin.H{
        "added":           result.Added,
        "skipped":         result.Skipped,
        "total_requests":  snapshot.TotalRequests,
        "failed_requests": snapshot.FailureCount,
    })
}

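The export payload wraps the snapshot with a version number and an exported_at timestamp, and ImportUsageStatistics accepts version 0 or 1 and reports how many entries were added versus skipped. A rough sketch of the round trip from a client's point of view; the snapshot body is treated as opaque JSON because usage.StatisticsSnapshot's fields are not shown in this diff:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// exportEnvelope mirrors usageExportPayload; the snapshot itself is kept as
// raw JSON since its concrete fields are defined elsewhere in the repo.
type exportEnvelope struct {
    Version    int             `json:"version"`
    ExportedAt time.Time       `json:"exported_at"`
    Usage      json.RawMessage `json:"usage"`
}

// importEnvelope mirrors usageImportPayload.
type importEnvelope struct {
    Version int             `json:"version"`
    Usage   json.RawMessage `json:"usage"`
}

func main() {
    // Pretend this came back from the export endpoint.
    exported := exportEnvelope{
        Version:    1,
        ExportedAt: time.Now().UTC(),
        Usage:      json.RawMessage(`{"total_requests": 42}`), // illustrative snapshot body
    }
    blob, _ := json.Marshal(exported)
    fmt.Println("export:", string(blob))

    // Re-post only {version, usage} when importing on another instance.
    var roundTrip exportEnvelope
    _ = json.Unmarshal(blob, &roundTrip)
    importBody, _ := json.Marshal(importEnvelope{Version: roundTrip.Version, Usage: roundTrip.Usage})
    fmt.Println("import:", string(importBody))
}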
156 internal/api/handlers/management/vertex_import.go (Normal file)
@@ -0,0 +1,156 @@
|
||||
package management
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/vertex"
|
||||
coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
)
|
||||
|
||||
// ImportVertexCredential handles uploading a Vertex service account JSON and saving it as an auth record.
|
||||
func (h *Handler) ImportVertexCredential(c *gin.Context) {
|
||||
if h == nil || h.cfg == nil {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{"error": "config unavailable"})
|
||||
return
|
||||
}
|
||||
if h.cfg.AuthDir == "" {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{"error": "auth directory not configured"})
|
||||
return
|
||||
}
|
||||
|
||||
fileHeader, err := c.FormFile("file")
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "file required"})
|
||||
return
|
||||
}
|
||||
|
||||
file, err := fileHeader.Open()
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("failed to read file: %v", err)})
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
data, err := io.ReadAll(file)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("failed to read file: %v", err)})
|
||||
return
|
||||
}
|
||||
|
||||
var serviceAccount map[string]any
|
||||
if err := json.Unmarshal(data, &serviceAccount); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid json", "message": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
normalizedSA, err := vertex.NormalizeServiceAccountMap(serviceAccount)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid service account", "message": err.Error()})
|
||||
return
|
||||
}
|
||||
serviceAccount = normalizedSA
|
||||
|
||||
projectID := strings.TrimSpace(valueAsString(serviceAccount["project_id"]))
|
||||
if projectID == "" {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "project_id missing"})
|
||||
return
|
||||
}
|
||||
email := strings.TrimSpace(valueAsString(serviceAccount["client_email"]))
|
||||
|
||||
location := strings.TrimSpace(c.PostForm("location"))
|
||||
if location == "" {
|
||||
location = strings.TrimSpace(c.Query("location"))
|
||||
}
|
||||
if location == "" {
|
||||
location = "us-central1"
|
||||
}
|
||||
|
||||
fileName := fmt.Sprintf("vertex-%s.json", sanitizeVertexFilePart(projectID))
|
||||
label := labelForVertex(projectID, email)
|
||||
storage := &vertex.VertexCredentialStorage{
|
||||
ServiceAccount: serviceAccount,
|
||||
ProjectID: projectID,
|
||||
Email: email,
|
||||
Location: location,
|
||||
Type: "vertex",
|
||||
}
|
||||
metadata := map[string]any{
|
||||
"service_account": serviceAccount,
|
||||
"project_id": projectID,
|
||||
"email": email,
|
||||
"location": location,
|
||||
"type": "vertex",
|
||||
"label": label,
|
||||
}
|
||||
record := &coreauth.Auth{
|
||||
ID: fileName,
|
||||
Provider: "vertex",
|
||||
FileName: fileName,
|
||||
Storage: storage,
|
||||
Label: label,
|
||||
Metadata: metadata,
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
if reqCtx := c.Request.Context(); reqCtx != nil {
|
||||
ctx = reqCtx
|
||||
}
|
||||
savedPath, err := h.saveTokenRecord(ctx, record)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "save_failed", "message": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"status": "ok",
|
||||
"auth-file": savedPath,
|
||||
"project_id": projectID,
|
||||
"email": email,
|
||||
"location": location,
|
||||
})
|
||||
}
|
||||
|
||||
func valueAsString(v any) string {
|
||||
if v == nil {
|
||||
return ""
|
||||
}
|
||||
switch t := v.(type) {
|
||||
case string:
|
||||
return t
|
||||
default:
|
||||
return fmt.Sprint(t)
|
||||
}
|
||||
}
|
||||
|
||||
func sanitizeVertexFilePart(s string) string {
|
||||
out := strings.TrimSpace(s)
|
||||
replacers := []string{"/", "_", "\\", "_", ":", "_", " ", "-"}
|
||||
for i := 0; i < len(replacers); i += 2 {
|
||||
out = strings.ReplaceAll(out, replacers[i], replacers[i+1])
|
||||
}
|
||||
if out == "" {
|
||||
return "vertex"
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func labelForVertex(projectID, email string) string {
|
||||
p := strings.TrimSpace(projectID)
|
||||
e := strings.TrimSpace(email)
|
||||
if p != "" && e != "" {
|
||||
return fmt.Sprintf("%s (%s)", p, e)
|
||||
}
|
||||
if p != "" {
|
||||
return p
|
||||
}
|
||||
if e != "" {
|
||||
return e
|
||||
}
|
||||
return "vertex"
|
||||
}
|
||||
@@ -6,19 +6,33 @@ package middleware
import (
    "bytes"
    "io"
    "net/http"
    "strings"
    "time"

    "github.com/gin-gonic/gin"
    "github.com/router-for-me/CLIProxyAPI/v6/internal/logging"
    "github.com/router-for-me/CLIProxyAPI/v6/internal/util"
)

// RequestLoggingMiddleware creates a Gin middleware that logs HTTP requests and responses.
// It captures detailed information about the request and response, including headers and body,
// and uses the provided RequestLogger to record this data. If logging is disabled in the
// logger, the middleware has minimal overhead.
// and uses the provided RequestLogger to record this data. When logging is disabled in the
// logger, it still captures data so that upstream errors can be persisted.
func RequestLoggingMiddleware(logger logging.RequestLogger) gin.HandlerFunc {
    return func(c *gin.Context) {
        // Early return if logging is disabled (zero overhead)
        if !logger.IsEnabled() {
        if logger == nil {
            c.Next()
            return
        }

        if c.Request.Method == http.MethodGet {
            c.Next()
            return
        }

        path := c.Request.URL.Path
        if !shouldLogRequest(path) {
            c.Next()
            return
        }
@@ -34,6 +48,9 @@ func RequestLoggingMiddleware(logger logging.RequestLogger) gin.HandlerFunc {

        // Create response writer wrapper
        wrapper := NewResponseWriterWrapper(c.Writer, logger, requestInfo)
        if !logger.IsEnabled() {
            wrapper.logOnErrorOnly = true
        }
        c.Writer = wrapper

        // Process the request
@@ -51,13 +68,11 @@ func RequestLoggingMiddleware(logger logging.RequestLogger) gin.HandlerFunc {
// It captures the URL, method, headers, and body. The request body is read and then
// restored so that it can be processed by subsequent handlers.
func captureRequestInfo(c *gin.Context) (*RequestInfo, error) {
    // Capture URL
    url := c.Request.URL.String()
    if c.Request.URL.Path != "" {
        url = c.Request.URL.Path
        if c.Request.URL.RawQuery != "" {
            url += "?" + c.Request.URL.RawQuery
        }
    // Capture URL with sensitive query parameters masked
    maskedQuery := util.MaskSensitiveQuery(c.Request.URL.RawQuery)
    url := c.Request.URL.Path
    if maskedQuery != "" {
        url += "?" + maskedQuery
    }

    // Capture method
@@ -88,5 +103,22 @@ func captureRequestInfo(c *gin.Context) (*RequestInfo, error) {
        Method:    method,
        Headers:   headers,
        Body:      body,
        RequestID: logging.GetGinRequestID(c),
        Timestamp: time.Now(),
    }, nil
}

// shouldLogRequest determines whether the request should be logged.
// It skips management endpoints to avoid leaking secrets but allows
// all other routes, including module-provided ones, to honor request-log.
func shouldLogRequest(path string) bool {
    if strings.HasPrefix(path, "/v0/management") || strings.HasPrefix(path, "/management") {
        return false
    }

    if strings.HasPrefix(path, "/api") {
        return strings.HasPrefix(path, "/api/provider")
    }

    return true
}

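shouldLogRequest drops management routes entirely, allows /api only for the /api/provider aliases, and logs everything else. A tiny table of example paths against a standalone copy of that check (the sample paths themselves are illustrative):

package main

import (
    "fmt"
    "strings"
)

// shouldLog is a standalone copy of the path filter shown above.
func shouldLog(path string) bool {
    if strings.HasPrefix(path, "/v0/management") || strings.HasPrefix(path, "/management") {
        return false
    }
    if strings.HasPrefix(path, "/api") {
        return strings.HasPrefix(path, "/api/provider")
    }
    return true
}

func main() {
    for _, p := range []string{
        "/v1/chat/completions",      // logged
        "/v0/management/logs",       // skipped: management endpoint
        "/api/provider/openai/chat", // logged: provider alias under /api
        "/api/internal/health",      // skipped: other /api routes
    } {
        fmt.Printf("%-28s -> %v\n", p, shouldLog(p))
    }
}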
@@ -5,7 +5,9 @@ package middleware
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
|
||||
@@ -18,6 +20,8 @@ type RequestInfo struct {
|
||||
Method string // Method is the HTTP method (e.g., GET, POST).
|
||||
Headers map[string][]string // Headers contains the request headers.
|
||||
Body []byte // Body is the raw request body.
|
||||
RequestID string // RequestID is the unique identifier for the request.
|
||||
Timestamp time.Time // Timestamp is when the request was received.
|
||||
}
|
||||
|
||||
// ResponseWriterWrapper wraps the standard gin.ResponseWriter to intercept and log response data.
|
||||
@@ -33,6 +37,8 @@ type ResponseWriterWrapper struct {
|
||||
requestInfo *RequestInfo // requestInfo holds the details of the original request.
|
||||
statusCode int // statusCode stores the HTTP status code of the response.
|
||||
headers map[string][]string // headers stores the response headers.
|
||||
logOnErrorOnly bool // logOnErrorOnly enables logging only when an error response is detected.
|
||||
firstChunkTimestamp time.Time // firstChunkTimestamp captures TTFB for streaming responses.
|
||||
}
|
||||
|
||||
// NewResponseWriterWrapper creates and initializes a new ResponseWriterWrapper.
|
||||
@@ -69,22 +75,72 @@ func (w *ResponseWriterWrapper) Write(data []byte) (int, error) {
|
||||
n, err := w.ResponseWriter.Write(data)
|
||||
|
||||
// THEN: Handle logging based on response type
|
||||
if w.isStreaming {
|
||||
if w.isStreaming && w.chunkChannel != nil {
|
||||
// Capture TTFB on first chunk (synchronous, before async channel send)
|
||||
if w.firstChunkTimestamp.IsZero() {
|
||||
w.firstChunkTimestamp = time.Now()
|
||||
}
|
||||
// For streaming responses: Send to async logging channel (non-blocking)
|
||||
if w.chunkChannel != nil {
|
||||
select {
|
||||
case w.chunkChannel <- append([]byte(nil), data...): // Non-blocking send with copy
|
||||
default: // Channel full, skip logging to avoid blocking
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
} else {
|
||||
// For non-streaming responses: Buffer complete response
|
||||
|
||||
if w.shouldBufferResponseBody() {
|
||||
w.body.Write(data)
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (w *ResponseWriterWrapper) shouldBufferResponseBody() bool {
|
||||
if w.logger != nil && w.logger.IsEnabled() {
|
||||
return true
|
||||
}
|
||||
if !w.logOnErrorOnly {
|
||||
return false
|
||||
}
|
||||
status := w.statusCode
|
||||
if status == 0 {
|
||||
if statusWriter, ok := w.ResponseWriter.(interface{ Status() int }); ok && statusWriter != nil {
|
||||
status = statusWriter.Status()
|
||||
} else {
|
||||
status = http.StatusOK
|
||||
}
|
||||
}
|
||||
return status >= http.StatusBadRequest
|
||||
}
|
||||
|
||||
// WriteString wraps the underlying ResponseWriter's WriteString method to capture response data.
|
||||
// Some handlers (and fmt/io helpers) write via io.StringWriter; without this override, those writes
|
||||
// bypass Write() and would be missing from request logs.
|
||||
func (w *ResponseWriterWrapper) WriteString(data string) (int, error) {
|
||||
w.ensureHeadersCaptured()
|
||||
|
||||
// CRITICAL: Write to client first (zero latency)
|
||||
n, err := w.ResponseWriter.WriteString(data)
|
||||
|
||||
// THEN: Capture for logging
|
||||
if w.isStreaming && w.chunkChannel != nil {
|
||||
// Capture TTFB on first chunk (synchronous, before async channel send)
|
||||
if w.firstChunkTimestamp.IsZero() {
|
||||
w.firstChunkTimestamp = time.Now()
|
||||
}
|
||||
select {
|
||||
case w.chunkChannel <- []byte(data):
|
||||
default:
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
if w.shouldBufferResponseBody() {
|
||||
w.body.WriteString(data)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// WriteHeader wraps the underlying ResponseWriter's WriteHeader method.
|
||||
// It captures the status code, detects if the response is streaming based on the Content-Type header,
|
||||
// and initializes the appropriate logging mechanism (standard or streaming).
|
||||
@@ -105,6 +161,7 @@ func (w *ResponseWriterWrapper) WriteHeader(statusCode int) {
|
||||
w.requestInfo.Method,
|
||||
w.requestInfo.Headers,
|
||||
w.requestInfo.Body,
|
||||
w.requestInfo.RequestID,
|
||||
)
|
||||
if err == nil {
|
||||
w.streamWriter = streamWriter
|
||||
@@ -158,12 +215,16 @@ func (w *ResponseWriterWrapper) detectStreaming(contentType string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check request body for streaming indicators
|
||||
if w.requestInfo.Body != nil {
|
||||
bodyStr := string(w.requestInfo.Body)
|
||||
if strings.Contains(bodyStr, `"stream": true`) || strings.Contains(bodyStr, `"stream":true`) {
|
||||
return true
|
||||
// If a concrete Content-Type is already set (e.g., application/json for error responses),
|
||||
// treat it as non-streaming instead of inferring from the request payload.
|
||||
if strings.TrimSpace(contentType) != "" {
|
||||
return false
|
||||
}
|
||||
|
||||
// Only fall back to request payload hints when Content-Type is not set yet.
|
||||
if w.requestInfo != nil && len(w.requestInfo.Body) > 0 {
|
||||
bodyStr := string(w.requestInfo.Body)
|
||||
return strings.Contains(bodyStr, `"stream": true`) || strings.Contains(bodyStr, `"stream":true`)
|
||||
}
|
||||
|
||||
return false
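The revised detection treats any explicit Content-Type other than an SSE type as non-streaming, and only falls back to scanning the request body for "stream": true when no Content-Type has been set yet. A standalone sketch of that decision order; the text/event-stream prefix check stands in for the earlier, unshown part of detectStreaming and is an assumption:

package main

import (
    "fmt"
    "strings"
)

// detectStreaming sketches the decision order described above.
func detectStreaming(contentType string, requestBody []byte) bool {
    // 1. An explicit SSE content type always means streaming.
    if strings.HasPrefix(strings.TrimSpace(contentType), "text/event-stream") {
        return true
    }
    // 2. Any other concrete Content-Type (e.g. application/json for an error
    //    response) is treated as non-streaming.
    if strings.TrimSpace(contentType) != "" {
        return false
    }
    // 3. Only when the header is still unset do we fall back to request hints.
    body := string(requestBody)
    return strings.Contains(body, `"stream": true`) || strings.Contains(body, `"stream":true`)
}

func main() {
    fmt.Println(detectStreaming("text/event-stream", nil))                      // true
    fmt.Println(detectStreaming("application/json", []byte(`{"stream":true}`))) // false: explicit JSON wins
    fmt.Println(detectStreaming("", []byte(`{"model":"x","stream":true}`)))     // true: body hint used as fallback
}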
|
||||
@@ -192,12 +253,34 @@ func (w *ResponseWriterWrapper) processStreamingChunks(done chan struct{}) {
|
||||
// For non-streaming responses, it logs the complete request and response details,
|
||||
// including any API-specific request/response data stored in the Gin context.
|
||||
func (w *ResponseWriterWrapper) Finalize(c *gin.Context) error {
|
||||
if !w.logger.IsEnabled() {
|
||||
if w.logger == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if w.isStreaming {
|
||||
// Close streaming channel and writer
|
||||
finalStatusCode := w.statusCode
|
||||
if finalStatusCode == 0 {
|
||||
if statusWriter, ok := w.ResponseWriter.(interface{ Status() int }); ok {
|
||||
finalStatusCode = statusWriter.Status()
|
||||
} else {
|
||||
finalStatusCode = 200
|
||||
}
|
||||
}
|
||||
|
||||
var slicesAPIResponseError []*interfaces.ErrorMessage
|
||||
apiResponseError, isExist := c.Get("API_RESPONSE_ERROR")
|
||||
if isExist {
|
||||
if apiErrors, ok := apiResponseError.([]*interfaces.ErrorMessage); ok {
|
||||
slicesAPIResponseError = apiErrors
|
||||
}
|
||||
}
|
||||
|
||||
hasAPIError := len(slicesAPIResponseError) > 0 || finalStatusCode >= http.StatusBadRequest
|
||||
forceLog := w.logOnErrorOnly && hasAPIError && !w.logger.IsEnabled()
|
||||
if !w.logger.IsEnabled() && !forceLog {
|
||||
return nil
|
||||
}
|
||||
|
||||
if w.isStreaming && w.streamWriter != nil {
|
||||
if w.chunkChannel != nil {
|
||||
close(w.chunkChannel)
|
||||
w.chunkChannel = nil
|
||||
@@ -208,102 +291,120 @@ func (w *ResponseWriterWrapper) Finalize(c *gin.Context) error {
|
||||
w.streamDone = nil
|
||||
}
|
||||
|
||||
if w.streamWriter != nil {
|
||||
err := w.streamWriter.Close()
|
||||
w.streamWriter.SetFirstChunkTimestamp(w.firstChunkTimestamp)
|
||||
|
||||
// Write API Request and Response to the streaming log before closing
|
||||
apiRequest := w.extractAPIRequest(c)
|
||||
if len(apiRequest) > 0 {
|
||||
_ = w.streamWriter.WriteAPIRequest(apiRequest)
|
||||
}
|
||||
apiResponse := w.extractAPIResponse(c)
|
||||
if len(apiResponse) > 0 {
|
||||
_ = w.streamWriter.WriteAPIResponse(apiResponse)
|
||||
}
|
||||
if err := w.streamWriter.Close(); err != nil {
|
||||
w.streamWriter = nil
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// Capture final status code and headers if not already captured
|
||||
finalStatusCode := w.statusCode
|
||||
if finalStatusCode == 0 {
|
||||
// Get status from underlying ResponseWriter if available
|
||||
if statusWriter, ok := w.ResponseWriter.(interface{ Status() int }); ok {
|
||||
finalStatusCode = statusWriter.Status()
|
||||
} else {
|
||||
finalStatusCode = 200 // Default
|
||||
}
|
||||
w.streamWriter = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensure we have the latest headers before finalizing
|
||||
return w.logRequest(finalStatusCode, w.cloneHeaders(), w.body.Bytes(), w.extractAPIRequest(c), w.extractAPIResponse(c), w.extractAPIResponseTimestamp(c), slicesAPIResponseError, forceLog)
|
||||
}
|
||||
|
||||
func (w *ResponseWriterWrapper) cloneHeaders() map[string][]string {
|
||||
w.ensureHeadersCaptured()
|
||||
|
||||
// Use the captured headers as the final headers
|
||||
	finalHeaders := make(map[string][]string, len(w.headers))
	for key, values := range w.headers {
		// Make a copy of the values slice to avoid reference issues
		headerValues := make([]string, len(values))
		copy(headerValues, values)
		finalHeaders[key] = headerValues
	}
	return finalHeaders
}

func (w *ResponseWriterWrapper) extractAPIRequest(c *gin.Context) []byte {
	apiRequest, isExist := c.Get("API_REQUEST")
	if !isExist {
		return nil
	}
	data, ok := apiRequest.([]byte)
	if !ok || len(data) == 0 {
		return nil
	}
	return data
}

func (w *ResponseWriterWrapper) extractAPIResponse(c *gin.Context) []byte {
	apiResponse, isExist := c.Get("API_RESPONSE")
	if !isExist {
		return nil
	}
	data, ok := apiResponse.([]byte)
	if !ok || len(data) == 0 {
		return nil
	}
	return data
}

// extractAPIResponseErrors mirrors the other extractors for the API_RESPONSE_ERROR
// context value captured by the handlers.
func (w *ResponseWriterWrapper) extractAPIResponseErrors(c *gin.Context) []*interfaces.ErrorMessage {
	apiResponseError, isExist := c.Get("API_RESPONSE_ERROR")
	if !isExist {
		return nil
	}
	errs, ok := apiResponseError.([]*interfaces.ErrorMessage)
	if !ok {
		return nil
	}
	return errs
}

func (w *ResponseWriterWrapper) extractAPIResponseTimestamp(c *gin.Context) time.Time {
	ts, isExist := c.Get("API_RESPONSE_TIMESTAMP")
	if !isExist {
		return time.Time{}
	}
	if t, ok := ts.(time.Time); ok {
		return t
	}
	return time.Time{}
}

func (w *ResponseWriterWrapper) logRequest(statusCode int, headers map[string][]string, body []byte, apiRequestBody, apiResponseBody []byte, apiResponseTimestamp time.Time, apiResponseErrors []*interfaces.ErrorMessage, forceLog bool) error {
	if w.requestInfo == nil {
		return nil
	}

	var requestBody []byte
	if len(w.requestInfo.Body) > 0 {
		requestBody = w.requestInfo.Body
	}

	if loggerWithOptions, ok := w.logger.(interface {
		LogRequestWithOptions(string, string, map[string][]string, []byte, int, map[string][]string, []byte, []byte, []byte, []*interfaces.ErrorMessage, bool, string, time.Time, time.Time) error
	}); ok {
		return loggerWithOptions.LogRequestWithOptions(
			w.requestInfo.URL,
			w.requestInfo.Method,
			w.requestInfo.Headers,
			requestBody,
			statusCode,
			headers,
			body,
			apiRequestBody,
			apiResponseBody,
			apiResponseErrors,
			forceLog,
			w.requestInfo.RequestID,
			w.requestInfo.Timestamp,
			apiResponseTimestamp,
		)
	}

	// Log complete non-streaming response
	return w.logger.LogRequest(
		w.requestInfo.URL,
		w.requestInfo.Method,
		w.requestInfo.Headers,
		requestBody,
		statusCode,
		headers,
		body,
		apiRequestBody,
		apiResponseBody,
		apiResponseErrors,
		w.requestInfo.RequestID,
		w.requestInfo.Timestamp,
		apiResponseTimestamp,
	)
}
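// exampleOptionsLogger is an illustrative sketch, not part of this change: it shows
// the shape a logger must have for the optional-interface assertion in logRequest
// to select the richer LogRequestWithOptions path instead of plain LogRequest.
// The type name and the no-op body are assumptions for demonstration only.
type exampleOptionsLogger struct{}

func (exampleOptionsLogger) LogRequestWithOptions(url, method string, requestHeaders map[string][]string, requestBody []byte, statusCode int, responseHeaders map[string][]string, responseBody, apiRequestBody, apiResponseBody []byte, apiResponseErrors []*interfaces.ErrorMessage, forceLog bool, requestID string, requestTimestamp, responseTimestamp time.Time) error {
	// A real implementation would persist or emit the record; this sketch drops it.
	return nil
}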
// Status returns the HTTP response status code captured by the wrapper.
// It defaults to 200 if WriteHeader has not been called.
func (w *ResponseWriterWrapper) Status() int {
	if w.statusCode == 0 {
		return 200 // Default status code
	}
	return w.statusCode
}

// Size returns the size of the response body in bytes for non-streaming responses.
// For streaming responses, it returns -1, as the total size is unknown.
func (w *ResponseWriterWrapper) Size() int {
	if w.isStreaming {
		return -1 // Unknown size for streaming responses
	}
	return w.body.Len()
}

// Written returns true if the response header has been written (i.e., a status code has been set).
func (w *ResponseWriterWrapper) Written() bool {
	return w.statusCode != 0
}
435
internal/api/modules/amp/amp.go
Normal file
@@ -0,0 +1,435 @@
|
||||
// Package amp implements the Amp CLI routing module, providing OAuth-based
|
||||
// integration with Amp CLI for ChatGPT and Anthropic subscriptions.
|
||||
package amp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http/httputil"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/modules"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Option configures the AmpModule.
|
||||
type Option func(*AmpModule)
|
||||
|
||||
// AmpModule implements the RouteModuleV2 interface for Amp CLI integration.
|
||||
// It provides:
|
||||
// - Reverse proxy to Amp control plane for OAuth/management
|
||||
// - Provider-specific route aliases (/api/provider/{provider}/...)
|
||||
// - Automatic gzip decompression for misconfigured upstreams
|
||||
// - Model mapping for routing unavailable models to alternatives
|
||||
type AmpModule struct {
|
||||
secretSource SecretSource
|
||||
proxy *httputil.ReverseProxy
|
||||
proxyMu sync.RWMutex // protects proxy for hot-reload
|
||||
accessManager *sdkaccess.Manager
|
||||
authMiddleware_ gin.HandlerFunc
|
||||
modelMapper *DefaultModelMapper
|
||||
enabled bool
|
||||
registerOnce sync.Once
|
||||
|
||||
// restrictToLocalhost controls localhost-only access for management routes (hot-reloadable)
|
||||
restrictToLocalhost bool
|
||||
restrictMu sync.RWMutex
|
||||
|
||||
// configMu protects lastConfig for partial reload comparison
|
||||
configMu sync.RWMutex
|
||||
lastConfig *config.AmpCode
|
||||
}
|
||||
|
||||
// New creates a new Amp routing module with the given options.
|
||||
// This is the preferred constructor using the Option pattern.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// ampModule := amp.New(
|
||||
// amp.WithAccessManager(accessManager),
|
||||
// amp.WithAuthMiddleware(authMiddleware),
|
||||
// amp.WithSecretSource(customSecret),
|
||||
// )
|
||||
func New(opts ...Option) *AmpModule {
|
||||
m := &AmpModule{
|
||||
secretSource: nil, // Will be created on demand if not provided
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(m)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// NewLegacy creates a new Amp routing module using the legacy constructor signature.
|
||||
// This is provided for backwards compatibility.
|
||||
//
|
||||
// DEPRECATED: Use New with options instead.
|
||||
func NewLegacy(accessManager *sdkaccess.Manager, authMiddleware gin.HandlerFunc) *AmpModule {
|
||||
return New(
|
||||
WithAccessManager(accessManager),
|
||||
WithAuthMiddleware(authMiddleware),
|
||||
)
|
||||
}
|
||||
|
||||
// WithSecretSource sets a custom secret source for the module.
|
||||
func WithSecretSource(source SecretSource) Option {
|
||||
return func(m *AmpModule) {
|
||||
m.secretSource = source
|
||||
}
|
||||
}
|
||||
|
||||
// WithAccessManager sets the access manager for the module.
|
||||
func WithAccessManager(am *sdkaccess.Manager) Option {
|
||||
return func(m *AmpModule) {
|
||||
m.accessManager = am
|
||||
}
|
||||
}
|
||||
|
||||
// WithAuthMiddleware sets the authentication middleware for provider routes.
|
||||
func WithAuthMiddleware(middleware gin.HandlerFunc) Option {
|
||||
return func(m *AmpModule) {
|
||||
m.authMiddleware_ = middleware
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the module identifier
|
||||
func (m *AmpModule) Name() string {
|
||||
return "amp-routing"
|
||||
}
|
||||
|
||||
// forceModelMappings returns whether model mappings should take precedence over local API keys
|
||||
func (m *AmpModule) forceModelMappings() bool {
|
||||
m.configMu.RLock()
|
||||
defer m.configMu.RUnlock()
|
||||
if m.lastConfig == nil {
|
||||
return false
|
||||
}
|
||||
return m.lastConfig.ForceModelMappings
|
||||
}
|
||||
|
||||
// Register sets up Amp routes if configured.
|
||||
// This implements the RouteModuleV2 interface with Context.
|
||||
// Routes are registered only once via sync.Once for idempotent behavior.
|
||||
func (m *AmpModule) Register(ctx modules.Context) error {
|
||||
settings := ctx.Config.AmpCode
|
||||
upstreamURL := strings.TrimSpace(settings.UpstreamURL)
|
||||
|
||||
// Determine auth middleware (from module or context)
|
||||
auth := m.getAuthMiddleware(ctx)
|
||||
|
||||
// Use registerOnce to ensure routes are only registered once
|
||||
var regErr error
|
||||
m.registerOnce.Do(func() {
|
||||
// Initialize model mapper from config (for routing unavailable models to alternatives)
|
||||
m.modelMapper = NewModelMapper(settings.ModelMappings)
|
||||
// Load oauth-model-alias for provider lookup via aliases
|
||||
m.modelMapper.UpdateOAuthModelAlias(ctx.Config.OAuthModelAlias)
|
||||
|
||||
// Store initial config for partial reload comparison
|
||||
settingsCopy := settings
|
||||
m.lastConfig = &settingsCopy
|
||||
|
||||
// Initialize localhost restriction setting (hot-reloadable)
|
||||
m.setRestrictToLocalhost(settings.RestrictManagementToLocalhost)
|
||||
|
||||
// Always register provider aliases - these work without an upstream
|
||||
m.registerProviderAliases(ctx.Engine, ctx.BaseHandler, auth)
|
||||
|
||||
// Register management proxy routes once; middleware will gate access when upstream is unavailable.
|
||||
// Pass auth middleware to require valid API key for all management routes.
|
||||
m.registerManagementRoutes(ctx.Engine, ctx.BaseHandler, auth)
|
||||
|
||||
// If no upstream URL, skip proxy routes but provider aliases are still available
|
||||
if upstreamURL == "" {
|
||||
log.Debug("amp upstream proxy disabled (no upstream URL configured)")
|
||||
log.Debug("amp provider alias routes registered")
|
||||
m.enabled = false
|
||||
return
|
||||
}
|
||||
|
||||
if err := m.enableUpstreamProxy(upstreamURL, &settings); err != nil {
|
||||
regErr = fmt.Errorf("failed to create amp proxy: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
log.Debug("amp provider alias routes registered")
|
||||
})
|
||||
|
||||
return regErr
|
||||
}
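// exampleAmpConfig is an illustrative sketch (not used by the module) of the
// config.AmpCode block that Register consumes. Field values are placeholders;
// only the field names come from this file and its tests.
func exampleAmpConfig() config.AmpCode {
	return config.AmpCode{
		// Leave UpstreamURL empty to disable the management proxy while keeping
		// the /api/provider/... aliases registered.
		UpstreamURL:    "https://ampcode.com/",
		UpstreamAPIKey: "sk-example",
		ModelMappings: []config.AmpModelMapping{
			{From: "gpt-4-turbo", To: "gpt-4-local"},
		},
	}
}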
|
||||
|
||||
// getAuthMiddleware returns the authentication middleware, preferring the
|
||||
// module's configured middleware, then the context middleware, then a fallback.
|
||||
func (m *AmpModule) getAuthMiddleware(ctx modules.Context) gin.HandlerFunc {
|
||||
if m.authMiddleware_ != nil {
|
||||
return m.authMiddleware_
|
||||
}
|
||||
if ctx.AuthMiddleware != nil {
|
||||
return ctx.AuthMiddleware
|
||||
}
|
||||
// Fallback: no authentication (should not happen in production)
|
||||
log.Warn("amp module: no auth middleware provided, allowing all requests")
|
||||
return func(c *gin.Context) {
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// OnConfigUpdated handles configuration updates with partial reload support.
|
||||
// Only updates components that have actually changed to avoid unnecessary work.
|
||||
// Supports hot-reload for: model-mappings, upstream-api-key, upstream-url, restrict-management-to-localhost.
|
||||
func (m *AmpModule) OnConfigUpdated(cfg *config.Config) error {
|
||||
newSettings := cfg.AmpCode
|
||||
|
||||
// Get previous config for comparison
|
||||
m.configMu.RLock()
|
||||
oldSettings := m.lastConfig
|
||||
m.configMu.RUnlock()
|
||||
|
||||
if oldSettings != nil && oldSettings.RestrictManagementToLocalhost != newSettings.RestrictManagementToLocalhost {
|
||||
m.setRestrictToLocalhost(newSettings.RestrictManagementToLocalhost)
|
||||
}
|
||||
|
||||
newUpstreamURL := strings.TrimSpace(newSettings.UpstreamURL)
|
||||
oldUpstreamURL := ""
|
||||
if oldSettings != nil {
|
||||
oldUpstreamURL = strings.TrimSpace(oldSettings.UpstreamURL)
|
||||
}
|
||||
|
||||
if !m.enabled && newUpstreamURL != "" {
|
||||
if err := m.enableUpstreamProxy(newUpstreamURL, &newSettings); err != nil {
|
||||
log.Errorf("amp config: failed to enable upstream proxy for %s: %v", newUpstreamURL, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Check model mappings change
|
||||
modelMappingsChanged := m.hasModelMappingsChanged(oldSettings, &newSettings)
|
||||
if modelMappingsChanged {
|
||||
if m.modelMapper != nil {
|
||||
m.modelMapper.UpdateMappings(newSettings.ModelMappings)
|
||||
} else if m.enabled {
|
||||
log.Warnf("amp model mapper not initialized, skipping model mapping update")
|
||||
}
|
||||
}
|
||||
|
||||
// Always update oauth-model-alias for model mapper (used for provider lookup)
|
||||
if m.modelMapper != nil {
|
||||
m.modelMapper.UpdateOAuthModelAlias(cfg.OAuthModelAlias)
|
||||
}
|
||||
|
||||
if m.enabled {
|
||||
// Check upstream URL change - now supports hot-reload
|
||||
if newUpstreamURL == "" && oldUpstreamURL != "" {
|
||||
m.setProxy(nil)
|
||||
m.enabled = false
|
||||
} else if oldUpstreamURL != "" && newUpstreamURL != oldUpstreamURL && newUpstreamURL != "" {
|
||||
// Recreate proxy with new URL
|
||||
proxy, err := createReverseProxy(newUpstreamURL, m.secretSource)
|
||||
if err != nil {
|
||||
log.Errorf("amp config: failed to create proxy for new upstream URL %s: %v", newUpstreamURL, err)
|
||||
} else {
|
||||
m.setProxy(proxy)
|
||||
}
|
||||
}
|
||||
|
||||
// Check API key change (both default and per-client mappings)
|
||||
apiKeyChanged := m.hasAPIKeyChanged(oldSettings, &newSettings)
|
||||
upstreamAPIKeysChanged := m.hasUpstreamAPIKeysChanged(oldSettings, &newSettings)
|
||||
if apiKeyChanged || upstreamAPIKeysChanged {
|
||||
if m.secretSource != nil {
|
||||
if ms, ok := m.secretSource.(*MappedSecretSource); ok {
|
||||
if apiKeyChanged {
|
||||
ms.UpdateDefaultExplicitKey(newSettings.UpstreamAPIKey)
|
||||
ms.InvalidateCache()
|
||||
}
|
||||
if upstreamAPIKeysChanged {
|
||||
ms.UpdateMappings(newSettings.UpstreamAPIKeys)
|
||||
}
|
||||
} else if ms, ok := m.secretSource.(*MultiSourceSecret); ok {
|
||||
ms.UpdateExplicitKey(newSettings.UpstreamAPIKey)
|
||||
ms.InvalidateCache()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Store current config for next comparison
|
||||
m.configMu.Lock()
|
||||
settingsCopy := newSettings // copy struct
|
||||
m.lastConfig = &settingsCopy
|
||||
m.configMu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *AmpModule) enableUpstreamProxy(upstreamURL string, settings *config.AmpCode) error {
|
||||
if m.secretSource == nil {
|
||||
// Create MultiSourceSecret as the default source, then wrap with MappedSecretSource
|
||||
defaultSource := NewMultiSourceSecret(settings.UpstreamAPIKey, 0 /* default 5min */)
|
||||
mappedSource := NewMappedSecretSource(defaultSource)
|
||||
mappedSource.UpdateMappings(settings.UpstreamAPIKeys)
|
||||
m.secretSource = mappedSource
|
||||
} else if ms, ok := m.secretSource.(*MappedSecretSource); ok {
|
||||
ms.UpdateDefaultExplicitKey(settings.UpstreamAPIKey)
|
||||
ms.InvalidateCache()
|
||||
ms.UpdateMappings(settings.UpstreamAPIKeys)
|
||||
} else if ms, ok := m.secretSource.(*MultiSourceSecret); ok {
|
||||
// Legacy path: wrap existing MultiSourceSecret with MappedSecretSource
|
||||
ms.UpdateExplicitKey(settings.UpstreamAPIKey)
|
||||
ms.InvalidateCache()
|
||||
mappedSource := NewMappedSecretSource(ms)
|
||||
mappedSource.UpdateMappings(settings.UpstreamAPIKeys)
|
||||
m.secretSource = mappedSource
|
||||
}
|
||||
|
||||
proxy, err := createReverseProxy(upstreamURL, m.secretSource)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.setProxy(proxy)
|
||||
m.enabled = true
|
||||
|
||||
log.Infof("amp upstream proxy enabled for: %s", upstreamURL)
|
||||
return nil
|
||||
}
|
||||
|
||||
// hasModelMappingsChanged compares old and new model mappings.
|
||||
func (m *AmpModule) hasModelMappingsChanged(old *config.AmpCode, new *config.AmpCode) bool {
|
||||
if old == nil {
|
||||
return len(new.ModelMappings) > 0
|
||||
}
|
||||
|
||||
if len(old.ModelMappings) != len(new.ModelMappings) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Build map for efficient and robust comparison
|
||||
type mappingInfo struct {
|
||||
to string
|
||||
regex bool
|
||||
}
|
||||
oldMap := make(map[string]mappingInfo, len(old.ModelMappings))
|
||||
for _, mapping := range old.ModelMappings {
|
||||
oldMap[strings.TrimSpace(mapping.From)] = mappingInfo{
|
||||
to: strings.TrimSpace(mapping.To),
|
||||
regex: mapping.Regex,
|
||||
}
|
||||
}
|
||||
|
||||
for _, mapping := range new.ModelMappings {
|
||||
from := strings.TrimSpace(mapping.From)
|
||||
to := strings.TrimSpace(mapping.To)
|
||||
if oldVal, exists := oldMap[from]; !exists || oldVal.to != to || oldVal.regex != mapping.Regex {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// hasAPIKeyChanged compares old and new API keys.
|
||||
func (m *AmpModule) hasAPIKeyChanged(old *config.AmpCode, new *config.AmpCode) bool {
|
||||
oldKey := ""
|
||||
if old != nil {
|
||||
oldKey = strings.TrimSpace(old.UpstreamAPIKey)
|
||||
}
|
||||
newKey := strings.TrimSpace(new.UpstreamAPIKey)
|
||||
return oldKey != newKey
|
||||
}
|
||||
|
||||
// hasUpstreamAPIKeysChanged compares old and new per-client upstream API key mappings.
|
||||
func (m *AmpModule) hasUpstreamAPIKeysChanged(old *config.AmpCode, new *config.AmpCode) bool {
|
||||
if old == nil {
|
||||
return len(new.UpstreamAPIKeys) > 0
|
||||
}
|
||||
|
||||
if len(old.UpstreamAPIKeys) != len(new.UpstreamAPIKeys) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Build map for comparison: upstreamKey -> set of clientKeys
|
||||
type entryInfo struct {
|
||||
upstreamKey string
|
||||
clientKeys map[string]struct{}
|
||||
}
|
||||
oldEntries := make([]entryInfo, len(old.UpstreamAPIKeys))
|
||||
for i, entry := range old.UpstreamAPIKeys {
|
||||
clientKeys := make(map[string]struct{}, len(entry.APIKeys))
|
||||
for _, k := range entry.APIKeys {
|
||||
trimmed := strings.TrimSpace(k)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
clientKeys[trimmed] = struct{}{}
|
||||
}
|
||||
oldEntries[i] = entryInfo{
|
||||
upstreamKey: strings.TrimSpace(entry.UpstreamAPIKey),
|
||||
clientKeys: clientKeys,
|
||||
}
|
||||
}
|
||||
|
||||
for i, newEntry := range new.UpstreamAPIKeys {
|
||||
if i >= len(oldEntries) {
|
||||
return true
|
||||
}
|
||||
oldE := oldEntries[i]
|
||||
if strings.TrimSpace(newEntry.UpstreamAPIKey) != oldE.upstreamKey {
|
||||
return true
|
||||
}
|
||||
newKeys := make(map[string]struct{}, len(newEntry.APIKeys))
|
||||
for _, k := range newEntry.APIKeys {
|
||||
trimmed := strings.TrimSpace(k)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
newKeys[trimmed] = struct{}{}
|
||||
}
|
||||
if len(newKeys) != len(oldE.clientKeys) {
|
||||
return true
|
||||
}
|
||||
for k := range newKeys {
|
||||
if _, ok := oldE.clientKeys[k]; !ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
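// exampleUpstreamAPIKeys is an illustrative sketch of the per-client mapping shape
// that hasUpstreamAPIKeysChanged compares. Key values are placeholders.
func exampleUpstreamAPIKeys() []config.AmpUpstreamAPIKeyEntry {
	return []config.AmpUpstreamAPIKeyEntry{
		// Clients authenticating with k1 or k2 are forwarded upstream with u1.
		{UpstreamAPIKey: "u1", APIKeys: []string{"k1", "k2"}},
	}
}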
|
||||
|
||||
// GetModelMapper returns the model mapper instance (for testing/debugging).
|
||||
func (m *AmpModule) GetModelMapper() *DefaultModelMapper {
|
||||
return m.modelMapper
|
||||
}
|
||||
|
||||
// getProxy returns the current proxy instance (thread-safe for hot-reload).
|
||||
func (m *AmpModule) getProxy() *httputil.ReverseProxy {
|
||||
m.proxyMu.RLock()
|
||||
defer m.proxyMu.RUnlock()
|
||||
return m.proxy
|
||||
}
|
||||
|
||||
// setProxy updates the proxy instance (thread-safe for hot-reload).
|
||||
func (m *AmpModule) setProxy(proxy *httputil.ReverseProxy) {
|
||||
m.proxyMu.Lock()
|
||||
defer m.proxyMu.Unlock()
|
||||
m.proxy = proxy
|
||||
}
|
||||
|
||||
// IsRestrictedToLocalhost returns whether management routes are restricted to localhost.
|
||||
func (m *AmpModule) IsRestrictedToLocalhost() bool {
|
||||
m.restrictMu.RLock()
|
||||
defer m.restrictMu.RUnlock()
|
||||
return m.restrictToLocalhost
|
||||
}
|
||||
|
||||
// setRestrictToLocalhost updates the localhost restriction setting.
|
||||
func (m *AmpModule) setRestrictToLocalhost(restrict bool) {
|
||||
m.restrictMu.Lock()
|
||||
defer m.restrictMu.Unlock()
|
||||
m.restrictToLocalhost = restrict
|
||||
}
|
||||
352
internal/api/modules/amp/amp_test.go
Normal file
@@ -0,0 +1,352 @@
|
||||
package amp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/modules"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
|
||||
)
|
||||
|
||||
func TestAmpModule_Name(t *testing.T) {
|
||||
m := New()
|
||||
if m.Name() != "amp-routing" {
|
||||
t.Fatalf("want amp-routing, got %s", m.Name())
|
||||
}
|
||||
}
|
||||
|
||||
func TestAmpModule_New(t *testing.T) {
|
||||
accessManager := sdkaccess.NewManager()
|
||||
authMiddleware := func(c *gin.Context) { c.Next() }
|
||||
|
||||
m := NewLegacy(accessManager, authMiddleware)
|
||||
|
||||
if m.accessManager != accessManager {
|
||||
t.Fatal("accessManager not set")
|
||||
}
|
||||
if m.authMiddleware_ == nil {
|
||||
t.Fatal("authMiddleware not set")
|
||||
}
|
||||
if m.enabled {
|
||||
t.Fatal("enabled should be false initially")
|
||||
}
|
||||
if m.proxy != nil {
|
||||
t.Fatal("proxy should be nil initially")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAmpModule_Register_WithUpstream(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
r := gin.New()
|
||||
|
||||
// Fake upstream to ensure URL is valid
|
||||
upstream := httptest.NewServer(nil)
|
||||
defer upstream.Close()
|
||||
|
||||
accessManager := sdkaccess.NewManager()
|
||||
base := &handlers.BaseAPIHandler{}
|
||||
|
||||
m := NewLegacy(accessManager, func(c *gin.Context) { c.Next() })
|
||||
|
||||
cfg := &config.Config{
|
||||
AmpCode: config.AmpCode{
|
||||
UpstreamURL: upstream.URL,
|
||||
UpstreamAPIKey: "test-key",
|
||||
},
|
||||
}
|
||||
|
||||
ctx := modules.Context{Engine: r, BaseHandler: base, Config: cfg, AuthMiddleware: func(c *gin.Context) { c.Next() }}
|
||||
if err := m.Register(ctx); err != nil {
|
||||
t.Fatalf("register error: %v", err)
|
||||
}
|
||||
|
||||
if !m.enabled {
|
||||
t.Fatal("module should be enabled with upstream URL")
|
||||
}
|
||||
if m.proxy == nil {
|
||||
t.Fatal("proxy should be initialized")
|
||||
}
|
||||
if m.secretSource == nil {
|
||||
t.Fatal("secretSource should be initialized")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAmpModule_Register_WithoutUpstream(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
r := gin.New()
|
||||
|
||||
accessManager := sdkaccess.NewManager()
|
||||
base := &handlers.BaseAPIHandler{}
|
||||
|
||||
m := NewLegacy(accessManager, func(c *gin.Context) { c.Next() })
|
||||
|
||||
cfg := &config.Config{
|
||||
AmpCode: config.AmpCode{
|
||||
UpstreamURL: "", // No upstream
|
||||
},
|
||||
}
|
||||
|
||||
ctx := modules.Context{Engine: r, BaseHandler: base, Config: cfg, AuthMiddleware: func(c *gin.Context) { c.Next() }}
|
||||
if err := m.Register(ctx); err != nil {
|
||||
t.Fatalf("register should not error without upstream: %v", err)
|
||||
}
|
||||
|
||||
if m.enabled {
|
||||
t.Fatal("module should be disabled without upstream URL")
|
||||
}
|
||||
if m.proxy != nil {
|
||||
t.Fatal("proxy should not be initialized without upstream")
|
||||
}
|
||||
|
||||
// But provider aliases should still be registered
|
||||
req := httptest.NewRequest("GET", "/api/provider/openai/models", nil)
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code == 404 {
|
||||
t.Fatal("provider aliases should be registered even without upstream")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAmpModule_Register_InvalidUpstream(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
r := gin.New()
|
||||
|
||||
accessManager := sdkaccess.NewManager()
|
||||
base := &handlers.BaseAPIHandler{}
|
||||
|
||||
m := NewLegacy(accessManager, func(c *gin.Context) { c.Next() })
|
||||
|
||||
cfg := &config.Config{
|
||||
AmpCode: config.AmpCode{
|
||||
UpstreamURL: "://invalid-url",
|
||||
},
|
||||
}
|
||||
|
||||
ctx := modules.Context{Engine: r, BaseHandler: base, Config: cfg, AuthMiddleware: func(c *gin.Context) { c.Next() }}
|
||||
if err := m.Register(ctx); err == nil {
|
||||
t.Fatal("expected error for invalid upstream URL")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAmpModule_OnConfigUpdated_CacheInvalidation(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
p := filepath.Join(tmpDir, "secrets.json")
|
||||
if err := os.WriteFile(p, []byte(`{"apiKey@https://ampcode.com/":"v1"}`), 0600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
m := &AmpModule{enabled: true}
|
||||
ms := NewMultiSourceSecretWithPath("", p, time.Minute)
|
||||
m.secretSource = ms
|
||||
m.lastConfig = &config.AmpCode{
|
||||
UpstreamAPIKey: "old-key",
|
||||
}
|
||||
|
||||
// Warm the cache
|
||||
if _, err := ms.Get(context.Background()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if ms.cache == nil {
|
||||
t.Fatal("expected cache to be set")
|
||||
}
|
||||
|
||||
// Update config - should invalidate cache
|
||||
if err := m.OnConfigUpdated(&config.Config{AmpCode: config.AmpCode{UpstreamURL: "http://x", UpstreamAPIKey: "new-key"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if ms.cache != nil {
|
||||
t.Fatal("expected cache to be invalidated")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAmpModule_OnConfigUpdated_NotEnabled(t *testing.T) {
|
||||
m := &AmpModule{enabled: false}
|
||||
|
||||
// Should not error or panic when disabled
|
||||
if err := m.OnConfigUpdated(&config.Config{}); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAmpModule_OnConfigUpdated_URLRemoved(t *testing.T) {
|
||||
m := &AmpModule{enabled: true}
|
||||
ms := NewMultiSourceSecret("", 0)
|
||||
m.secretSource = ms
|
||||
|
||||
// Config update with empty URL - should log warning but not error
|
||||
cfg := &config.Config{AmpCode: config.AmpCode{UpstreamURL: ""}}
|
||||
|
||||
if err := m.OnConfigUpdated(cfg); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAmpModule_OnConfigUpdated_NonMultiSourceSecret(t *testing.T) {
|
||||
// Test that OnConfigUpdated doesn't panic with StaticSecretSource
|
||||
m := &AmpModule{enabled: true}
|
||||
m.secretSource = NewStaticSecretSource("static-key")
|
||||
|
||||
cfg := &config.Config{AmpCode: config.AmpCode{UpstreamURL: "http://example.com"}}
|
||||
|
||||
// Should not error or panic
|
||||
if err := m.OnConfigUpdated(cfg); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAmpModule_AuthMiddleware_Fallback(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
r := gin.New()
|
||||
|
||||
// Create module with no auth middleware
|
||||
m := &AmpModule{authMiddleware_: nil}
|
||||
|
||||
// Get the fallback middleware via getAuthMiddleware
|
||||
ctx := modules.Context{Engine: r, AuthMiddleware: nil}
|
||||
middleware := m.getAuthMiddleware(ctx)
|
||||
|
||||
if middleware == nil {
|
||||
t.Fatal("getAuthMiddleware should return a fallback, not nil")
|
||||
}
|
||||
|
||||
// Test that it works
|
||||
called := false
|
||||
r.GET("/test", middleware, func(c *gin.Context) {
|
||||
called = true
|
||||
c.String(200, "ok")
|
||||
})
|
||||
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if !called {
|
||||
t.Fatal("fallback middleware should allow requests through")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAmpModule_SecretSource_FromConfig(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
r := gin.New()
|
||||
|
||||
upstream := httptest.NewServer(nil)
|
||||
defer upstream.Close()
|
||||
|
||||
accessManager := sdkaccess.NewManager()
|
||||
base := &handlers.BaseAPIHandler{}
|
||||
|
||||
m := NewLegacy(accessManager, func(c *gin.Context) { c.Next() })
|
||||
|
||||
// Config with explicit API key
|
||||
cfg := &config.Config{
|
||||
AmpCode: config.AmpCode{
|
||||
UpstreamURL: upstream.URL,
|
||||
UpstreamAPIKey: "config-key",
|
||||
},
|
||||
}
|
||||
|
||||
ctx := modules.Context{Engine: r, BaseHandler: base, Config: cfg, AuthMiddleware: func(c *gin.Context) { c.Next() }}
|
||||
if err := m.Register(ctx); err != nil {
|
||||
t.Fatalf("register error: %v", err)
|
||||
}
|
||||
|
||||
// Secret source should be MultiSourceSecret with config key
|
||||
if m.secretSource == nil {
|
||||
t.Fatal("secretSource should be set")
|
||||
}
|
||||
|
||||
// Verify it returns the config key
|
||||
key, err := m.secretSource.Get(context.Background())
|
||||
if err != nil {
|
||||
t.Fatalf("Get error: %v", err)
|
||||
}
|
||||
if key != "config-key" {
|
||||
t.Fatalf("want config-key, got %s", key)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAmpModule_ProviderAliasesAlwaysRegistered(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
scenarios := []struct {
|
||||
name string
|
||||
configURL string
|
||||
}{
|
||||
{"with_upstream", "http://example.com"},
|
||||
{"without_upstream", ""},
|
||||
}
|
||||
|
||||
for _, scenario := range scenarios {
|
||||
t.Run(scenario.name, func(t *testing.T) {
|
||||
r := gin.New()
|
||||
accessManager := sdkaccess.NewManager()
|
||||
base := &handlers.BaseAPIHandler{}
|
||||
|
||||
m := NewLegacy(accessManager, func(c *gin.Context) { c.Next() })
|
||||
|
||||
cfg := &config.Config{AmpCode: config.AmpCode{UpstreamURL: scenario.configURL}}
|
||||
|
||||
ctx := modules.Context{Engine: r, BaseHandler: base, Config: cfg, AuthMiddleware: func(c *gin.Context) { c.Next() }}
|
||||
if err := m.Register(ctx); err != nil && scenario.configURL != "" {
|
||||
t.Fatalf("register error: %v", err)
|
||||
}
|
||||
|
||||
// Provider aliases should always be available
|
||||
req := httptest.NewRequest("GET", "/api/provider/openai/models", nil)
|
||||
w := httptest.NewRecorder()
|
||||
r.ServeHTTP(w, req)
|
||||
|
||||
if w.Code == 404 {
|
||||
t.Fatal("provider aliases should be registered")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAmpModule_hasUpstreamAPIKeysChanged_DetectsRemovedKeyWithDuplicateInput(t *testing.T) {
|
||||
m := &AmpModule{}
|
||||
|
||||
oldCfg := &config.AmpCode{
|
||||
UpstreamAPIKeys: []config.AmpUpstreamAPIKeyEntry{
|
||||
{UpstreamAPIKey: "u1", APIKeys: []string{"k1", "k2"}},
|
||||
},
|
||||
}
|
||||
newCfg := &config.AmpCode{
|
||||
UpstreamAPIKeys: []config.AmpUpstreamAPIKeyEntry{
|
||||
{UpstreamAPIKey: "u1", APIKeys: []string{"k1", "k1"}},
|
||||
},
|
||||
}
|
||||
|
||||
if !m.hasUpstreamAPIKeysChanged(oldCfg, newCfg) {
|
||||
t.Fatal("expected change to be detected when k2 is removed but new list contains duplicates")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAmpModule_hasUpstreamAPIKeysChanged_IgnoresEmptyAndWhitespaceKeys(t *testing.T) {
|
||||
m := &AmpModule{}
|
||||
|
||||
oldCfg := &config.AmpCode{
|
||||
UpstreamAPIKeys: []config.AmpUpstreamAPIKeyEntry{
|
||||
{UpstreamAPIKey: "u1", APIKeys: []string{"k1", "k2"}},
|
||||
},
|
||||
}
|
||||
newCfg := &config.AmpCode{
|
||||
UpstreamAPIKeys: []config.AmpUpstreamAPIKeyEntry{
|
||||
{UpstreamAPIKey: "u1", APIKeys: []string{" k1 ", "", "k2", " "}},
|
||||
},
|
||||
}
|
||||
|
||||
if m.hasUpstreamAPIKeysChanged(oldCfg, newCfg) {
|
||||
t.Fatal("expected no change when only whitespace/empty entries differ")
|
||||
}
|
||||
}
|
||||
382
internal/api/modules/amp/fallback_handlers.go
Normal file
@@ -0,0 +1,382 @@
|
||||
package amp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/routing/ctxkeys"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/thinking"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/tidwall/gjson"
|
||||
"github.com/tidwall/sjson"
|
||||
)
|
||||
|
||||
// AmpRouteType represents the type of routing decision made for an Amp request
|
||||
type AmpRouteType string
|
||||
|
||||
const (
|
||||
// RouteTypeLocalProvider indicates the request is handled by a local OAuth provider (free)
|
||||
RouteTypeLocalProvider AmpRouteType = "LOCAL_PROVIDER"
|
||||
// RouteTypeModelMapping indicates the request was remapped to another available model (free)
|
||||
RouteTypeModelMapping AmpRouteType = "MODEL_MAPPING"
|
||||
// RouteTypeAmpCredits indicates the request is forwarded to ampcode.com (uses Amp credits)
|
||||
RouteTypeAmpCredits AmpRouteType = "AMP_CREDITS"
|
||||
// RouteTypeNoProvider indicates no provider or fallback available
|
||||
RouteTypeNoProvider AmpRouteType = "NO_PROVIDER"
|
||||
)
|
||||
|
||||
// MappedModelContextKey is the Gin context key for passing mapped model names.
|
||||
// Deprecated: Use ctxkeys.MappedModel instead.
|
||||
const MappedModelContextKey = string(ctxkeys.MappedModel)
|
||||
|
||||
// FallbackModelsContextKey is the Gin context key for passing fallback model names.
|
||||
// When the primary mapped model fails (e.g., quota exceeded), these models can be tried.
|
||||
// Deprecated: Use ctxkeys.FallbackModels instead.
|
||||
const FallbackModelsContextKey = string(ctxkeys.FallbackModels)
|
||||
|
||||
// logAmpRouting logs the routing decision for an Amp request with structured fields
|
||||
func logAmpRouting(routeType AmpRouteType, requestedModel, resolvedModel, provider, path string) {
|
||||
fields := log.Fields{
|
||||
"component": "amp-routing",
|
||||
"route_type": string(routeType),
|
||||
"requested_model": requestedModel,
|
||||
"path": path,
|
||||
"timestamp": time.Now().Format(time.RFC3339),
|
||||
}
|
||||
|
||||
if resolvedModel != "" && resolvedModel != requestedModel {
|
||||
fields["resolved_model"] = resolvedModel
|
||||
}
|
||||
if provider != "" {
|
||||
fields["provider"] = provider
|
||||
}
|
||||
|
||||
switch routeType {
|
||||
case RouteTypeLocalProvider:
|
||||
fields["cost"] = "free"
|
||||
fields["source"] = "local_oauth"
|
||||
log.WithFields(fields).Debugf("amp using local provider for model: %s", requestedModel)
|
||||
|
||||
case RouteTypeModelMapping:
|
||||
fields["cost"] = "free"
|
||||
fields["source"] = "local_oauth"
|
||||
fields["mapping"] = requestedModel + " -> " + resolvedModel
|
||||
// model mapping already logged in mapper; avoid duplicate here
|
||||
|
||||
case RouteTypeAmpCredits:
|
||||
fields["cost"] = "amp_credits"
|
||||
fields["source"] = "ampcode.com"
|
||||
fields["model_id"] = requestedModel // Explicit model_id for easy config reference
|
||||
log.WithFields(fields).Warnf("forwarding to ampcode.com (uses amp credits) - model_id: %s | To use local provider, add to config: ampcode.model-mappings: [{from: \"%s\", to: \"<your-local-model>\"}]", requestedModel, requestedModel)
|
||||
|
||||
case RouteTypeNoProvider:
|
||||
fields["cost"] = "none"
|
||||
fields["source"] = "error"
|
||||
fields["model_id"] = requestedModel // Explicit model_id for easy config reference
|
||||
log.WithFields(fields).Warnf("no provider available for model_id: %s", requestedModel)
|
||||
}
|
||||
}
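// Example call (illustrative; the model, provider, and path values are placeholders):
//
//	logAmpRouting(RouteTypeLocalProvider, "gpt-4-local", "gpt-4-local", "openai", "/v1/chat/completions")
//
// emits a debug-level record with cost=free and source=local_oauth.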
|
||||
|
||||
// FallbackHandler wraps a standard handler with fallback logic to ampcode.com
|
||||
// when the model's provider is not available in CLIProxyAPI
|
||||
//
|
||||
// Deprecated: FallbackHandler is deprecated in favor of routing.ModelRoutingWrapper.
|
||||
// Use routing.NewModelRoutingWrapper() instead for unified routing logic.
|
||||
// This type is kept for backward compatibility and test purposes.
|
||||
type FallbackHandler struct {
|
||||
getProxy func() *httputil.ReverseProxy
|
||||
modelMapper ModelMapper
|
||||
forceModelMappings func() bool
|
||||
}
|
||||
|
||||
// NewFallbackHandler creates a new fallback handler wrapper
|
||||
// The getProxy function allows lazy evaluation of the proxy (useful when proxy is created after routes)
|
||||
//
|
||||
// Deprecated: Use routing.NewModelRoutingWrapper() instead.
|
||||
func NewFallbackHandler(getProxy func() *httputil.ReverseProxy) *FallbackHandler {
|
||||
return &FallbackHandler{
|
||||
getProxy: getProxy,
|
||||
forceModelMappings: func() bool { return false },
|
||||
}
|
||||
}
|
||||
|
||||
// NewFallbackHandlerWithMapper creates a new fallback handler with model mapping support
|
||||
//
|
||||
// Deprecated: Use routing.NewModelRoutingWrapper() instead.
|
||||
func NewFallbackHandlerWithMapper(getProxy func() *httputil.ReverseProxy, mapper ModelMapper, forceModelMappings func() bool) *FallbackHandler {
|
||||
if forceModelMappings == nil {
|
||||
forceModelMappings = func() bool { return false }
|
||||
}
|
||||
return &FallbackHandler{
|
||||
getProxy: getProxy,
|
||||
modelMapper: mapper,
|
||||
forceModelMappings: forceModelMappings,
|
||||
}
|
||||
}
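// Typical wiring (illustrative sketch; the route path and localHandler below are
// assumptions, the actual registration lives in routes.go):
//
//	fh := NewFallbackHandlerWithMapper(m.getProxy, m.modelMapper, m.forceModelMappings)
//	engine.POST("/api/provider/openai/v1/chat/completions", fh.WrapHandler(localHandler))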
|
||||
|
||||
// SetModelMapper sets the model mapper for this handler (allows late binding)
|
||||
func (fh *FallbackHandler) SetModelMapper(mapper ModelMapper) {
|
||||
fh.modelMapper = mapper
|
||||
}
|
||||
|
||||
// WrapHandler wraps a gin.HandlerFunc with fallback logic
|
||||
// If the model's provider is not configured in CLIProxyAPI, it forwards to ampcode.com
|
||||
func (fh *FallbackHandler) WrapHandler(handler gin.HandlerFunc) gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
// Swallow ErrAbortHandler panics from ReverseProxy to avoid noisy stack traces.
|
||||
// ReverseProxy raises this panic when the client connection is closed prematurely
|
||||
// (e.g., user cancels request, network disconnect) or when ServeHTTP is called
|
||||
// with a ResponseWriter that doesn't implement http.CloseNotifier.
|
||||
// This is an expected error condition, not a bug, so we handle it gracefully.
|
||||
defer func() {
|
||||
if rec := recover(); rec != nil {
|
||||
if err, ok := rec.(error); ok && errors.Is(err, http.ErrAbortHandler) {
|
||||
return
|
||||
}
|
||||
panic(rec)
|
||||
}
|
||||
}()
|
||||
|
||||
requestPath := c.Request.URL.Path
|
||||
|
||||
// Read the request body to extract the model name
|
||||
bodyBytes, err := io.ReadAll(c.Request.Body)
|
||||
if err != nil {
|
||||
log.Errorf("amp fallback: failed to read request body: %v", err)
|
||||
handler(c)
|
||||
return
|
||||
}
|
||||
|
||||
// Restore the body for the handler to read
|
||||
c.Request.Body = io.NopCloser(bytes.NewReader(bodyBytes))
|
||||
|
||||
// Try to extract model from request body or URL path (for Gemini)
|
||||
modelName := extractModelFromRequest(bodyBytes, c)
|
||||
if modelName == "" {
|
||||
// Can't determine model, proceed with normal handler
|
||||
handler(c)
|
||||
return
|
||||
}
|
||||
|
||||
// Normalize model (handles dynamic thinking suffixes)
|
||||
suffixResult := thinking.ParseSuffix(modelName)
|
||||
normalizedModel := suffixResult.ModelName
|
||||
thinkingSuffix := ""
|
||||
if suffixResult.HasSuffix {
|
||||
thinkingSuffix = "(" + suffixResult.RawSuffix + ")"
|
||||
}
|
||||
|
||||
// resolveMappedModels returns all mapped models (primary + fallbacks) and providers for the first one.
|
||||
resolveMappedModels := func() ([]string, []string) {
|
||||
if fh.modelMapper == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
mapper, ok := fh.modelMapper.(*DefaultModelMapper)
|
||||
if !ok {
|
||||
// Fallback to single model for non-DefaultModelMapper
|
||||
mappedModel := fh.modelMapper.MapModel(modelName)
|
||||
if mappedModel == "" {
|
||||
mappedModel = fh.modelMapper.MapModel(normalizedModel)
|
||||
}
|
||||
if mappedModel == "" {
|
||||
return nil, nil
|
||||
}
|
||||
mappedBaseModel := thinking.ParseSuffix(mappedModel).ModelName
|
||||
mappedProviders := util.GetProviderName(mappedBaseModel)
|
||||
if len(mappedProviders) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
return []string{mappedModel}, mappedProviders
|
||||
}
|
||||
|
||||
// Use MapModelWithFallbacks for DefaultModelMapper
|
||||
mappedModels := mapper.MapModelWithFallbacks(modelName)
|
||||
if len(mappedModels) == 0 {
|
||||
mappedModels = mapper.MapModelWithFallbacks(normalizedModel)
|
||||
}
|
||||
if len(mappedModels) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Apply thinking suffix if needed
|
||||
for i, model := range mappedModels {
|
||||
if thinkingSuffix != "" {
|
||||
suffixResult := thinking.ParseSuffix(model)
|
||||
if !suffixResult.HasSuffix {
|
||||
mappedModels[i] = model + thinkingSuffix
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get providers for the first model
|
||||
firstBaseModel := thinking.ParseSuffix(mappedModels[0]).ModelName
|
||||
providers := util.GetProviderName(firstBaseModel)
|
||||
if len(providers) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return mappedModels, providers
|
||||
}
|
||||
|
||||
// Track resolved model for logging (may change if mapping is applied)
|
||||
resolvedModel := normalizedModel
|
||||
usedMapping := false
|
||||
var providers []string
|
||||
|
||||
// Helper to apply model mapping and update state
|
||||
applyMapping := func(mappedModels []string, mappedProviders []string) {
|
||||
bodyBytes = rewriteModelInRequest(bodyBytes, mappedModels[0])
|
||||
c.Request.Body = io.NopCloser(bytes.NewReader(bodyBytes))
|
||||
c.Set(string(ctxkeys.MappedModel), mappedModels[0])
|
||||
if len(mappedModels) > 1 {
|
||||
c.Set(string(ctxkeys.FallbackModels), mappedModels[1:])
|
||||
}
|
||||
resolvedModel = mappedModels[0]
|
||||
usedMapping = true
|
||||
providers = mappedProviders
|
||||
}
|
||||
|
||||
// Check if model mappings should be forced ahead of local API keys
|
||||
forceMappings := fh.forceModelMappings != nil && fh.forceModelMappings()
|
||||
|
||||
if forceMappings {
|
||||
// FORCE MODE: Check model mappings FIRST (takes precedence over local API keys)
|
||||
// This allows users to route Amp requests to their preferred OAuth providers
|
||||
if mappedModels, mappedProviders := resolveMappedModels(); len(mappedModels) > 0 {
|
||||
applyMapping(mappedModels, mappedProviders)
|
||||
}
|
||||
|
||||
// If no mapping applied, check for local providers
|
||||
if !usedMapping {
|
||||
providers = util.GetProviderName(normalizedModel)
|
||||
}
|
||||
} else {
|
||||
// DEFAULT MODE: Check local providers first, then mappings as fallback
|
||||
providers = util.GetProviderName(normalizedModel)
|
||||
|
||||
if len(providers) == 0 {
|
||||
// No providers configured - check if we have a model mapping
|
||||
if mappedModels, mappedProviders := resolveMappedModels(); len(mappedModels) > 0 {
|
||||
applyMapping(mappedModels, mappedProviders)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If no providers available, fallback to ampcode.com
|
||||
if len(providers) == 0 {
|
||||
proxy := fh.getProxy()
|
||||
if proxy != nil {
|
||||
// Log: Forwarding to ampcode.com (uses Amp credits)
|
||||
logAmpRouting(RouteTypeAmpCredits, modelName, "", "", requestPath)
|
||||
|
||||
// Restore body again for the proxy
|
||||
c.Request.Body = io.NopCloser(bytes.NewReader(bodyBytes))
|
||||
|
||||
// Forward to ampcode.com
|
||||
proxy.ServeHTTP(c.Writer, c.Request)
|
||||
return
|
||||
}
|
||||
|
||||
// No proxy available, let the normal handler return the error
|
||||
logAmpRouting(RouteTypeNoProvider, modelName, "", "", requestPath)
|
||||
}
|
||||
|
||||
// Log the routing decision
|
||||
providerName := ""
|
||||
if len(providers) > 0 {
|
||||
providerName = providers[0]
|
||||
}
|
||||
|
||||
if usedMapping {
|
||||
// Log: Model was mapped to another model
|
||||
log.Debugf("amp model mapping: request %s -> %s", normalizedModel, resolvedModel)
|
||||
logAmpRouting(RouteTypeModelMapping, modelName, resolvedModel, providerName, requestPath)
|
||||
rewriter := NewResponseRewriter(c.Writer, modelName)
|
||||
c.Writer = rewriter
|
||||
// Filter Anthropic-Beta header only for local handling paths
|
||||
filterAntropicBetaHeader(c)
|
||||
c.Request.Body = io.NopCloser(bytes.NewReader(bodyBytes))
|
||||
handler(c)
|
||||
rewriter.Flush()
|
||||
log.Debugf("amp model mapping: response %s -> %s", resolvedModel, modelName)
|
||||
} else if len(providers) > 0 {
|
||||
// Log: Using local provider (free)
|
||||
logAmpRouting(RouteTypeLocalProvider, modelName, resolvedModel, providerName, requestPath)
|
||||
// Filter Anthropic-Beta header only for local handling paths
|
||||
filterAntropicBetaHeader(c)
|
||||
c.Request.Body = io.NopCloser(bytes.NewReader(bodyBytes))
|
||||
handler(c)
|
||||
} else {
|
||||
// No provider, no mapping, no proxy: fall back to the wrapped handler so it can return an error response
|
||||
c.Request.Body = io.NopCloser(bytes.NewReader(bodyBytes))
|
||||
handler(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// filterAntropicBetaHeader filters Anthropic-Beta header to remove features requiring special subscription
|
||||
// This is needed when using local providers (bypassing the Amp proxy)
|
||||
func filterAntropicBetaHeader(c *gin.Context) {
|
||||
if betaHeader := c.Request.Header.Get("Anthropic-Beta"); betaHeader != "" {
|
||||
if filtered := filterBetaFeatures(betaHeader, "context-1m-2025-08-07"); filtered != "" {
|
||||
c.Request.Header.Set("Anthropic-Beta", filtered)
|
||||
} else {
|
||||
c.Request.Header.Del("Anthropic-Beta")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// rewriteModelInRequest replaces the model name in a JSON request body
|
||||
func rewriteModelInRequest(body []byte, newModel string) []byte {
|
||||
if !gjson.GetBytes(body, "model").Exists() {
|
||||
return body
|
||||
}
|
||||
result, err := sjson.SetBytes(body, "model", newModel)
|
||||
if err != nil {
|
||||
log.Warnf("amp model mapping: failed to rewrite model in request body: %v", err)
|
||||
return body
|
||||
}
|
||||
return result
|
||||
}
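// Illustrative effect:
//
//	rewriteModelInRequest([]byte(`{"model":"gpt-4-turbo"}`), "gpt-4-local")
//
// returns `{"model":"gpt-4-local"}`; bodies without a "model" field are returned unchanged.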
|
||||
|
||||
// extractModelFromRequest attempts to extract the model name from various request formats
|
||||
func extractModelFromRequest(body []byte, c *gin.Context) string {
|
||||
// First try to parse from JSON body (OpenAI, Claude, etc.)
|
||||
// Check common model field names
|
||||
if result := gjson.GetBytes(body, "model"); result.Exists() && result.Type == gjson.String {
|
||||
return result.String()
|
||||
}
|
||||
|
||||
// For Gemini requests, model is in the URL path
|
||||
// Standard format: /models/{model}:generateContent -> :action parameter
|
||||
if action := c.Param("action"); action != "" {
|
||||
// Split by colon to get model name (e.g., "gemini-pro:generateContent" -> "gemini-pro")
|
||||
parts := strings.Split(action, ":")
|
||||
if len(parts) > 0 && parts[0] != "" {
|
||||
return parts[0]
|
||||
}
|
||||
}
|
||||
|
||||
// AMP CLI format: /publishers/google/models/{model}:method -> *path parameter
|
||||
// Example: /publishers/google/models/gemini-3-pro-preview:streamGenerateContent
|
||||
if path := c.Param("path"); path != "" {
|
||||
// Look for /models/{model}:method pattern
|
||||
if idx := strings.Index(path, "/models/"); idx >= 0 {
|
||||
modelPart := path[idx+8:] // Skip "/models/"
|
||||
// Split by colon to get model name
|
||||
if colonIdx := strings.Index(modelPart, ":"); colonIdx > 0 {
|
||||
return modelPart[:colonIdx]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
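// Illustrative extraction results for the formats handled above:
//
//	JSON body {"model":"gpt-4-turbo"}                                       -> "gpt-4-turbo"
//	:action   "gemini-pro:generateContent"                                  -> "gemini-pro"
//	*path     "/publishers/google/models/gemini-pro:streamGenerateContent"  -> "gemini-pro"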
|
||||
@@ -0,0 +1,326 @@
|
||||
package amp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/routing/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// Characterization tests for fallback_handlers.go using testutil recorders
|
||||
// These tests capture existing behavior before refactoring to routing layer
|
||||
|
||||
func TestCharacterization_LocalProvider(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
// Register a mock provider for the test model
|
||||
reg := registry.GetGlobalRegistry()
|
||||
reg.RegisterClient("char-test-local", "anthropic", []*registry.ModelInfo{
|
||||
{ID: "test-model-local"},
|
||||
})
|
||||
defer reg.UnregisterClient("char-test-local")
|
||||
|
||||
// Setup recorders
|
||||
proxyRecorder := testutil.NewFakeProxyRecorder()
|
||||
handlerRecorder := testutil.NewFakeHandlerRecorder()
|
||||
|
||||
// Create gin context
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
|
||||
body := `{"model": "test-model-local", "messages": [{"role": "user", "content": "hello"}]}`
|
||||
req := httptest.NewRequest(http.MethodPost, "/api/provider/anthropic/v1/messages", bytes.NewReader([]byte(body)))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
c.Request = req
|
||||
|
||||
// Create fallback handler with proxy recorder
|
||||
// Create a test server to act as the proxy target
|
||||
proxyServer := httptest.NewServer(proxyRecorder.ToHandler())
|
||||
defer proxyServer.Close()
|
||||
|
||||
fh := NewFallbackHandler(func() *httputil.ReverseProxy {
|
||||
// Create a reverse proxy that forwards to our test server
|
||||
targetURL, _ := url.Parse(proxyServer.URL)
|
||||
return httputil.NewSingleHostReverseProxy(targetURL)
|
||||
})
|
||||
|
||||
// Execute
|
||||
wrapped := fh.WrapHandler(handlerRecorder.GinHandler())
|
||||
wrapped(c)
|
||||
|
||||
// Assert: proxy NOT called
|
||||
assert.False(t, proxyRecorder.Called, "proxy should NOT be called for local provider")
|
||||
|
||||
// Assert: local handler called once
|
||||
assert.True(t, handlerRecorder.WasCalled(), "local handler should be called")
|
||||
assert.Equal(t, 1, handlerRecorder.GetCallCount(), "local handler should be called exactly once")
|
||||
|
||||
// Assert: request body model unchanged
|
||||
assert.Contains(t, string(handlerRecorder.RequestBody), "test-model-local", "request body model should be unchanged")
|
||||
}
|
||||
|
||||
func TestCharacterization_ModelMapping(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
// Register a mock provider for the TARGET model (the mapped-to model)
|
||||
reg := registry.GetGlobalRegistry()
|
||||
reg.RegisterClient("char-test-mapped", "openai", []*registry.ModelInfo{
|
||||
{ID: "gpt-4-local"},
|
||||
})
|
||||
defer reg.UnregisterClient("char-test-mapped")
|
||||
|
||||
// Setup recorders
|
||||
proxyRecorder := testutil.NewFakeProxyRecorder()
|
||||
handlerRecorder := testutil.NewFakeHandlerRecorder()
|
||||
|
||||
// Create model mapper with a mapping
|
||||
mapper := NewModelMapper([]config.AmpModelMapping{
|
||||
{From: "gpt-4-turbo", To: "gpt-4-local"},
|
||||
})
|
||||
|
||||
// Create gin context
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
|
||||
// Request with original model that gets mapped
|
||||
body := `{"model": "gpt-4-turbo", "messages": [{"role": "user", "content": "hello"}]}`
|
||||
req := httptest.NewRequest(http.MethodPost, "/api/provider/openai/v1/chat/completions", bytes.NewReader([]byte(body)))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
c.Request = req
|
||||
|
||||
// Create fallback handler with mapper
|
||||
proxyServer := httptest.NewServer(proxyRecorder.ToHandler())
|
||||
defer proxyServer.Close()
|
||||
|
||||
fh := NewFallbackHandlerWithMapper(func() *httputil.ReverseProxy {
|
||||
targetURL, _ := url.Parse(proxyServer.URL)
|
||||
return httputil.NewSingleHostReverseProxy(targetURL)
|
||||
}, mapper, func() bool { return false })
|
||||
|
||||
// Execute - use handler that returns model in response for rewriter to work
|
||||
wrapped := fh.WrapHandler(handlerRecorder.GinHandlerWithModel())
|
||||
wrapped(c)
|
||||
|
||||
// Assert: proxy NOT called
|
||||
assert.False(t, proxyRecorder.Called, "proxy should NOT be called for model mapping")
|
||||
|
||||
// Assert: local handler called once
|
||||
assert.True(t, handlerRecorder.WasCalled(), "local handler should be called")
|
||||
assert.Equal(t, 1, handlerRecorder.GetCallCount(), "local handler should be called exactly once")
|
||||
|
||||
// Assert: request body model was rewritten to mapped model
|
||||
assert.Contains(t, string(handlerRecorder.RequestBody), "gpt-4-local", "request body model should be rewritten to mapped model")
|
||||
assert.NotContains(t, string(handlerRecorder.RequestBody), "gpt-4-turbo", "request body should NOT contain original model")
|
||||
|
||||
// Assert: context has mapped_model key set
|
||||
mappedModel, exists := handlerRecorder.GetContextKey("mapped_model")
|
||||
assert.True(t, exists, "context should have mapped_model key")
|
||||
assert.Equal(t, "gpt-4-local", mappedModel, "mapped_model should be the target model")
|
||||
|
||||
// Assert: response body model rewritten back to original
|
||||
// The response writer should rewrite model names in the response
|
||||
responseBody := w.Body.String()
|
||||
assert.Contains(t, responseBody, "gpt-4-turbo", "response should have original model name")
|
||||
}
|
||||
|
||||
func TestCharacterization_AmpCreditsProxy(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
// Setup recorders - NO local provider registered, NO mapping configured
|
||||
proxyRecorder := testutil.NewFakeProxyRecorder()
|
||||
handlerRecorder := testutil.NewFakeHandlerRecorder()
|
||||
|
||||
// Create gin context with CloseNotifier support (required for ReverseProxy)
|
||||
w := testutil.NewCloseNotifierRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
|
||||
// Request with a model that has no local provider and no mapping
|
||||
body := `{"model": "unknown-model-no-provider", "messages": [{"role": "user", "content": "hello"}]}`
|
||||
req := httptest.NewRequest(http.MethodPost, "/api/provider/openai/v1/chat/completions", bytes.NewReader([]byte(body)))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
c.Request = req
|
||||
|
||||
// Create fallback handler
|
||||
proxyServer := httptest.NewServer(proxyRecorder.ToHandler())
|
||||
defer proxyServer.Close()
|
||||
|
||||
fh := NewFallbackHandler(func() *httputil.ReverseProxy {
|
||||
targetURL, _ := url.Parse(proxyServer.URL)
|
||||
return httputil.NewSingleHostReverseProxy(targetURL)
|
||||
})
|
||||
|
||||
// Execute
|
||||
wrapped := fh.WrapHandler(handlerRecorder.GinHandler())
|
||||
wrapped(c)
|
||||
|
||||
// Assert: proxy called once
|
||||
assert.True(t, proxyRecorder.Called, "proxy should be called when no local provider and no mapping")
|
||||
assert.Equal(t, 1, proxyRecorder.GetCallCount(), "proxy should be called exactly once")
|
||||
|
||||
// Assert: local handler NOT called
|
||||
assert.False(t, handlerRecorder.WasCalled(), "local handler should NOT be called when falling back to proxy")
|
||||
|
||||
// Assert: body forwarded to proxy is original (no rewrite)
|
||||
assert.Contains(t, string(proxyRecorder.RequestBody), "unknown-model-no-provider", "request body model should be unchanged when proxying")
|
||||
}
|
||||
|
||||
func TestCharacterization_BodyRestore(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
// Register a mock provider for the test model
|
||||
reg := registry.GetGlobalRegistry()
|
||||
reg.RegisterClient("char-test-body", "anthropic", []*registry.ModelInfo{
|
||||
{ID: "test-model-body"},
|
||||
})
|
||||
defer reg.UnregisterClient("char-test-body")
|
||||
|
||||
// Setup recorders
|
||||
proxyRecorder := testutil.NewFakeProxyRecorder()
|
||||
handlerRecorder := testutil.NewFakeHandlerRecorder()
|
||||
|
||||
// Create gin context
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
|
||||
// Create a complex request body that will be read by the wrapper for model extraction
|
||||
originalBody := `{"model": "test-model-body", "messages": [{"role": "user", "content": "hello"}], "temperature": 0.7, "stream": true}`
|
||||
req := httptest.NewRequest(http.MethodPost, "/api/provider/anthropic/v1/messages", bytes.NewReader([]byte(originalBody)))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
c.Request = req
|
||||
|
||||
	// Create fallback handler with proxy recorder
	proxyServer := httptest.NewServer(proxyRecorder.ToHandler())
	defer proxyServer.Close()

	fh := NewFallbackHandler(func() *httputil.ReverseProxy {
		targetURL, _ := url.Parse(proxyServer.URL)
		return httputil.NewSingleHostReverseProxy(targetURL)
	})

	// Execute
	wrapped := fh.WrapHandler(handlerRecorder.GinHandler())
	wrapped(c)

	// Assert: local handler called (not proxy, since we have a local provider)
	assert.True(t, handlerRecorder.WasCalled(), "local handler should be called")
	assert.False(t, proxyRecorder.Called, "proxy should NOT be called for local provider")

	// Assert: handler receives complete original body
	// This verifies that the body was properly restored after the wrapper read it for model extraction
	assert.Equal(t, originalBody, string(handlerRecorder.RequestBody), "handler should receive complete original body after wrapper reads it for model extraction")
}

// TestCharacterization_GeminiV1Beta1_PostModels tests that POST requests with /models/ path use Gemini bridge handler
// This is a characterization test for the route gating logic in routes.go
func TestCharacterization_GeminiV1Beta1_PostModels(t *testing.T) {
	gin.SetMode(gin.TestMode)

	// Register a mock provider for the test model (Gemini format uses path-based model extraction)
	reg := registry.GetGlobalRegistry()
	reg.RegisterClient("char-test-gemini", "google", []*registry.ModelInfo{
		{ID: "gemini-pro"},
	})
	defer reg.UnregisterClient("char-test-gemini")

	// Setup recorders
	proxyRecorder := testutil.NewFakeProxyRecorder()
	handlerRecorder := testutil.NewFakeHandlerRecorder()

	// Create a test server for the proxy
	proxyServer := httptest.NewServer(proxyRecorder.ToHandler())
	defer proxyServer.Close()

	// Create fallback handler
	fh := NewFallbackHandler(func() *httputil.ReverseProxy {
		targetURL, _ := url.Parse(proxyServer.URL)
		return httputil.NewSingleHostReverseProxy(targetURL)
	})

	// Create the Gemini bridge handler (simulating what routes.go does)
	geminiBridge := createGeminiBridgeHandler(handlerRecorder.GinHandler())
	geminiV1Beta1Handler := fh.WrapHandler(geminiBridge)

	// Create router with the same gating logic as routes.go
	r := gin.New()
	r.Any("/api/provider/google/v1beta1/*path", func(c *gin.Context) {
		if c.Request.Method == "POST" {
			if path := c.Param("path"); strings.Contains(path, "/models/") {
				// POST with /models/ path -> use Gemini bridge with fallback handler
				geminiV1Beta1Handler(c)
				return
			}
		}
		// Non-POST or no /models/ in path -> proxy upstream
		proxyRecorder.ServeHTTP(c.Writer, c.Request)
	})

	// Execute: POST request with /models/ in path
	body := `{"contents": [{"role": "user", "parts": [{"text": "hello"}]}]}`
	req := httptest.NewRequest(http.MethodPost, "/api/provider/google/v1beta1/publishers/google/models/gemini-pro:generateContent", bytes.NewReader([]byte(body)))
	req.Header.Set("Content-Type", "application/json")
	w := httptest.NewRecorder()
	r.ServeHTTP(w, req)

	// Assert: local Gemini handler called
	assert.True(t, handlerRecorder.WasCalled(), "local Gemini handler should be called for POST /models/")

	// Assert: proxy NOT called
	assert.False(t, proxyRecorder.Called, "proxy should NOT be called for POST /models/ path")
}

// TestCharacterization_GeminiV1Beta1_GetProxies tests that GET requests to Gemini v1beta1 always use proxy
// This is a characterization test for the route gating logic in routes.go
func TestCharacterization_GeminiV1Beta1_GetProxies(t *testing.T) {
	gin.SetMode(gin.TestMode)

	// Setup recorders
	proxyRecorder := testutil.NewFakeProxyRecorder()
	handlerRecorder := testutil.NewFakeHandlerRecorder()

	// Create a test server for the proxy
	proxyServer := httptest.NewServer(proxyRecorder.ToHandler())
	defer proxyServer.Close()

	// Create fallback handler
	fh := NewFallbackHandler(func() *httputil.ReverseProxy {
		targetURL, _ := url.Parse(proxyServer.URL)
		return httputil.NewSingleHostReverseProxy(targetURL)
	})

	// Create the Gemini bridge handler
	geminiBridge := createGeminiBridgeHandler(handlerRecorder.GinHandler())
	geminiV1Beta1Handler := fh.WrapHandler(geminiBridge)

	// Create router with the same gating logic as routes.go
	r := gin.New()
	r.Any("/api/provider/google/v1beta1/*path", func(c *gin.Context) {
		if c.Request.Method == "POST" {
			if path := c.Param("path"); strings.Contains(path, "/models/") {
				geminiV1Beta1Handler(c)
				return
			}
		}
		proxyRecorder.ServeHTTP(c.Writer, c.Request)
	})

	// Execute: GET request (even with /models/ in path)
	req := httptest.NewRequest(http.MethodGet, "/api/provider/google/v1beta1/publishers/google/models/gemini-pro", nil)
	w := httptest.NewRecorder()
	r.ServeHTTP(w, req)

	// Assert: proxy called
	assert.True(t, proxyRecorder.Called, "proxy should be called for GET requests")
	assert.Equal(t, 1, proxyRecorder.GetCallCount(), "proxy should be called exactly once")

	// Assert: local handler NOT called
	assert.False(t, handlerRecorder.WasCalled(), "local handler should NOT be called for GET requests")
}
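
The characterization tests above rely on testutil.NewFakeProxyRecorder and testutil.NewFakeHandlerRecorder, whose implementations are not part of this diff. The following is a minimal, hypothetical sketch of what such recorders could look like, with field and method names inferred purely from the call sites (WasCalled, RequestBody, ToHandler, GetCallCount); the real helpers in the repository may differ.

package testutil

import (
	"io"
	"net/http"
	"sync"

	"github.com/gin-gonic/gin"
)

// FakeProxyRecorder records whether the "proxy" path was hit (sketch only).
type FakeProxyRecorder struct {
	mu     sync.Mutex
	Called bool
	calls  int
}

func NewFakeProxyRecorder() *FakeProxyRecorder { return &FakeProxyRecorder{} }

// ServeHTTP marks the recorder as called and answers 200.
func (f *FakeProxyRecorder) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	f.mu.Lock()
	f.Called = true
	f.calls++
	f.mu.Unlock()
	w.WriteHeader(http.StatusOK)
}

// ToHandler exposes the recorder as an http.Handler for httptest.NewServer.
func (f *FakeProxyRecorder) ToHandler() http.Handler { return f }

func (f *FakeProxyRecorder) GetCallCount() int {
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.calls
}

// FakeHandlerRecorder records whether the local gin handler ran and what body it saw.
type FakeHandlerRecorder struct {
	mu          sync.Mutex
	called      bool
	RequestBody []byte
}

func NewFakeHandlerRecorder() *FakeHandlerRecorder { return &FakeHandlerRecorder{} }

// GinHandler returns a gin handler that captures the request body and replies 200.
func (f *FakeHandlerRecorder) GinHandler() gin.HandlerFunc {
	return func(c *gin.Context) {
		body, _ := io.ReadAll(c.Request.Body)
		f.mu.Lock()
		f.called = true
		f.RequestBody = body
		f.mu.Unlock()
		c.JSON(http.StatusOK, gin.H{"status": "ok"})
	}
}

func (f *FakeHandlerRecorder) WasCalled() bool {
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.called
}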

internal/api/modules/amp/fallback_handlers_test.go (new file, 148 lines)
@@ -0,0 +1,148 @@
package amp

import (
	"bytes"
	"io"
	"net/http"
	"net/http/httptest"
	"net/http/httputil"
	"testing"

	"github.com/gin-gonic/gin"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
	"github.com/stretchr/testify/assert"
)

// Characterization tests for fallback_handlers.go
// These tests capture existing behavior before refactoring to routing layer

func TestFallbackHandler_WrapHandler_LocalProvider_NoMapping(t *testing.T) {
	gin.SetMode(gin.TestMode)

	// Setup: model that has local providers (gemini-2.5-pro is registered)
	w := httptest.NewRecorder()
	c, _ := gin.CreateTestContext(w)

	body := `{"model": "gemini-2.5-pro", "messages": [{"role": "user", "content": "hello"}]}`
	req := httptest.NewRequest(http.MethodPost, "/api/provider/anthropic/v1/messages", bytes.NewReader([]byte(body)))
	req.Header.Set("Content-Type", "application/json")
	c.Request = req

	// Handler that should be called (not proxy)
	handlerCalled := false
	handler := func(c *gin.Context) {
		handlerCalled = true
		c.JSON(200, gin.H{"status": "ok"})
	}

	// Create fallback handler
	fh := NewFallbackHandler(func() *httputil.ReverseProxy {
		return nil // no proxy
	})

	// Execute
	wrapped := fh.WrapHandler(handler)
	wrapped(c)

	// Assert: handler should be called directly (no mapping needed)
	assert.True(t, handlerCalled, "handler should be called for local provider")
	assert.Equal(t, 200, w.Code)
}

func TestFallbackHandler_WrapHandler_MappingApplied(t *testing.T) {
	gin.SetMode(gin.TestMode)

	// Register a mock provider for the target model
	reg := registry.GetGlobalRegistry()
	reg.RegisterClient("test-client", "anthropic", []*registry.ModelInfo{
		{ID: "claude-opus-4-5-thinking"},
	})

	// Setup: model that needs mapping
	w := httptest.NewRecorder()
	c, _ := gin.CreateTestContext(w)

	body := `{"model": "claude-opus-4-5-20251101", "messages": [{"role": "user", "content": "hello"}]}`
	req := httptest.NewRequest(http.MethodPost, "/api/provider/anthropic/v1/messages", bytes.NewReader([]byte(body)))
	req.Header.Set("Content-Type", "application/json")
	c.Request = req

	// Handler to capture rewritten body
	var capturedBody []byte
	handler := func(c *gin.Context) {
		capturedBody, _ = io.ReadAll(c.Request.Body)
		c.JSON(200, gin.H{"status": "ok"})
	}

	// Create fallback handler with mapper
	mapper := NewModelMapper([]config.AmpModelMapping{
		{From: "claude-opus-4-5-20251101", To: "claude-opus-4-5-thinking"},
	})

	fh := NewFallbackHandlerWithMapper(
		func() *httputil.ReverseProxy { return nil },
		mapper,
		func() bool { return false },
	)

	// Execute
	wrapped := fh.WrapHandler(handler)
	wrapped(c)

	// Assert: body should be rewritten
	assert.Contains(t, string(capturedBody), "claude-opus-4-5-thinking")

	// Assert: context should have mapped model
	mappedModel, exists := c.Get(MappedModelContextKey)
	assert.True(t, exists, "MappedModelContextKey should be set")
	assert.NotEmpty(t, mappedModel)
}

func TestFallbackHandler_WrapHandler_ThinkingSuffixPreserved(t *testing.T) {
	gin.SetMode(gin.TestMode)

	// Register a mock provider for the target model
	reg := registry.GetGlobalRegistry()
	reg.RegisterClient("test-client-2", "anthropic", []*registry.ModelInfo{
		{ID: "claude-opus-4-5-thinking"},
	})

	w := httptest.NewRecorder()
	c, _ := gin.CreateTestContext(w)

	// Model with thinking suffix
	body := `{"model": "claude-opus-4-5-20251101(xhigh)", "messages": []}`
	req := httptest.NewRequest(http.MethodPost, "/api/provider/anthropic/v1/messages", bytes.NewReader([]byte(body)))
	req.Header.Set("Content-Type", "application/json")
	c.Request = req

	var capturedBody []byte
	handler := func(c *gin.Context) {
		capturedBody, _ = io.ReadAll(c.Request.Body)
		c.JSON(200, gin.H{"status": "ok"})
	}

	mapper := NewModelMapper([]config.AmpModelMapping{
		{From: "claude-opus-4-5-20251101", To: "claude-opus-4-5-thinking"},
	})

	fh := NewFallbackHandlerWithMapper(
		func() *httputil.ReverseProxy { return nil },
		mapper,
		func() bool { return false },
	)

	wrapped := fh.WrapHandler(handler)
	wrapped(c)

	// Assert: thinking suffix should be preserved
	assert.Contains(t, string(capturedBody), "(xhigh)")
}

func TestFallbackHandler_WrapHandler_NoProvider_NoMapping_ProxyEnabled(t *testing.T) {
	// Skip: httptest.ResponseRecorder doesn't implement http.CloseNotifier
	// which is required by httputil.ReverseProxy. This test requires a real
	// HTTP server and client to properly test proxy behavior.
	t.Skip("requires real HTTP server for proxy testing")
}

internal/api/modules/amp/gemini_bridge.go (new file, 59 lines)
@@ -0,0 +1,59 @@
package amp

import (
	"strings"

	"github.com/gin-gonic/gin"
)

// createGeminiBridgeHandler creates a handler that bridges AMP CLI's non-standard Gemini paths
// to our standard Gemini handler by rewriting the request context.
//
// AMP CLI format: /publishers/google/models/gemini-3-pro-preview:streamGenerateContent
// Standard format: /models/gemini-3-pro-preview:streamGenerateContent
//
// This extracts the model+method from the AMP path and sets it as the :action parameter
// so the standard Gemini handler can process it.
//
// The handler parameter should be a Gemini-compatible handler that expects the :action param.
func createGeminiBridgeHandler(handler gin.HandlerFunc) gin.HandlerFunc {
	return func(c *gin.Context) {
		// Get the full path from the catch-all parameter
		path := c.Param("path")

		// Extract model:method from AMP CLI path format
		// Example: /publishers/google/models/gemini-3-pro-preview:streamGenerateContent
		const modelsPrefix = "/models/"
		if idx := strings.Index(path, modelsPrefix); idx >= 0 {
			// Extract everything after modelsPrefix
			actionPart := path[idx+len(modelsPrefix):]

			// Check if model was mapped by FallbackHandler
			if mappedModel, exists := c.Get(MappedModelContextKey); exists {
				if strModel, ok := mappedModel.(string); ok && strModel != "" {
					// Replace the model part in the action
					// actionPart is like "model-name:method"
					if colonIdx := strings.Index(actionPart, ":"); colonIdx > 0 {
						method := actionPart[colonIdx:] // ":method"
						actionPart = strModel + method
					}
				}
			}

			// Set this as the :action parameter that the Gemini handler expects
			c.Params = append(c.Params, gin.Param{
				Key:   "action",
				Value: actionPart,
			})

			// Call the handler
			handler(c)
			return
		}

		// If we can't parse the path, return 400
		c.JSON(400, gin.H{
			"error": "Invalid Gemini API path format",
		})
	}
}
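
As a concrete illustration of the rewrite described in the doc comment above, the standalone sketch below applies the same strings.Index/colon logic to an AMP-style path. The path and mapped model are illustrative values, not taken from the repository's configuration.

package main

import (
	"fmt"
	"strings"
)

func main() {
	path := "/publishers/google/models/gemini-3-pro-preview:streamGenerateContent"
	mapped := "gemini-2.5-pro" // e.g. what the fallback handler would place under MappedModelContextKey

	const modelsPrefix = "/models/"
	idx := strings.Index(path, modelsPrefix)
	action := path[idx+len(modelsPrefix):] // "gemini-3-pro-preview:streamGenerateContent"

	// When a mapped model is present, swap the model part but keep the ":method" part.
	if colon := strings.Index(action, ":"); colon > 0 && mapped != "" {
		action = mapped + action[colon:]
	}

	fmt.Println(action) // gemini-2.5-pro:streamGenerateContent
}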

internal/api/modules/amp/gemini_bridge_test.go (new file, 93 lines)
@@ -0,0 +1,93 @@
package amp

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/gin-gonic/gin"
)

func TestCreateGeminiBridgeHandler_ActionParameterExtraction(t *testing.T) {
	gin.SetMode(gin.TestMode)

	tests := []struct {
		name           string
		path           string
		mappedModel    string // empty string means no mapping
		expectedAction string
	}{
		{
			name:           "no_mapping_uses_url_model",
			path:           "/publishers/google/models/gemini-pro:generateContent",
			mappedModel:    "",
			expectedAction: "gemini-pro:generateContent",
		},
		{
			name:           "mapped_model_replaces_url_model",
			path:           "/publishers/google/models/gemini-exp:generateContent",
			mappedModel:    "gemini-2.0-flash",
			expectedAction: "gemini-2.0-flash:generateContent",
		},
		{
			name:           "mapping_preserves_method",
			path:           "/publishers/google/models/gemini-2.5-preview:streamGenerateContent",
			mappedModel:    "gemini-flash",
			expectedAction: "gemini-flash:streamGenerateContent",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var capturedAction string

			mockGeminiHandler := func(c *gin.Context) {
				capturedAction = c.Param("action")
				c.JSON(http.StatusOK, gin.H{"captured": capturedAction})
			}

			// Use the actual createGeminiBridgeHandler function
			bridgeHandler := createGeminiBridgeHandler(mockGeminiHandler)

			r := gin.New()
			if tt.mappedModel != "" {
				r.Use(func(c *gin.Context) {
					c.Set(MappedModelContextKey, tt.mappedModel)
					c.Next()
				})
			}
			r.POST("/api/provider/google/v1beta1/*path", bridgeHandler)

			req := httptest.NewRequest(http.MethodPost, "/api/provider/google/v1beta1"+tt.path, nil)
			w := httptest.NewRecorder()
			r.ServeHTTP(w, req)

			if w.Code != http.StatusOK {
				t.Fatalf("Expected status 200, got %d", w.Code)
			}
			if capturedAction != tt.expectedAction {
				t.Errorf("Expected action '%s', got '%s'", tt.expectedAction, capturedAction)
			}
		})
	}
}

func TestCreateGeminiBridgeHandler_InvalidPath(t *testing.T) {
	gin.SetMode(gin.TestMode)

	mockHandler := func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"ok": true})
	}
	bridgeHandler := createGeminiBridgeHandler(mockHandler)

	r := gin.New()
	r.POST("/api/provider/google/v1beta1/*path", bridgeHandler)

	req := httptest.NewRequest(http.MethodPost, "/api/provider/google/v1beta1/invalid/path", nil)
	w := httptest.NewRecorder()
	r.ServeHTTP(w, req)

	if w.Code != http.StatusBadRequest {
		t.Errorf("Expected status 400 for invalid path, got %d", w.Code)
	}
}

internal/api/modules/amp/model_mapping.go (new file, 298 lines)
@@ -0,0 +1,298 @@
// Package amp provides model mapping functionality for routing Amp CLI requests
// to alternative models when the requested model is not available locally.
package amp

import (
	"regexp"
	"strings"
	"sync"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/thinking"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
	log "github.com/sirupsen/logrus"
)

// ModelMapper provides model name mapping/aliasing for Amp CLI requests.
// When an Amp request comes in for a model that isn't available locally,
// this mapper can redirect it to an alternative model that IS available.
type ModelMapper interface {
	// MapModel returns the target model name if a mapping exists and the target
	// model has available providers. Returns empty string if no mapping applies.
	MapModel(requestedModel string) string

	// UpdateMappings refreshes the mapping configuration (for hot-reload).
	UpdateMappings(mappings []config.AmpModelMapping)
}

// DefaultModelMapper implements ModelMapper with thread-safe mapping storage.
type DefaultModelMapper struct {
	mu       sync.RWMutex
	mappings map[string]string // exact: from -> to (normalized lowercase keys)
	regexps  []regexMapping    // regex rules evaluated in order

	// oauthAliasForward maps channel -> name (lower) -> []alias for oauth-model-alias lookup.
	// This allows model-mappings targets to find providers via their aliases.
	oauthAliasForward map[string]map[string][]string
}

// NewModelMapper creates a new model mapper with the given initial mappings.
func NewModelMapper(mappings []config.AmpModelMapping) *DefaultModelMapper {
	m := &DefaultModelMapper{
		mappings:          make(map[string]string),
		regexps:           nil,
		oauthAliasForward: nil,
	}
	m.UpdateMappings(mappings)
	return m
}

// UpdateOAuthModelAlias updates the oauth-model-alias lookup table.
// This is called during initialization and on config hot-reload.
func (m *DefaultModelMapper) UpdateOAuthModelAlias(aliases map[string][]config.OAuthModelAlias) {
	m.mu.Lock()
	defer m.mu.Unlock()

	if len(aliases) == 0 {
		m.oauthAliasForward = nil
		return
	}

	forward := make(map[string]map[string][]string, len(aliases))
	for rawChannel, entries := range aliases {
		channel := strings.ToLower(strings.TrimSpace(rawChannel))
		if channel == "" || len(entries) == 0 {
			continue
		}
		channelMap := make(map[string][]string)
		for _, entry := range entries {
			name := strings.TrimSpace(entry.Name)
			alias := strings.TrimSpace(entry.Alias)
			if name == "" || alias == "" {
				continue
			}
			if strings.EqualFold(name, alias) {
				continue
			}
			nameKey := strings.ToLower(name)
			channelMap[nameKey] = append(channelMap[nameKey], alias)
		}
		if len(channelMap) > 0 {
			forward[channel] = channelMap
		}
	}
	if len(forward) == 0 {
		m.oauthAliasForward = nil
		return
	}
	m.oauthAliasForward = forward
	log.Debugf("amp model mapping: loaded oauth-model-alias for %d channel(s)", len(forward))
}

// findAllAliasesWithProviders returns all oauth-model-alias aliases for targetModel
// that have available providers. Useful for fallback when one alias is quota-exceeded.
func (m *DefaultModelMapper) findAllAliasesWithProviders(targetModel string) []string {
	if m.oauthAliasForward == nil {
		return nil
	}

	targetKey := strings.ToLower(strings.TrimSpace(targetModel))
	if targetKey == "" {
		return nil
	}

	var result []string
	seen := make(map[string]struct{})

	// Check all channels for this model name
	for _, channelMap := range m.oauthAliasForward {
		aliases := channelMap[targetKey]
		for _, alias := range aliases {
			aliasLower := strings.ToLower(alias)
			if _, exists := seen[aliasLower]; exists {
				continue
			}
			providers := util.GetProviderName(alias)
			if len(providers) > 0 {
				result = append(result, alias)
				seen[aliasLower] = struct{}{}
			}
		}
	}
	return result
}

// MapModel checks if a mapping exists for the requested model and if the
// target model has available local providers. Returns the mapped model name
// or empty string if no valid mapping exists.
//
// If the requested model contains a thinking suffix (e.g., "g25p(8192)"),
// the suffix is preserved in the returned model name (e.g., "gemini-2.5-pro(8192)").
// However, if the mapping target already contains a suffix, the config suffix
// takes priority over the user's suffix.
func (m *DefaultModelMapper) MapModel(requestedModel string) string {
	models := m.MapModelWithFallbacks(requestedModel)
	if len(models) == 0 {
		return ""
	}
	return models[0]
}

// MapModelWithFallbacks returns all possible target models for the requested model,
// including fallback aliases from oauth-model-alias. The first model is the primary target,
// and subsequent models are fallbacks to try if the primary is unavailable (e.g., quota exceeded).
func (m *DefaultModelMapper) MapModelWithFallbacks(requestedModel string) []string {
	if requestedModel == "" {
		return nil
	}

	m.mu.RLock()
	defer m.mu.RUnlock()

	// Extract thinking suffix from requested model using ParseSuffix
	requestResult := thinking.ParseSuffix(requestedModel)
	baseModel := requestResult.ModelName

	// Normalize the base model for lookup (case-insensitive)
	normalizedBase := strings.ToLower(strings.TrimSpace(baseModel))

	// Check for direct mapping using base model name
	targetModel, exists := m.mappings[normalizedBase]
	if !exists {
		// Try regex mappings in order using base model only
		// (suffix is handled separately via ParseSuffix)
		for _, rm := range m.regexps {
			if rm.re.MatchString(baseModel) {
				targetModel = rm.to
				exists = true
				break
			}
		}
		if !exists {
			return nil
		}
	}

	// Check if target model already has a thinking suffix (config priority)
	targetResult := thinking.ParseSuffix(targetModel)
	targetBase := targetResult.ModelName

	// Helper to apply suffix to a model
	applySuffix := func(model string) string {
		modelResult := thinking.ParseSuffix(model)
		if modelResult.HasSuffix {
			return model
		}
		if requestResult.HasSuffix && requestResult.RawSuffix != "" {
			return model + "(" + requestResult.RawSuffix + ")"
		}
		return model
	}

	// Verify target model has available providers (use base model for lookup)
	providers := util.GetProviderName(targetBase)

	// If direct provider available, return it as primary
	if len(providers) > 0 {
		return []string{applySuffix(targetModel)}
	}

	// No direct providers - check oauth-model-alias for all aliases that have providers
	allAliases := m.findAllAliasesWithProviders(targetBase)
	if len(allAliases) == 0 {
		log.Debugf("amp model mapping: target model %s has no available providers, skipping mapping", targetModel)
		return nil
	}

	// Log resolution
	if len(allAliases) == 1 {
		log.Debugf("amp model mapping: resolved %s -> %s via oauth-model-alias", targetModel, allAliases[0])
	} else {
		log.Debugf("amp model mapping: resolved %s -> %v via oauth-model-alias (%d fallbacks)", targetModel, allAliases, len(allAliases)-1)
	}

	// Apply suffix to all aliases
	result := make([]string, len(allAliases))
	for i, alias := range allAliases {
		result[i] = applySuffix(alias)
	}
	return result
}

// UpdateMappings refreshes the mapping configuration from config.
// This is called during initialization and on config hot-reload.
func (m *DefaultModelMapper) UpdateMappings(mappings []config.AmpModelMapping) {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Clear and rebuild mappings
	m.mappings = make(map[string]string, len(mappings))
	m.regexps = make([]regexMapping, 0, len(mappings))

	for _, mapping := range mappings {
		from := strings.TrimSpace(mapping.From)
		to := strings.TrimSpace(mapping.To)

		if from == "" || to == "" {
			log.Warnf("amp model mapping: skipping invalid mapping (from=%q, to=%q)", from, to)
			continue
		}

		if mapping.Regex {
			// Compile case-insensitive regex; wrap with (?i) to match behavior of exact lookups
			pattern := "(?i)" + from
			re, err := regexp.Compile(pattern)
			if err != nil {
				log.Warnf("amp model mapping: invalid regex %q: %v", from, err)
				continue
			}
			m.regexps = append(m.regexps, regexMapping{re: re, to: to})
			log.Debugf("amp model regex mapping registered: /%s/ -> %s", from, to)
		} else {
			// Store with normalized lowercase key for case-insensitive lookup
			normalizedFrom := strings.ToLower(from)
			m.mappings[normalizedFrom] = to
			log.Debugf("amp model mapping registered: %s -> %s", from, to)
		}
	}

	if len(m.mappings) > 0 {
		log.Infof("amp model mapping: loaded %d mapping(s)", len(m.mappings))
	}
	if n := len(m.regexps); n > 0 {
		log.Infof("amp model mapping: loaded %d regex mapping(s)", n)
	}
}

// GetMappings returns a copy of current mappings (for debugging/status).
func (m *DefaultModelMapper) GetMappings() map[string]string {
	m.mu.RLock()
	defer m.mu.RUnlock()

	result := make(map[string]string, len(m.mappings))
	for k, v := range m.mappings {
		result[k] = v
	}
	return result
}

// GetMappingsAsConfig returns the current model mappings as config.AmpModelMapping slice.
// Safe for concurrent use.
func (m *DefaultModelMapper) GetMappingsAsConfig() []config.AmpModelMapping {
	m.mu.RLock()
	defer m.mu.RUnlock()

	result := make([]config.AmpModelMapping, 0, len(m.mappings))
	for from, to := range m.mappings {
		result = append(result, config.AmpModelMapping{
			From: from,
			To:   to,
		})
	}
	return result
}

type regexMapping struct {
	re *regexp.Regexp
	to string
}
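
The suffix rule documented on MapModel (the user's reasoning suffix is carried over unless the mapping target already carries one) is easy to see in isolation. The sketch below is a simplified stand-in for thinking.ParseSuffix using only the standard library, with illustrative model names; it is not the module's code.

package main

import (
	"fmt"
	"strings"
)

// suffixOf splits a model name into its base and a trailing "(…)" suffix, if any.
func suffixOf(model string) (base, suffix string) {
	if i := strings.LastIndex(model, "("); i > 0 && strings.HasSuffix(model, ")") {
		return model[:i], model[i:]
	}
	return model, ""
}

func main() {
	// Case 1: target has no suffix, so the user's suffix is preserved.
	// Case 2: target already carries a suffix, so the config suffix wins.
	for _, tc := range []struct{ requested, target string }{
		{"g25p(high)", "gemini-2.5-pro"},
		{"alias(high)", "gemini-2.5-pro(medium)"},
	} {
		_, reqSuffix := suffixOf(tc.requested)
		_, tgtSuffix := suffixOf(tc.target)
		out := tc.target
		if tgtSuffix == "" && reqSuffix != "" {
			out = tc.target + reqSuffix
		}
		fmt.Printf("%s -> %s\n", tc.requested, out)
	}
	// Output:
	// g25p(high) -> gemini-2.5-pro(high)
	// alias(high) -> gemini-2.5-pro(medium)
}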

internal/api/modules/amp/model_mapping_test.go (new file, 375 lines)
@@ -0,0 +1,375 @@
|
||||
package amp
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
|
||||
)
|
||||
|
||||
func TestNewModelMapper(t *testing.T) {
|
||||
mappings := []config.AmpModelMapping{
|
||||
{From: "claude-opus-4.5", To: "claude-sonnet-4"},
|
||||
{From: "gpt-5", To: "gemini-2.5-pro"},
|
||||
}
|
||||
|
||||
mapper := NewModelMapper(mappings)
|
||||
if mapper == nil {
|
||||
t.Fatal("Expected non-nil mapper")
|
||||
}
|
||||
|
||||
result := mapper.GetMappings()
|
||||
if len(result) != 2 {
|
||||
t.Errorf("Expected 2 mappings, got %d", len(result))
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewModelMapper_Empty(t *testing.T) {
|
||||
mapper := NewModelMapper(nil)
|
||||
if mapper == nil {
|
||||
t.Fatal("Expected non-nil mapper")
|
||||
}
|
||||
|
||||
result := mapper.GetMappings()
|
||||
if len(result) != 0 {
|
||||
t.Errorf("Expected 0 mappings, got %d", len(result))
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelMapper_MapModel_NoProvider(t *testing.T) {
|
||||
mappings := []config.AmpModelMapping{
|
||||
{From: "claude-opus-4.5", To: "claude-sonnet-4"},
|
||||
}
|
||||
|
||||
mapper := NewModelMapper(mappings)
|
||||
|
||||
// Without a registered provider for the target, mapping should return empty
|
||||
result := mapper.MapModel("claude-opus-4.5")
|
||||
if result != "" {
|
||||
t.Errorf("Expected empty result when target has no provider, got %s", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelMapper_MapModel_WithProvider(t *testing.T) {
|
||||
// Register a mock provider for the target model
|
||||
reg := registry.GetGlobalRegistry()
|
||||
reg.RegisterClient("test-client", "claude", []*registry.ModelInfo{
|
||||
{ID: "claude-sonnet-4", OwnedBy: "anthropic", Type: "claude"},
|
||||
})
|
||||
defer reg.UnregisterClient("test-client")
|
||||
|
||||
mappings := []config.AmpModelMapping{
|
||||
{From: "claude-opus-4.5", To: "claude-sonnet-4"},
|
||||
}
|
||||
|
||||
mapper := NewModelMapper(mappings)
|
||||
|
||||
// With a registered provider, mapping should work
|
||||
result := mapper.MapModel("claude-opus-4.5")
|
||||
if result != "claude-sonnet-4" {
|
||||
t.Errorf("Expected claude-sonnet-4, got %s", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelMapper_MapModel_TargetWithThinkingSuffix(t *testing.T) {
|
||||
reg := registry.GetGlobalRegistry()
|
||||
reg.RegisterClient("test-client-thinking", "codex", []*registry.ModelInfo{
|
||||
{ID: "gpt-5.2", OwnedBy: "openai", Type: "codex"},
|
||||
})
|
||||
defer reg.UnregisterClient("test-client-thinking")
|
||||
|
||||
mappings := []config.AmpModelMapping{
|
||||
{From: "gpt-5.2-alias", To: "gpt-5.2(xhigh)"},
|
||||
}
|
||||
|
||||
mapper := NewModelMapper(mappings)
|
||||
|
||||
result := mapper.MapModel("gpt-5.2-alias")
|
||||
if result != "gpt-5.2(xhigh)" {
|
||||
t.Errorf("Expected gpt-5.2(xhigh), got %s", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelMapper_MapModel_CaseInsensitive(t *testing.T) {
|
||||
reg := registry.GetGlobalRegistry()
|
||||
reg.RegisterClient("test-client2", "claude", []*registry.ModelInfo{
|
||||
{ID: "claude-sonnet-4", OwnedBy: "anthropic", Type: "claude"},
|
||||
})
|
||||
defer reg.UnregisterClient("test-client2")
|
||||
|
||||
mappings := []config.AmpModelMapping{
|
||||
{From: "Claude-Opus-4.5", To: "claude-sonnet-4"},
|
||||
}
|
||||
|
||||
mapper := NewModelMapper(mappings)
|
||||
|
||||
// Should match case-insensitively
|
||||
result := mapper.MapModel("claude-opus-4.5")
|
||||
if result != "claude-sonnet-4" {
|
||||
t.Errorf("Expected claude-sonnet-4, got %s", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelMapper_MapModel_NotFound(t *testing.T) {
|
||||
mappings := []config.AmpModelMapping{
|
||||
{From: "claude-opus-4.5", To: "claude-sonnet-4"},
|
||||
}
|
||||
|
||||
mapper := NewModelMapper(mappings)
|
||||
|
||||
// Unknown model should return empty
|
||||
result := mapper.MapModel("unknown-model")
|
||||
if result != "" {
|
||||
t.Errorf("Expected empty for unknown model, got %s", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelMapper_MapModel_EmptyInput(t *testing.T) {
|
||||
mappings := []config.AmpModelMapping{
|
||||
{From: "claude-opus-4.5", To: "claude-sonnet-4"},
|
||||
}
|
||||
|
||||
mapper := NewModelMapper(mappings)
|
||||
|
||||
result := mapper.MapModel("")
|
||||
if result != "" {
|
||||
t.Errorf("Expected empty for empty input, got %s", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelMapper_UpdateMappings(t *testing.T) {
|
||||
mapper := NewModelMapper(nil)
|
||||
|
||||
// Initially empty
|
||||
if len(mapper.GetMappings()) != 0 {
|
||||
t.Error("Expected 0 initial mappings")
|
||||
}
|
||||
|
||||
// Update with new mappings
|
||||
mapper.UpdateMappings([]config.AmpModelMapping{
|
||||
{From: "model-a", To: "model-b"},
|
||||
{From: "model-c", To: "model-d"},
|
||||
})
|
||||
|
||||
result := mapper.GetMappings()
|
||||
if len(result) != 2 {
|
||||
t.Errorf("Expected 2 mappings after update, got %d", len(result))
|
||||
}
|
||||
|
||||
// Update again should replace, not append
|
||||
mapper.UpdateMappings([]config.AmpModelMapping{
|
||||
{From: "model-x", To: "model-y"},
|
||||
})
|
||||
|
||||
result = mapper.GetMappings()
|
||||
if len(result) != 1 {
|
||||
t.Errorf("Expected 1 mapping after second update, got %d", len(result))
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelMapper_UpdateMappings_SkipsInvalid(t *testing.T) {
|
||||
mapper := NewModelMapper(nil)
|
||||
|
||||
mapper.UpdateMappings([]config.AmpModelMapping{
|
||||
{From: "", To: "model-b"}, // Invalid: empty from
|
||||
{From: "model-a", To: ""}, // Invalid: empty to
|
||||
{From: " ", To: "model-b"}, // Invalid: whitespace from
|
||||
{From: "model-c", To: "model-d"}, // Valid
|
||||
})
|
||||
|
||||
result := mapper.GetMappings()
|
||||
if len(result) != 1 {
|
||||
t.Errorf("Expected 1 valid mapping, got %d", len(result))
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelMapper_GetMappings_ReturnsCopy(t *testing.T) {
|
||||
mappings := []config.AmpModelMapping{
|
||||
{From: "model-a", To: "model-b"},
|
||||
}
|
||||
|
||||
mapper := NewModelMapper(mappings)
|
||||
|
||||
// Get mappings and modify the returned map
|
||||
result := mapper.GetMappings()
|
||||
result["new-key"] = "new-value"
|
||||
|
||||
// Original should be unchanged
|
||||
original := mapper.GetMappings()
|
||||
if len(original) != 1 {
|
||||
t.Errorf("Expected original to have 1 mapping, got %d", len(original))
|
||||
}
|
||||
if _, exists := original["new-key"]; exists {
|
||||
t.Error("Original map was modified")
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelMapper_Regex_MatchBaseWithoutParens(t *testing.T) {
|
||||
reg := registry.GetGlobalRegistry()
|
||||
reg.RegisterClient("test-client-regex-1", "gemini", []*registry.ModelInfo{
|
||||
{ID: "gemini-2.5-pro", OwnedBy: "google", Type: "gemini"},
|
||||
})
|
||||
defer reg.UnregisterClient("test-client-regex-1")
|
||||
|
||||
mappings := []config.AmpModelMapping{
|
||||
{From: "^gpt-5$", To: "gemini-2.5-pro", Regex: true},
|
||||
}
|
||||
|
||||
mapper := NewModelMapper(mappings)
|
||||
|
||||
// Incoming model has reasoning suffix, regex matches base, suffix is preserved
|
||||
result := mapper.MapModel("gpt-5(high)")
|
||||
if result != "gemini-2.5-pro(high)" {
|
||||
t.Errorf("Expected gemini-2.5-pro(high), got %s", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelMapper_Regex_ExactPrecedence(t *testing.T) {
|
||||
reg := registry.GetGlobalRegistry()
|
||||
reg.RegisterClient("test-client-regex-2", "claude", []*registry.ModelInfo{
|
||||
{ID: "claude-sonnet-4", OwnedBy: "anthropic", Type: "claude"},
|
||||
})
|
||||
reg.RegisterClient("test-client-regex-3", "gemini", []*registry.ModelInfo{
|
||||
{ID: "gemini-2.5-pro", OwnedBy: "google", Type: "gemini"},
|
||||
})
|
||||
defer reg.UnregisterClient("test-client-regex-2")
|
||||
defer reg.UnregisterClient("test-client-regex-3")
|
||||
|
||||
mappings := []config.AmpModelMapping{
|
||||
{From: "gpt-5", To: "claude-sonnet-4"}, // exact
|
||||
{From: "^gpt-5.*$", To: "gemini-2.5-pro", Regex: true}, // regex
|
||||
}
|
||||
|
||||
mapper := NewModelMapper(mappings)
|
||||
|
||||
// Exact match should win over regex
|
||||
result := mapper.MapModel("gpt-5")
|
||||
if result != "claude-sonnet-4" {
|
||||
t.Errorf("Expected claude-sonnet-4, got %s", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelMapper_Regex_InvalidPattern_Skipped(t *testing.T) {
|
||||
// Invalid regex should be skipped and not cause panic
|
||||
mappings := []config.AmpModelMapping{
|
||||
{From: "(", To: "target", Regex: true},
|
||||
}
|
||||
|
||||
mapper := NewModelMapper(mappings)
|
||||
|
||||
result := mapper.MapModel("anything")
|
||||
if result != "" {
|
||||
t.Errorf("Expected empty result due to invalid regex, got %s", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelMapper_Regex_CaseInsensitive(t *testing.T) {
|
||||
reg := registry.GetGlobalRegistry()
|
||||
reg.RegisterClient("test-client-regex-4", "claude", []*registry.ModelInfo{
|
||||
{ID: "claude-sonnet-4", OwnedBy: "anthropic", Type: "claude"},
|
||||
})
|
||||
defer reg.UnregisterClient("test-client-regex-4")
|
||||
|
||||
mappings := []config.AmpModelMapping{
|
||||
{From: "^CLAUDE-OPUS-.*$", To: "claude-sonnet-4", Regex: true},
|
||||
}
|
||||
|
||||
mapper := NewModelMapper(mappings)
|
||||
|
||||
result := mapper.MapModel("claude-opus-4.5")
|
||||
if result != "claude-sonnet-4" {
|
||||
t.Errorf("Expected claude-sonnet-4, got %s", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelMapper_SuffixPreservation(t *testing.T) {
|
||||
reg := registry.GetGlobalRegistry()
|
||||
|
||||
// Register test models
|
||||
reg.RegisterClient("test-client-suffix", "gemini", []*registry.ModelInfo{
|
||||
{ID: "gemini-2.5-pro", OwnedBy: "google", Type: "gemini"},
|
||||
})
|
||||
reg.RegisterClient("test-client-suffix-2", "claude", []*registry.ModelInfo{
|
||||
{ID: "claude-sonnet-4", OwnedBy: "anthropic", Type: "claude"},
|
||||
})
|
||||
defer reg.UnregisterClient("test-client-suffix")
|
||||
defer reg.UnregisterClient("test-client-suffix-2")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
mappings []config.AmpModelMapping
|
||||
input string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "numeric suffix preserved",
|
||||
mappings: []config.AmpModelMapping{{From: "g25p", To: "gemini-2.5-pro"}},
|
||||
input: "g25p(8192)",
|
||||
want: "gemini-2.5-pro(8192)",
|
||||
},
|
||||
{
|
||||
name: "level suffix preserved",
|
||||
mappings: []config.AmpModelMapping{{From: "g25p", To: "gemini-2.5-pro"}},
|
||||
input: "g25p(high)",
|
||||
want: "gemini-2.5-pro(high)",
|
||||
},
|
||||
{
|
||||
name: "no suffix unchanged",
|
||||
mappings: []config.AmpModelMapping{{From: "g25p", To: "gemini-2.5-pro"}},
|
||||
input: "g25p",
|
||||
want: "gemini-2.5-pro",
|
||||
},
|
||||
{
|
||||
name: "config suffix takes priority",
|
||||
mappings: []config.AmpModelMapping{{From: "alias", To: "gemini-2.5-pro(medium)"}},
|
||||
input: "alias(high)",
|
||||
want: "gemini-2.5-pro(medium)",
|
||||
},
|
||||
{
|
||||
name: "regex with suffix preserved",
|
||||
mappings: []config.AmpModelMapping{{From: "^g25.*", To: "gemini-2.5-pro", Regex: true}},
|
||||
input: "g25p(8192)",
|
||||
want: "gemini-2.5-pro(8192)",
|
||||
},
|
||||
{
|
||||
name: "auto suffix preserved",
|
||||
mappings: []config.AmpModelMapping{{From: "g25p", To: "gemini-2.5-pro"}},
|
||||
input: "g25p(auto)",
|
||||
want: "gemini-2.5-pro(auto)",
|
||||
},
|
||||
{
|
||||
name: "none suffix preserved",
|
||||
mappings: []config.AmpModelMapping{{From: "g25p", To: "gemini-2.5-pro"}},
|
||||
input: "g25p(none)",
|
||||
want: "gemini-2.5-pro(none)",
|
||||
},
|
||||
{
|
||||
name: "case insensitive base lookup with suffix",
|
||||
mappings: []config.AmpModelMapping{{From: "G25P", To: "gemini-2.5-pro"}},
|
||||
input: "g25p(high)",
|
||||
want: "gemini-2.5-pro(high)",
|
||||
},
|
||||
{
|
||||
name: "empty suffix filtered out",
|
||||
mappings: []config.AmpModelMapping{{From: "g25p", To: "gemini-2.5-pro"}},
|
||||
input: "g25p()",
|
||||
want: "gemini-2.5-pro",
|
||||
},
|
||||
{
|
||||
name: "incomplete suffix treated as no suffix",
|
||||
mappings: []config.AmpModelMapping{{From: "g25p(high", To: "gemini-2.5-pro"}},
|
||||
input: "g25p(high",
|
||||
want: "gemini-2.5-pro",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
mapper := NewModelMapper(tt.mappings)
|
||||
got := mapper.MapModel(tt.input)
|
||||
if got != tt.want {
|
||||
t.Errorf("MapModel(%q) = %q, want %q", tt.input, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}

internal/api/modules/amp/proxy.go (new file, 235 lines)
@@ -0,0 +1,235 @@
package amp

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
	"net/http/httputil"
	"net/url"
	"strconv"
	"strings"

	"github.com/gin-gonic/gin"
	log "github.com/sirupsen/logrus"
)

func removeQueryValuesMatching(req *http.Request, key string, match string) {
	if req == nil || req.URL == nil || match == "" {
		return
	}

	q := req.URL.Query()
	values, ok := q[key]
	if !ok || len(values) == 0 {
		return
	}

	kept := make([]string, 0, len(values))
	for _, v := range values {
		if v == match {
			continue
		}
		kept = append(kept, v)
	}

	if len(kept) == 0 {
		q.Del(key)
	} else {
		q[key] = kept
	}
	req.URL.RawQuery = q.Encode()
}

// readCloser wraps a reader and forwards Close to a separate closer.
// Used to restore peeked bytes while preserving upstream body Close behavior.
type readCloser struct {
	r io.Reader
	c io.Closer
}

func (rc *readCloser) Read(p []byte) (int, error) { return rc.r.Read(p) }
func (rc *readCloser) Close() error               { return rc.c.Close() }

// createReverseProxy creates a reverse proxy handler for Amp upstream
// with automatic gzip decompression via ModifyResponse
func createReverseProxy(upstreamURL string, secretSource SecretSource) (*httputil.ReverseProxy, error) {
	parsed, err := url.Parse(upstreamURL)
	if err != nil {
		return nil, fmt.Errorf("invalid amp upstream url: %w", err)
	}

	proxy := httputil.NewSingleHostReverseProxy(parsed)
	originalDirector := proxy.Director

	// Modify outgoing requests to inject API key and fix routing
	proxy.Director = func(req *http.Request) {
		originalDirector(req)
		req.Host = parsed.Host

		// Remove client's Authorization header - it was only used for CLI Proxy API authentication
		// We will set our own Authorization using the configured upstream-api-key
		req.Header.Del("Authorization")
		req.Header.Del("X-Api-Key")
		req.Header.Del("X-Goog-Api-Key")

		// Remove query-based credentials if they match the authenticated client API key.
		// This prevents leaking client auth material to the Amp upstream while avoiding
		// breaking unrelated upstream query parameters.
		clientKey := getClientAPIKeyFromContext(req.Context())
		removeQueryValuesMatching(req, "key", clientKey)
		removeQueryValuesMatching(req, "auth_token", clientKey)

		// Preserve correlation headers for debugging
		if req.Header.Get("X-Request-ID") == "" {
			// Could generate one here if needed
		}

		// Note: We do NOT filter Anthropic-Beta headers in the proxy path
		// Users going through ampcode.com proxy are paying for the service and should get all features
		// including 1M context window (context-1m-2025-08-07)

		// Inject API key from secret source (only uses upstream-api-key from config)
		if key, err := secretSource.Get(req.Context()); err == nil && key != "" {
			req.Header.Set("X-Api-Key", key)
			req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", key))
		} else if err != nil {
			log.Warnf("amp secret source error (continuing without auth): %v", err)
		}
	}

	// Modify incoming responses to handle gzip without Content-Encoding
	// This addresses the same issue as inline handler gzip handling, but at the proxy level
	proxy.ModifyResponse = func(resp *http.Response) error {
		// Only process successful responses
		if resp.StatusCode < 200 || resp.StatusCode >= 300 {
			return nil
		}

		// Skip if already marked as gzip (Content-Encoding set)
		if resp.Header.Get("Content-Encoding") != "" {
			return nil
		}

		// Skip streaming responses (SSE, chunked)
		if isStreamingResponse(resp) {
			return nil
		}

		// Save reference to original upstream body for proper cleanup
		originalBody := resp.Body

		// Peek at first 2 bytes to detect gzip magic bytes
		header := make([]byte, 2)
		n, _ := io.ReadFull(originalBody, header)

		// Check for gzip magic bytes (0x1f 0x8b)
		// If n < 2, we didn't get enough bytes, so it's not gzip
		if n >= 2 && header[0] == 0x1f && header[1] == 0x8b {
			// It's gzip - read the rest of the body
			rest, err := io.ReadAll(originalBody)
			if err != nil {
				// Restore what we read and return original body (preserve Close behavior)
				resp.Body = &readCloser{
					r: io.MultiReader(bytes.NewReader(header[:n]), originalBody),
					c: originalBody,
				}
				return nil
			}

			// Reconstruct complete gzipped data
			gzippedData := append(header[:n], rest...)

			// Decompress
			gzipReader, err := gzip.NewReader(bytes.NewReader(gzippedData))
			if err != nil {
				log.Warnf("amp proxy: gzip header detected but decompress failed: %v", err)
				// Close original body and return in-memory copy
				_ = originalBody.Close()
				resp.Body = io.NopCloser(bytes.NewReader(gzippedData))
				return nil
			}

			decompressed, err := io.ReadAll(gzipReader)
			_ = gzipReader.Close()
			if err != nil {
				log.Warnf("amp proxy: gzip decompress error: %v", err)
				// Close original body and return in-memory copy
				_ = originalBody.Close()
				resp.Body = io.NopCloser(bytes.NewReader(gzippedData))
				return nil
			}

			// Close original body since we're replacing with in-memory decompressed content
			_ = originalBody.Close()

			// Replace body with decompressed content
			resp.Body = io.NopCloser(bytes.NewReader(decompressed))
			resp.ContentLength = int64(len(decompressed))

			// Update headers to reflect decompressed state
			resp.Header.Del("Content-Encoding")                                          // No longer compressed
			resp.Header.Del("Content-Length")                                            // Remove stale compressed length
			resp.Header.Set("Content-Length", strconv.FormatInt(resp.ContentLength, 10)) // Set decompressed length

			log.Debugf("amp proxy: decompressed gzip response (%d -> %d bytes)", len(gzippedData), len(decompressed))
		} else {
			// Not gzip - restore peeked bytes while preserving Close behavior
			// Handle edge cases: n might be 0, 1, or 2 depending on EOF
			resp.Body = &readCloser{
				r: io.MultiReader(bytes.NewReader(header[:n]), originalBody),
				c: originalBody,
			}
		}

		return nil
	}

	// Error handler for proxy failures
	proxy.ErrorHandler = func(rw http.ResponseWriter, req *http.Request, err error) {
		log.Errorf("amp upstream proxy error for %s %s: %v", req.Method, req.URL.Path, err)
		rw.Header().Set("Content-Type", "application/json")
		rw.WriteHeader(http.StatusBadGateway)
		_, _ = rw.Write([]byte(`{"error":"amp_upstream_proxy_error","message":"Failed to reach Amp upstream"}`))
	}

	return proxy, nil
}

// isStreamingResponse detects if the response is streaming (SSE only)
// Note: We only treat text/event-stream as streaming. Chunked transfer encoding
// is a transport-level detail and doesn't mean we can't decompress the full response.
// Many JSON APIs use chunked encoding for normal responses.
func isStreamingResponse(resp *http.Response) bool {
	contentType := resp.Header.Get("Content-Type")

	// Only Server-Sent Events are true streaming responses
	if strings.Contains(contentType, "text/event-stream") {
		return true
	}

	return false
}

// proxyHandler converts httputil.ReverseProxy to gin.HandlerFunc
func proxyHandler(proxy *httputil.ReverseProxy) gin.HandlerFunc {
	return func(c *gin.Context) {
		proxy.ServeHTTP(c.Writer, c.Request)
	}
}

// filterBetaFeatures removes a specific beta feature from comma-separated list
func filterBetaFeatures(header, featureToRemove string) string {
	features := strings.Split(header, ",")
	filtered := make([]string, 0, len(features))

	for _, feature := range features {
		trimmed := strings.TrimSpace(feature)
		if trimmed != "" && trimmed != featureToRemove {
			filtered = append(filtered, trimmed)
		}
	}

	return strings.Join(filtered, ",")
}
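
The sniffing strategy used in ModifyResponse above (peek two bytes, check the 0x1f 0x8b gzip magic, then either decompress or splice the peeked bytes back in front of the stream) can be demonstrated in isolation. The sketch below is a standalone, stdlib-only illustration, not the module's code.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// sniffAndMaybeDecompress mirrors the ModifyResponse strategy on a plain reader.
func sniffAndMaybeDecompress(body io.Reader) ([]byte, error) {
	header := make([]byte, 2)
	n, _ := io.ReadFull(body, header)

	if n >= 2 && header[0] == 0x1f && header[1] == 0x8b {
		// Looks like gzip: stitch the peeked bytes back in front and decompress.
		zr, err := gzip.NewReader(io.MultiReader(bytes.NewReader(header[:n]), body))
		if err != nil {
			return nil, err
		}
		defer zr.Close()
		return io.ReadAll(zr)
	}
	// Not gzip: return the peeked bytes plus the rest unchanged.
	rest, err := io.ReadAll(body)
	return append(header[:n], rest...), err
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte(`{"ok":true}`))
	zw.Close()

	out, _ := sniffAndMaybeDecompress(&buf)
	fmt.Println(string(out)) // {"ok":true}

	out, _ = sniffAndMaybeDecompress(bytes.NewReader([]byte("plain")))
	fmt.Println(string(out)) // plain
}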

internal/api/modules/amp/proxy_test.go (new file, 657 lines)
@@ -0,0 +1,657 @@
|
||||
package amp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
)
|
||||
|
||||
// Helper: compress data with gzip
|
||||
func gzipBytes(b []byte) []byte {
|
||||
var buf bytes.Buffer
|
||||
zw := gzip.NewWriter(&buf)
|
||||
zw.Write(b)
|
||||
zw.Close()
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// Helper: create a mock http.Response
|
||||
func mkResp(status int, hdr http.Header, body []byte) *http.Response {
|
||||
if hdr == nil {
|
||||
hdr = http.Header{}
|
||||
}
|
||||
return &http.Response{
|
||||
StatusCode: status,
|
||||
Header: hdr,
|
||||
Body: io.NopCloser(bytes.NewReader(body)),
|
||||
ContentLength: int64(len(body)),
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateReverseProxy_ValidURL(t *testing.T) {
|
||||
proxy, err := createReverseProxy("http://example.com", NewStaticSecretSource("key"))
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if proxy == nil {
|
||||
t.Fatal("expected proxy to be created")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateReverseProxy_InvalidURL(t *testing.T) {
|
||||
_, err := createReverseProxy("://invalid", NewStaticSecretSource("key"))
|
||||
if err == nil {
|
||||
t.Fatal("expected error for invalid URL")
|
||||
}
|
||||
}
|
||||
|
||||
func TestModifyResponse_GzipScenarios(t *testing.T) {
|
||||
proxy, err := createReverseProxy("http://example.com", NewStaticSecretSource("k"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
goodJSON := []byte(`{"ok":true}`)
|
||||
good := gzipBytes(goodJSON)
|
||||
truncated := good[:10]
|
||||
corrupted := append([]byte{0x1f, 0x8b}, []byte("notgzip")...)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
header http.Header
|
||||
body []byte
|
||||
status int
|
||||
wantBody []byte
|
||||
wantCE string
|
||||
}{
|
||||
{
|
||||
name: "decompresses_valid_gzip_no_header",
|
||||
header: http.Header{},
|
||||
body: good,
|
||||
status: 200,
|
||||
wantBody: goodJSON,
|
||||
wantCE: "",
|
||||
},
|
||||
{
|
||||
name: "skips_when_ce_present",
|
||||
header: http.Header{"Content-Encoding": []string{"gzip"}},
|
||||
body: good,
|
||||
status: 200,
|
||||
wantBody: good,
|
||||
wantCE: "gzip",
|
||||
},
|
||||
{
|
||||
name: "passes_truncated_unchanged",
|
||||
header: http.Header{},
|
||||
body: truncated,
|
||||
status: 200,
|
||||
wantBody: truncated,
|
||||
wantCE: "",
|
||||
},
|
||||
{
|
||||
name: "passes_corrupted_unchanged",
|
||||
header: http.Header{},
|
||||
body: corrupted,
|
||||
status: 200,
|
||||
wantBody: corrupted,
|
||||
wantCE: "",
|
||||
},
|
||||
{
|
||||
name: "non_gzip_unchanged",
|
||||
header: http.Header{},
|
||||
body: []byte("plain"),
|
||||
status: 200,
|
||||
wantBody: []byte("plain"),
|
||||
wantCE: "",
|
||||
},
|
||||
{
|
||||
name: "empty_body",
|
||||
header: http.Header{},
|
||||
body: []byte{},
|
||||
status: 200,
|
||||
wantBody: []byte{},
|
||||
wantCE: "",
|
||||
},
|
||||
{
|
||||
name: "single_byte_body",
|
||||
header: http.Header{},
|
||||
body: []byte{0x1f},
|
||||
status: 200,
|
||||
wantBody: []byte{0x1f},
|
||||
wantCE: "",
|
||||
},
|
||||
{
|
||||
name: "skips_non_2xx_status",
|
||||
header: http.Header{},
|
||||
body: good,
|
||||
status: 404,
|
||||
wantBody: good,
|
||||
wantCE: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
resp := mkResp(tc.status, tc.header, tc.body)
|
||||
if err := proxy.ModifyResponse(resp); err != nil {
|
||||
t.Fatalf("ModifyResponse error: %v", err)
|
||||
}
|
||||
got, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadAll error: %v", err)
|
||||
}
|
||||
if !bytes.Equal(got, tc.wantBody) {
|
||||
t.Fatalf("body mismatch:\nwant: %q\ngot: %q", tc.wantBody, got)
|
||||
}
|
||||
if ce := resp.Header.Get("Content-Encoding"); ce != tc.wantCE {
|
||||
t.Fatalf("Content-Encoding: want %q, got %q", tc.wantCE, ce)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestModifyResponse_UpdatesContentLengthHeader(t *testing.T) {
|
||||
proxy, err := createReverseProxy("http://example.com", NewStaticSecretSource("k"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
goodJSON := []byte(`{"message":"test response"}`)
|
||||
gzipped := gzipBytes(goodJSON)
|
||||
|
||||
// Simulate upstream response with gzip body AND Content-Length header
|
||||
// (this is the scenario the bot flagged - stale Content-Length after decompression)
|
||||
resp := mkResp(200, http.Header{
|
||||
"Content-Length": []string{fmt.Sprintf("%d", len(gzipped))}, // Compressed size
|
||||
}, gzipped)
|
||||
|
||||
if err := proxy.ModifyResponse(resp); err != nil {
|
||||
t.Fatalf("ModifyResponse error: %v", err)
|
||||
}
|
||||
|
||||
// Verify body is decompressed
|
||||
got, _ := io.ReadAll(resp.Body)
|
||||
if !bytes.Equal(got, goodJSON) {
|
||||
t.Fatalf("body should be decompressed, got: %q, want: %q", got, goodJSON)
|
||||
}
|
||||
|
||||
// Verify Content-Length header is updated to decompressed size
|
||||
wantCL := fmt.Sprintf("%d", len(goodJSON))
|
||||
gotCL := resp.Header.Get("Content-Length")
|
||||
if gotCL != wantCL {
|
||||
t.Fatalf("Content-Length header mismatch: want %q (decompressed), got %q", wantCL, gotCL)
|
||||
}
|
||||
|
||||
// Verify struct field also matches
|
||||
if resp.ContentLength != int64(len(goodJSON)) {
|
||||
t.Fatalf("resp.ContentLength mismatch: want %d, got %d", len(goodJSON), resp.ContentLength)
|
||||
}
|
||||
}
|
||||
|
||||
func TestModifyResponse_SkipsStreamingResponses(t *testing.T) {
|
||||
proxy, err := createReverseProxy("http://example.com", NewStaticSecretSource("k"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
goodJSON := []byte(`{"ok":true}`)
|
||||
gzipped := gzipBytes(goodJSON)
|
||||
|
||||
t.Run("sse_skips_decompression", func(t *testing.T) {
|
||||
resp := mkResp(200, http.Header{"Content-Type": []string{"text/event-stream"}}, gzipped)
|
||||
if err := proxy.ModifyResponse(resp); err != nil {
|
||||
t.Fatalf("ModifyResponse error: %v", err)
|
||||
}
|
||||
// SSE should NOT be decompressed
|
||||
got, _ := io.ReadAll(resp.Body)
|
||||
if !bytes.Equal(got, gzipped) {
|
||||
t.Fatal("SSE response should not be decompressed")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestModifyResponse_DecompressesChunkedJSON(t *testing.T) {
|
||||
proxy, err := createReverseProxy("http://example.com", NewStaticSecretSource("k"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
goodJSON := []byte(`{"ok":true}`)
|
||||
gzipped := gzipBytes(goodJSON)
|
||||
|
||||
t.Run("chunked_json_decompresses", func(t *testing.T) {
|
||||
		// Chunked JSON responses (like thread APIs) should be decompressed
		resp := mkResp(200, http.Header{"Transfer-Encoding": []string{"chunked"}}, gzipped)
		if err := proxy.ModifyResponse(resp); err != nil {
			t.Fatalf("ModifyResponse error: %v", err)
		}
		// Should decompress because it's not SSE
		got, _ := io.ReadAll(resp.Body)
		if !bytes.Equal(got, goodJSON) {
			t.Fatalf("chunked JSON should be decompressed, got: %q, want: %q", got, goodJSON)
		}
	})
}

func TestReverseProxy_InjectsHeaders(t *testing.T) {
	gotHeaders := make(chan http.Header, 1)
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gotHeaders <- r.Header.Clone()
		w.WriteHeader(200)
		w.Write([]byte(`ok`))
	}))
	defer upstream.Close()

	proxy, err := createReverseProxy(upstream.URL, NewStaticSecretSource("secret"))
	if err != nil {
		t.Fatal(err)
	}

	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		proxy.ServeHTTP(w, r)
	}))
	defer srv.Close()

	res, err := http.Get(srv.URL + "/test")
	if err != nil {
		t.Fatal(err)
	}
	res.Body.Close()

	hdr := <-gotHeaders
	if hdr.Get("X-Api-Key") != "secret" {
		t.Fatalf("X-Api-Key missing or wrong, got: %q", hdr.Get("X-Api-Key"))
	}
	if hdr.Get("Authorization") != "Bearer secret" {
		t.Fatalf("Authorization missing or wrong, got: %q", hdr.Get("Authorization"))
	}
}

func TestReverseProxy_EmptySecret(t *testing.T) {
	gotHeaders := make(chan http.Header, 1)
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gotHeaders <- r.Header.Clone()
		w.WriteHeader(200)
		w.Write([]byte(`ok`))
	}))
	defer upstream.Close()

	proxy, err := createReverseProxy(upstream.URL, NewStaticSecretSource(""))
	if err != nil {
		t.Fatal(err)
	}

	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		proxy.ServeHTTP(w, r)
	}))
	defer srv.Close()

	res, err := http.Get(srv.URL + "/test")
	if err != nil {
		t.Fatal(err)
	}
	res.Body.Close()

	hdr := <-gotHeaders
	// Should NOT inject headers when secret is empty
	if hdr.Get("X-Api-Key") != "" {
		t.Fatalf("X-Api-Key should not be set, got: %q", hdr.Get("X-Api-Key"))
	}
	if authVal := hdr.Get("Authorization"); authVal != "" && authVal != "Bearer " {
		t.Fatalf("Authorization should not be set, got: %q", authVal)
	}
}

func TestReverseProxy_StripsClientCredentialsFromHeadersAndQuery(t *testing.T) {
	type captured struct {
		headers http.Header
		query   string
	}
	got := make(chan captured, 1)
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		got <- captured{headers: r.Header.Clone(), query: r.URL.RawQuery}
		w.WriteHeader(200)
		w.Write([]byte(`ok`))
	}))
	defer upstream.Close()

	proxy, err := createReverseProxy(upstream.URL, NewStaticSecretSource("upstream"))
	if err != nil {
		t.Fatal(err)
	}

	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Simulate clientAPIKeyMiddleware injection (per-request)
		ctx := context.WithValue(r.Context(), clientAPIKeyContextKey{}, "client-key")
		proxy.ServeHTTP(w, r.WithContext(ctx))
	}))
	defer srv.Close()

	req, err := http.NewRequest(http.MethodGet, srv.URL+"/test?key=client-key&key=keep&auth_token=client-key&foo=bar", nil)
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set("Authorization", "Bearer client-key")
	req.Header.Set("X-Api-Key", "client-key")
	req.Header.Set("X-Goog-Api-Key", "client-key")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	res.Body.Close()

	c := <-got

	// These are client-provided credentials and must not reach the upstream.
	if v := c.headers.Get("X-Goog-Api-Key"); v != "" {
		t.Fatalf("X-Goog-Api-Key should be stripped, got: %q", v)
	}

	// We inject upstream Authorization/X-Api-Key, so the client auth must not survive.
	if v := c.headers.Get("Authorization"); v != "Bearer upstream" {
		t.Fatalf("Authorization should be upstream-injected, got: %q", v)
	}
	if v := c.headers.Get("X-Api-Key"); v != "upstream" {
		t.Fatalf("X-Api-Key should be upstream-injected, got: %q", v)
	}

	// Query-based credentials should be stripped only when they match the authenticated client key.
	// Unrelated values and parameters should be kept.
	if strings.Contains(c.query, "auth_token=client-key") || strings.Contains(c.query, "key=client-key") {
		t.Fatalf("query credentials should be stripped, got raw query: %q", c.query)
	}
	if !strings.Contains(c.query, "key=keep") || !strings.Contains(c.query, "foo=bar") {
		t.Fatalf("expected query to keep non-credential params, got raw query: %q", c.query)
	}
}

func TestReverseProxy_InjectsMappedSecret_FromRequestContext(t *testing.T) {
	gotHeaders := make(chan http.Header, 1)
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gotHeaders <- r.Header.Clone()
		w.WriteHeader(200)
		w.Write([]byte(`ok`))
	}))
	defer upstream.Close()

	defaultSource := NewStaticSecretSource("default")
	mapped := NewMappedSecretSource(defaultSource)
	mapped.UpdateMappings([]config.AmpUpstreamAPIKeyEntry{
		{
			UpstreamAPIKey: "u1",
			APIKeys:        []string{"k1"},
		},
	})

	proxy, err := createReverseProxy(upstream.URL, mapped)
	if err != nil {
		t.Fatal(err)
	}

	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Simulate clientAPIKeyMiddleware injection (per-request)
		ctx := context.WithValue(r.Context(), clientAPIKeyContextKey{}, "k1")
		proxy.ServeHTTP(w, r.WithContext(ctx))
	}))
	defer srv.Close()

	res, err := http.Get(srv.URL + "/test")
	if err != nil {
		t.Fatal(err)
	}
	res.Body.Close()

	hdr := <-gotHeaders
	if hdr.Get("X-Api-Key") != "u1" {
		t.Fatalf("X-Api-Key missing or wrong, got: %q", hdr.Get("X-Api-Key"))
	}
	if hdr.Get("Authorization") != "Bearer u1" {
		t.Fatalf("Authorization missing or wrong, got: %q", hdr.Get("Authorization"))
	}
}

func TestReverseProxy_MappedSecret_FallsBackToDefault(t *testing.T) {
	gotHeaders := make(chan http.Header, 1)
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gotHeaders <- r.Header.Clone()
		w.WriteHeader(200)
		w.Write([]byte(`ok`))
	}))
	defer upstream.Close()

	defaultSource := NewStaticSecretSource("default")
	mapped := NewMappedSecretSource(defaultSource)
	mapped.UpdateMappings([]config.AmpUpstreamAPIKeyEntry{
		{
			UpstreamAPIKey: "u1",
			APIKeys:        []string{"k1"},
		},
	})

	proxy, err := createReverseProxy(upstream.URL, mapped)
	if err != nil {
		t.Fatal(err)
	}

	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := context.WithValue(r.Context(), clientAPIKeyContextKey{}, "k2")
		proxy.ServeHTTP(w, r.WithContext(ctx))
	}))
	defer srv.Close()

	res, err := http.Get(srv.URL + "/test")
	if err != nil {
		t.Fatal(err)
	}
	res.Body.Close()

	hdr := <-gotHeaders
	if hdr.Get("X-Api-Key") != "default" {
		t.Fatalf("X-Api-Key fallback missing or wrong, got: %q", hdr.Get("X-Api-Key"))
	}
	if hdr.Get("Authorization") != "Bearer default" {
		t.Fatalf("Authorization fallback missing or wrong, got: %q", hdr.Get("Authorization"))
	}
}

func TestReverseProxy_ErrorHandler(t *testing.T) {
	// Point proxy to a non-routable address to trigger error
	proxy, err := createReverseProxy("http://127.0.0.1:1", NewStaticSecretSource(""))
	if err != nil {
		t.Fatal(err)
	}

	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		proxy.ServeHTTP(w, r)
	}))
	defer srv.Close()

	res, err := http.Get(srv.URL + "/any")
	if err != nil {
		t.Fatal(err)
	}
	body, _ := io.ReadAll(res.Body)
	res.Body.Close()

	if res.StatusCode != http.StatusBadGateway {
		t.Fatalf("want 502, got %d", res.StatusCode)
	}
	if !bytes.Contains(body, []byte(`"amp_upstream_proxy_error"`)) {
		t.Fatalf("unexpected body: %s", body)
	}
	if ct := res.Header.Get("Content-Type"); ct != "application/json" {
		t.Fatalf("content-type: want application/json, got %s", ct)
	}
}

func TestReverseProxy_FullRoundTrip_Gzip(t *testing.T) {
	// Upstream returns gzipped JSON without Content-Encoding header
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
		w.Write(gzipBytes([]byte(`{"upstream":"ok"}`)))
	}))
	defer upstream.Close()

	proxy, err := createReverseProxy(upstream.URL, NewStaticSecretSource("key"))
	if err != nil {
		t.Fatal(err)
	}

	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		proxy.ServeHTTP(w, r)
	}))
	defer srv.Close()

	res, err := http.Get(srv.URL + "/test")
	if err != nil {
		t.Fatal(err)
	}
	body, _ := io.ReadAll(res.Body)
	res.Body.Close()

	expected := []byte(`{"upstream":"ok"}`)
	if !bytes.Equal(body, expected) {
		t.Fatalf("want decompressed JSON, got: %s", body)
	}
}

func TestReverseProxy_FullRoundTrip_PlainJSON(t *testing.T) {
	// Upstream returns plain JSON
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(200)
		w.Write([]byte(`{"plain":"json"}`))
	}))
	defer upstream.Close()

	proxy, err := createReverseProxy(upstream.URL, NewStaticSecretSource("key"))
	if err != nil {
		t.Fatal(err)
	}

	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		proxy.ServeHTTP(w, r)
	}))
	defer srv.Close()

	res, err := http.Get(srv.URL + "/test")
	if err != nil {
		t.Fatal(err)
	}
	body, _ := io.ReadAll(res.Body)
	res.Body.Close()

	expected := []byte(`{"plain":"json"}`)
	if !bytes.Equal(body, expected) {
		t.Fatalf("want plain JSON unchanged, got: %s", body)
	}
}

func TestIsStreamingResponse(t *testing.T) {
	cases := []struct {
		name   string
		header http.Header
		want   bool
	}{
		{
			name:   "sse",
			header: http.Header{"Content-Type": []string{"text/event-stream"}},
			want:   true,
		},
		{
			name:   "chunked_not_streaming",
			header: http.Header{"Transfer-Encoding": []string{"chunked"}},
			want:   false, // Chunked is transport-level, not streaming
		},
		{
			name:   "normal_json",
			header: http.Header{"Content-Type": []string{"application/json"}},
			want:   false,
		},
		{
			name:   "empty",
			header: http.Header{},
			want:   false,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			resp := &http.Response{Header: tc.header}
			got := isStreamingResponse(resp)
			if got != tc.want {
				t.Fatalf("want %v, got %v", tc.want, got)
			}
		})
	}
}

func TestFilterBetaFeatures(t *testing.T) {
	tests := []struct {
		name            string
		header          string
		featureToRemove string
		expected        string
	}{
		{
			name:            "Remove context-1m from middle",
			header:          "fine-grained-tool-streaming-2025-05-14,context-1m-2025-08-07,oauth-2025-04-20",
			featureToRemove: "context-1m-2025-08-07",
			expected:        "fine-grained-tool-streaming-2025-05-14,oauth-2025-04-20",
		},
		{
			name:            "Remove context-1m from start",
			header:          "context-1m-2025-08-07,fine-grained-tool-streaming-2025-05-14",
			featureToRemove: "context-1m-2025-08-07",
			expected:        "fine-grained-tool-streaming-2025-05-14",
		},
		{
			name:            "Remove context-1m from end",
			header:          "fine-grained-tool-streaming-2025-05-14,context-1m-2025-08-07",
			featureToRemove: "context-1m-2025-08-07",
			expected:        "fine-grained-tool-streaming-2025-05-14",
		},
		{
			name:            "Feature not present",
			header:          "fine-grained-tool-streaming-2025-05-14,oauth-2025-04-20",
			featureToRemove: "context-1m-2025-08-07",
			expected:        "fine-grained-tool-streaming-2025-05-14,oauth-2025-04-20",
		},
		{
			name:            "Only feature to remove",
			header:          "context-1m-2025-08-07",
			featureToRemove: "context-1m-2025-08-07",
			expected:        "",
		},
		{
			name:            "Empty header",
			header:          "",
			featureToRemove: "context-1m-2025-08-07",
			expected:        "",
		},
		{
			name:            "Header with spaces",
			header:          "fine-grained-tool-streaming-2025-05-14, context-1m-2025-08-07 , oauth-2025-04-20",
			featureToRemove: "context-1m-2025-08-07",
			expected:        "fine-grained-tool-streaming-2025-05-14,oauth-2025-04-20",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := filterBetaFeatures(tt.header, tt.featureToRemove)
			if result != tt.expected {
				t.Errorf("filterBetaFeatures() = %q, want %q", result, tt.expected)
			}
		})
	}
}
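The filterBetaFeatures implementation itself is not part of this hunk. A minimal sketch consistent with the table-driven cases above (comma-separated values, surrounding spaces trimmed, the matching feature dropped, remainder re-joined without spaces) could look like the following; treat it as illustrative only, not the actual code under test.

// Sketch only: one possible filterBetaFeatures matching the test expectations above.
func filterBetaFeatures(header, featureToRemove string) string {
	if header == "" {
		return ""
	}
	kept := make([]string, 0)
	for _, part := range strings.Split(header, ",") {
		feature := strings.TrimSpace(part)
		if feature == "" || feature == featureToRemove {
			continue
		}
		kept = append(kept, feature)
	}
	return strings.Join(kept, ",")
}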
internal/api/modules/amp/response_rewriter.go (new file, 127 lines)
@@ -0,0 +1,127 @@
package amp

import (
	"bytes"
	"net/http"
	"strings"

	"github.com/gin-gonic/gin"
	log "github.com/sirupsen/logrus"
	"github.com/tidwall/gjson"
	"github.com/tidwall/sjson"
)

// ResponseRewriter wraps a gin.ResponseWriter to intercept and modify the response body
// It's used to rewrite model names in responses when model mapping is used
type ResponseRewriter struct {
	gin.ResponseWriter
	body          *bytes.Buffer
	originalModel string
	isStreaming   bool
}

// NewResponseRewriter creates a new response rewriter for model name substitution
func NewResponseRewriter(w gin.ResponseWriter, originalModel string) *ResponseRewriter {
	return &ResponseRewriter{
		ResponseWriter: w,
		body:           &bytes.Buffer{},
		originalModel:  originalModel,
	}
}

// Write intercepts response writes and buffers them for model name replacement
func (rw *ResponseRewriter) Write(data []byte) (int, error) {
	// Detect streaming on first write
	if rw.body.Len() == 0 && !rw.isStreaming {
		contentType := rw.Header().Get("Content-Type")
		rw.isStreaming = strings.Contains(contentType, "text/event-stream") ||
			strings.Contains(contentType, "stream")
	}

	if rw.isStreaming {
		n, err := rw.ResponseWriter.Write(rw.rewriteStreamChunk(data))
		if err == nil {
			if flusher, ok := rw.ResponseWriter.(http.Flusher); ok {
				flusher.Flush()
			}
		}
		return n, err
	}
	return rw.body.Write(data)
}

// Flush writes the buffered response with model names rewritten
func (rw *ResponseRewriter) Flush() {
	if rw.isStreaming {
		if flusher, ok := rw.ResponseWriter.(http.Flusher); ok {
			flusher.Flush()
		}
		return
	}
	if rw.body.Len() > 0 {
		if _, err := rw.ResponseWriter.Write(rw.rewriteModelInResponse(rw.body.Bytes())); err != nil {
			log.Warnf("amp response rewriter: failed to write rewritten response: %v", err)
		}
	}
}

// modelFieldPaths lists all JSON paths where model name may appear
var modelFieldPaths = []string{"model", "modelVersion", "response.modelVersion", "message.model"}

// rewriteModelInResponse replaces all occurrences of the mapped model with the original model in JSON
// It also suppresses "thinking" blocks if "tool_use" is present to ensure Amp client compatibility
func (rw *ResponseRewriter) rewriteModelInResponse(data []byte) []byte {
	// 1. Amp Compatibility: Suppress thinking blocks if tool use is detected
	// The Amp client struggles when both thinking and tool_use blocks are present
	if gjson.GetBytes(data, `content.#(type=="tool_use")`).Exists() {
		filtered := gjson.GetBytes(data, `content.#(type!="thinking")#`)
		if filtered.Exists() {
			originalCount := gjson.GetBytes(data, "content.#").Int()
			filteredCount := filtered.Get("#").Int()

			if originalCount > filteredCount {
				var err error
				data, err = sjson.SetBytes(data, "content", filtered.Value())
				if err != nil {
					log.Warnf("Amp ResponseRewriter: failed to suppress thinking blocks: %v", err)
				} else {
					log.Debugf("Amp ResponseRewriter: Suppressed %d thinking blocks due to tool usage", originalCount-filteredCount)
					// Log the result for verification
					log.Debugf("Amp ResponseRewriter: Resulting content: %s", gjson.GetBytes(data, "content").String())
				}
			}
		}
	}

	if rw.originalModel == "" {
		return data
	}
	for _, path := range modelFieldPaths {
		if gjson.GetBytes(data, path).Exists() {
			data, _ = sjson.SetBytes(data, path, rw.originalModel)
		}
	}
	return data
}

// rewriteStreamChunk rewrites model names in SSE stream chunks
func (rw *ResponseRewriter) rewriteStreamChunk(chunk []byte) []byte {
	if rw.originalModel == "" {
		return chunk
	}

	// SSE format: "data: {json}\n\n"
	lines := bytes.Split(chunk, []byte("\n"))
	for i, line := range lines {
		if bytes.HasPrefix(line, []byte("data: ")) {
			jsonData := bytes.TrimPrefix(line, []byte("data: "))
			if len(jsonData) > 0 && jsonData[0] == '{' {
				// Rewrite JSON in the data line
				rewritten := rw.rewriteModelInResponse(jsonData)
				lines[i] = append([]byte("data: "), rewritten...)
			}
		}
	}

	return bytes.Join(lines, []byte("\n"))
}
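For context, the rewriter above is intended to be swapped in for the gin writer before a mapped-model request is handled and flushed afterwards; since *ResponseRewriter embeds gin.ResponseWriter it satisfies that interface directly. A minimal usage sketch (the wrapper function and its name are illustrative, not part of this change):

// Illustrative only: wrap the gin writer so a mapped model's responses are
// rewritten back to the model name the client originally requested.
func withModelRewrite(originalModel string, next gin.HandlerFunc) gin.HandlerFunc {
	return func(c *gin.Context) {
		rw := NewResponseRewriter(c.Writer, originalModel)
		c.Writer = rw
		next(c)
		rw.Flush() // non-streaming bodies are buffered until Flush
	}
}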
internal/api/modules/amp/routes.go (new file, 371 lines)
@@ -0,0 +1,371 @@
package amp

import (
	"context"
	"errors"
	"net"
	"net/http"
	"strings"

	"github.com/gin-gonic/gin"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/logging"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/routing"
	"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
	"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers/claude"
	"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers/gemini"
	"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers/openai"
	log "github.com/sirupsen/logrus"
)

// clientAPIKeyContextKey is the context key used to pass the client API key
// from gin.Context to the request context for SecretSource lookup.
type clientAPIKeyContextKey struct{}

// clientAPIKeyMiddleware injects the authenticated client API key from gin.Context["apiKey"]
// into the request context so that SecretSource can look it up for per-client upstream routing.
func clientAPIKeyMiddleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Extract the client API key from gin context (set by AuthMiddleware)
		if apiKey, exists := c.Get("apiKey"); exists {
			if keyStr, ok := apiKey.(string); ok && keyStr != "" {
				// Inject into request context for SecretSource.Get(ctx) to read
				ctx := context.WithValue(c.Request.Context(), clientAPIKeyContextKey{}, keyStr)
				c.Request = c.Request.WithContext(ctx)
			}
		}
		c.Next()
	}
}

// getClientAPIKeyFromContext retrieves the client API key from request context.
// Returns empty string if not present.
func getClientAPIKeyFromContext(ctx context.Context) string {
	if val := ctx.Value(clientAPIKeyContextKey{}); val != nil {
		if keyStr, ok := val.(string); ok {
			return keyStr
		}
	}
	return ""
}

// localhostOnlyMiddleware returns a middleware that dynamically checks the module's
// localhost restriction setting. This allows hot-reload of the restriction without restarting.
func (m *AmpModule) localhostOnlyMiddleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Check current setting (hot-reloadable)
		if !m.IsRestrictedToLocalhost() {
			c.Next()
			return
		}

		// Use actual TCP connection address (RemoteAddr) to prevent header spoofing
		// This cannot be forged by X-Forwarded-For or other client-controlled headers
		remoteAddr := c.Request.RemoteAddr

		// RemoteAddr format is "IP:port" or "[IPv6]:port", extract just the IP
		host, _, err := net.SplitHostPort(remoteAddr)
		if err != nil {
			// Try parsing as raw IP (shouldn't happen with standard HTTP, but be defensive)
			host = remoteAddr
		}

		// Parse the IP to handle both IPv4 and IPv6
		ip := net.ParseIP(host)
		if ip == nil {
			log.Warnf("amp management: invalid RemoteAddr %s, denying access", remoteAddr)
			c.AbortWithStatusJSON(403, gin.H{
				"error": "Access denied: management routes restricted to localhost",
			})
			return
		}

		// Check if IP is loopback (127.0.0.1 or ::1)
		if !ip.IsLoopback() {
			log.Warnf("amp management: non-localhost connection from %s attempted access, denying", remoteAddr)
			c.AbortWithStatusJSON(403, gin.H{
				"error": "Access denied: management routes restricted to localhost",
			})
			return
		}

		c.Next()
	}
}

// noCORSMiddleware disables CORS for management routes to prevent browser-based attacks.
// This overwrites any global CORS headers set by the server.
func noCORSMiddleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Remove CORS headers to prevent cross-origin access from browsers
		c.Header("Access-Control-Allow-Origin", "")
		c.Header("Access-Control-Allow-Methods", "")
		c.Header("Access-Control-Allow-Headers", "")
		c.Header("Access-Control-Allow-Credentials", "")

		// For OPTIONS preflight, deny with 403
		if c.Request.Method == "OPTIONS" {
			c.AbortWithStatus(403)
			return
		}

		c.Next()
	}
}

// managementAvailabilityMiddleware short-circuits management routes when the upstream
// proxy is disabled, preventing noisy localhost warnings and accidental exposure.
func (m *AmpModule) managementAvailabilityMiddleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		if m.getProxy() == nil {
			logging.SkipGinRequestLogging(c)
			c.AbortWithStatusJSON(http.StatusServiceUnavailable, gin.H{
				"error": "amp upstream proxy not available",
			})
			return
		}
		c.Next()
	}
}

// wrapManagementAuth skips auth for selected management paths while keeping authentication elsewhere.
func wrapManagementAuth(auth gin.HandlerFunc, prefixes ...string) gin.HandlerFunc {
	return func(c *gin.Context) {
		path := c.Request.URL.Path
		for _, prefix := range prefixes {
			if strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '/') {
				c.Next()
				return
			}
		}
		auth(c)
	}
}

// registerManagementRoutes registers Amp management proxy routes
// These routes proxy through to the Amp control plane for OAuth, user management, etc.
// Uses dynamic middleware and proxy getter for hot-reload support.
// The auth middleware validates Authorization header against configured API keys.
func (m *AmpModule) registerManagementRoutes(engine *gin.Engine, baseHandler *handlers.BaseAPIHandler, auth gin.HandlerFunc) {
	ampAPI := engine.Group("/api")

	// Always disable CORS for management routes to prevent browser-based attacks
	ampAPI.Use(m.managementAvailabilityMiddleware(), noCORSMiddleware())

	// Apply dynamic localhost-only restriction (hot-reloadable via m.IsRestrictedToLocalhost())
	ampAPI.Use(m.localhostOnlyMiddleware())

	// Apply authentication middleware - requires valid API key in Authorization header
	var authWithBypass gin.HandlerFunc
	if auth != nil {
		ampAPI.Use(auth)
		authWithBypass = wrapManagementAuth(auth, "/threads", "/auth", "/docs", "/settings")
	}

	// Inject client API key into request context for per-client upstream routing
	ampAPI.Use(clientAPIKeyMiddleware())

	// Dynamic proxy handler that uses m.getProxy() for hot-reload support
	proxyHandler := func(c *gin.Context) {
		// Swallow ErrAbortHandler panics from ReverseProxy copyResponse to avoid noisy stack traces
		defer func() {
			if rec := recover(); rec != nil {
				if err, ok := rec.(error); ok && errors.Is(err, http.ErrAbortHandler) {
					// Upstream already wrote the status (often 404) before the client/stream ended.
					return
				}
				panic(rec)
			}
		}()

		proxy := m.getProxy()
		if proxy == nil {
			c.JSON(503, gin.H{"error": "amp upstream proxy not available"})
			return
		}
		proxy.ServeHTTP(c.Writer, c.Request)
	}

	// Management routes - these are proxied directly to Amp upstream
	ampAPI.Any("/internal", proxyHandler)
	ampAPI.Any("/internal/*path", proxyHandler)
	ampAPI.Any("/user", proxyHandler)
	ampAPI.Any("/user/*path", proxyHandler)
	ampAPI.Any("/auth", proxyHandler)
	ampAPI.Any("/auth/*path", proxyHandler)
	ampAPI.Any("/meta", proxyHandler)
	ampAPI.Any("/meta/*path", proxyHandler)
	ampAPI.Any("/ads", proxyHandler)
	ampAPI.Any("/telemetry", proxyHandler)
	ampAPI.Any("/telemetry/*path", proxyHandler)
	ampAPI.Any("/threads", proxyHandler)
	ampAPI.Any("/threads/*path", proxyHandler)
	ampAPI.Any("/otel", proxyHandler)
	ampAPI.Any("/otel/*path", proxyHandler)
	ampAPI.Any("/tab", proxyHandler)
	ampAPI.Any("/tab/*path", proxyHandler)

	// Root-level routes that AMP CLI expects without /api prefix
	// These need the same security middleware as the /api/* routes (dynamic for hot-reload)
	rootMiddleware := []gin.HandlerFunc{m.managementAvailabilityMiddleware(), noCORSMiddleware(), m.localhostOnlyMiddleware()}
	if authWithBypass != nil {
		rootMiddleware = append(rootMiddleware, authWithBypass)
	}
	// Add clientAPIKeyMiddleware after auth for per-client upstream routing
	rootMiddleware = append(rootMiddleware, clientAPIKeyMiddleware())
	engine.GET("/threads", append(rootMiddleware, proxyHandler)...)
	engine.GET("/threads/*path", append(rootMiddleware, proxyHandler)...)
	engine.GET("/docs", append(rootMiddleware, proxyHandler)...)
	engine.GET("/docs/*path", append(rootMiddleware, proxyHandler)...)
	engine.GET("/settings", append(rootMiddleware, proxyHandler)...)
	engine.GET("/settings/*path", append(rootMiddleware, proxyHandler)...)

	engine.GET("/threads.rss", append(rootMiddleware, proxyHandler)...)
	engine.GET("/news.rss", append(rootMiddleware, proxyHandler)...)

	// Root-level auth routes for CLI login flow
	// Amp uses multiple auth routes: /auth/cli-login, /auth/callback, /auth/sign-in, /auth/logout
	// We proxy all /auth/* to support the complete OAuth flow
	engine.Any("/auth", append(rootMiddleware, proxyHandler)...)
	engine.Any("/auth/*path", append(rootMiddleware, proxyHandler)...)

	// Google v1beta1 passthrough with OAuth fallback
	// AMP CLI uses non-standard paths like /publishers/google/models/...
	// We bridge these to our standard Gemini handler to enable local OAuth.
	// If no local OAuth is available, falls back to ampcode.com proxy.
	geminiHandlers := gemini.NewGeminiAPIHandler(baseHandler)
	geminiBridge := createGeminiBridgeHandler(geminiHandlers.GeminiHandler)

	// T-025: Migrated Gemini v1beta1 bridge to use ModelRoutingWrapper
	// Create a dedicated routing wrapper for the Gemini bridge
	geminiBridgeWrapper := m.createModelRoutingWrapper()
	geminiV1Beta1Handler := geminiBridgeWrapper.Wrap(geminiBridge)

	// Route POST model calls through Gemini bridge with ModelRoutingWrapper.
	// ModelRoutingWrapper checks provider -> mapping -> proxy fallback automatically.
	// All other methods (e.g., GET model listing) always proxy to upstream to preserve Amp CLI behavior.
	ampAPI.Any("/provider/google/v1beta1/*path", func(c *gin.Context) {
		if c.Request.Method == "POST" {
			if path := c.Param("path"); strings.Contains(path, "/models/") {
				// POST with /models/ path -> use Gemini bridge with unified routing wrapper
				// ModelRoutingWrapper will check provider/mapping and proxy if needed
				geminiV1Beta1Handler(c)
				return
			}
		}
		// Non-POST or no local provider available -> proxy upstream
		proxyHandler(c)
	})
}

// createModelRoutingWrapper creates a new ModelRoutingWrapper for unified routing.
// This is used for testing the new routing implementation (T-021 onwards).
func (m *AmpModule) createModelRoutingWrapper() *routing.ModelRoutingWrapper {
	// Create a registry - in production this would be populated with actual providers
	registry := routing.NewRegistry()

	// Create a minimal config with just AmpCode settings
	// The Router only needs AmpCode.ModelMappings and OAuthModelAlias
	cfg := &config.Config{
		AmpCode: func() config.AmpCode {
			if m.modelMapper != nil {
				return config.AmpCode{
					ModelMappings: m.modelMapper.GetMappingsAsConfig(),
				}
			}
			return config.AmpCode{}
		}(),
	}

	// Create router with registry and config
	router := routing.NewRouter(registry, cfg)

	// Create wrapper with proxy function
	proxyFunc := func(c *gin.Context) {
		proxy := m.getProxy()
		if proxy != nil {
			proxy.ServeHTTP(c.Writer, c.Request)
		} else {
			c.JSON(503, gin.H{"error": "amp upstream proxy not available"})
		}
	}

	return routing.NewModelRoutingWrapper(router, nil, nil, proxyFunc)
}

// registerProviderAliases registers /api/provider/{provider}/... routes
// These allow Amp CLI to route requests like:
//
//	/api/provider/openai/v1/chat/completions
//	/api/provider/anthropic/v1/messages
//	/api/provider/google/v1beta/models
func (m *AmpModule) registerProviderAliases(engine *gin.Engine, baseHandler *handlers.BaseAPIHandler, auth gin.HandlerFunc) {
	// Create handler instances for different providers
	openaiHandlers := openai.NewOpenAIAPIHandler(baseHandler)
	geminiHandlers := gemini.NewGeminiAPIHandler(baseHandler)
	claudeCodeHandlers := claude.NewClaudeCodeAPIHandler(baseHandler)
	openaiResponsesHandlers := openai.NewOpenAIResponsesAPIHandler(baseHandler)

	// Create unified routing wrapper (T-021 onwards)
	// Replaces FallbackHandler with Router-based unified routing
	routingWrapper := m.createModelRoutingWrapper()

	// Provider-specific routes under /api/provider/:provider
	ampProviders := engine.Group("/api/provider")
	if auth != nil {
		ampProviders.Use(auth)
	}
	// Inject client API key into request context for per-client upstream routing
	ampProviders.Use(clientAPIKeyMiddleware())

	provider := ampProviders.Group("/:provider")

	// Dynamic models handler - routes to appropriate provider based on path parameter
	ampModelsHandler := func(c *gin.Context) {
		providerName := strings.ToLower(c.Param("provider"))

		switch providerName {
		case "anthropic":
			claudeCodeHandlers.ClaudeModels(c)
		case "google":
			geminiHandlers.GeminiModels(c)
		default:
			// Default to OpenAI-compatible (works for openai, groq, cerebras, etc.)
			openaiHandlers.OpenAIModels(c)
		}
	}

	// Root-level routes (for providers that omit /v1, like groq/cerebras)
	// T-022: Migrated all OpenAI routes to use ModelRoutingWrapper for unified routing
	provider.GET("/models", ampModelsHandler) // Models endpoint doesn't need fallback (no body to check)
	provider.POST("/chat/completions", routingWrapper.Wrap(openaiHandlers.ChatCompletions))
	provider.POST("/completions", routingWrapper.Wrap(openaiHandlers.Completions))
	provider.POST("/responses", routingWrapper.Wrap(openaiResponsesHandlers.Responses))

	// /v1 routes (OpenAI/Claude-compatible endpoints)
	v1Amp := provider.Group("/v1")
	{
		v1Amp.GET("/models", ampModelsHandler) // Models endpoint doesn't need fallback

		// OpenAI-compatible endpoints with ModelRoutingWrapper
		// T-021, T-022: Migrated to unified routing wrapper
		v1Amp.POST("/chat/completions", routingWrapper.Wrap(openaiHandlers.ChatCompletions))
		v1Amp.POST("/completions", routingWrapper.Wrap(openaiHandlers.Completions))
		v1Amp.POST("/responses", routingWrapper.Wrap(openaiResponsesHandlers.Responses))

		// Claude/Anthropic-compatible endpoints with ModelRoutingWrapper
		// T-023: Migrated Claude routes to unified routing wrapper
		v1Amp.POST("/messages", routingWrapper.Wrap(claudeCodeHandlers.ClaudeMessages))
		v1Amp.POST("/messages/count_tokens", routingWrapper.Wrap(claudeCodeHandlers.ClaudeCountTokens))
	}

	// /v1beta routes (Gemini native API)
	// Note: Gemini handler extracts model from URL path, so fallback logic needs special handling
	// T-024: Migrated Gemini v1beta routes to unified routing wrapper
	v1betaAmp := provider.Group("/v1beta")
	{
		v1betaAmp.GET("/models", geminiHandlers.GeminiModels)
		v1betaAmp.POST("/models/*action", routingWrapper.Wrap(geminiHandlers.GeminiHandler))
		v1betaAmp.GET("/models/*action", geminiHandlers.GeminiGetHandler)
	}
}
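The prefix bypass in wrapManagementAuth only skips authentication when the prefix either ends the path or is followed by a path separator, so a path like "/threadsxyz" still goes through auth. A small illustrative check of that boundary behaviour (this test is hypothetical, not part of the change) could read:

// Illustrative only: boundary behaviour of wrapManagementAuth's prefix bypass.
func TestWrapManagementAuth_PrefixBoundary(t *testing.T) {
	gin.SetMode(gin.TestMode)
	authCalled := false
	auth := func(c *gin.Context) { authCalled = true }
	ok := func(c *gin.Context) { c.Status(http.StatusOK) }

	r := gin.New()
	r.Use(wrapManagementAuth(auth, "/threads"))
	r.GET("/threads", ok)
	r.GET("/threads/:id", ok)
	r.GET("/threadsxyz", ok)

	cases := []struct {
		path     string
		wantAuth bool
	}{
		{"/threads", false},     // exact prefix match: auth bypassed
		{"/threads/123", false}, // prefix followed by "/": auth bypassed
		{"/threadsxyz", true},   // same leading bytes but no path boundary: auth runs
	}
	for _, tc := range cases {
		authCalled = false
		w := httptest.NewRecorder()
		r.ServeHTTP(w, httptest.NewRequest(http.MethodGet, tc.path, nil))
		if authCalled != tc.wantAuth {
			t.Fatalf("%s: auth called = %v, want %v", tc.path, authCalled, tc.wantAuth)
		}
	}
}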
internal/api/modules/amp/routes_test.go (new file, 381 lines)
@@ -0,0 +1,381 @@
package amp

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/gin-gonic/gin"
	"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
)

func TestRegisterManagementRoutes(t *testing.T) {
	gin.SetMode(gin.TestMode)
	r := gin.New()

	// Create module with proxy for testing
	m := &AmpModule{
		restrictToLocalhost: false, // disable localhost restriction for tests
	}

	// Create a mock proxy that tracks calls
	proxyCalled := false
	mockProxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		proxyCalled = true
		w.WriteHeader(200)
		w.Write([]byte("proxied"))
	}))
	defer mockProxy.Close()

	// Create real proxy to mock server
	proxy, _ := createReverseProxy(mockProxy.URL, NewStaticSecretSource(""))
	m.setProxy(proxy)

	base := &handlers.BaseAPIHandler{}
	m.registerManagementRoutes(r, base, nil)
	srv := httptest.NewServer(r)
	defer srv.Close()

	managementPaths := []struct {
		path   string
		method string
	}{
		{"/api/internal", http.MethodGet},
		{"/api/internal/some/path", http.MethodGet},
		{"/api/user", http.MethodGet},
		{"/api/user/profile", http.MethodGet},
		{"/api/auth", http.MethodGet},
		{"/api/auth/login", http.MethodGet},
		{"/api/meta", http.MethodGet},
		{"/api/telemetry", http.MethodGet},
		{"/api/threads", http.MethodGet},
		{"/threads/", http.MethodGet},
		{"/threads.rss", http.MethodGet}, // Root-level route (no /api prefix)
		{"/api/otel", http.MethodGet},
		{"/api/tab", http.MethodGet},
		{"/api/tab/some/path", http.MethodGet},
		{"/auth", http.MethodGet},           // Root-level auth route
		{"/auth/cli-login", http.MethodGet}, // CLI login flow
		{"/auth/callback", http.MethodGet},  // OAuth callback
		// Google v1beta1 bridge should still proxy non-model requests (GET) and allow POST
		{"/api/provider/google/v1beta1/models", http.MethodGet},
		{"/api/provider/google/v1beta1/models", http.MethodPost},
	}

	for _, path := range managementPaths {
		t.Run(path.path, func(t *testing.T) {
			proxyCalled = false
			req, err := http.NewRequest(path.method, srv.URL+path.path, nil)
			if err != nil {
				t.Fatalf("failed to build request: %v", err)
			}
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				t.Fatalf("request failed: %v", err)
			}
			defer resp.Body.Close()

			if resp.StatusCode == http.StatusNotFound {
				t.Fatalf("route %s not registered", path.path)
			}
			if !proxyCalled {
				t.Fatalf("proxy handler not called for %s", path.path)
			}
		})
	}
}

func TestRegisterProviderAliases_AllProvidersRegistered(t *testing.T) {
	gin.SetMode(gin.TestMode)
	r := gin.New()

	// Minimal base handler setup (no need to initialize, just check routing)
	base := &handlers.BaseAPIHandler{}

	// Track if auth middleware was called
	authCalled := false
	authMiddleware := func(c *gin.Context) {
		authCalled = true
		c.Header("X-Auth", "ok")
		// Abort with success to avoid calling the actual handler (which needs full setup)
		c.AbortWithStatus(http.StatusOK)
	}

	m := &AmpModule{authMiddleware_: authMiddleware}
	m.registerProviderAliases(r, base, authMiddleware)

	paths := []struct {
		path   string
		method string
	}{
		{"/api/provider/openai/models", http.MethodGet},
		{"/api/provider/anthropic/models", http.MethodGet},
		{"/api/provider/google/models", http.MethodGet},
		{"/api/provider/groq/models", http.MethodGet},
		{"/api/provider/openai/chat/completions", http.MethodPost},
		{"/api/provider/anthropic/v1/messages", http.MethodPost},
		{"/api/provider/google/v1beta/models", http.MethodGet},
	}

	for _, tc := range paths {
		t.Run(tc.path, func(t *testing.T) {
			authCalled = false
			req := httptest.NewRequest(tc.method, tc.path, nil)
			w := httptest.NewRecorder()
			r.ServeHTTP(w, req)

			if w.Code == http.StatusNotFound {
				t.Fatalf("route %s %s not registered", tc.method, tc.path)
			}
			if !authCalled {
				t.Fatalf("auth middleware not executed for %s", tc.path)
			}
			if w.Header().Get("X-Auth") != "ok" {
				t.Fatalf("auth middleware header not set for %s", tc.path)
			}
		})
	}
}

func TestRegisterProviderAliases_DynamicModelsHandler(t *testing.T) {
	gin.SetMode(gin.TestMode)
	r := gin.New()

	base := &handlers.BaseAPIHandler{}

	m := &AmpModule{authMiddleware_: func(c *gin.Context) { c.AbortWithStatus(http.StatusOK) }}
	m.registerProviderAliases(r, base, func(c *gin.Context) { c.AbortWithStatus(http.StatusOK) })

	providers := []string{"openai", "anthropic", "google", "groq", "cerebras"}

	for _, provider := range providers {
		t.Run(provider, func(t *testing.T) {
			path := "/api/provider/" + provider + "/models"
			req := httptest.NewRequest(http.MethodGet, path, nil)
			w := httptest.NewRecorder()
			r.ServeHTTP(w, req)

			// Should not 404
			if w.Code == http.StatusNotFound {
				t.Fatalf("models route not found for provider: %s", provider)
			}
		})
	}
}

func TestRegisterProviderAliases_V1Routes(t *testing.T) {
	gin.SetMode(gin.TestMode)
	r := gin.New()

	base := &handlers.BaseAPIHandler{}

	m := &AmpModule{authMiddleware_: func(c *gin.Context) { c.AbortWithStatus(http.StatusOK) }}
	m.registerProviderAliases(r, base, func(c *gin.Context) { c.AbortWithStatus(http.StatusOK) })

	v1Paths := []struct {
		path   string
		method string
	}{
		{"/api/provider/openai/v1/models", http.MethodGet},
		{"/api/provider/openai/v1/chat/completions", http.MethodPost},
		{"/api/provider/openai/v1/completions", http.MethodPost},
		{"/api/provider/anthropic/v1/messages", http.MethodPost},
		{"/api/provider/anthropic/v1/messages/count_tokens", http.MethodPost},
	}

	for _, tc := range v1Paths {
		t.Run(tc.path, func(t *testing.T) {
			req := httptest.NewRequest(tc.method, tc.path, nil)
			w := httptest.NewRecorder()
			r.ServeHTTP(w, req)

			if w.Code == http.StatusNotFound {
				t.Fatalf("v1 route %s %s not registered", tc.method, tc.path)
			}
		})
	}
}

func TestRegisterProviderAliases_V1BetaRoutes(t *testing.T) {
	gin.SetMode(gin.TestMode)
	r := gin.New()

	base := &handlers.BaseAPIHandler{}

	m := &AmpModule{authMiddleware_: func(c *gin.Context) { c.AbortWithStatus(http.StatusOK) }}
	m.registerProviderAliases(r, base, func(c *gin.Context) { c.AbortWithStatus(http.StatusOK) })

	v1betaPaths := []struct {
		path   string
		method string
	}{
		{"/api/provider/google/v1beta/models", http.MethodGet},
		{"/api/provider/google/v1beta/models/generateContent", http.MethodPost},
	}

	for _, tc := range v1betaPaths {
		t.Run(tc.path, func(t *testing.T) {
			req := httptest.NewRequest(tc.method, tc.path, nil)
			w := httptest.NewRecorder()
			r.ServeHTTP(w, req)

			if w.Code == http.StatusNotFound {
				t.Fatalf("v1beta route %s %s not registered", tc.method, tc.path)
			}
		})
	}
}

func TestRegisterProviderAliases_NoAuthMiddleware(t *testing.T) {
	// Test that routes still register even if auth middleware is nil (fallback behavior)
	gin.SetMode(gin.TestMode)
	r := gin.New()

	base := &handlers.BaseAPIHandler{}

	m := &AmpModule{authMiddleware_: nil} // No auth middleware
	m.registerProviderAliases(r, base, func(c *gin.Context) { c.AbortWithStatus(http.StatusOK) })

	req := httptest.NewRequest(http.MethodGet, "/api/provider/openai/models", nil)
	w := httptest.NewRecorder()
	r.ServeHTTP(w, req)

	// Should still work (with fallback no-op auth)
	if w.Code == http.StatusNotFound {
		t.Fatal("routes should register even without auth middleware")
	}
}

func TestLocalhostOnlyMiddleware_PreventsSpoofing(t *testing.T) {
	gin.SetMode(gin.TestMode)
	r := gin.New()

	// Create module with localhost restriction enabled
	m := &AmpModule{
		restrictToLocalhost: true,
	}

	// Apply dynamic localhost-only middleware
	r.Use(m.localhostOnlyMiddleware())
	r.GET("/test", func(c *gin.Context) {
		c.String(http.StatusOK, "ok")
	})

	tests := []struct {
		name           string
		remoteAddr     string
		forwardedFor   string
		expectedStatus int
		description    string
	}{
		{
			name:           "spoofed_header_remote_connection",
			remoteAddr:     "192.168.1.100:12345",
			forwardedFor:   "127.0.0.1",
			expectedStatus: http.StatusForbidden,
			description:    "Spoofed X-Forwarded-For header should be ignored",
		},
		{
			name:           "real_localhost_ipv4",
			remoteAddr:     "127.0.0.1:54321",
			forwardedFor:   "",
			expectedStatus: http.StatusOK,
			description:    "Real localhost IPv4 connection should work",
		},
		{
			name:           "real_localhost_ipv6",
			remoteAddr:     "[::1]:54321",
			forwardedFor:   "",
			expectedStatus: http.StatusOK,
			description:    "Real localhost IPv6 connection should work",
		},
		{
			name:           "remote_ipv4",
			remoteAddr:     "203.0.113.42:8080",
			forwardedFor:   "",
			expectedStatus: http.StatusForbidden,
			description:    "Remote IPv4 connection should be blocked",
		},
		{
			name:           "remote_ipv6",
			remoteAddr:     "[2001:db8::1]:9090",
			forwardedFor:   "",
			expectedStatus: http.StatusForbidden,
			description:    "Remote IPv6 connection should be blocked",
		},
		{
			name:           "spoofed_localhost_ipv6",
			remoteAddr:     "203.0.113.42:8080",
			forwardedFor:   "::1",
			expectedStatus: http.StatusForbidden,
			description:    "Spoofed X-Forwarded-For with IPv6 localhost should be ignored",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			req := httptest.NewRequest(http.MethodGet, "/test", nil)
			req.RemoteAddr = tt.remoteAddr
			if tt.forwardedFor != "" {
				req.Header.Set("X-Forwarded-For", tt.forwardedFor)
			}

			w := httptest.NewRecorder()
			r.ServeHTTP(w, req)

			if w.Code != tt.expectedStatus {
				t.Errorf("%s: expected status %d, got %d", tt.description, tt.expectedStatus, w.Code)
			}
		})
	}
}

func TestLocalhostOnlyMiddleware_HotReload(t *testing.T) {
	gin.SetMode(gin.TestMode)
	r := gin.New()

	// Create module with localhost restriction initially enabled
	m := &AmpModule{
		restrictToLocalhost: true,
	}

	// Apply dynamic localhost-only middleware
	r.Use(m.localhostOnlyMiddleware())
	r.GET("/test", func(c *gin.Context) {
		c.String(http.StatusOK, "ok")
	})

	// Test 1: Remote IP should be blocked when restriction is enabled
	req := httptest.NewRequest(http.MethodGet, "/test", nil)
	req.RemoteAddr = "192.168.1.100:12345"
	w := httptest.NewRecorder()
	r.ServeHTTP(w, req)

	if w.Code != http.StatusForbidden {
		t.Errorf("Expected 403 when restriction enabled, got %d", w.Code)
	}

	// Test 2: Hot-reload - disable restriction
	m.setRestrictToLocalhost(false)

	req = httptest.NewRequest(http.MethodGet, "/test", nil)
	req.RemoteAddr = "192.168.1.100:12345"
	w = httptest.NewRecorder()
	r.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Errorf("Expected 200 after disabling restriction, got %d", w.Code)
	}

	// Test 3: Hot-reload - re-enable restriction
	m.setRestrictToLocalhost(true)

	req = httptest.NewRequest(http.MethodGet, "/test", nil)
	req.RemoteAddr = "192.168.1.100:12345"
	w = httptest.NewRecorder()
	r.ServeHTTP(w, req)

	if w.Code != http.StatusForbidden {
		t.Errorf("Expected 403 after re-enabling restriction, got %d", w.Code)
	}
}
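The spoofing tests above rely only on RemoteAddr; the loopback decision the middleware makes reduces to net.SplitHostPort plus net.ParseIP and IsLoopback, with headers never consulted. A standalone sketch of that check (the helper name is illustrative, not part of the change):

// Standalone sketch of the loopback check performed by localhostOnlyMiddleware.
// RemoteAddr is "IP:port" or "[IPv6]:port"; client-controlled headers are ignored.
func isLoopbackRemoteAddr(remoteAddr string) bool {
	host, _, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		host = remoteAddr // defensive: treat the whole string as an IP
	}
	ip := net.ParseIP(host)
	return ip != nil && ip.IsLoopback()
}

// isLoopbackRemoteAddr("127.0.0.1:54321") == true
// isLoopbackRemoteAddr("[::1]:54321")     == true
// isLoopbackRemoteAddr("203.0.113.42:80") == false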
internal/api/modules/amp/secret.go (new file, 248 lines)
@@ -0,0 +1,248 @@
package amp

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	log "github.com/sirupsen/logrus"
)

// SecretSource provides Amp API keys with configurable precedence and caching
type SecretSource interface {
	Get(ctx context.Context) (string, error)
}

// cachedSecret holds a secret value with expiration
type cachedSecret struct {
	value     string
	expiresAt time.Time
}

// MultiSourceSecret implements precedence-based secret lookup:
// 1. Explicit config value (highest priority)
// 2. Environment variable AMP_API_KEY
// 3. File-based secret (lowest priority)
type MultiSourceSecret struct {
	explicitKey string
	envKey      string
	filePath    string
	cacheTTL    time.Duration

	mu    sync.RWMutex
	cache *cachedSecret
}

// NewMultiSourceSecret creates a secret source with precedence and caching
func NewMultiSourceSecret(explicitKey string, cacheTTL time.Duration) *MultiSourceSecret {
	if cacheTTL == 0 {
		cacheTTL = 5 * time.Minute // Default 5 minute cache
	}

	home, _ := os.UserHomeDir()
	filePath := filepath.Join(home, ".local", "share", "amp", "secrets.json")

	return &MultiSourceSecret{
		explicitKey: strings.TrimSpace(explicitKey),
		envKey:      "AMP_API_KEY",
		filePath:    filePath,
		cacheTTL:    cacheTTL,
	}
}

// NewMultiSourceSecretWithPath creates a secret source with a custom file path (for testing)
func NewMultiSourceSecretWithPath(explicitKey string, filePath string, cacheTTL time.Duration) *MultiSourceSecret {
	if cacheTTL == 0 {
		cacheTTL = 5 * time.Minute
	}

	return &MultiSourceSecret{
		explicitKey: strings.TrimSpace(explicitKey),
		envKey:      "AMP_API_KEY",
		filePath:    filePath,
		cacheTTL:    cacheTTL,
	}
}

// Get retrieves the Amp API key using precedence: config > env > file
// Results are cached for cacheTTL duration to avoid excessive file reads
func (s *MultiSourceSecret) Get(ctx context.Context) (string, error) {
	// Precedence 1: Explicit config key (highest priority, no caching needed)
	if s.explicitKey != "" {
		return s.explicitKey, nil
	}

	// Precedence 2: Environment variable
	if envValue := strings.TrimSpace(os.Getenv(s.envKey)); envValue != "" {
		return envValue, nil
	}

	// Precedence 3: File-based secret (lowest priority, cached)
	// Check cache first
	s.mu.RLock()
	if s.cache != nil && time.Now().Before(s.cache.expiresAt) {
		value := s.cache.value
		s.mu.RUnlock()
		return value, nil
	}
	s.mu.RUnlock()

	// Cache miss or expired - read from file
	key, err := s.readFromFile()
	if err != nil {
		// Cache empty result to avoid repeated file reads on missing files
		s.updateCache("")
		return "", err
	}

	// Cache the result
	s.updateCache(key)
	return key, nil
}

// readFromFile reads the Amp API key from the secrets file
func (s *MultiSourceSecret) readFromFile() (string, error) {
	content, err := os.ReadFile(s.filePath)
	if err != nil {
		if os.IsNotExist(err) {
			return "", nil // Missing file is not an error, just no key available
		}
		return "", fmt.Errorf("failed to read amp secrets from %s: %w", s.filePath, err)
	}

	var secrets map[string]string
	if err := json.Unmarshal(content, &secrets); err != nil {
		return "", fmt.Errorf("failed to parse amp secrets from %s: %w", s.filePath, err)
	}

	key := strings.TrimSpace(secrets["apiKey@https://ampcode.com/"])
	return key, nil
}

// updateCache updates the cached secret value
func (s *MultiSourceSecret) updateCache(value string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.cache = &cachedSecret{
		value:     value,
		expiresAt: time.Now().Add(s.cacheTTL),
	}
}

// InvalidateCache clears the cached secret, forcing a fresh read on next Get
func (s *MultiSourceSecret) InvalidateCache() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.cache = nil
}

// UpdateExplicitKey refreshes the config-provided key and clears cache.
func (s *MultiSourceSecret) UpdateExplicitKey(key string) {
	if s == nil {
		return
	}
	s.mu.Lock()
	s.explicitKey = strings.TrimSpace(key)
	s.cache = nil
	s.mu.Unlock()
}

// StaticSecretSource returns a fixed API key (for testing)
type StaticSecretSource struct {
	key string
}

// NewStaticSecretSource creates a secret source with a fixed key
func NewStaticSecretSource(key string) *StaticSecretSource {
	return &StaticSecretSource{key: strings.TrimSpace(key)}
}

// Get returns the static API key
func (s *StaticSecretSource) Get(ctx context.Context) (string, error) {
	return s.key, nil
}

// MappedSecretSource wraps a default SecretSource and adds per-client API key mapping.
// When a request context contains a client API key that matches a configured mapping,
// the corresponding upstream key is returned. Otherwise, falls back to the default source.
type MappedSecretSource struct {
	defaultSource SecretSource
	mu            sync.RWMutex
	lookup        map[string]string // clientKey -> upstreamKey
}

// NewMappedSecretSource creates a MappedSecretSource wrapping the given default source.
func NewMappedSecretSource(defaultSource SecretSource) *MappedSecretSource {
	return &MappedSecretSource{
		defaultSource: defaultSource,
		lookup:        make(map[string]string),
	}
}

// Get retrieves the Amp API key, checking per-client mappings first.
// If the request context contains a client API key that matches a configured mapping,
// returns the corresponding upstream key. Otherwise, falls back to the default source.
func (s *MappedSecretSource) Get(ctx context.Context) (string, error) {
	// Try to get client API key from request context
	clientKey := getClientAPIKeyFromContext(ctx)
	if clientKey != "" {
		s.mu.RLock()
		if upstreamKey, ok := s.lookup[clientKey]; ok && upstreamKey != "" {
			s.mu.RUnlock()
			return upstreamKey, nil
		}
		s.mu.RUnlock()
	}

	// Fall back to default source
	return s.defaultSource.Get(ctx)
}

// UpdateMappings rebuilds the client-to-upstream key mapping from configuration entries.
// If the same client key appears in multiple entries, logs a warning and uses the first one.
func (s *MappedSecretSource) UpdateMappings(entries []config.AmpUpstreamAPIKeyEntry) {
	newLookup := make(map[string]string)

	for _, entry := range entries {
		upstreamKey := strings.TrimSpace(entry.UpstreamAPIKey)
		if upstreamKey == "" {
			continue
		}
		for _, clientKey := range entry.APIKeys {
			trimmedKey := strings.TrimSpace(clientKey)
			if trimmedKey == "" {
				continue
			}
			if _, exists := newLookup[trimmedKey]; exists {
				// Log warning for duplicate client key, first one wins
				log.Warnf("amp upstream-api-keys: client API key appears in multiple entries; using first mapping.")
				continue
			}
			newLookup[trimmedKey] = upstreamKey
		}
	}

	s.mu.Lock()
	s.lookup = newLookup
	s.mu.Unlock()
}

// UpdateDefaultExplicitKey updates the explicit key on the underlying MultiSourceSecret (if applicable).
func (s *MappedSecretSource) UpdateDefaultExplicitKey(key string) {
	if ms, ok := s.defaultSource.(*MultiSourceSecret); ok {
		ms.UpdateExplicitKey(key)
	}
}

// InvalidateCache invalidates cache on the underlying MultiSourceSecret (if applicable).
func (s *MappedSecretSource) InvalidateCache() {
	if ms, ok := s.defaultSource.(*MultiSourceSecret); ok {
		ms.InvalidateCache()
	}
}
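The file fallback above reads a single entry, "apiKey@https://ampcode.com/", from the Amp CLI secrets store at ~/.local/share/amp/secrets.json, with config and AMP_API_KEY taking precedence. A minimal usage sketch (the helper below is illustrative, not part of this change):

// Illustrative only: resolving an upstream key with the precedence
// config > AMP_API_KEY > ~/.local/share/amp/secrets.json.
func resolveAmpKey(ctx context.Context, configuredKey string) string {
	src := NewMultiSourceSecret(configuredKey, 5*time.Minute)
	key, err := src.Get(ctx)
	if err != nil {
		log.Warnf("amp: could not resolve upstream API key: %v", err)
		return ""
	}
	return key
}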
internal/api/modules/amp/secret_test.go (new file, 366 lines)
@@ -0,0 +1,366 @@
|
||||
package amp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestMultiSourceSecret_PrecedenceOrder(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
configKey string
|
||||
envKey string
|
||||
fileJSON string
|
||||
want string
|
||||
}{
|
||||
{"config_wins", "cfg", "env", `{"apiKey@https://ampcode.com/":"file"}`, "cfg"},
|
||||
{"env_wins_when_no_cfg", "", "env", `{"apiKey@https://ampcode.com/":"file"}`, "env"},
|
||||
{"file_when_no_cfg_env", "", "", `{"apiKey@https://ampcode.com/":"file"}`, "file"},
|
||||
{"empty_cfg_trims_then_env", " ", "env", `{"apiKey@https://ampcode.com/":"file"}`, "env"},
|
||||
{"empty_env_then_file", "", " ", `{"apiKey@https://ampcode.com/":"file"}`, "file"},
|
||||
{"missing_file_returns_empty", "", "", "", ""},
|
||||
{"all_empty_returns_empty", " ", " ", `{"apiKey@https://ampcode.com/":" "}`, ""},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
tc := tc // capture range variable
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
secretsPath := filepath.Join(tmpDir, "secrets.json")
|
||||
|
||||
if tc.fileJSON != "" {
|
||||
if err := os.WriteFile(secretsPath, []byte(tc.fileJSON), 0600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
t.Setenv("AMP_API_KEY", tc.envKey)
|
||||
|
||||
s := NewMultiSourceSecretWithPath(tc.configKey, secretsPath, 100*time.Millisecond)
|
||||
got, err := s.Get(ctx)
|
||||
if err != nil && tc.fileJSON != "" && json.Valid([]byte(tc.fileJSON)) {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if got != tc.want {
|
||||
t.Fatalf("want %q, got %q", tc.want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiSourceSecret_CacheBehavior(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
tmpDir := t.TempDir()
|
||||
p := filepath.Join(tmpDir, "secrets.json")
|
||||
|
||||
// Initial value
|
||||
if err := os.WriteFile(p, []byte(`{"apiKey@https://ampcode.com/":"v1"}`), 0600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s := NewMultiSourceSecretWithPath("", p, 50*time.Millisecond)
|
||||
|
||||
// First read - should return v1
|
||||
got1, err := s.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("Get failed: %v", err)
|
||||
}
|
||||
if got1 != "v1" {
|
||||
t.Fatalf("expected v1, got %s", got1)
|
||||
}
|
||||
|
||||
// Change file; within TTL we should still see v1 (cached)
|
||||
if err := os.WriteFile(p, []byte(`{"apiKey@https://ampcode.com/":"v2"}`), 0600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
got2, _ := s.Get(ctx)
|
||||
if got2 != "v1" {
|
||||
t.Fatalf("cache hit expected v1, got %s", got2)
|
||||
}
|
||||
|
||||
// After TTL expires, should see v2
|
||||
time.Sleep(60 * time.Millisecond)
|
||||
got3, _ := s.Get(ctx)
|
||||
if got3 != "v2" {
|
||||
t.Fatalf("cache miss expected v2, got %s", got3)
|
||||
}
|
||||
|
||||
// Invalidate forces re-read immediately
|
||||
if err := os.WriteFile(p, []byte(`{"apiKey@https://ampcode.com/":"v3"}`), 0600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
s.InvalidateCache()
|
||||
got4, _ := s.Get(ctx)
|
||||
if got4 != "v3" {
|
||||
t.Fatalf("invalidate expected v3, got %s", got4)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiSourceSecret_FileHandling(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("missing_file_no_error", func(t *testing.T) {
|
||||
s := NewMultiSourceSecretWithPath("", "/nonexistent/path/secrets.json", 100*time.Millisecond)
|
||||
got, err := s.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error for missing file, got: %v", err)
|
||||
}
|
||||
if got != "" {
|
||||
t.Fatalf("expected empty string, got %q", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("invalid_json", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
p := filepath.Join(tmpDir, "secrets.json")
|
||||
if err := os.WriteFile(p, []byte(`{invalid json`), 0600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s := NewMultiSourceSecretWithPath("", p, 100*time.Millisecond)
|
||||
_, err := s.Get(ctx)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for invalid JSON")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("missing_key_in_json", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
p := filepath.Join(tmpDir, "secrets.json")
|
||||
if err := os.WriteFile(p, []byte(`{"other":"value"}`), 0600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s := NewMultiSourceSecretWithPath("", p, 100*time.Millisecond)
|
||||
got, err := s.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if got != "" {
|
||||
t.Fatalf("expected empty string for missing key, got %q", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("empty_key_value", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
p := filepath.Join(tmpDir, "secrets.json")
|
||||
if err := os.WriteFile(p, []byte(`{"apiKey@https://ampcode.com/":" "}`), 0600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s := NewMultiSourceSecretWithPath("", p, 100*time.Millisecond)
|
||||
got, _ := s.Get(ctx)
|
||||
if got != "" {
|
||||
t.Fatalf("expected empty after trim, got %q", got)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestMultiSourceSecret_Concurrency(t *testing.T) {
	tmpDir := t.TempDir()
	p := filepath.Join(tmpDir, "secrets.json")
	if err := os.WriteFile(p, []byte(`{"apiKey@https://ampcode.com/":"concurrent"}`), 0600); err != nil {
		t.Fatal(err)
	}

	s := NewMultiSourceSecretWithPath("", p, 5*time.Second)
	ctx := context.Background()

	// Spawn many goroutines calling Get concurrently
	const goroutines = 50
	const iterations = 100

	var wg sync.WaitGroup
	errs := make(chan error, goroutines)

	for i := 0; i < goroutines; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				val, err := s.Get(ctx)
				if err != nil {
					errs <- err
					return
				}
				if val != "concurrent" {
					// Report the mismatched value rather than a nil error.
					errs <- fmt.Errorf("unexpected value %q", val)
					return
				}
			}
		}()
	}

	wg.Wait()
	close(errs)

	for err := range errs {
		t.Errorf("concurrency error: %v", err)
	}
}

func TestStaticSecretSource(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("returns_provided_key", func(t *testing.T) {
|
||||
s := NewStaticSecretSource("test-key-123")
|
||||
got, err := s.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if got != "test-key-123" {
|
||||
t.Fatalf("want test-key-123, got %q", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("trims_whitespace", func(t *testing.T) {
|
||||
s := NewStaticSecretSource(" test-key ")
|
||||
got, err := s.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if got != "test-key" {
|
||||
t.Fatalf("want test-key, got %q", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("empty_string", func(t *testing.T) {
|
||||
s := NewStaticSecretSource("")
|
||||
got, err := s.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if got != "" {
|
||||
t.Fatalf("want empty string, got %q", got)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestMultiSourceSecret_CacheEmptyResult(t *testing.T) {
|
||||
// Test that missing file results are cached to avoid repeated file reads
|
||||
tmpDir := t.TempDir()
|
||||
p := filepath.Join(tmpDir, "nonexistent.json")
|
||||
|
||||
s := NewMultiSourceSecretWithPath("", p, 100*time.Millisecond)
|
||||
ctx := context.Background()
|
||||
|
||||
// First call - file doesn't exist, should cache empty result
|
||||
got1, err := s.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error for missing file, got: %v", err)
|
||||
}
|
||||
if got1 != "" {
|
||||
t.Fatalf("expected empty string, got %q", got1)
|
||||
}
|
||||
|
||||
// Create the file now
|
||||
if err := os.WriteFile(p, []byte(`{"apiKey@https://ampcode.com/":"new-value"}`), 0600); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Second call - should still return empty (cached), not read the new file
|
||||
got2, _ := s.Get(ctx)
|
||||
if got2 != "" {
|
||||
t.Fatalf("cache should return empty, got %q", got2)
|
||||
}
|
||||
|
||||
// After TTL expires, should see the new value
|
||||
time.Sleep(110 * time.Millisecond)
|
||||
got3, _ := s.Get(ctx)
|
||||
if got3 != "new-value" {
|
||||
t.Fatalf("after cache expiry, expected new-value, got %q", got3)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMappedSecretSource_UsesMappingFromContext(t *testing.T) {
|
||||
defaultSource := NewStaticSecretSource("default")
|
||||
s := NewMappedSecretSource(defaultSource)
|
||||
s.UpdateMappings([]config.AmpUpstreamAPIKeyEntry{
|
||||
{
|
||||
UpstreamAPIKey: "u1",
|
||||
APIKeys: []string{"k1"},
|
||||
},
|
||||
})
|
||||
|
||||
ctx := context.WithValue(context.Background(), clientAPIKeyContextKey{}, "k1")
|
||||
got, err := s.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if got != "u1" {
|
||||
t.Fatalf("want u1, got %q", got)
|
||||
}
|
||||
|
||||
ctx = context.WithValue(context.Background(), clientAPIKeyContextKey{}, "k2")
|
||||
got, err = s.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if got != "default" {
|
||||
t.Fatalf("want default fallback, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMappedSecretSource_DuplicateClientKey_FirstWins(t *testing.T) {
|
||||
defaultSource := NewStaticSecretSource("default")
|
||||
s := NewMappedSecretSource(defaultSource)
|
||||
s.UpdateMappings([]config.AmpUpstreamAPIKeyEntry{
|
||||
{
|
||||
UpstreamAPIKey: "u1",
|
||||
APIKeys: []string{"k1"},
|
||||
},
|
||||
{
|
||||
UpstreamAPIKey: "u2",
|
||||
APIKeys: []string{"k1"},
|
||||
},
|
||||
})
|
||||
|
||||
ctx := context.WithValue(context.Background(), clientAPIKeyContextKey{}, "k1")
|
||||
got, err := s.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if got != "u1" {
|
||||
t.Fatalf("want u1 (first wins), got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMappedSecretSource_DuplicateClientKey_LogsWarning(t *testing.T) {
|
||||
hook := test.NewLocal(log.StandardLogger())
|
||||
defer hook.Reset()
|
||||
|
||||
defaultSource := NewStaticSecretSource("default")
|
||||
s := NewMappedSecretSource(defaultSource)
|
||||
s.UpdateMappings([]config.AmpUpstreamAPIKeyEntry{
|
||||
{
|
||||
UpstreamAPIKey: "u1",
|
||||
APIKeys: []string{"k1"},
|
||||
},
|
||||
{
|
||||
UpstreamAPIKey: "u2",
|
||||
APIKeys: []string{"k1"},
|
||||
},
|
||||
})
|
||||
|
||||
foundWarning := false
|
||||
for _, entry := range hook.AllEntries() {
|
||||
if entry.Level == log.WarnLevel && entry.Message == "amp upstream-api-keys: client API key appears in multiple entries; using first mapping." {
|
||||
foundWarning = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundWarning {
|
||||
t.Fatal("expected warning log for duplicate client key, but none was found")
|
||||
}
|
||||
}
|
||||
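For reference, the file-based source exercised above reads a JSON object keyed by `apiKey@https://ampcode.com/`. A tiny sketch of producing such a file outside the tests (path and key value are placeholders; assumes encoding/json and os are imported):

	payload := map[string]string{"apiKey@https://ampcode.com/": "example-upstream-key"}
	data, _ := json.Marshal(payload)
	_ = os.WriteFile("/path/to/secrets.json", data, 0o600)
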
internal/api/modules/modules.go (new file, 92 lines)
@@ -0,0 +1,92 @@
// Package modules provides a pluggable routing module system for extending
// the API server with optional features without modifying core routing logic.
package modules

import (
	"fmt"

	"github.com/gin-gonic/gin"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
)

// Context encapsulates the dependencies exposed to routing modules during
// registration. Modules can use the Gin engine to attach routes, the shared
// BaseAPIHandler for constructing SDK-specific handlers, and the resolved
// authentication middleware for protecting routes that require API keys.
type Context struct {
	Engine         *gin.Engine
	BaseHandler    *handlers.BaseAPIHandler
	Config         *config.Config
	AuthMiddleware gin.HandlerFunc
}

// RouteModule represents a pluggable routing module that can register routes
// and handle configuration updates independently of the core server.
//
// DEPRECATED: Use RouteModuleV2 for new modules. This interface is kept for
// backwards compatibility and will be removed in a future version.
type RouteModule interface {
	// Name returns a human-readable identifier for the module
	Name() string

	// Register sets up routes and handlers for this module.
	// It receives the Gin engine, base handlers, and current configuration.
	// Returns an error if registration fails (errors are logged but don't stop the server).
	Register(engine *gin.Engine, baseHandler *handlers.BaseAPIHandler, cfg *config.Config) error

	// OnConfigUpdated is called when the configuration is reloaded.
	// Modules can respond to configuration changes here.
	// Returns an error if the update cannot be applied.
	OnConfigUpdated(cfg *config.Config) error
}

// RouteModuleV2 represents a pluggable bundle of routes that can integrate with
// the API server without modifying its core routing logic. Implementations can
// attach routes during Register and react to configuration updates via
// OnConfigUpdated.
//
// This is the preferred interface for new modules. It uses Context for cleaner
// dependency injection and supports idempotent registration.
type RouteModuleV2 interface {
	// Name returns a unique identifier for logging and diagnostics.
	Name() string

	// Register wires the module's routes into the provided Gin engine. Modules
	// should treat multiple calls as idempotent and avoid duplicate route
	// registration when invoked more than once.
	Register(ctx Context) error

	// OnConfigUpdated notifies the module when the server configuration changes
	// via hot reload. Implementations can refresh cached state or emit warnings.
	OnConfigUpdated(cfg *config.Config) error
}

// RegisterModule is a helper that registers a module using either the V1 or V2
// interface. This allows gradual migration from V1 to V2 without breaking
// existing modules.
//
// Example usage:
//
//	ctx := modules.Context{
//		Engine:         engine,
//		BaseHandler:    baseHandler,
//		Config:         cfg,
//		AuthMiddleware: authMiddleware,
//	}
//	if err := modules.RegisterModule(ctx, ampModule); err != nil {
//		log.Errorf("Failed to register module: %v", err)
//	}
func RegisterModule(ctx Context, mod interface{}) error {
	// Try V2 interface first (preferred)
	if v2, ok := mod.(RouteModuleV2); ok {
		return v2.Register(ctx)
	}

	// Fall back to V1 interface for backwards compatibility
	if v1, ok := mod.(RouteModule); ok {
		return v1.Register(ctx.Engine, ctx.BaseHandler, ctx.Config)
	}

	return fmt.Errorf("unsupported module type %T (must implement RouteModule or RouteModuleV2)", mod)
}

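A minimal RouteModuleV2 implementation sketch (illustrative only; the route path and handler are placeholders, and it assumes net/http is imported alongside the packages above):

	type pingModule struct {
		registered bool
	}

	func (m *pingModule) Name() string { return "ping" }

	func (m *pingModule) Register(ctx Context) error {
		if m.registered {
			return nil // keep registration idempotent, as the interface contract asks
		}
		// Protect the route with the resolved auth middleware carried in Context.
		ctx.Engine.GET("/ping", ctx.AuthMiddleware, func(c *gin.Context) {
			c.JSON(http.StatusOK, gin.H{"status": "ok"})
		})
		m.registered = true
		return nil
	}

	func (m *pingModule) OnConfigUpdated(cfg *config.Config) error { return nil }

Such a module would then be passed to modules.RegisterModule with the Context built by the server, exactly as the doc comment above shows for the Amp module.
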
@@ -6,40 +6,63 @@ package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/subtle"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers/claude"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers/gemini"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/access"
|
||||
managementHandlers "github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers/management"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/handlers/openai"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/middleware"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/api/modules"
|
||||
ampmodule "github.com/router-for-me/CLIProxyAPI/v6/internal/api/modules/amp"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/logging"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/managementasset"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/usage"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers/claude"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers/gemini"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers/openai"
|
||||
sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
const oauthCallbackSuccessHTML = `<html><head><meta charset="utf-8"><title>Authentication successful</title><script>setTimeout(function(){window.close();},5000);</script></head><body><h1>Authentication successful!</h1><p>You can close this window.</p><p>This window will close automatically in 5 seconds.</p></body></html>`
|
||||
|
||||
type serverOptionConfig struct {
|
||||
extraMiddleware []gin.HandlerFunc
|
||||
engineConfigurator func(*gin.Engine)
|
||||
routerConfigurator func(*gin.Engine, *handlers.BaseAPIHandler, *config.Config)
|
||||
requestLoggerFactory func(*config.Config, string) logging.RequestLogger
|
||||
localPassword string
|
||||
keepAliveEnabled bool
|
||||
keepAliveTimeout time.Duration
|
||||
keepAliveOnTimeout func()
|
||||
}
|
||||
|
||||
// ServerOption customises HTTP server construction.
|
||||
type ServerOption func(*serverOptionConfig)
|
||||
|
||||
func defaultRequestLoggerFactory(cfg *config.Config, configPath string) logging.RequestLogger {
|
||||
return logging.NewFileRequestLogger(cfg.RequestLog, "logs", filepath.Dir(configPath))
|
||||
configDir := filepath.Dir(configPath)
|
||||
if base := util.WritablePath(); base != "" {
|
||||
return logging.NewFileRequestLogger(cfg.RequestLog, filepath.Join(base, "logs"), configDir, cfg.ErrorLogsMaxFiles)
|
||||
}
|
||||
return logging.NewFileRequestLogger(cfg.RequestLog, "logs", configDir, cfg.ErrorLogsMaxFiles)
|
||||
}
|
||||
|
||||
// WithMiddleware appends additional Gin middleware during server construction.
|
||||
@@ -63,6 +86,25 @@ func WithRouterConfigurator(fn func(*gin.Engine, *handlers.BaseAPIHandler, *conf
|
||||
}
|
||||
}
|
||||
|
||||
// WithLocalManagementPassword stores a runtime-only management password accepted for localhost requests.
|
||||
func WithLocalManagementPassword(password string) ServerOption {
|
||||
return func(cfg *serverOptionConfig) {
|
||||
cfg.localPassword = password
|
||||
}
|
||||
}
|
||||
|
||||
// WithKeepAliveEndpoint enables a keep-alive endpoint with the provided timeout and callback.
|
||||
func WithKeepAliveEndpoint(timeout time.Duration, onTimeout func()) ServerOption {
|
||||
return func(cfg *serverOptionConfig) {
|
||||
if timeout <= 0 || onTimeout == nil {
|
||||
return
|
||||
}
|
||||
cfg.keepAliveEnabled = true
|
||||
cfg.keepAliveTimeout = timeout
|
||||
cfg.keepAliveOnTimeout = onTimeout
|
||||
}
|
||||
}
|
||||
|
||||
// WithRequestLoggerFactory customises request logger creation.
|
||||
func WithRequestLoggerFactory(factory func(*config.Config, string) logging.RequestLogger) ServerOption {
|
||||
return func(cfg *serverOptionConfig) {
|
||||
@@ -85,6 +127,10 @@ type Server struct {
|
||||
// cfg holds the current server configuration.
|
||||
cfg *config.Config
|
||||
|
||||
// oldConfigYaml stores a YAML snapshot of the previous configuration for change detection.
|
||||
// This prevents issues when the config object is modified in place by Management API.
|
||||
oldConfigYaml []byte
|
||||
|
||||
// accessManager handles request authentication providers.
|
||||
accessManager *sdkaccess.Manager
|
||||
|
||||
@@ -95,8 +141,36 @@ type Server struct {
|
||||
// configFilePath is the absolute path to the YAML config file for persistence.
|
||||
configFilePath string
|
||||
|
||||
// currentPath is the absolute path to the current working directory.
|
||||
currentPath string
|
||||
|
||||
// wsRoutes tracks registered websocket upgrade paths.
|
||||
wsRouteMu sync.Mutex
|
||||
wsRoutes map[string]struct{}
|
||||
wsAuthChanged func(bool, bool)
|
||||
wsAuthEnabled atomic.Bool
|
||||
|
||||
// management handler
|
||||
mgmt *managementHandlers.Handler
|
||||
|
||||
// ampModule is the Amp routing module for model mapping hot-reload
|
||||
ampModule *ampmodule.AmpModule
|
||||
|
||||
// managementRoutesRegistered tracks whether the management routes have been attached to the engine.
|
||||
managementRoutesRegistered atomic.Bool
|
||||
// managementRoutesEnabled controls whether management endpoints serve real handlers.
|
||||
managementRoutesEnabled atomic.Bool
|
||||
|
||||
// envManagementSecret indicates whether MANAGEMENT_PASSWORD is configured.
|
||||
envManagementSecret bool
|
||||
|
||||
localPassword string
|
||||
|
||||
keepAliveEnabled bool
|
||||
keepAliveTimeout time.Duration
|
||||
keepAliveOnTimeout func()
|
||||
keepAliveHeartbeat chan struct{}
|
||||
keepAliveStop chan struct{}
|
||||
}
|
||||
|
||||
// NewServer creates and initializes a new API server instance.
|
||||
@@ -138,6 +212,7 @@ func NewServer(cfg *config.Config, authManager *auth.Manager, accessManager *sdk
|
||||
// Resolve logs directory relative to the configuration file directory.
|
||||
var requestLogger logging.RequestLogger
|
||||
var toggle func(bool)
|
||||
if !cfg.CommercialMode {
|
||||
if optionState.requestLoggerFactory != nil {
|
||||
requestLogger = optionState.requestLoggerFactory(cfg, configFilePath)
|
||||
}
|
||||
@@ -147,32 +222,84 @@ func NewServer(cfg *config.Config, authManager *auth.Manager, accessManager *sdk
|
||||
toggle = setter.SetEnabled
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
engine.Use(corsMiddleware())
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
wd = configFilePath
|
||||
}
|
||||
|
||||
envAdminPassword, envAdminPasswordSet := os.LookupEnv("MANAGEMENT_PASSWORD")
|
||||
envAdminPassword = strings.TrimSpace(envAdminPassword)
|
||||
envManagementSecret := envAdminPasswordSet && envAdminPassword != ""
|
||||
|
||||
// Create server instance
|
||||
s := &Server{
|
||||
engine: engine,
|
||||
handlers: handlers.NewBaseAPIHandlers(cfg, authManager),
|
||||
handlers: handlers.NewBaseAPIHandlers(&cfg.SDKConfig, authManager),
|
||||
cfg: cfg,
|
||||
accessManager: accessManager,
|
||||
requestLogger: requestLogger,
|
||||
loggerToggle: toggle,
|
||||
configFilePath: configFilePath,
|
||||
currentPath: wd,
|
||||
envManagementSecret: envManagementSecret,
|
||||
wsRoutes: make(map[string]struct{}),
|
||||
}
|
||||
s.applyAccessConfig(cfg)
|
||||
s.wsAuthEnabled.Store(cfg.WebsocketAuth)
|
||||
// Save initial YAML snapshot
|
||||
s.oldConfigYaml, _ = yaml.Marshal(cfg)
|
||||
s.applyAccessConfig(nil, cfg)
|
||||
if authManager != nil {
|
||||
authManager.SetRetryConfig(cfg.RequestRetry, time.Duration(cfg.MaxRetryInterval)*time.Second)
|
||||
}
|
||||
managementasset.SetCurrentConfig(cfg)
|
||||
auth.SetQuotaCooldownDisabled(cfg.DisableCooling)
|
||||
misc.SetCodexInstructionsEnabled(cfg.CodexInstructionsEnabled)
|
||||
// Initialize management handler
|
||||
s.mgmt = managementHandlers.NewHandler(cfg, configFilePath, authManager)
|
||||
if optionState.localPassword != "" {
|
||||
s.mgmt.SetLocalPassword(optionState.localPassword)
|
||||
}
|
||||
logDir := logging.ResolveLogDirectory(cfg)
|
||||
s.mgmt.SetLogDirectory(logDir)
|
||||
s.localPassword = optionState.localPassword
|
||||
|
||||
// Setup routes
|
||||
s.setupRoutes()
|
||||
|
||||
// Register Amp module using V2 interface with Context
|
||||
s.ampModule = ampmodule.NewLegacy(accessManager, AuthMiddleware(accessManager))
|
||||
ctx := modules.Context{
|
||||
Engine: engine,
|
||||
BaseHandler: s.handlers,
|
||||
Config: cfg,
|
||||
AuthMiddleware: AuthMiddleware(accessManager),
|
||||
}
|
||||
if err := modules.RegisterModule(ctx, s.ampModule); err != nil {
|
||||
log.Errorf("Failed to register Amp module: %v", err)
|
||||
}
|
||||
|
||||
// Apply additional router configurators from options
|
||||
if optionState.routerConfigurator != nil {
|
||||
optionState.routerConfigurator(engine, s.handlers, cfg)
|
||||
}
|
||||
|
||||
// Register management routes when configuration or environment secrets are available.
|
||||
hasManagementSecret := cfg.RemoteManagement.SecretKey != "" || envManagementSecret
|
||||
s.managementRoutesEnabled.Store(hasManagementSecret)
|
||||
if hasManagementSecret {
|
||||
s.registerManagementRoutes()
|
||||
}
|
||||
|
||||
if optionState.keepAliveEnabled {
|
||||
s.enableKeepAlive(optionState.keepAliveTimeout, optionState.keepAliveOnTimeout)
|
||||
}
|
||||
|
||||
// Create HTTP server
|
||||
s.server = &http.Server{
|
||||
Addr: fmt.Sprintf(":%d", cfg.Port),
|
||||
Addr: fmt.Sprintf("%s:%d", cfg.Host, cfg.Port),
|
||||
Handler: engine,
|
||||
}
|
||||
|
||||
@@ -182,6 +309,7 @@ func NewServer(cfg *config.Config, authManager *auth.Manager, accessManager *sdk
|
||||
// setupRoutes configures the API routes for the server.
|
||||
// It defines the endpoints and associates them with their respective handlers.
|
||||
func (s *Server) setupRoutes() {
|
||||
s.engine.GET("/management.html", s.serveManagementControlPanel)
|
||||
openaiHandlers := openai.NewOpenAIAPIHandler(s.handlers)
|
||||
geminiHandlers := gemini.NewGeminiAPIHandler(s.handlers)
|
||||
geminiCLIHandlers := gemini.NewGeminiCLIAPIHandler(s.handlers)
|
||||
@@ -198,6 +326,7 @@ func (s *Server) setupRoutes() {
|
||||
v1.POST("/messages", claudeCodeHandlers.ClaudeMessages)
|
||||
v1.POST("/messages/count_tokens", claudeCodeHandlers.ClaudeCountTokens)
|
||||
v1.POST("/responses", openaiResponsesHandlers.Responses)
|
||||
v1.POST("/responses/compact", openaiResponsesHandlers.Compact)
|
||||
}
|
||||
|
||||
// Gemini compatible API routes
|
||||
@@ -205,15 +334,14 @@ func (s *Server) setupRoutes() {
|
||||
v1beta.Use(AuthMiddleware(s.accessManager))
|
||||
{
|
||||
v1beta.GET("/models", geminiHandlers.GeminiModels)
|
||||
v1beta.POST("/models/:action", geminiHandlers.GeminiHandler)
|
||||
v1beta.GET("/models/:action", geminiHandlers.GeminiGetHandler)
|
||||
v1beta.POST("/models/*action", geminiHandlers.GeminiHandler)
|
||||
v1beta.GET("/models/*action", geminiHandlers.GeminiGetHandler)
|
||||
}
|
||||
|
||||
// Root endpoint
|
||||
s.engine.GET("/", func(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"message": "CLI Proxy API Server",
|
||||
"version": "1.0.0",
|
||||
"endpoints": []string{
|
||||
"POST /v1/chat/completions",
|
||||
"POST /v1/completions",
|
||||
@@ -230,57 +358,160 @@ func (s *Server) setupRoutes() {
|
||||
code := c.Query("code")
|
||||
state := c.Query("state")
|
||||
errStr := c.Query("error")
|
||||
// Persist to a temporary file keyed by state
|
||||
if errStr == "" {
|
||||
errStr = c.Query("error_description")
|
||||
}
|
||||
if state != "" {
|
||||
file := fmt.Sprintf("%s/.oauth-anthropic-%s.oauth", s.cfg.AuthDir, state)
|
||||
_ = os.WriteFile(file, []byte(fmt.Sprintf(`{"code":"%s","state":"%s","error":"%s"}`, code, state, errStr)), 0o600)
|
||||
_, _ = managementHandlers.WriteOAuthCallbackFileForPendingSession(s.cfg.AuthDir, "anthropic", state, code, errStr)
|
||||
}
|
||||
c.Header("Content-Type", "text/html; charset=utf-8")
|
||||
c.String(http.StatusOK, "<html><body><h1>Authentication successful!</h1><p>You can close this window.</p></body></html>")
|
||||
c.String(http.StatusOK, oauthCallbackSuccessHTML)
|
||||
})
|
||||
|
||||
s.engine.GET("/codex/callback", func(c *gin.Context) {
|
||||
code := c.Query("code")
|
||||
state := c.Query("state")
|
||||
errStr := c.Query("error")
|
||||
if errStr == "" {
|
||||
errStr = c.Query("error_description")
|
||||
}
|
||||
if state != "" {
|
||||
file := fmt.Sprintf("%s/.oauth-codex-%s.oauth", s.cfg.AuthDir, state)
|
||||
_ = os.WriteFile(file, []byte(fmt.Sprintf(`{"code":"%s","state":"%s","error":"%s"}`, code, state, errStr)), 0o600)
|
||||
_, _ = managementHandlers.WriteOAuthCallbackFileForPendingSession(s.cfg.AuthDir, "codex", state, code, errStr)
|
||||
}
|
||||
c.Header("Content-Type", "text/html; charset=utf-8")
|
||||
c.String(http.StatusOK, "<html><body><h1>Authentication successful!</h1><p>You can close this window.</p></body></html>")
|
||||
c.String(http.StatusOK, oauthCallbackSuccessHTML)
|
||||
})
|
||||
|
||||
s.engine.GET("/google/callback", func(c *gin.Context) {
|
||||
code := c.Query("code")
|
||||
state := c.Query("state")
|
||||
errStr := c.Query("error")
|
||||
if errStr == "" {
|
||||
errStr = c.Query("error_description")
|
||||
}
|
||||
if state != "" {
|
||||
file := fmt.Sprintf("%s/.oauth-gemini-%s.oauth", s.cfg.AuthDir, state)
|
||||
_ = os.WriteFile(file, []byte(fmt.Sprintf(`{"code":"%s","state":"%s","error":"%s"}`, code, state, errStr)), 0o600)
|
||||
_, _ = managementHandlers.WriteOAuthCallbackFileForPendingSession(s.cfg.AuthDir, "gemini", state, code, errStr)
|
||||
}
|
||||
c.Header("Content-Type", "text/html; charset=utf-8")
|
||||
c.String(http.StatusOK, "<html><body><h1>Authentication successful!</h1><p>You can close this window.</p></body></html>")
|
||||
c.String(http.StatusOK, oauthCallbackSuccessHTML)
|
||||
})
|
||||
|
||||
// Management API routes (delegated to management handlers)
|
||||
// New logic: if remote-management-key is empty, do not expose any management endpoint (404).
|
||||
if s.cfg.RemoteManagement.SecretKey != "" {
|
||||
s.engine.GET("/iflow/callback", func(c *gin.Context) {
|
||||
code := c.Query("code")
|
||||
state := c.Query("state")
|
||||
errStr := c.Query("error")
|
||||
if errStr == "" {
|
||||
errStr = c.Query("error_description")
|
||||
}
|
||||
if state != "" {
|
||||
_, _ = managementHandlers.WriteOAuthCallbackFileForPendingSession(s.cfg.AuthDir, "iflow", state, code, errStr)
|
||||
}
|
||||
c.Header("Content-Type", "text/html; charset=utf-8")
|
||||
c.String(http.StatusOK, oauthCallbackSuccessHTML)
|
||||
})
|
||||
|
||||
s.engine.GET("/antigravity/callback", func(c *gin.Context) {
|
||||
code := c.Query("code")
|
||||
state := c.Query("state")
|
||||
errStr := c.Query("error")
|
||||
if errStr == "" {
|
||||
errStr = c.Query("error_description")
|
||||
}
|
||||
if state != "" {
|
||||
_, _ = managementHandlers.WriteOAuthCallbackFileForPendingSession(s.cfg.AuthDir, "antigravity", state, code, errStr)
|
||||
}
|
||||
c.Header("Content-Type", "text/html; charset=utf-8")
|
||||
c.String(http.StatusOK, oauthCallbackSuccessHTML)
|
||||
})
|
||||
|
||||
// Management routes are registered lazily by registerManagementRoutes when a secret is configured.
|
||||
}
|
||||
|
||||
// AttachWebsocketRoute registers a websocket upgrade handler on the primary Gin engine.
|
||||
// The handler is served as-is without additional middleware beyond the standard stack already configured.
|
||||
func (s *Server) AttachWebsocketRoute(path string, handler http.Handler) {
|
||||
if s == nil || s.engine == nil || handler == nil {
|
||||
return
|
||||
}
|
||||
trimmed := strings.TrimSpace(path)
|
||||
if trimmed == "" {
|
||||
trimmed = "/v1/ws"
|
||||
}
|
||||
if !strings.HasPrefix(trimmed, "/") {
|
||||
trimmed = "/" + trimmed
|
||||
}
|
||||
s.wsRouteMu.Lock()
|
||||
if _, exists := s.wsRoutes[trimmed]; exists {
|
||||
s.wsRouteMu.Unlock()
|
||||
return
|
||||
}
|
||||
s.wsRoutes[trimmed] = struct{}{}
|
||||
s.wsRouteMu.Unlock()
|
||||
|
||||
authMiddleware := AuthMiddleware(s.accessManager)
|
||||
conditionalAuth := func(c *gin.Context) {
|
||||
if !s.wsAuthEnabled.Load() {
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
authMiddleware(c)
|
||||
}
|
||||
finalHandler := func(c *gin.Context) {
|
||||
handler.ServeHTTP(c.Writer, c.Request)
|
||||
c.Abort()
|
||||
}
|
||||
|
||||
s.engine.GET(trimmed, conditionalAuth, finalHandler)
|
||||
}
|
||||
|
||||
func (s *Server) registerManagementRoutes() {
|
||||
if s == nil || s.engine == nil || s.mgmt == nil {
|
||||
return
|
||||
}
|
||||
if !s.managementRoutesRegistered.CompareAndSwap(false, true) {
|
||||
return
|
||||
}
|
||||
|
||||
log.Info("management routes registered after secret key configuration")
|
||||
|
||||
mgmt := s.engine.Group("/v0/management")
|
||||
mgmt.Use(s.mgmt.Middleware())
|
||||
mgmt.Use(s.managementAvailabilityMiddleware(), s.mgmt.Middleware())
|
||||
{
|
||||
mgmt.GET("/usage", s.mgmt.GetUsageStatistics)
|
||||
mgmt.GET("/usage/export", s.mgmt.ExportUsageStatistics)
|
||||
mgmt.POST("/usage/import", s.mgmt.ImportUsageStatistics)
|
||||
mgmt.GET("/config", s.mgmt.GetConfig)
|
||||
mgmt.GET("/config.yaml", s.mgmt.GetConfigYAML)
|
||||
mgmt.PUT("/config.yaml", s.mgmt.PutConfigYAML)
|
||||
mgmt.GET("/latest-version", s.mgmt.GetLatestVersion)
|
||||
|
||||
mgmt.GET("/debug", s.mgmt.GetDebug)
|
||||
mgmt.PUT("/debug", s.mgmt.PutDebug)
|
||||
mgmt.PATCH("/debug", s.mgmt.PutDebug)
|
||||
|
||||
mgmt.GET("/logging-to-file", s.mgmt.GetLoggingToFile)
|
||||
mgmt.PUT("/logging-to-file", s.mgmt.PutLoggingToFile)
|
||||
mgmt.PATCH("/logging-to-file", s.mgmt.PutLoggingToFile)
|
||||
|
||||
mgmt.GET("/logs-max-total-size-mb", s.mgmt.GetLogsMaxTotalSizeMB)
|
||||
mgmt.PUT("/logs-max-total-size-mb", s.mgmt.PutLogsMaxTotalSizeMB)
|
||||
mgmt.PATCH("/logs-max-total-size-mb", s.mgmt.PutLogsMaxTotalSizeMB)
|
||||
|
||||
mgmt.GET("/error-logs-max-files", s.mgmt.GetErrorLogsMaxFiles)
|
||||
mgmt.PUT("/error-logs-max-files", s.mgmt.PutErrorLogsMaxFiles)
|
||||
mgmt.PATCH("/error-logs-max-files", s.mgmt.PutErrorLogsMaxFiles)
|
||||
|
||||
mgmt.GET("/usage-statistics-enabled", s.mgmt.GetUsageStatisticsEnabled)
|
||||
mgmt.PUT("/usage-statistics-enabled", s.mgmt.PutUsageStatisticsEnabled)
|
||||
mgmt.PATCH("/usage-statistics-enabled", s.mgmt.PutUsageStatisticsEnabled)
|
||||
|
||||
mgmt.GET("/proxy-url", s.mgmt.GetProxyURL)
|
||||
mgmt.PUT("/proxy-url", s.mgmt.PutProxyURL)
|
||||
mgmt.PATCH("/proxy-url", s.mgmt.PutProxyURL)
|
||||
mgmt.DELETE("/proxy-url", s.mgmt.DeleteProxyURL)
|
||||
|
||||
mgmt.POST("/api-call", s.mgmt.APICall)
|
||||
|
||||
mgmt.GET("/quota-exceeded/switch-project", s.mgmt.GetSwitchProject)
|
||||
mgmt.PUT("/quota-exceeded/switch-project", s.mgmt.PutSwitchProject)
|
||||
mgmt.PATCH("/quota-exceeded/switch-project", s.mgmt.PutSwitchProject)
|
||||
@@ -294,18 +525,61 @@ func (s *Server) setupRoutes() {
|
||||
mgmt.PATCH("/api-keys", s.mgmt.PatchAPIKeys)
|
||||
mgmt.DELETE("/api-keys", s.mgmt.DeleteAPIKeys)
|
||||
|
||||
mgmt.GET("/generative-language-api-key", s.mgmt.GetGlKeys)
|
||||
mgmt.PUT("/generative-language-api-key", s.mgmt.PutGlKeys)
|
||||
mgmt.PATCH("/generative-language-api-key", s.mgmt.PatchGlKeys)
|
||||
mgmt.DELETE("/generative-language-api-key", s.mgmt.DeleteGlKeys)
|
||||
mgmt.GET("/gemini-api-key", s.mgmt.GetGeminiKeys)
|
||||
mgmt.PUT("/gemini-api-key", s.mgmt.PutGeminiKeys)
|
||||
mgmt.PATCH("/gemini-api-key", s.mgmt.PatchGeminiKey)
|
||||
mgmt.DELETE("/gemini-api-key", s.mgmt.DeleteGeminiKey)
|
||||
|
||||
mgmt.GET("/logs", s.mgmt.GetLogs)
|
||||
mgmt.DELETE("/logs", s.mgmt.DeleteLogs)
|
||||
mgmt.GET("/request-error-logs", s.mgmt.GetRequestErrorLogs)
|
||||
mgmt.GET("/request-error-logs/:name", s.mgmt.DownloadRequestErrorLog)
|
||||
mgmt.GET("/request-log-by-id/:id", s.mgmt.GetRequestLogByID)
|
||||
mgmt.GET("/request-log", s.mgmt.GetRequestLog)
|
||||
mgmt.PUT("/request-log", s.mgmt.PutRequestLog)
|
||||
mgmt.PATCH("/request-log", s.mgmt.PutRequestLog)
|
||||
mgmt.GET("/ws-auth", s.mgmt.GetWebsocketAuth)
|
||||
mgmt.PUT("/ws-auth", s.mgmt.PutWebsocketAuth)
|
||||
mgmt.PATCH("/ws-auth", s.mgmt.PutWebsocketAuth)
|
||||
|
||||
mgmt.GET("/ampcode", s.mgmt.GetAmpCode)
|
||||
mgmt.GET("/ampcode/upstream-url", s.mgmt.GetAmpUpstreamURL)
|
||||
mgmt.PUT("/ampcode/upstream-url", s.mgmt.PutAmpUpstreamURL)
|
||||
mgmt.PATCH("/ampcode/upstream-url", s.mgmt.PutAmpUpstreamURL)
|
||||
mgmt.DELETE("/ampcode/upstream-url", s.mgmt.DeleteAmpUpstreamURL)
|
||||
mgmt.GET("/ampcode/upstream-api-key", s.mgmt.GetAmpUpstreamAPIKey)
|
||||
mgmt.PUT("/ampcode/upstream-api-key", s.mgmt.PutAmpUpstreamAPIKey)
|
||||
mgmt.PATCH("/ampcode/upstream-api-key", s.mgmt.PutAmpUpstreamAPIKey)
|
||||
mgmt.DELETE("/ampcode/upstream-api-key", s.mgmt.DeleteAmpUpstreamAPIKey)
|
||||
mgmt.GET("/ampcode/restrict-management-to-localhost", s.mgmt.GetAmpRestrictManagementToLocalhost)
|
||||
mgmt.PUT("/ampcode/restrict-management-to-localhost", s.mgmt.PutAmpRestrictManagementToLocalhost)
|
||||
mgmt.PATCH("/ampcode/restrict-management-to-localhost", s.mgmt.PutAmpRestrictManagementToLocalhost)
|
||||
mgmt.GET("/ampcode/model-mappings", s.mgmt.GetAmpModelMappings)
|
||||
mgmt.PUT("/ampcode/model-mappings", s.mgmt.PutAmpModelMappings)
|
||||
mgmt.PATCH("/ampcode/model-mappings", s.mgmt.PatchAmpModelMappings)
|
||||
mgmt.DELETE("/ampcode/model-mappings", s.mgmt.DeleteAmpModelMappings)
|
||||
mgmt.GET("/ampcode/force-model-mappings", s.mgmt.GetAmpForceModelMappings)
|
||||
mgmt.PUT("/ampcode/force-model-mappings", s.mgmt.PutAmpForceModelMappings)
|
||||
mgmt.PATCH("/ampcode/force-model-mappings", s.mgmt.PutAmpForceModelMappings)
|
||||
mgmt.GET("/ampcode/upstream-api-keys", s.mgmt.GetAmpUpstreamAPIKeys)
|
||||
mgmt.PUT("/ampcode/upstream-api-keys", s.mgmt.PutAmpUpstreamAPIKeys)
|
||||
mgmt.PATCH("/ampcode/upstream-api-keys", s.mgmt.PatchAmpUpstreamAPIKeys)
|
||||
mgmt.DELETE("/ampcode/upstream-api-keys", s.mgmt.DeleteAmpUpstreamAPIKeys)
|
||||
|
||||
mgmt.GET("/request-retry", s.mgmt.GetRequestRetry)
|
||||
mgmt.PUT("/request-retry", s.mgmt.PutRequestRetry)
|
||||
mgmt.PATCH("/request-retry", s.mgmt.PutRequestRetry)
|
||||
mgmt.GET("/max-retry-interval", s.mgmt.GetMaxRetryInterval)
|
||||
mgmt.PUT("/max-retry-interval", s.mgmt.PutMaxRetryInterval)
|
||||
mgmt.PATCH("/max-retry-interval", s.mgmt.PutMaxRetryInterval)
|
||||
|
||||
mgmt.GET("/force-model-prefix", s.mgmt.GetForceModelPrefix)
|
||||
mgmt.PUT("/force-model-prefix", s.mgmt.PutForceModelPrefix)
|
||||
mgmt.PATCH("/force-model-prefix", s.mgmt.PutForceModelPrefix)
|
||||
|
||||
mgmt.GET("/routing/strategy", s.mgmt.GetRoutingStrategy)
|
||||
mgmt.PUT("/routing/strategy", s.mgmt.PutRoutingStrategy)
|
||||
mgmt.PATCH("/routing/strategy", s.mgmt.PutRoutingStrategy)
|
||||
|
||||
mgmt.GET("/claude-api-key", s.mgmt.GetClaudeKeys)
|
||||
mgmt.PUT("/claude-api-key", s.mgmt.PutClaudeKeys)
|
||||
@@ -322,19 +596,155 @@ func (s *Server) setupRoutes() {
|
||||
mgmt.PATCH("/openai-compatibility", s.mgmt.PatchOpenAICompat)
|
||||
mgmt.DELETE("/openai-compatibility", s.mgmt.DeleteOpenAICompat)
|
||||
|
||||
mgmt.GET("/vertex-api-key", s.mgmt.GetVertexCompatKeys)
|
||||
mgmt.PUT("/vertex-api-key", s.mgmt.PutVertexCompatKeys)
|
||||
mgmt.PATCH("/vertex-api-key", s.mgmt.PatchVertexCompatKey)
|
||||
mgmt.DELETE("/vertex-api-key", s.mgmt.DeleteVertexCompatKey)
|
||||
|
||||
mgmt.GET("/oauth-excluded-models", s.mgmt.GetOAuthExcludedModels)
|
||||
mgmt.PUT("/oauth-excluded-models", s.mgmt.PutOAuthExcludedModels)
|
||||
mgmt.PATCH("/oauth-excluded-models", s.mgmt.PatchOAuthExcludedModels)
|
||||
mgmt.DELETE("/oauth-excluded-models", s.mgmt.DeleteOAuthExcludedModels)
|
||||
|
||||
mgmt.GET("/oauth-model-alias", s.mgmt.GetOAuthModelAlias)
|
||||
mgmt.PUT("/oauth-model-alias", s.mgmt.PutOAuthModelAlias)
|
||||
mgmt.PATCH("/oauth-model-alias", s.mgmt.PatchOAuthModelAlias)
|
||||
mgmt.DELETE("/oauth-model-alias", s.mgmt.DeleteOAuthModelAlias)
|
||||
|
||||
mgmt.GET("/auth-files", s.mgmt.ListAuthFiles)
|
||||
mgmt.GET("/auth-files/models", s.mgmt.GetAuthFileModels)
|
||||
mgmt.GET("/model-definitions/:channel", s.mgmt.GetStaticModelDefinitions)
|
||||
mgmt.GET("/auth-files/download", s.mgmt.DownloadAuthFile)
|
||||
mgmt.POST("/auth-files", s.mgmt.UploadAuthFile)
|
||||
mgmt.DELETE("/auth-files", s.mgmt.DeleteAuthFile)
|
||||
mgmt.PATCH("/auth-files/status", s.mgmt.PatchAuthFileStatus)
|
||||
mgmt.POST("/vertex/import", s.mgmt.ImportVertexCredential)
|
||||
|
||||
mgmt.GET("/anthropic-auth-url", s.mgmt.RequestAnthropicToken)
|
||||
mgmt.GET("/codex-auth-url", s.mgmt.RequestCodexToken)
|
||||
mgmt.GET("/gemini-cli-auth-url", s.mgmt.RequestGeminiCLIToken)
|
||||
mgmt.POST("/gemini-web-token", s.mgmt.CreateGeminiWebToken)
|
||||
mgmt.GET("/antigravity-auth-url", s.mgmt.RequestAntigravityToken)
|
||||
mgmt.GET("/qwen-auth-url", s.mgmt.RequestQwenToken)
|
||||
mgmt.GET("/iflow-auth-url", s.mgmt.RequestIFlowToken)
|
||||
mgmt.POST("/iflow-auth-url", s.mgmt.RequestIFlowCookieToken)
|
||||
mgmt.POST("/oauth-callback", s.mgmt.PostOAuthCallback)
|
||||
mgmt.GET("/get-auth-status", s.mgmt.GetAuthStatus)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) managementAvailabilityMiddleware() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
if !s.managementRoutesEnabled.Load() {
|
||||
c.AbortWithStatus(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) serveManagementControlPanel(c *gin.Context) {
|
||||
cfg := s.cfg
|
||||
if cfg == nil || cfg.RemoteManagement.DisableControlPanel {
|
||||
c.AbortWithStatus(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
filePath := managementasset.FilePath(s.configFilePath)
|
||||
if strings.TrimSpace(filePath) == "" {
|
||||
c.AbortWithStatus(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := os.Stat(filePath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
go managementasset.EnsureLatestManagementHTML(context.Background(), managementasset.StaticDir(s.configFilePath), cfg.ProxyURL, cfg.RemoteManagement.PanelGitHubRepository)
|
||||
c.AbortWithStatus(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
log.WithError(err).Error("failed to stat management control panel asset")
|
||||
c.AbortWithStatus(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
c.File(filePath)
|
||||
}
|
||||
|
||||
func (s *Server) enableKeepAlive(timeout time.Duration, onTimeout func()) {
|
||||
if timeout <= 0 || onTimeout == nil {
|
||||
return
|
||||
}
|
||||
|
||||
s.keepAliveEnabled = true
|
||||
s.keepAliveTimeout = timeout
|
||||
s.keepAliveOnTimeout = onTimeout
|
||||
s.keepAliveHeartbeat = make(chan struct{}, 1)
|
||||
s.keepAliveStop = make(chan struct{}, 1)
|
||||
|
||||
s.engine.GET("/keep-alive", s.handleKeepAlive)
|
||||
|
||||
go s.watchKeepAlive()
|
||||
}
|
||||
|
||||
func (s *Server) handleKeepAlive(c *gin.Context) {
|
||||
if s.localPassword != "" {
|
||||
provided := strings.TrimSpace(c.GetHeader("Authorization"))
|
||||
if provided != "" {
|
||||
parts := strings.SplitN(provided, " ", 2)
|
||||
if len(parts) == 2 && strings.EqualFold(parts[0], "bearer") {
|
||||
provided = parts[1]
|
||||
}
|
||||
}
|
||||
if provided == "" {
|
||||
provided = strings.TrimSpace(c.GetHeader("X-Local-Password"))
|
||||
}
|
||||
if subtle.ConstantTimeCompare([]byte(provided), []byte(s.localPassword)) != 1 {
|
||||
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "invalid password"})
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
s.signalKeepAlive()
|
||||
c.JSON(http.StatusOK, gin.H{"status": "ok"})
|
||||
}
|
||||
|
||||
func (s *Server) signalKeepAlive() {
|
||||
if !s.keepAliveEnabled {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case s.keepAliveHeartbeat <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) watchKeepAlive() {
|
||||
if !s.keepAliveEnabled {
|
||||
return
|
||||
}
|
||||
|
||||
timer := time.NewTimer(s.keepAliveTimeout)
|
||||
defer timer.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-timer.C:
|
||||
log.Warnf("keep-alive endpoint idle for %s, shutting down", s.keepAliveTimeout)
|
||||
if s.keepAliveOnTimeout != nil {
|
||||
s.keepAliveOnTimeout()
|
||||
}
|
||||
return
|
||||
case <-s.keepAliveHeartbeat:
|
||||
if !timer.Stop() {
|
||||
select {
|
||||
case <-timer.C:
|
||||
default:
|
||||
}
|
||||
}
|
||||
timer.Reset(s.keepAliveTimeout)
|
||||
case <-s.keepAliveStop:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// unifiedModelsHandler creates a unified handler for the /v1/models endpoint
|
||||
@@ -356,17 +766,33 @@ func (s *Server) unifiedModelsHandler(openaiHandler *openai.OpenAIAPIHandler, cl
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins listening for and serving HTTP requests.
|
||||
// Start begins listening for and serving HTTP or HTTPS requests.
|
||||
// It's a blocking call and will only return on an unrecoverable error.
|
||||
//
|
||||
// Returns:
|
||||
// - error: An error if the server fails to start
|
||||
func (s *Server) Start() error {
|
||||
log.Debugf("Starting API server on %s", s.server.Addr)
|
||||
if s == nil || s.server == nil {
|
||||
return fmt.Errorf("failed to start HTTP server: server not initialized")
|
||||
}
|
||||
|
||||
// Start the HTTP server.
|
||||
if err := s.server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||
return fmt.Errorf("failed to start HTTP server: %v", err)
|
||||
useTLS := s.cfg != nil && s.cfg.TLS.Enable
|
||||
if useTLS {
|
||||
cert := strings.TrimSpace(s.cfg.TLS.Cert)
|
||||
key := strings.TrimSpace(s.cfg.TLS.Key)
|
||||
if cert == "" || key == "" {
|
||||
return fmt.Errorf("failed to start HTTPS server: tls.cert or tls.key is empty")
|
||||
}
|
||||
log.Debugf("Starting API server on %s with TLS", s.server.Addr)
|
||||
if errServeTLS := s.server.ListenAndServeTLS(cert, key); errServeTLS != nil && !errors.Is(errServeTLS, http.ErrServerClosed) {
|
||||
return fmt.Errorf("failed to start HTTPS server: %v", errServeTLS)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Debugf("Starting API server on %s", s.server.Addr)
|
||||
if errServe := s.server.ListenAndServe(); errServe != nil && !errors.Is(errServe, http.ErrServerClosed) {
|
||||
return fmt.Errorf("failed to start HTTP server: %v", errServe)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -383,6 +809,13 @@ func (s *Server) Start() error {
|
||||
func (s *Server) Stop(ctx context.Context) error {
|
||||
log.Debug("Stopping API server...")
|
||||
|
||||
if s.keepAliveEnabled {
|
||||
select {
|
||||
case s.keepAliveStop <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown the HTTP server.
|
||||
if err := s.server.Shutdown(ctx); err != nil {
|
||||
return fmt.Errorf("failed to shutdown HTTP server: %v", err)
|
||||
@@ -400,7 +833,7 @@ func (s *Server) Stop(ctx context.Context) error {
|
||||
func corsMiddleware() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
c.Header("Access-Control-Allow-Origin", "*")
|
||||
c.Header("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
|
||||
c.Header("Access-Control-Allow-Methods", "GET, POST, PUT, PATCH, DELETE, OPTIONS")
|
||||
c.Header("Access-Control-Allow-Headers", "*")
|
||||
|
||||
if c.Request.Method == "OPTIONS" {
|
||||
@@ -412,16 +845,13 @@ func corsMiddleware() gin.HandlerFunc {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) applyAccessConfig(cfg *config.Config) {
|
||||
if s == nil || s.accessManager == nil {
|
||||
func (s *Server) applyAccessConfig(oldCfg, newCfg *config.Config) {
|
||||
if s == nil || s.accessManager == nil || newCfg == nil {
|
||||
return
|
||||
}
|
||||
providers, err := sdkaccess.BuildProviders(cfg)
|
||||
if err != nil {
|
||||
log.Errorf("failed to update request auth providers: %v", err)
|
||||
if _, err := access.ApplyAccessProviders(s.accessManager, oldCfg, newCfg); err != nil {
|
||||
return
|
||||
}
|
||||
s.accessManager.SetProviders(providers)
|
||||
}
|
||||
|
||||
// UpdateClients updates the server's client list and configuration.
|
||||
@@ -431,51 +861,198 @@ func (s *Server) applyAccessConfig(cfg *config.Config) {
|
||||
// - clients: The new slice of AI service clients
|
||||
// - cfg: The new application configuration
|
||||
func (s *Server) UpdateClients(cfg *config.Config) {
|
||||
// Reconstruct old config from YAML snapshot to avoid reference sharing issues
|
||||
var oldCfg *config.Config
|
||||
if len(s.oldConfigYaml) > 0 {
|
||||
_ = yaml.Unmarshal(s.oldConfigYaml, &oldCfg)
|
||||
}
|
||||
|
||||
// Update request logger enabled state if it has changed
|
||||
if s.requestLogger != nil && s.cfg.RequestLog != cfg.RequestLog {
|
||||
previousRequestLog := false
|
||||
if oldCfg != nil {
|
||||
previousRequestLog = oldCfg.RequestLog
|
||||
}
|
||||
if s.requestLogger != nil && (oldCfg == nil || previousRequestLog != cfg.RequestLog) {
|
||||
if s.loggerToggle != nil {
|
||||
s.loggerToggle(cfg.RequestLog)
|
||||
} else if toggler, ok := s.requestLogger.(interface{ SetEnabled(bool) }); ok {
|
||||
toggler.SetEnabled(cfg.RequestLog)
|
||||
}
|
||||
log.Debugf("request logging updated from %t to %t", s.cfg.RequestLog, cfg.RequestLog)
|
||||
if oldCfg != nil {
|
||||
log.Debugf("request logging updated from %t to %t", previousRequestLog, cfg.RequestLog)
|
||||
} else {
|
||||
log.Debugf("request logging toggled to %t", cfg.RequestLog)
|
||||
}
|
||||
}
|
||||
|
||||
if oldCfg == nil || oldCfg.LoggingToFile != cfg.LoggingToFile || oldCfg.LogsMaxTotalSizeMB != cfg.LogsMaxTotalSizeMB {
|
||||
if err := logging.ConfigureLogOutput(cfg); err != nil {
|
||||
log.Errorf("failed to reconfigure log output: %v", err)
|
||||
} else {
|
||||
if oldCfg == nil {
|
||||
log.Debug("log output configuration refreshed")
|
||||
} else {
|
||||
if oldCfg.LoggingToFile != cfg.LoggingToFile {
|
||||
log.Debugf("logging_to_file updated from %t to %t", oldCfg.LoggingToFile, cfg.LoggingToFile)
|
||||
}
|
||||
if oldCfg.LogsMaxTotalSizeMB != cfg.LogsMaxTotalSizeMB {
|
||||
log.Debugf("logs_max_total_size_mb updated from %d to %d", oldCfg.LogsMaxTotalSizeMB, cfg.LogsMaxTotalSizeMB)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if oldCfg == nil || oldCfg.UsageStatisticsEnabled != cfg.UsageStatisticsEnabled {
|
||||
usage.SetStatisticsEnabled(cfg.UsageStatisticsEnabled)
|
||||
if oldCfg != nil {
|
||||
log.Debugf("usage_statistics_enabled updated from %t to %t", oldCfg.UsageStatisticsEnabled, cfg.UsageStatisticsEnabled)
|
||||
} else {
|
||||
log.Debugf("usage_statistics_enabled toggled to %t", cfg.UsageStatisticsEnabled)
|
||||
}
|
||||
}
|
||||
|
||||
if s.requestLogger != nil && (oldCfg == nil || oldCfg.ErrorLogsMaxFiles != cfg.ErrorLogsMaxFiles) {
|
||||
if setter, ok := s.requestLogger.(interface{ SetErrorLogsMaxFiles(int) }); ok {
|
||||
setter.SetErrorLogsMaxFiles(cfg.ErrorLogsMaxFiles)
|
||||
}
|
||||
if oldCfg != nil {
|
||||
log.Debugf("error_logs_max_files updated from %d to %d", oldCfg.ErrorLogsMaxFiles, cfg.ErrorLogsMaxFiles)
|
||||
}
|
||||
}
|
||||
|
||||
if oldCfg == nil || oldCfg.DisableCooling != cfg.DisableCooling {
|
||||
auth.SetQuotaCooldownDisabled(cfg.DisableCooling)
|
||||
if oldCfg != nil {
|
||||
log.Debugf("disable_cooling updated from %t to %t", oldCfg.DisableCooling, cfg.DisableCooling)
|
||||
} else {
|
||||
log.Debugf("disable_cooling toggled to %t", cfg.DisableCooling)
|
||||
}
|
||||
}
|
||||
|
||||
if oldCfg == nil || oldCfg.CodexInstructionsEnabled != cfg.CodexInstructionsEnabled {
|
||||
misc.SetCodexInstructionsEnabled(cfg.CodexInstructionsEnabled)
|
||||
if oldCfg != nil {
|
||||
log.Debugf("codex_instructions_enabled updated from %t to %t", oldCfg.CodexInstructionsEnabled, cfg.CodexInstructionsEnabled)
|
||||
} else {
|
||||
log.Debugf("codex_instructions_enabled toggled to %t", cfg.CodexInstructionsEnabled)
|
||||
}
|
||||
}
|
||||
|
||||
if s.handlers != nil && s.handlers.AuthManager != nil {
|
||||
s.handlers.AuthManager.SetRetryConfig(cfg.RequestRetry, time.Duration(cfg.MaxRetryInterval)*time.Second)
|
||||
}
|
||||
|
||||
// Update log level dynamically when debug flag changes
|
||||
if s.cfg.Debug != cfg.Debug {
|
||||
if oldCfg == nil || oldCfg.Debug != cfg.Debug {
|
||||
util.SetLogLevel(cfg)
|
||||
log.Debugf("debug mode updated from %t to %t", s.cfg.Debug, cfg.Debug)
|
||||
if oldCfg != nil {
|
||||
log.Debugf("debug mode updated from %t to %t", oldCfg.Debug, cfg.Debug)
|
||||
} else {
|
||||
log.Debugf("debug mode toggled to %t", cfg.Debug)
|
||||
}
|
||||
}
|
||||
|
||||
prevSecretEmpty := true
|
||||
if oldCfg != nil {
|
||||
prevSecretEmpty = oldCfg.RemoteManagement.SecretKey == ""
|
||||
}
|
||||
newSecretEmpty := cfg.RemoteManagement.SecretKey == ""
|
||||
if s.envManagementSecret {
|
||||
s.registerManagementRoutes()
|
||||
if s.managementRoutesEnabled.CompareAndSwap(false, true) {
|
||||
log.Info("management routes enabled via MANAGEMENT_PASSWORD")
|
||||
} else {
|
||||
s.managementRoutesEnabled.Store(true)
|
||||
}
|
||||
} else {
|
||||
switch {
|
||||
case prevSecretEmpty && !newSecretEmpty:
|
||||
s.registerManagementRoutes()
|
||||
if s.managementRoutesEnabled.CompareAndSwap(false, true) {
|
||||
log.Info("management routes enabled after secret key update")
|
||||
} else {
|
||||
s.managementRoutesEnabled.Store(true)
|
||||
}
|
||||
case !prevSecretEmpty && newSecretEmpty:
|
||||
if s.managementRoutesEnabled.CompareAndSwap(true, false) {
|
||||
log.Info("management routes disabled after secret key removal")
|
||||
} else {
|
||||
s.managementRoutesEnabled.Store(false)
|
||||
}
|
||||
default:
|
||||
s.managementRoutesEnabled.Store(!newSecretEmpty)
|
||||
}
|
||||
}
|
||||
|
||||
s.applyAccessConfig(oldCfg, cfg)
|
||||
s.cfg = cfg
|
||||
s.handlers.UpdateClients(cfg)
|
||||
s.wsAuthEnabled.Store(cfg.WebsocketAuth)
|
||||
if oldCfg != nil && s.wsAuthChanged != nil && oldCfg.WebsocketAuth != cfg.WebsocketAuth {
|
||||
s.wsAuthChanged(oldCfg.WebsocketAuth, cfg.WebsocketAuth)
|
||||
}
|
||||
managementasset.SetCurrentConfig(cfg)
|
||||
// Save YAML snapshot for next comparison
|
||||
s.oldConfigYaml, _ = yaml.Marshal(cfg)
|
||||
|
||||
s.handlers.UpdateClients(&cfg.SDKConfig)
|
||||
|
||||
if !cfg.RemoteManagement.DisableControlPanel {
|
||||
staticDir := managementasset.StaticDir(s.configFilePath)
|
||||
go managementasset.EnsureLatestManagementHTML(context.Background(), staticDir, cfg.ProxyURL, cfg.RemoteManagement.PanelGitHubRepository)
|
||||
}
|
||||
if s.mgmt != nil {
|
||||
s.mgmt.SetConfig(cfg)
|
||||
s.mgmt.SetAuthManager(s.handlers.AuthManager)
|
||||
}
|
||||
s.applyAccessConfig(cfg)
|
||||
|
||||
// Count client sources from configuration and auth directory
|
||||
authFiles := util.CountAuthFiles(cfg.AuthDir)
|
||||
glAPIKeyCount := len(cfg.GlAPIKey)
|
||||
claudeAPIKeyCount := len(cfg.ClaudeKey)
|
||||
codexAPIKeyCount := len(cfg.CodexKey)
|
||||
openAICompatCount := 0
|
||||
for i := range cfg.OpenAICompatibility {
|
||||
openAICompatCount += len(cfg.OpenAICompatibility[i].APIKeys)
|
||||
// Notify Amp module when Amp config or OAuth model aliases have changed.
|
||||
ampConfigChanged := oldCfg == nil || !reflect.DeepEqual(oldCfg.AmpCode, cfg.AmpCode) || !reflect.DeepEqual(oldCfg.OAuthModelAlias, cfg.OAuthModelAlias)
|
||||
if ampConfigChanged {
|
||||
if s.ampModule != nil {
|
||||
log.Debugf("triggering amp module config update")
|
||||
if err := s.ampModule.OnConfigUpdated(cfg); err != nil {
|
||||
log.Errorf("failed to update Amp module config: %v", err)
|
||||
}
|
||||
} else {
|
||||
log.Warnf("amp module is nil, skipping config update")
|
||||
}
|
||||
}
|
||||
|
||||
total := authFiles + glAPIKeyCount + claudeAPIKeyCount + codexAPIKeyCount + openAICompatCount
|
||||
log.Infof("server clients and configuration updated: %d clients (%d auth files + %d GL API keys + %d Claude API keys + %d Codex keys + %d OpenAI-compat)",
|
||||
// Count client sources from configuration and auth store.
|
||||
tokenStore := sdkAuth.GetTokenStore()
|
||||
if dirSetter, ok := tokenStore.(interface{ SetBaseDir(string) }); ok {
|
||||
dirSetter.SetBaseDir(cfg.AuthDir)
|
||||
}
|
||||
authEntries := util.CountAuthFiles(context.Background(), tokenStore)
|
||||
geminiAPIKeyCount := len(cfg.GeminiKey)
|
||||
claudeAPIKeyCount := len(cfg.ClaudeKey)
|
||||
codexAPIKeyCount := len(cfg.CodexKey)
|
||||
vertexAICompatCount := len(cfg.VertexCompatAPIKey)
|
||||
openAICompatCount := 0
|
||||
for i := range cfg.OpenAICompatibility {
|
||||
entry := cfg.OpenAICompatibility[i]
|
||||
openAICompatCount += len(entry.APIKeyEntries)
|
||||
}
|
||||
|
||||
total := authEntries + geminiAPIKeyCount + claudeAPIKeyCount + codexAPIKeyCount + vertexAICompatCount + openAICompatCount
|
||||
fmt.Printf("server clients and configuration updated: %d clients (%d auth entries + %d Gemini API keys + %d Claude API keys + %d Codex keys + %d Vertex-compat + %d OpenAI-compat)\n",
|
||||
total,
|
||||
authFiles,
|
||||
glAPIKeyCount,
|
||||
authEntries,
|
||||
geminiAPIKeyCount,
|
||||
claudeAPIKeyCount,
|
||||
codexAPIKeyCount,
|
||||
vertexAICompatCount,
|
||||
openAICompatCount,
|
||||
)
|
||||
}
|
||||
|
||||
func (s *Server) SetWebsocketAuthChangeHandler(fn func(bool, bool)) {
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
s.wsAuthChanged = fn
|
||||
}
|
||||
|
||||
// (management handlers moved to internal/api/handlers/management)
|
||||
|
||||
// AuthMiddleware returns a Gin middleware handler that authenticates requests
|
||||
@@ -512,5 +1089,3 @@ func AuthMiddleware(manager *sdkaccess.Manager) gin.HandlerFunc {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// legacy clientsToSlice removed; handlers no longer consume legacy client slices
|
||||
|
||||
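An illustrative heartbeat client for the keep-alive endpoint added above (the base URL, interval, and password value are placeholders; only the /keep-alive path and the Authorization / X-Local-Password handling come from handleKeepAlive, and net/http plus time are assumed imports):

	func pingKeepAlive(baseURL, localPassword string) {
		for range time.Tick(30 * time.Second) {
			req, err := http.NewRequest(http.MethodGet, baseURL+"/keep-alive", nil)
			if err != nil {
				return
			}
			if localPassword != "" {
				req.Header.Set("Authorization", "Bearer "+localPassword)
			}
			if resp, err := http.DefaultClient.Do(req); err == nil {
				resp.Body.Close() // each 200 {"status":"ok"} response resets the server's idle timer
			}
		}
	}
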
internal/api/server_test.go (new file, 111 lines)
@@ -0,0 +1,111 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
gin "github.com/gin-gonic/gin"
|
||||
proxyconfig "github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
sdkaccess "github.com/router-for-me/CLIProxyAPI/v6/sdk/access"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
|
||||
sdkconfig "github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
|
||||
)
|
||||
|
||||
func newTestServer(t *testing.T) *Server {
|
||||
t.Helper()
|
||||
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
authDir := filepath.Join(tmpDir, "auth")
|
||||
if err := os.MkdirAll(authDir, 0o700); err != nil {
|
||||
t.Fatalf("failed to create auth dir: %v", err)
|
||||
}
|
||||
|
||||
cfg := &proxyconfig.Config{
|
||||
SDKConfig: sdkconfig.SDKConfig{
|
||||
APIKeys: []string{"test-key"},
|
||||
},
|
||||
Port: 0,
|
||||
AuthDir: authDir,
|
||||
Debug: true,
|
||||
LoggingToFile: false,
|
||||
UsageStatisticsEnabled: false,
|
||||
}
|
||||
|
||||
authManager := auth.NewManager(nil, nil, nil)
|
||||
accessManager := sdkaccess.NewManager()
|
||||
|
||||
configPath := filepath.Join(tmpDir, "config.yaml")
|
||||
return NewServer(cfg, authManager, accessManager, configPath)
|
||||
}
|
||||
|
||||
func TestAmpProviderModelRoutes(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
path string
|
||||
wantStatus int
|
||||
wantContains string
|
||||
}{
|
||||
{
|
||||
name: "openai root models",
|
||||
path: "/api/provider/openai/models",
|
||||
wantStatus: http.StatusOK,
|
||||
wantContains: `"object":"list"`,
|
||||
},
|
||||
{
|
||||
name: "groq root models",
|
||||
path: "/api/provider/groq/models",
|
||||
wantStatus: http.StatusOK,
|
||||
wantContains: `"object":"list"`,
|
||||
},
|
||||
{
|
||||
name: "openai models",
|
||||
path: "/api/provider/openai/v1/models",
|
||||
wantStatus: http.StatusOK,
|
||||
wantContains: `"object":"list"`,
|
||||
},
|
||||
{
|
||||
name: "anthropic models",
|
||||
path: "/api/provider/anthropic/v1/models",
|
||||
wantStatus: http.StatusOK,
|
||||
wantContains: `"data"`,
|
||||
},
|
||||
{
|
||||
name: "google models v1",
|
||||
path: "/api/provider/google/v1/models",
|
||||
wantStatus: http.StatusOK,
|
||||
wantContains: `"models"`,
|
||||
},
|
||||
{
|
||||
name: "google models v1beta",
|
||||
path: "/api/provider/google/v1beta/models",
|
||||
wantStatus: http.StatusOK,
|
||||
wantContains: `"models"`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
server := newTestServer(t)
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, tc.path, nil)
|
||||
req.Header.Set("Authorization", "Bearer test-key")
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
server.engine.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != tc.wantStatus {
|
||||
t.Fatalf("unexpected status code for %s: got %d want %d; body=%s", tc.path, rr.Code, tc.wantStatus, rr.Body.String())
|
||||
}
|
||||
if body := rr.Body.String(); !strings.Contains(body, tc.wantContains) {
|
||||
t.Fatalf("response body for %s missing %q: %s", tc.path, tc.wantContains, body)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
344
internal/auth/antigravity/auth.go
Normal file
@@ -0,0 +1,344 @@
|
||||
// Package antigravity provides OAuth2 authentication functionality for the Antigravity provider.
|
||||
package antigravity
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// TokenResponse represents OAuth token response from Google
|
||||
type TokenResponse struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn int64 `json:"expires_in"`
|
||||
TokenType string `json:"token_type"`
|
||||
}
|
||||
|
||||
// userInfo represents Google user profile
|
||||
type userInfo struct {
|
||||
Email string `json:"email"`
|
||||
}
|
||||
|
||||
// AntigravityAuth handles Antigravity OAuth authentication
|
||||
type AntigravityAuth struct {
|
||||
httpClient *http.Client
|
||||
}
|
||||
|
||||
// NewAntigravityAuth creates a new Antigravity auth service.
|
||||
func NewAntigravityAuth(cfg *config.Config, httpClient *http.Client) *AntigravityAuth {
|
||||
if httpClient != nil {
|
||||
return &AntigravityAuth{httpClient: httpClient}
|
||||
}
|
||||
if cfg == nil {
|
||||
cfg = &config.Config{}
|
||||
}
|
||||
return &AntigravityAuth{
|
||||
httpClient: util.SetProxy(&cfg.SDKConfig, &http.Client{}),
|
||||
}
|
||||
}
|
||||
|
||||
// BuildAuthURL generates the OAuth authorization URL.
|
||||
func (o *AntigravityAuth) BuildAuthURL(state, redirectURI string) string {
|
||||
if strings.TrimSpace(redirectURI) == "" {
|
||||
redirectURI = fmt.Sprintf("http://localhost:%d/oauth-callback", CallbackPort)
|
||||
}
|
||||
params := url.Values{}
|
||||
params.Set("access_type", "offline")
|
||||
params.Set("client_id", ClientID)
|
||||
params.Set("prompt", "consent")
|
||||
params.Set("redirect_uri", redirectURI)
|
||||
params.Set("response_type", "code")
|
||||
params.Set("scope", strings.Join(Scopes, " "))
|
||||
params.Set("state", state)
|
||||
return AuthEndpoint + "?" + params.Encode()
|
||||
}
|
||||
|
||||
// ExchangeCodeForTokens exchanges authorization code for access and refresh tokens
|
||||
func (o *AntigravityAuth) ExchangeCodeForTokens(ctx context.Context, code, redirectURI string) (*TokenResponse, error) {
|
||||
data := url.Values{}
|
||||
data.Set("code", code)
|
||||
data.Set("client_id", ClientID)
|
||||
data.Set("client_secret", ClientSecret)
|
||||
data.Set("redirect_uri", redirectURI)
|
||||
data.Set("grant_type", "authorization_code")
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, TokenEndpoint, strings.NewReader(data.Encode()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("antigravity token exchange: create request: %w", err)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
|
||||
resp, errDo := o.httpClient.Do(req)
|
||||
if errDo != nil {
|
||||
return nil, fmt.Errorf("antigravity token exchange: execute request: %w", errDo)
|
||||
}
|
||||
defer func() {
|
||||
if errClose := resp.Body.Close(); errClose != nil {
|
||||
log.Errorf("antigravity token exchange: close body error: %v", errClose)
|
||||
}
|
||||
}()
|
||||
|
||||
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
|
||||
bodyBytes, errRead := io.ReadAll(io.LimitReader(resp.Body, 8<<10))
|
||||
if errRead != nil {
|
||||
return nil, fmt.Errorf("antigravity token exchange: read response: %w", errRead)
|
||||
}
|
||||
body := strings.TrimSpace(string(bodyBytes))
|
||||
if body == "" {
|
||||
return nil, fmt.Errorf("antigravity token exchange: request failed: status %d", resp.StatusCode)
|
||||
}
|
||||
return nil, fmt.Errorf("antigravity token exchange: request failed: status %d: %s", resp.StatusCode, body)
|
||||
}
|
||||
|
||||
var token TokenResponse
|
||||
if errDecode := json.NewDecoder(resp.Body).Decode(&token); errDecode != nil {
|
||||
return nil, fmt.Errorf("antigravity token exchange: decode response: %w", errDecode)
|
||||
}
|
||||
return &token, nil
|
||||
}
|
||||
|
||||
// FetchUserInfo retrieves user email from Google
|
||||
func (o *AntigravityAuth) FetchUserInfo(ctx context.Context, accessToken string) (string, error) {
|
||||
accessToken = strings.TrimSpace(accessToken)
|
||||
if accessToken == "" {
|
||||
return "", fmt.Errorf("antigravity userinfo: missing access token")
|
||||
}
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, UserInfoEndpoint, nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("antigravity userinfo: create request: %w", err)
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+accessToken)
|
||||
|
||||
resp, errDo := o.httpClient.Do(req)
|
||||
if errDo != nil {
|
||||
return "", fmt.Errorf("antigravity userinfo: execute request: %w", errDo)
|
||||
}
|
||||
defer func() {
|
||||
if errClose := resp.Body.Close(); errClose != nil {
|
||||
log.Errorf("antigravity userinfo: close body error: %v", errClose)
|
||||
}
|
||||
}()
|
||||
|
||||
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
|
||||
bodyBytes, errRead := io.ReadAll(io.LimitReader(resp.Body, 8<<10))
|
||||
if errRead != nil {
|
||||
return "", fmt.Errorf("antigravity userinfo: read response: %w", errRead)
|
||||
}
|
||||
body := strings.TrimSpace(string(bodyBytes))
|
||||
if body == "" {
|
||||
return "", fmt.Errorf("antigravity userinfo: request failed: status %d", resp.StatusCode)
|
||||
}
|
||||
return "", fmt.Errorf("antigravity userinfo: request failed: status %d: %s", resp.StatusCode, body)
|
||||
}
|
||||
var info userInfo
|
||||
if errDecode := json.NewDecoder(resp.Body).Decode(&info); errDecode != nil {
|
||||
return "", fmt.Errorf("antigravity userinfo: decode response: %w", errDecode)
|
||||
}
|
||||
email := strings.TrimSpace(info.Email)
|
||||
if email == "" {
|
||||
return "", fmt.Errorf("antigravity userinfo: response missing email")
|
||||
}
|
||||
return email, nil
|
||||
}
|
||||
|
||||
// FetchProjectID retrieves the project ID for the authenticated user via loadCodeAssist
|
||||
func (o *AntigravityAuth) FetchProjectID(ctx context.Context, accessToken string) (string, error) {
|
||||
loadReqBody := map[string]any{
|
||||
"metadata": map[string]string{
|
||||
"ideType": "ANTIGRAVITY",
|
||||
"platform": "PLATFORM_UNSPECIFIED",
|
||||
"pluginType": "GEMINI",
|
||||
},
|
||||
}
|
||||
|
||||
rawBody, errMarshal := json.Marshal(loadReqBody)
|
||||
if errMarshal != nil {
|
||||
return "", fmt.Errorf("marshal request body: %w", errMarshal)
|
||||
}
|
||||
|
||||
endpointURL := fmt.Sprintf("%s/%s:loadCodeAssist", APIEndpoint, APIVersion)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpointURL, strings.NewReader(string(rawBody)))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("create request: %w", err)
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+accessToken)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("User-Agent", APIUserAgent)
|
||||
req.Header.Set("X-Goog-Api-Client", APIClient)
|
||||
req.Header.Set("Client-Metadata", ClientMetadata)
|
||||
|
||||
resp, errDo := o.httpClient.Do(req)
|
||||
if errDo != nil {
|
||||
return "", fmt.Errorf("execute request: %w", errDo)
|
||||
}
|
||||
defer func() {
|
||||
if errClose := resp.Body.Close(); errClose != nil {
|
||||
log.Errorf("antigravity loadCodeAssist: close body error: %v", errClose)
|
||||
}
|
||||
}()
|
||||
|
||||
bodyBytes, errRead := io.ReadAll(resp.Body)
|
||||
if errRead != nil {
|
||||
return "", fmt.Errorf("read response: %w", errRead)
|
||||
}
|
||||
|
||||
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
|
||||
return "", fmt.Errorf("request failed with status %d: %s", resp.StatusCode, strings.TrimSpace(string(bodyBytes)))
|
||||
}
|
||||
|
||||
var loadResp map[string]any
|
||||
if errDecode := json.Unmarshal(bodyBytes, &loadResp); errDecode != nil {
|
||||
return "", fmt.Errorf("decode response: %w", errDecode)
|
||||
}
|
||||
|
||||
// Extract projectID from response
|
||||
projectID := ""
|
||||
if id, ok := loadResp["cloudaicompanionProject"].(string); ok {
|
||||
projectID = strings.TrimSpace(id)
|
||||
}
|
||||
if projectID == "" {
|
||||
if projectMap, ok := loadResp["cloudaicompanionProject"].(map[string]any); ok {
|
||||
if id, okID := projectMap["id"].(string); okID {
|
||||
projectID = strings.TrimSpace(id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if projectID == "" {
|
||||
tierID := "legacy-tier"
|
||||
if tiers, okTiers := loadResp["allowedTiers"].([]any); okTiers {
|
||||
for _, rawTier := range tiers {
|
||||
tier, okTier := rawTier.(map[string]any)
|
||||
if !okTier {
|
||||
continue
|
||||
}
|
||||
if isDefault, okDefault := tier["isDefault"].(bool); okDefault && isDefault {
|
||||
if id, okID := tier["id"].(string); okID && strings.TrimSpace(id) != "" {
|
||||
tierID = strings.TrimSpace(id)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
projectID, err = o.OnboardUser(ctx, accessToken, tierID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return projectID, nil
|
||||
}
|
||||
|
||||
return projectID, nil
|
||||
}
|
||||
|
||||
// OnboardUser attempts to fetch the project ID via onboardUser by polling for completion
|
||||
func (o *AntigravityAuth) OnboardUser(ctx context.Context, accessToken, tierID string) (string, error) {
|
||||
log.Infof("Antigravity: onboarding user with tier: %s", tierID)
|
||||
requestBody := map[string]any{
|
||||
"tierId": tierID,
|
||||
"metadata": map[string]string{
|
||||
"ideType": "ANTIGRAVITY",
|
||||
"platform": "PLATFORM_UNSPECIFIED",
|
||||
"pluginType": "GEMINI",
|
||||
},
|
||||
}
|
||||
|
||||
rawBody, errMarshal := json.Marshal(requestBody)
|
||||
if errMarshal != nil {
|
||||
return "", fmt.Errorf("marshal request body: %w", errMarshal)
|
||||
}
|
||||
|
||||
maxAttempts := 5
|
||||
for attempt := 1; attempt <= maxAttempts; attempt++ {
|
||||
log.Debugf("Polling attempt %d/%d", attempt, maxAttempts)
|
||||
|
||||
reqCtx := ctx
|
||||
var cancel context.CancelFunc
|
||||
if reqCtx == nil {
|
||||
reqCtx = context.Background()
|
||||
}
|
||||
reqCtx, cancel = context.WithTimeout(reqCtx, 30*time.Second)
|
||||
|
||||
endpointURL := fmt.Sprintf("%s/%s:onboardUser", APIEndpoint, APIVersion)
|
||||
req, errRequest := http.NewRequestWithContext(reqCtx, http.MethodPost, endpointURL, strings.NewReader(string(rawBody)))
|
||||
if errRequest != nil {
|
||||
cancel()
|
||||
return "", fmt.Errorf("create request: %w", errRequest)
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+accessToken)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("User-Agent", APIUserAgent)
|
||||
req.Header.Set("X-Goog-Api-Client", APIClient)
|
||||
req.Header.Set("Client-Metadata", ClientMetadata)
|
||||
|
||||
resp, errDo := o.httpClient.Do(req)
|
||||
if errDo != nil {
|
||||
cancel()
|
||||
return "", fmt.Errorf("execute request: %w", errDo)
|
||||
}
|
||||
|
||||
bodyBytes, errRead := io.ReadAll(resp.Body)
|
||||
if errClose := resp.Body.Close(); errClose != nil {
|
||||
log.Errorf("close body error: %v", errClose)
|
||||
}
|
||||
cancel()
|
||||
|
||||
if errRead != nil {
|
||||
return "", fmt.Errorf("read response: %w", errRead)
|
||||
}
|
||||
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
var data map[string]any
|
||||
if errDecode := json.Unmarshal(bodyBytes, &data); errDecode != nil {
|
||||
return "", fmt.Errorf("decode response: %w", errDecode)
|
||||
}
|
||||
|
||||
if done, okDone := data["done"].(bool); okDone && done {
|
||||
projectID := ""
|
||||
if responseData, okResp := data["response"].(map[string]any); okResp {
|
||||
switch projectValue := responseData["cloudaicompanionProject"].(type) {
|
||||
case map[string]any:
|
||||
if id, okID := projectValue["id"].(string); okID {
|
||||
projectID = strings.TrimSpace(id)
|
||||
}
|
||||
case string:
|
||||
projectID = strings.TrimSpace(projectValue)
|
||||
}
|
||||
}
|
||||
|
||||
if projectID != "" {
|
||||
log.Infof("Successfully fetched project_id: %s", projectID)
|
||||
return projectID, nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("no project_id in response")
|
||||
}
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
responsePreview := strings.TrimSpace(string(bodyBytes))
|
||||
if len(responsePreview) > 500 {
|
||||
responsePreview = responsePreview[:500]
|
||||
}
|
||||
|
||||
responseErr := responsePreview
|
||||
if len(responseErr) > 200 {
|
||||
responseErr = responseErr[:200]
|
||||
}
|
||||
return "", fmt.Errorf("http %d: %s", resp.StatusCode, responseErr)
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
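End to end, the helpers above compose into a login flow roughly like this sketch. It assumes the antigravity package context, an application *config.Config named cfg, and an authorization code captured from the local callback server; error handling is trimmed and the code value is a placeholder.

ctx := context.Background()
authSvc := NewAntigravityAuth(cfg, nil)
redirect := fmt.Sprintf("http://localhost:%d/oauth-callback", CallbackPort)

// Step 1: send the user to Google for consent.
fmt.Println("Open this URL to authorize:", authSvc.BuildAuthURL("random-state", redirect))
code := "<code delivered to the local callback>" // placeholder, captured by the callback handler

// Step 2: exchange the code, then resolve the account email and project ID.
token, err := authSvc.ExchangeCodeForTokens(ctx, code, redirect)
if err != nil {
	log.Fatalf("token exchange failed: %v", err)
}
email, _ := authSvc.FetchUserInfo(ctx, token.AccessToken)
projectID, _ := authSvc.FetchProjectID(ctx, token.AccessToken)
fmt.Printf("authenticated %s (project %s)\n", email, projectID)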
34
internal/auth/antigravity/constants.go
Normal file
@@ -0,0 +1,34 @@
|
||||
// Package antigravity provides OAuth2 authentication functionality for the Antigravity provider.
|
||||
package antigravity
|
||||
|
||||
// OAuth client credentials and configuration
|
||||
const (
|
||||
ClientID = "1071006060591-tmhssin2h21lcre235vtolojh4g403ep.apps.googleusercontent.com"
|
||||
ClientSecret = "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"
|
||||
CallbackPort = 51121
|
||||
)
|
||||
|
||||
// Scopes defines the OAuth scopes required for Antigravity authentication
|
||||
var Scopes = []string{
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/userinfo.email",
|
||||
"https://www.googleapis.com/auth/userinfo.profile",
|
||||
"https://www.googleapis.com/auth/cclog",
|
||||
"https://www.googleapis.com/auth/experimentsandconfigs",
|
||||
}
|
||||
|
||||
// OAuth2 endpoints for Google authentication
|
||||
const (
|
||||
TokenEndpoint = "https://oauth2.googleapis.com/token"
|
||||
AuthEndpoint = "https://accounts.google.com/o/oauth2/v2/auth"
|
||||
UserInfoEndpoint = "https://www.googleapis.com/oauth2/v1/userinfo?alt=json"
|
||||
)
|
||||
|
||||
// Antigravity API configuration
|
||||
const (
|
||||
APIEndpoint = "https://cloudcode-pa.googleapis.com"
|
||||
APIVersion = "v1internal"
|
||||
APIUserAgent = "google-api-nodejs-client/9.15.1"
|
||||
APIClient = "google-cloud-sdk vscode_cloudshelleditor/0.1"
|
||||
ClientMetadata = `{"ideType":"IDE_UNSPECIFIED","platform":"PLATFORM_UNSPECIFIED","pluginType":"GEMINI"}`
|
||||
)
|
||||
16
internal/auth/antigravity/filename.go
Normal file
@@ -0,0 +1,16 @@
|
||||
package antigravity
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CredentialFileName returns the filename used to persist Antigravity credentials.
|
||||
// It uses the email as a suffix to disambiguate accounts.
|
||||
func CredentialFileName(email string) string {
|
||||
email = strings.TrimSpace(email)
|
||||
if email == "" {
|
||||
return "antigravity.json"
|
||||
}
|
||||
return fmt.Sprintf("antigravity-%s.json", email)
|
||||
}
|
||||
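For reference, the filename helper above produces names like these (inputs are illustrative):

CredentialFileName("dev@example.com") // "antigravity-dev@example.com.json"
CredentialFileName("")                // "antigravity.json" (fallback when no email is known)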
@@ -14,15 +14,15 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// OAuth configuration constants for Claude/Anthropic
|
||||
const (
|
||||
anthropicAuthURL = "https://claude.ai/oauth/authorize"
|
||||
anthropicTokenURL = "https://console.anthropic.com/v1/oauth/token"
|
||||
anthropicClientID = "9d1c250a-e61b-44d9-88ed-5944d1962f5e"
|
||||
redirectURI = "http://localhost:54545/callback"
|
||||
AuthURL = "https://claude.ai/oauth/authorize"
|
||||
TokenURL = "https://console.anthropic.com/v1/oauth/token"
|
||||
ClientID = "9d1c250a-e61b-44d9-88ed-5944d1962f5e"
|
||||
RedirectURI = "http://localhost:54545/callback"
|
||||
)
|
||||
|
||||
// tokenResponse represents the response structure from Anthropic's OAuth token endpoint.
|
||||
@@ -50,7 +50,8 @@ type ClaudeAuth struct {
|
||||
}
|
||||
|
||||
// NewClaudeAuth creates a new Anthropic authentication service.
|
||||
// It initializes the HTTP client with proxy settings from the configuration.
|
||||
// It initializes the HTTP client with a custom TLS transport that uses Firefox
|
||||
// fingerprint to bypass Cloudflare's TLS fingerprinting on Anthropic domains.
|
||||
//
|
||||
// Parameters:
|
||||
// - cfg: The application configuration containing proxy settings
|
||||
@@ -58,8 +59,10 @@ type ClaudeAuth struct {
|
||||
// Returns:
|
||||
// - *ClaudeAuth: A new Claude authentication service instance
|
||||
func NewClaudeAuth(cfg *config.Config) *ClaudeAuth {
|
||||
// Use custom HTTP client with Firefox TLS fingerprint to bypass
|
||||
// Cloudflare's bot detection on Anthropic domains
|
||||
return &ClaudeAuth{
|
||||
httpClient: util.SetProxy(cfg, &http.Client{}),
|
||||
httpClient: NewAnthropicHttpClient(&cfg.SDKConfig),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -82,16 +85,16 @@ func (o *ClaudeAuth) GenerateAuthURL(state string, pkceCodes *PKCECodes) (string
|
||||
|
||||
params := url.Values{
|
||||
"code": {"true"},
|
||||
"client_id": {anthropicClientID},
|
||||
"client_id": {ClientID},
|
||||
"response_type": {"code"},
|
||||
"redirect_uri": {redirectURI},
|
||||
"redirect_uri": {RedirectURI},
|
||||
"scope": {"org:create_api_key user:profile user:inference"},
|
||||
"code_challenge": {pkceCodes.CodeChallenge},
|
||||
"code_challenge_method": {"S256"},
|
||||
"state": {state},
|
||||
}
|
||||
|
||||
authURL := fmt.Sprintf("%s?%s", anthropicAuthURL, params.Encode())
|
||||
authURL := fmt.Sprintf("%s?%s", AuthURL, params.Encode())
|
||||
return authURL, state, nil
|
||||
}
|
||||
|
||||
@@ -137,8 +140,8 @@ func (o *ClaudeAuth) ExchangeCodeForTokens(ctx context.Context, code, state stri
|
||||
"code": newCode,
|
||||
"state": state,
|
||||
"grant_type": "authorization_code",
|
||||
"client_id": anthropicClientID,
|
||||
"redirect_uri": redirectURI,
|
||||
"client_id": ClientID,
|
||||
"redirect_uri": RedirectURI,
|
||||
"code_verifier": pkceCodes.CodeVerifier,
|
||||
}
|
||||
|
||||
@@ -154,7 +157,7 @@ func (o *ClaudeAuth) ExchangeCodeForTokens(ctx context.Context, code, state stri
|
||||
|
||||
// log.Debugf("Token exchange request: %s", string(jsonBody))
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", anthropicTokenURL, strings.NewReader(string(jsonBody)))
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", TokenURL, strings.NewReader(string(jsonBody)))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create token request: %w", err)
|
||||
}
|
||||
@@ -221,7 +224,7 @@ func (o *ClaudeAuth) RefreshTokens(ctx context.Context, refreshToken string) (*C
|
||||
}
|
||||
|
||||
reqBody := map[string]interface{}{
|
||||
"client_id": anthropicClientID,
|
||||
"client_id": ClientID,
|
||||
"grant_type": "refresh_token",
|
||||
"refresh_token": refreshToken,
|
||||
}
|
||||
@@ -231,7 +234,7 @@ func (o *ClaudeAuth) RefreshTokens(ctx context.Context, refreshToken string) (*C
|
||||
return nil, fmt.Errorf("failed to marshal request body: %w", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", anthropicTokenURL, strings.NewReader(string(jsonBody)))
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", TokenURL, strings.NewReader(string(jsonBody)))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create refresh request: %w", err)
|
||||
}
|
||||
|
||||
165
internal/auth/claude/utls_transport.go
Normal file
@@ -0,0 +1,165 @@
|
||||
// Package claude provides authentication functionality for Anthropic's Claude API.
|
||||
// This file implements a custom HTTP transport using utls to bypass TLS fingerprinting.
|
||||
package claude
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
tls "github.com/refraction-networking/utls"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/proxy"
|
||||
)
|
||||
|
||||
// utlsRoundTripper implements http.RoundTripper using utls with Firefox fingerprint
|
||||
// to bypass Cloudflare's TLS fingerprinting on Anthropic domains.
|
||||
type utlsRoundTripper struct {
|
||||
// mu protects the connections map and pending map
|
||||
mu sync.Mutex
|
||||
// connections caches HTTP/2 client connections per host
|
||||
connections map[string]*http2.ClientConn
|
||||
// pending tracks hosts that are currently being connected to (prevents race condition)
|
||||
pending map[string]*sync.Cond
|
||||
// dialer is used to create network connections, supporting proxies
|
||||
dialer proxy.Dialer
|
||||
}
|
||||
|
||||
// newUtlsRoundTripper creates a new utls-based round tripper with optional proxy support
|
||||
func newUtlsRoundTripper(cfg *config.SDKConfig) *utlsRoundTripper {
|
||||
var dialer proxy.Dialer = proxy.Direct
|
||||
if cfg != nil && cfg.ProxyURL != "" {
|
||||
proxyURL, err := url.Parse(cfg.ProxyURL)
|
||||
if err != nil {
|
||||
log.Errorf("failed to parse proxy URL %q: %v", cfg.ProxyURL, err)
|
||||
} else {
|
||||
pDialer, err := proxy.FromURL(proxyURL, proxy.Direct)
|
||||
if err != nil {
|
||||
log.Errorf("failed to create proxy dialer for %q: %v", cfg.ProxyURL, err)
|
||||
} else {
|
||||
dialer = pDialer
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &utlsRoundTripper{
|
||||
connections: make(map[string]*http2.ClientConn),
|
||||
pending: make(map[string]*sync.Cond),
|
||||
dialer: dialer,
|
||||
}
|
||||
}
|
||||
|
||||
// getOrCreateConnection gets an existing connection or creates a new one.
|
||||
// It uses a per-host locking mechanism to prevent multiple goroutines from
|
||||
// creating connections to the same host simultaneously.
|
||||
func (t *utlsRoundTripper) getOrCreateConnection(host, addr string) (*http2.ClientConn, error) {
|
||||
t.mu.Lock()
|
||||
|
||||
// Check if connection exists and is usable
|
||||
if h2Conn, ok := t.connections[host]; ok && h2Conn.CanTakeNewRequest() {
|
||||
t.mu.Unlock()
|
||||
return h2Conn, nil
|
||||
}
|
||||
|
||||
// Check if another goroutine is already creating a connection
|
||||
if cond, ok := t.pending[host]; ok {
|
||||
// Wait for the other goroutine to finish
|
||||
cond.Wait()
|
||||
// Check if connection is now available
|
||||
if h2Conn, ok := t.connections[host]; ok && h2Conn.CanTakeNewRequest() {
|
||||
t.mu.Unlock()
|
||||
return h2Conn, nil
|
||||
}
|
||||
// Connection still not available, we'll create one
|
||||
}
|
||||
|
||||
// Mark this host as pending
|
||||
cond := sync.NewCond(&t.mu)
|
||||
t.pending[host] = cond
|
||||
t.mu.Unlock()
|
||||
|
||||
// Create connection outside the lock
|
||||
h2Conn, err := t.createConnection(host, addr)
|
||||
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
// Remove pending marker and wake up waiting goroutines
|
||||
delete(t.pending, host)
|
||||
cond.Broadcast()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Store the new connection
|
||||
t.connections[host] = h2Conn
|
||||
return h2Conn, nil
|
||||
}
|
||||
|
||||
// createConnection creates a new HTTP/2 connection with Firefox TLS fingerprint
|
||||
func (t *utlsRoundTripper) createConnection(host, addr string) (*http2.ClientConn, error) {
|
||||
conn, err := t.dialer.Dial("tcp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tlsConfig := &tls.Config{ServerName: host}
|
||||
tlsConn := tls.UClient(conn, tlsConfig, tls.HelloFirefox_Auto)
|
||||
|
||||
if err := tlsConn.Handshake(); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tr := &http2.Transport{}
|
||||
h2Conn, err := tr.NewClientConn(tlsConn)
|
||||
if err != nil {
|
||||
tlsConn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return h2Conn, nil
|
||||
}
|
||||
|
||||
// RoundTrip implements http.RoundTripper
|
||||
func (t *utlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
host := req.URL.Host
|
||||
addr := host
|
||||
if !strings.Contains(addr, ":") {
|
||||
addr += ":443"
|
||||
}
|
||||
|
||||
// Get hostname without port for TLS ServerName
|
||||
hostname := req.URL.Hostname()
|
||||
|
||||
h2Conn, err := t.getOrCreateConnection(hostname, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := h2Conn.RoundTrip(req)
|
||||
if err != nil {
|
||||
// Connection failed, remove it from cache
|
||||
t.mu.Lock()
|
||||
if cached, ok := t.connections[hostname]; ok && cached == h2Conn {
|
||||
delete(t.connections, hostname)
|
||||
}
|
||||
t.mu.Unlock()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// NewAnthropicHttpClient creates an HTTP client that bypasses TLS fingerprinting
|
||||
// for Anthropic domains by using utls with Firefox fingerprint.
|
||||
// It accepts optional SDK configuration for proxy settings.
|
||||
func NewAnthropicHttpClient(cfg *config.SDKConfig) *http.Client {
|
||||
return &http.Client{
|
||||
Transport: newUtlsRoundTripper(cfg),
|
||||
}
|
||||
}
|
||||
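A minimal sketch of using this transport, assuming a config value cfg whose SDKConfig may carry a ProxyURL; the request body here is an empty JSON placeholder, not a real token exchange payload.

client := NewAnthropicHttpClient(&cfg.SDKConfig)
req, _ := http.NewRequest(http.MethodPost, TokenURL, strings.NewReader("{}"))
req.Header.Set("Content-Type", "application/json")
resp, err := client.Do(req) // the TLS handshake presents a Firefox ClientHello
if err == nil {
	defer resp.Body.Close()
}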
46
internal/auth/codex/filename.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package codex
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// CredentialFileName returns the filename used to persist Codex OAuth credentials.
|
||||
// When planType is available (e.g. "plus", "team"), it is appended after the email
|
||||
// as a suffix to disambiguate subscriptions.
|
||||
func CredentialFileName(email, planType, hashAccountID string, includeProviderPrefix bool) string {
|
||||
email = strings.TrimSpace(email)
|
||||
plan := normalizePlanTypeForFilename(planType)
|
||||
|
||||
prefix := ""
|
||||
if includeProviderPrefix {
|
||||
prefix = "codex"
|
||||
}
|
||||
|
||||
if plan == "" {
|
||||
return fmt.Sprintf("%s-%s.json", prefix, email)
|
||||
} else if plan == "team" {
|
||||
return fmt.Sprintf("%s-%s-%s-%s.json", prefix, hashAccountID, email, plan)
|
||||
}
|
||||
return fmt.Sprintf("%s-%s-%s.json", prefix, email, plan)
|
||||
}
|
||||
|
||||
func normalizePlanTypeForFilename(planType string) string {
|
||||
planType = strings.TrimSpace(planType)
|
||||
if planType == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
parts := strings.FieldsFunc(planType, func(r rune) bool {
|
||||
return !unicode.IsLetter(r) && !unicode.IsDigit(r)
|
||||
})
|
||||
if len(parts) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
for i, part := range parts {
|
||||
parts[i] = strings.ToLower(strings.TrimSpace(part))
|
||||
}
|
||||
return strings.Join(parts, "-")
|
||||
}
|
||||
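Illustrative outputs of the Codex filename helper above (inputs are made up):

CredentialFileName("dev@example.com", "", "", true)           // "codex-dev@example.com.json"
CredentialFileName("dev@example.com", "Plus", "", true)       // "codex-dev@example.com-plus.json"
CredentialFileName("dev@example.com", "team", "ab12cd", true) // "codex-ab12cd-dev@example.com-team.json"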
@@ -19,11 +19,12 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// OAuth configuration constants for OpenAI Codex
|
||||
const (
|
||||
openaiAuthURL = "https://auth.openai.com/oauth/authorize"
|
||||
openaiTokenURL = "https://auth.openai.com/oauth/token"
|
||||
openaiClientID = "app_EMoamEEZ73f0CkXaXp7hrann"
|
||||
redirectURI = "http://localhost:1455/auth/callback"
|
||||
AuthURL = "https://auth.openai.com/oauth/authorize"
|
||||
TokenURL = "https://auth.openai.com/oauth/token"
|
||||
ClientID = "app_EMoamEEZ73f0CkXaXp7hrann"
|
||||
RedirectURI = "http://localhost:1455/auth/callback"
|
||||
)
|
||||
|
||||
// CodexAuth handles the OpenAI OAuth2 authentication flow.
|
||||
@@ -37,7 +38,7 @@ type CodexAuth struct {
|
||||
// It initializes an HTTP client with proxy settings from the provided configuration.
|
||||
func NewCodexAuth(cfg *config.Config) *CodexAuth {
|
||||
return &CodexAuth{
|
||||
httpClient: util.SetProxy(cfg, &http.Client{}),
|
||||
httpClient: util.SetProxy(&cfg.SDKConfig, &http.Client{}),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -50,9 +51,9 @@ func (o *CodexAuth) GenerateAuthURL(state string, pkceCodes *PKCECodes) (string,
|
||||
}
|
||||
|
||||
params := url.Values{
|
||||
"client_id": {openaiClientID},
|
||||
"client_id": {ClientID},
|
||||
"response_type": {"code"},
|
||||
"redirect_uri": {redirectURI},
|
||||
"redirect_uri": {RedirectURI},
|
||||
"scope": {"openid email profile offline_access"},
|
||||
"state": {state},
|
||||
"code_challenge": {pkceCodes.CodeChallenge},
|
||||
@@ -62,7 +63,7 @@ func (o *CodexAuth) GenerateAuthURL(state string, pkceCodes *PKCECodes) (string,
|
||||
"codex_cli_simplified_flow": {"true"},
|
||||
}
|
||||
|
||||
authURL := fmt.Sprintf("%s?%s", openaiAuthURL, params.Encode())
|
||||
authURL := fmt.Sprintf("%s?%s", AuthURL, params.Encode())
|
||||
return authURL, nil
|
||||
}
|
||||
|
||||
@@ -77,13 +78,13 @@ func (o *CodexAuth) ExchangeCodeForTokens(ctx context.Context, code string, pkce
|
||||
// Prepare token exchange request
|
||||
data := url.Values{
|
||||
"grant_type": {"authorization_code"},
|
||||
"client_id": {openaiClientID},
|
||||
"client_id": {ClientID},
|
||||
"code": {code},
|
||||
"redirect_uri": {redirectURI},
|
||||
"redirect_uri": {RedirectURI},
|
||||
"code_verifier": {pkceCodes.CodeVerifier},
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", openaiTokenURL, strings.NewReader(data.Encode()))
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", TokenURL, strings.NewReader(data.Encode()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create token request: %w", err)
|
||||
}
|
||||
@@ -163,13 +164,13 @@ func (o *CodexAuth) RefreshTokens(ctx context.Context, refreshToken string) (*Co
|
||||
}
|
||||
|
||||
data := url.Values{
|
||||
"client_id": {openaiClientID},
|
||||
"client_id": {ClientID},
|
||||
"grant_type": {"refresh_token"},
|
||||
"refresh_token": {refreshToken},
|
||||
"scope": {"openid profile email"},
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", openaiTokenURL, strings.NewReader(data.Encode()))
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", TokenURL, strings.NewReader(data.Encode()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create refresh request: %w", err)
|
||||
}
|
||||
|
||||
@@ -1,50 +0,0 @@
|
||||
// Package gemini provides authentication and token management functionality
|
||||
// for Google's Gemini AI services. It handles OAuth2 token storage, serialization,
|
||||
// and retrieval for maintaining authenticated sessions with the Gemini API.
|
||||
package gemini
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// GeminiWebTokenStorage stores cookie information for Google Gemini Web authentication.
|
||||
type GeminiWebTokenStorage struct {
|
||||
Secure1PSID string `json:"secure_1psid"`
|
||||
Secure1PSIDTS string `json:"secure_1psidts"`
|
||||
Type string `json:"type"`
|
||||
LastRefresh string `json:"last_refresh,omitempty"`
|
||||
}
|
||||
|
||||
// SaveTokenToFile serializes the Gemini Web token storage to a JSON file.
|
||||
func (ts *GeminiWebTokenStorage) SaveTokenToFile(authFilePath string) error {
|
||||
misc.LogSavingCredentials(authFilePath)
|
||||
ts.Type = "gemini-web"
|
||||
if ts.LastRefresh == "" {
|
||||
ts.LastRefresh = time.Now().Format(time.RFC3339)
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Dir(authFilePath), 0700); err != nil {
|
||||
return fmt.Errorf("failed to create directory: %v", err)
|
||||
}
|
||||
|
||||
f, err := os.Create(authFilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create token file: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if errClose := f.Close(); errClose != nil {
|
||||
log.Errorf("failed to close file: %v", errClose)
|
||||
}
|
||||
}()
|
||||
|
||||
if err = json.NewEncoder(f).Encode(ts); err != nil {
|
||||
return fmt.Errorf("failed to write token to file: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/codex"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/browser"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/tidwall/gjson"
|
||||
@@ -27,18 +28,19 @@ import (
|
||||
"golang.org/x/oauth2/google"
|
||||
)
|
||||
|
||||
// OAuth configuration constants for Gemini
|
||||
const (
|
||||
geminiOauthClientID = "681255809395-oo8ft2oprdrnp9e3aqf6av3hmdib135j.apps.googleusercontent.com"
|
||||
geminiOauthClientSecret = "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"
|
||||
ClientID = "681255809395-oo8ft2oprdrnp9e3aqf6av3hmdib135j.apps.googleusercontent.com"
|
||||
ClientSecret = "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"
|
||||
DefaultCallbackPort = 8085
|
||||
)
|
||||
|
||||
var (
|
||||
geminiOauthScopes = []string{
|
||||
// OAuth scopes for Gemini authentication
|
||||
var Scopes = []string{
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/userinfo.email",
|
||||
"https://www.googleapis.com/auth/userinfo.profile",
|
||||
}
|
||||
)
|
||||
|
||||
// GeminiAuth provides methods for handling the Gemini OAuth2 authentication flow.
|
||||
// It encapsulates the logic for obtaining, storing, and refreshing authentication tokens
|
||||
@@ -46,6 +48,13 @@ var (
|
||||
type GeminiAuth struct {
|
||||
}
|
||||
|
||||
// WebLoginOptions customizes the interactive OAuth flow.
|
||||
type WebLoginOptions struct {
|
||||
NoBrowser bool
|
||||
CallbackPort int
|
||||
Prompt func(string) (string, error)
|
||||
}
|
||||
|
||||
// NewGeminiAuth creates a new instance of GeminiAuth.
|
||||
func NewGeminiAuth() *GeminiAuth {
|
||||
return &GeminiAuth{}
|
||||
@@ -59,12 +68,18 @@ func NewGeminiAuth() *GeminiAuth {
|
||||
// - ctx: The context for the HTTP client
|
||||
// - ts: The Gemini token storage containing authentication tokens
|
||||
// - cfg: The configuration containing proxy settings
|
||||
// - noBrowser: Optional parameter to disable browser opening
|
||||
// - opts: Optional parameters to customize browser and prompt behavior
|
||||
//
|
||||
// Returns:
|
||||
// - *http.Client: An HTTP client configured with authentication
|
||||
// - error: An error if the client configuration fails, nil otherwise
|
||||
func (g *GeminiAuth) GetAuthenticatedClient(ctx context.Context, ts *GeminiTokenStorage, cfg *config.Config, noBrowser ...bool) (*http.Client, error) {
|
||||
func (g *GeminiAuth) GetAuthenticatedClient(ctx context.Context, ts *GeminiTokenStorage, cfg *config.Config, opts *WebLoginOptions) (*http.Client, error) {
|
||||
callbackPort := DefaultCallbackPort
|
||||
if opts != nil && opts.CallbackPort > 0 {
|
||||
callbackPort = opts.CallbackPort
|
||||
}
|
||||
callbackURL := fmt.Sprintf("http://localhost:%d/oauth2callback", callbackPort)
|
||||
|
||||
// Configure proxy settings for the HTTP client if a proxy URL is provided.
|
||||
proxyURL, err := url.Parse(cfg.ProxyURL)
|
||||
if err == nil {
|
||||
@@ -76,7 +91,8 @@ func (g *GeminiAuth) GetAuthenticatedClient(ctx context.Context, ts *GeminiToken
|
||||
auth := &proxy.Auth{User: username, Password: password}
|
||||
dialer, errSOCKS5 := proxy.SOCKS5("tcp", proxyURL.Host, auth, proxy.Direct)
|
||||
if errSOCKS5 != nil {
|
||||
log.Fatalf("create SOCKS5 dialer failed: %v", errSOCKS5)
|
||||
log.Errorf("create SOCKS5 dialer failed: %v", errSOCKS5)
|
||||
return nil, fmt.Errorf("create SOCKS5 dialer failed: %w", errSOCKS5)
|
||||
}
|
||||
transport = &http.Transport{
|
||||
DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||
@@ -96,10 +112,10 @@ func (g *GeminiAuth) GetAuthenticatedClient(ctx context.Context, ts *GeminiToken
|
||||
|
||||
// Configure the OAuth2 client.
|
||||
conf := &oauth2.Config{
|
||||
ClientID: geminiOauthClientID,
|
||||
ClientSecret: geminiOauthClientSecret,
|
||||
RedirectURL: "http://localhost:8085/oauth2callback", // This will be used by the local server.
|
||||
Scopes: geminiOauthScopes,
|
||||
ClientID: ClientID,
|
||||
ClientSecret: ClientSecret,
|
||||
RedirectURL: callbackURL, // This will be used by the local server.
|
||||
Scopes: Scopes,
|
||||
Endpoint: google.Endpoint,
|
||||
}
|
||||
|
||||
@@ -107,8 +123,8 @@ func (g *GeminiAuth) GetAuthenticatedClient(ctx context.Context, ts *GeminiToken
|
||||
|
||||
// If no token is found in storage, initiate the web-based OAuth flow.
|
||||
if ts.Token == nil {
|
||||
log.Info("Could not load token from file, starting OAuth flow.")
|
||||
token, err = g.getTokenFromWeb(ctx, conf, noBrowser...)
|
||||
fmt.Printf("Could not load token from file, starting OAuth flow.\n")
|
||||
token, err = g.getTokenFromWeb(ctx, conf, opts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get token from web: %w", err)
|
||||
}
|
||||
@@ -169,9 +185,9 @@ func (g *GeminiAuth) createTokenStorage(ctx context.Context, config *oauth2.Conf
|
||||
|
||||
emailResult := gjson.GetBytes(bodyBytes, "email")
|
||||
if emailResult.Exists() && emailResult.Type == gjson.String {
|
||||
log.Infof("Authenticated user email: %s", emailResult.String())
|
||||
fmt.Printf("Authenticated user email: %s\n", emailResult.String())
|
||||
} else {
|
||||
log.Info("Failed to get user email from token")
|
||||
fmt.Println("Failed to get user email from token")
|
||||
}
|
||||
|
||||
var ifToken map[string]any
|
||||
@@ -182,9 +198,9 @@ func (g *GeminiAuth) createTokenStorage(ctx context.Context, config *oauth2.Conf
|
||||
}
|
||||
|
||||
ifToken["token_uri"] = "https://oauth2.googleapis.com/token"
|
||||
ifToken["client_id"] = geminiOauthClientID
|
||||
ifToken["client_secret"] = geminiOauthClientSecret
|
||||
ifToken["scopes"] = geminiOauthScopes
|
||||
ifToken["client_id"] = ClientID
|
||||
ifToken["client_secret"] = ClientSecret
|
||||
ifToken["scopes"] = Scopes
|
||||
ifToken["universe_domain"] = "googleapis.com"
|
||||
|
||||
ts := GeminiTokenStorage{
|
||||
@@ -204,61 +220,85 @@ func (g *GeminiAuth) createTokenStorage(ctx context.Context, config *oauth2.Conf
|
||||
// Parameters:
|
||||
// - ctx: The context for the HTTP client
|
||||
// - config: The OAuth2 configuration
|
||||
// - noBrowser: Optional parameter to disable browser opening
|
||||
// - opts: Optional parameters to customize browser and prompt behavior
|
||||
//
|
||||
// Returns:
|
||||
// - *oauth2.Token: The OAuth2 token obtained from the authorization flow
|
||||
// - error: An error if the token acquisition fails, nil otherwise
|
||||
func (g *GeminiAuth) getTokenFromWeb(ctx context.Context, config *oauth2.Config, noBrowser ...bool) (*oauth2.Token, error) {
|
||||
func (g *GeminiAuth) getTokenFromWeb(ctx context.Context, config *oauth2.Config, opts *WebLoginOptions) (*oauth2.Token, error) {
|
||||
callbackPort := DefaultCallbackPort
|
||||
if opts != nil && opts.CallbackPort > 0 {
|
||||
callbackPort = opts.CallbackPort
|
||||
}
|
||||
callbackURL := fmt.Sprintf("http://localhost:%d/oauth2callback", callbackPort)
|
||||
|
||||
// Use a channel to pass the authorization code from the HTTP handler to the main function.
|
||||
codeChan := make(chan string)
|
||||
errChan := make(chan error)
|
||||
codeChan := make(chan string, 1)
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
// Create a new HTTP server with its own multiplexer.
|
||||
mux := http.NewServeMux()
|
||||
server := &http.Server{Addr: ":8085", Handler: mux}
|
||||
config.RedirectURL = "http://localhost:8085/oauth2callback"
|
||||
server := &http.Server{Addr: fmt.Sprintf(":%d", callbackPort), Handler: mux}
|
||||
config.RedirectURL = callbackURL
|
||||
|
||||
mux.HandleFunc("/oauth2callback", func(w http.ResponseWriter, r *http.Request) {
|
||||
if err := r.URL.Query().Get("error"); err != "" {
|
||||
_, _ = fmt.Fprintf(w, "Authentication failed: %s", err)
|
||||
errChan <- fmt.Errorf("authentication failed via callback: %s", err)
|
||||
select {
|
||||
case errChan <- fmt.Errorf("authentication failed via callback: %s", err):
|
||||
default:
|
||||
}
|
||||
return
|
||||
}
|
||||
code := r.URL.Query().Get("code")
|
||||
if code == "" {
|
||||
_, _ = fmt.Fprint(w, "Authentication failed: code not found.")
|
||||
errChan <- fmt.Errorf("code not found in callback")
|
||||
select {
|
||||
case errChan <- fmt.Errorf("code not found in callback"):
|
||||
default:
|
||||
}
|
||||
return
|
||||
}
|
||||
_, _ = fmt.Fprint(w, "<html><body><h1>Authentication successful!</h1><p>You can close this window.</p></body></html>")
|
||||
codeChan <- code
|
||||
select {
|
||||
case codeChan <- code:
|
||||
default:
|
||||
}
|
||||
})
|
||||
|
||||
// Start the server in a goroutine.
|
||||
go func() {
|
||||
if err := server.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
|
||||
log.Fatalf("ListenAndServe(): %v", err)
|
||||
log.Errorf("ListenAndServe(): %v", err)
|
||||
select {
|
||||
case errChan <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Open the authorization URL in the user's browser.
|
||||
authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline, oauth2.SetAuthURLParam("prompt", "consent"))
|
||||
|
||||
if len(noBrowser) == 1 && !noBrowser[0] {
|
||||
log.Info("Opening browser for authentication...")
|
||||
noBrowser := false
|
||||
if opts != nil {
|
||||
noBrowser = opts.NoBrowser
|
||||
}
|
||||
|
||||
if !noBrowser {
|
||||
fmt.Println("Opening browser for authentication...")
|
||||
|
||||
// Check if browser is available
|
||||
if !browser.IsAvailable() {
|
||||
log.Warn("No browser available on this system")
|
||||
util.PrintSSHTunnelInstructions(8085)
|
||||
log.Infof("Please manually open this URL in your browser:\n\n%s\n", authURL)
|
||||
util.PrintSSHTunnelInstructions(callbackPort)
|
||||
fmt.Printf("Please manually open this URL in your browser:\n\n%s\n", authURL)
|
||||
} else {
|
||||
if err := browser.OpenURL(authURL); err != nil {
|
||||
authErr := codex.NewAuthenticationError(codex.ErrBrowserOpenFailed, err)
|
||||
log.Warn(codex.GetUserFriendlyMessage(authErr))
|
||||
util.PrintSSHTunnelInstructions(8085)
|
||||
log.Infof("Please manually open this URL in your browser:\n\n%s\n", authURL)
|
||||
util.PrintSSHTunnelInstructions(callbackPort)
|
||||
fmt.Printf("Please manually open this URL in your browser:\n\n%s\n", authURL)
|
||||
|
||||
// Log platform info for debugging
|
||||
platformInfo := browser.GetPlatformInfo()
|
||||
@@ -268,22 +308,69 @@ func (g *GeminiAuth) getTokenFromWeb(ctx context.Context, config *oauth2.Config,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
util.PrintSSHTunnelInstructions(8085)
|
||||
log.Infof("Please open this URL in your browser:\n\n%s\n", authURL)
|
||||
util.PrintSSHTunnelInstructions(callbackPort)
|
||||
fmt.Printf("Please open this URL in your browser:\n\n%s\n", authURL)
|
||||
}
|
||||
|
||||
log.Info("Waiting for authentication callback...")
|
||||
fmt.Println("Waiting for authentication callback...")
|
||||
|
||||
// Wait for the authorization code or an error.
|
||||
var authCode string
|
||||
timeoutTimer := time.NewTimer(5 * time.Minute)
|
||||
defer timeoutTimer.Stop()
|
||||
|
||||
var manualPromptTimer *time.Timer
|
||||
var manualPromptC <-chan time.Time
|
||||
if opts != nil && opts.Prompt != nil {
|
||||
manualPromptTimer = time.NewTimer(15 * time.Second)
|
||||
manualPromptC = manualPromptTimer.C
|
||||
defer manualPromptTimer.Stop()
|
||||
}
|
||||
|
||||
waitForCallback:
|
||||
for {
|
||||
select {
|
||||
case code := <-codeChan:
|
||||
authCode = code
|
||||
break waitForCallback
|
||||
case err := <-errChan:
|
||||
return nil, err
|
||||
case <-time.After(5 * time.Minute): // Timeout
|
||||
case <-manualPromptC:
|
||||
manualPromptC = nil
|
||||
if manualPromptTimer != nil {
|
||||
manualPromptTimer.Stop()
|
||||
}
|
||||
select {
|
||||
case code := <-codeChan:
|
||||
authCode = code
|
||||
break waitForCallback
|
||||
case err := <-errChan:
|
||||
return nil, err
|
||||
default:
|
||||
}
|
||||
input, err := opts.Prompt("Paste the Gemini callback URL (or press Enter to keep waiting): ")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parsed, err := misc.ParseOAuthCallback(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if parsed == nil {
|
||||
continue
|
||||
}
|
||||
if parsed.Error != "" {
|
||||
return nil, fmt.Errorf("authentication failed via callback: %s", parsed.Error)
|
||||
}
|
||||
if parsed.Code == "" {
|
||||
return nil, fmt.Errorf("code not found in callback")
|
||||
}
|
||||
authCode = parsed.Code
|
||||
break waitForCallback
|
||||
case <-timeoutTimer.C:
|
||||
return nil, fmt.Errorf("oauth flow timed out")
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown the server.
|
||||
if err := server.Shutdown(ctx); err != nil {
|
||||
@@ -296,6 +383,6 @@ func (g *GeminiAuth) getTokenFromWeb(ctx context.Context, config *oauth2.Config,
|
||||
return nil, fmt.Errorf("failed to exchange token: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Authentication successful.")
|
||||
fmt.Println("Authentication successful.")
|
||||
return token, nil
|
||||
}
|
||||
|
||||
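A hedged sketch of driving the reworked Gemini flow with WebLoginOptions; ctx, ts, and cfg are assumed to exist in the caller, and the prompt reader below is illustrative.

opts := &WebLoginOptions{
	NoBrowser:    true,
	CallbackPort: 8085,
	Prompt: func(msg string) (string, error) {
		// Read the pasted callback URL from stdin.
		fmt.Print(msg)
		var line string
		_, err := fmt.Scanln(&line)
		return line, err
	},
}
client, err := NewGeminiAuth().GetAuthenticatedClient(ctx, ts, cfg, opts)
if err != nil {
	log.Errorf("gemini login failed: %v", err)
}
_ = client // use the authenticated client for subsequent Code Assist calls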
@@ -8,6 +8,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -67,3 +68,20 @@ func (ts *GeminiTokenStorage) SaveTokenToFile(authFilePath string) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CredentialFileName returns the filename used to persist Gemini CLI credentials.
|
||||
// When projectID represents multiple projects (comma-separated or literal ALL),
|
||||
// the suffix is normalized to "all" and a "gemini-" prefix is enforced to keep
|
||||
// web and CLI generated files consistent.
|
||||
func CredentialFileName(email, projectID string, includeProviderPrefix bool) string {
|
||||
email = strings.TrimSpace(email)
|
||||
project := strings.TrimSpace(projectID)
|
||||
if strings.EqualFold(project, "all") || strings.Contains(project, ",") {
|
||||
return fmt.Sprintf("gemini-%s-all.json", email)
|
||||
}
|
||||
prefix := ""
|
||||
if includeProviderPrefix {
|
||||
prefix = "gemini-"
|
||||
}
|
||||
return fmt.Sprintf("%s%s-%s.json", prefix, email, project)
|
||||
}
|
||||
|
||||
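Illustrative outputs of the Gemini CLI filename helper above:

CredentialFileName("dev@example.com", "my-project", true)     // "gemini-dev@example.com-my-project.json"
CredentialFileName("dev@example.com", "proj-a,proj-b", false) // "gemini-dev@example.com-all.json" (prefix forced for multi-project)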
99
internal/auth/iflow/cookie_helpers.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package iflow
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NormalizeCookie normalizes raw cookie strings for iFlow authentication flows.
|
||||
func NormalizeCookie(raw string) (string, error) {
|
||||
trimmed := strings.TrimSpace(raw)
|
||||
if trimmed == "" {
|
||||
return "", fmt.Errorf("cookie cannot be empty")
|
||||
}
|
||||
|
||||
combined := strings.Join(strings.Fields(trimmed), " ")
|
||||
if !strings.HasSuffix(combined, ";") {
|
||||
combined += ";"
|
||||
}
|
||||
if !strings.Contains(combined, "BXAuth=") {
|
||||
return "", fmt.Errorf("cookie missing BXAuth field")
|
||||
}
|
||||
return combined, nil
|
||||
}
|
||||
|
||||
// SanitizeIFlowFileName normalizes user identifiers for safe filename usage.
|
||||
func SanitizeIFlowFileName(raw string) string {
|
||||
if raw == "" {
|
||||
return ""
|
||||
}
|
||||
cleanEmail := strings.ReplaceAll(raw, "*", "x")
|
||||
var result strings.Builder
|
||||
for _, r := range cleanEmail {
|
||||
if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r == '_' || r == '@' || r == '.' || r == '-' {
|
||||
result.WriteRune(r)
|
||||
}
|
||||
}
|
||||
return strings.TrimSpace(result.String())
|
||||
}
|
||||
|
||||
// ExtractBXAuth extracts the BXAuth value from a cookie string.
|
||||
func ExtractBXAuth(cookie string) string {
|
||||
parts := strings.Split(cookie, ";")
|
||||
for _, part := range parts {
|
||||
part = strings.TrimSpace(part)
|
||||
if strings.HasPrefix(part, "BXAuth=") {
|
||||
return strings.TrimPrefix(part, "BXAuth=")
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// CheckDuplicateBXAuth checks if the given BXAuth value already exists in any iflow auth file.
|
||||
// Returns the path of the existing file if found, empty string otherwise.
|
||||
func CheckDuplicateBXAuth(authDir, bxAuth string) (string, error) {
|
||||
if bxAuth == "" {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(authDir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return "", nil
|
||||
}
|
||||
return "", fmt.Errorf("read auth dir failed: %w", err)
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
name := entry.Name()
|
||||
if !strings.HasPrefix(name, "iflow-") || !strings.HasSuffix(name, ".json") {
|
||||
continue
|
||||
}
|
||||
|
||||
filePath := filepath.Join(authDir, name)
|
||||
data, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var tokenData struct {
|
||||
Cookie string `json:"cookie"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &tokenData); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
existingBXAuth := ExtractBXAuth(tokenData.Cookie)
|
||||
if existingBXAuth != "" && existingBXAuth == bxAuth {
|
||||
return filePath, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
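Taken together, the cookie helpers above are typically used like this sketch; the cookie value and auth directory path are made up.

cookie, err := NormalizeCookie("SESSION=xyz; BXAuth=abc123")
if err != nil {
	fmt.Println("invalid cookie:", err)
}
// cookie == "SESSION=xyz; BXAuth=abc123;"
bxAuth := ExtractBXAuth(cookie) // "abc123"
if existing, _ := CheckDuplicateBXAuth("/path/to/auth-dir", bxAuth); existing != "" {
	fmt.Println("same BXAuth already stored at", existing)
}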
523
internal/auth/iflow/iflow_auth.go
Normal file
@@ -0,0 +1,523 @@
|
||||
package iflow
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
// OAuth endpoints and client metadata are derived from the reference Python implementation.
|
||||
iFlowOAuthTokenEndpoint = "https://iflow.cn/oauth/token"
|
||||
iFlowOAuthAuthorizeEndpoint = "https://iflow.cn/oauth"
|
||||
iFlowUserInfoEndpoint = "https://iflow.cn/api/oauth/getUserInfo"
|
||||
iFlowSuccessRedirectURL = "https://iflow.cn/oauth/success"
|
||||
|
||||
// Cookie authentication endpoints
|
||||
iFlowAPIKeyEndpoint = "https://platform.iflow.cn/api/openapi/apikey"
|
||||
|
||||
// Client credentials provided by iFlow for the Code Assist integration.
|
||||
iFlowOAuthClientID = "10009311001"
|
||||
iFlowOAuthClientSecret = "4Z3YjXycVsQvyGF1etiNlIBB4RsqSDtW"
|
||||
)
|
||||
|
||||
// DefaultAPIBaseURL is the canonical chat completions endpoint.
|
||||
const DefaultAPIBaseURL = "https://apis.iflow.cn/v1"
|
||||
|
||||
// SuccessRedirectURL is exposed for consumers needing the official success page.
|
||||
const SuccessRedirectURL = iFlowSuccessRedirectURL
|
||||
|
||||
// CallbackPort defines the local port used for OAuth callbacks.
|
||||
const CallbackPort = 11451
|
||||
|
||||
// IFlowAuth encapsulates the HTTP client helpers for the OAuth flow.
|
||||
type IFlowAuth struct {
|
||||
httpClient *http.Client
|
||||
}
|
||||
|
||||
// NewIFlowAuth constructs a new IFlowAuth with proxy-aware transport.
|
||||
func NewIFlowAuth(cfg *config.Config) *IFlowAuth {
|
||||
client := &http.Client{Timeout: 30 * time.Second}
|
||||
return &IFlowAuth{httpClient: util.SetProxy(&cfg.SDKConfig, client)}
|
||||
}
|
||||
|
||||
// AuthorizationURL builds the authorization URL and matching redirect URI.
|
||||
func (ia *IFlowAuth) AuthorizationURL(state string, port int) (authURL, redirectURI string) {
|
||||
redirectURI = fmt.Sprintf("http://localhost:%d/oauth2callback", port)
|
||||
values := url.Values{}
|
||||
values.Set("loginMethod", "phone")
|
||||
values.Set("type", "phone")
|
||||
values.Set("redirect", redirectURI)
|
||||
values.Set("state", state)
|
||||
values.Set("client_id", iFlowOAuthClientID)
|
||||
authURL = fmt.Sprintf("%s?%s", iFlowOAuthAuthorizeEndpoint, values.Encode())
|
||||
return authURL, redirectURI
|
||||
}
|
||||
|
||||
// ExchangeCodeForTokens exchanges an authorization code for access and refresh tokens.
|
||||
func (ia *IFlowAuth) ExchangeCodeForTokens(ctx context.Context, code, redirectURI string) (*IFlowTokenData, error) {
|
||||
form := url.Values{}
|
||||
form.Set("grant_type", "authorization_code")
|
||||
form.Set("code", code)
|
||||
form.Set("redirect_uri", redirectURI)
|
||||
form.Set("client_id", iFlowOAuthClientID)
|
||||
form.Set("client_secret", iFlowOAuthClientSecret)
|
||||
|
||||
req, err := ia.newTokenRequest(ctx, form)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ia.doTokenRequest(ctx, req)
|
||||
}
|
||||
|
||||
// RefreshTokens exchanges a refresh token for a new access token.
|
||||
func (ia *IFlowAuth) RefreshTokens(ctx context.Context, refreshToken string) (*IFlowTokenData, error) {
|
||||
form := url.Values{}
|
||||
form.Set("grant_type", "refresh_token")
|
||||
form.Set("refresh_token", refreshToken)
|
||||
form.Set("client_id", iFlowOAuthClientID)
|
||||
form.Set("client_secret", iFlowOAuthClientSecret)
|
||||
|
||||
req, err := ia.newTokenRequest(ctx, form)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ia.doTokenRequest(ctx, req)
|
||||
}
|
||||
|
||||
func (ia *IFlowAuth) newTokenRequest(ctx context.Context, form url.Values) (*http.Request, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, iFlowOAuthTokenEndpoint, strings.NewReader(form.Encode()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow token: create request failed: %w", err)
|
||||
}
|
||||
|
||||
basic := base64.StdEncoding.EncodeToString([]byte(iFlowOAuthClientID + ":" + iFlowOAuthClientSecret))
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
req.Header.Set("Authorization", "Basic "+basic)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (ia *IFlowAuth) doTokenRequest(ctx context.Context, req *http.Request) (*IFlowTokenData, error) {
|
||||
resp, err := ia.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow token: request failed: %w", err)
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow token: read response failed: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
log.Debugf("iflow token request failed: status=%d body=%s", resp.StatusCode, string(body))
|
||||
return nil, fmt.Errorf("iflow token: %d %s", resp.StatusCode, strings.TrimSpace(string(body)))
|
||||
}
|
||||
|
||||
var tokenResp IFlowTokenResponse
|
||||
if err = json.Unmarshal(body, &tokenResp); err != nil {
|
||||
return nil, fmt.Errorf("iflow token: decode response failed: %w", err)
|
||||
}
|
||||
|
||||
data := &IFlowTokenData{
|
||||
AccessToken: tokenResp.AccessToken,
|
||||
RefreshToken: tokenResp.RefreshToken,
|
||||
TokenType: tokenResp.TokenType,
|
||||
Scope: tokenResp.Scope,
|
||||
Expire: time.Now().Add(time.Duration(tokenResp.ExpiresIn) * time.Second).Format(time.RFC3339),
|
||||
}
|
||||
|
||||
if tokenResp.AccessToken == "" {
|
||||
log.Debug(string(body))
|
||||
return nil, fmt.Errorf("iflow token: missing access token in response")
|
||||
}
|
||||
|
||||
info, errAPI := ia.FetchUserInfo(ctx, tokenResp.AccessToken)
|
||||
if errAPI != nil {
|
||||
return nil, fmt.Errorf("iflow token: fetch user info failed: %w", errAPI)
|
||||
}
|
||||
if strings.TrimSpace(info.APIKey) == "" {
|
||||
return nil, fmt.Errorf("iflow token: empty api key returned")
|
||||
}
|
||||
email := strings.TrimSpace(info.Email)
|
||||
if email == "" {
|
||||
email = strings.TrimSpace(info.Phone)
|
||||
}
|
||||
if email == "" {
|
||||
return nil, fmt.Errorf("iflow token: missing account email/phone in user info")
|
||||
}
|
||||
data.APIKey = info.APIKey
|
||||
data.Email = email
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// FetchUserInfo retrieves account metadata (including API key) for the provided access token.
|
||||
func (ia *IFlowAuth) FetchUserInfo(ctx context.Context, accessToken string) (*userInfoData, error) {
|
||||
if strings.TrimSpace(accessToken) == "" {
|
||||
return nil, fmt.Errorf("iflow api key: access token is empty")
|
||||
}
|
||||
|
||||
endpoint := fmt.Sprintf("%s?accessToken=%s", iFlowUserInfoEndpoint, url.QueryEscape(accessToken))
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow api key: create request failed: %w", err)
|
||||
}
|
||||
req.Header.Set("Accept", "application/json")
|
||||
|
||||
resp, err := ia.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow api key: request failed: %w", err)
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow api key: read response failed: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
log.Debugf("iflow api key failed: status=%d body=%s", resp.StatusCode, string(body))
|
||||
return nil, fmt.Errorf("iflow api key: %d %s", resp.StatusCode, strings.TrimSpace(string(body)))
|
||||
}
|
||||
|
||||
var result userInfoResponse
|
||||
if err = json.Unmarshal(body, &result); err != nil {
|
||||
return nil, fmt.Errorf("iflow api key: decode body failed: %w", err)
|
||||
}
|
||||
|
||||
if !result.Success {
|
||||
return nil, fmt.Errorf("iflow api key: request not successful")
|
||||
}
|
||||
|
||||
if result.Data.APIKey == "" {
|
||||
return nil, fmt.Errorf("iflow api key: missing api key in response")
|
||||
}
|
||||
|
||||
return &result.Data, nil
|
||||
}
|
||||
|
||||
// CreateTokenStorage converts token data into persistence storage.
|
||||
func (ia *IFlowAuth) CreateTokenStorage(data *IFlowTokenData) *IFlowTokenStorage {
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
return &IFlowTokenStorage{
|
||||
AccessToken: data.AccessToken,
|
||||
RefreshToken: data.RefreshToken,
|
||||
LastRefresh: time.Now().Format(time.RFC3339),
|
||||
Expire: data.Expire,
|
||||
APIKey: data.APIKey,
|
||||
Email: data.Email,
|
||||
TokenType: data.TokenType,
|
||||
Scope: data.Scope,
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateTokenStorage updates the persisted token storage with latest token data.
|
||||
func (ia *IFlowAuth) UpdateTokenStorage(storage *IFlowTokenStorage, data *IFlowTokenData) {
|
||||
if storage == nil || data == nil {
|
||||
return
|
||||
}
|
||||
storage.AccessToken = data.AccessToken
|
||||
storage.RefreshToken = data.RefreshToken
|
||||
storage.LastRefresh = time.Now().Format(time.RFC3339)
|
||||
storage.Expire = data.Expire
|
||||
if data.APIKey != "" {
|
||||
storage.APIKey = data.APIKey
|
||||
}
|
||||
if data.Email != "" {
|
||||
storage.Email = data.Email
|
||||
}
|
||||
storage.TokenType = data.TokenType
|
||||
storage.Scope = data.Scope
|
||||
}
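// exampleRefreshAndPersist is an illustrative sketch of chaining RefreshTokens,
// UpdateTokenStorage and SaveTokenToFile; the auth file path is a placeholder.
func exampleRefreshAndPersist(ctx context.Context, ia *IFlowAuth, storage *IFlowTokenStorage) error {
	data, err := ia.RefreshTokens(ctx, storage.RefreshToken)
	if err != nil {
		return err
	}
	ia.UpdateTokenStorage(storage, data)
	return storage.SaveTokenToFile("/path/to/auth/iflow-example.json")
}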
|
||||
|
||||
// IFlowTokenResponse models the OAuth token endpoint response.
|
||||
type IFlowTokenResponse struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn int `json:"expires_in"`
|
||||
TokenType string `json:"token_type"`
|
||||
Scope string `json:"scope"`
|
||||
}
|
||||
|
||||
// IFlowTokenData captures processed token details.
|
||||
type IFlowTokenData struct {
|
||||
AccessToken string
|
||||
RefreshToken string
|
||||
TokenType string
|
||||
Scope string
|
||||
Expire string
|
||||
APIKey string
|
||||
Email string
|
||||
Cookie string
|
||||
}
|
||||
|
||||
// userInfoResponse represents the structure returned by the user info endpoint.
|
||||
type userInfoResponse struct {
|
||||
Success bool `json:"success"`
|
||||
Data userInfoData `json:"data"`
|
||||
}
|
||||
|
||||
type userInfoData struct {
|
||||
APIKey string `json:"apiKey"`
|
||||
Email string `json:"email"`
|
||||
Phone string `json:"phone"`
|
||||
}
|
||||
|
||||
// iFlowAPIKeyResponse represents the response from the API key endpoint
|
||||
type iFlowAPIKeyResponse struct {
|
||||
Success bool `json:"success"`
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data iFlowKeyData `json:"data"`
|
||||
Extra interface{} `json:"extra"`
|
||||
}
|
||||
|
||||
// iFlowKeyData contains the API key information
|
||||
type iFlowKeyData struct {
|
||||
HasExpired bool `json:"hasExpired"`
|
||||
ExpireTime string `json:"expireTime"`
|
||||
Name string `json:"name"`
|
||||
APIKey string `json:"apiKey"`
|
||||
APIKeyMask string `json:"apiKeyMask"`
|
||||
}
|
||||
|
||||
// iFlowRefreshRequest represents the request body for refreshing API key
|
||||
type iFlowRefreshRequest struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
// AuthenticateWithCookie performs authentication using browser cookies
|
||||
func (ia *IFlowAuth) AuthenticateWithCookie(ctx context.Context, cookie string) (*IFlowTokenData, error) {
|
||||
if strings.TrimSpace(cookie) == "" {
|
||||
return nil, fmt.Errorf("iflow cookie authentication: cookie is empty")
|
||||
}
|
||||
|
||||
// First, fetch the initial API key information with a GET request to obtain the key name
|
||||
keyInfo, err := ia.fetchAPIKeyInfo(ctx, cookie)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow cookie authentication: fetch initial API key info failed: %w", err)
|
||||
}
|
||||
|
||||
// Refresh the API key with a POST request
|
||||
refreshedKeyInfo, err := ia.RefreshAPIKey(ctx, cookie, keyInfo.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow cookie authentication: refresh API key failed: %w", err)
|
||||
}
|
||||
|
||||
// Convert to token data format using refreshed key
|
||||
data := &IFlowTokenData{
|
||||
APIKey: refreshedKeyInfo.APIKey,
|
||||
Expire: refreshedKeyInfo.ExpireTime,
|
||||
Email: refreshedKeyInfo.Name,
|
||||
Cookie: cookie,
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
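// exampleCookieAuthentication is an illustrative sketch of the cookie-based
// flow: authenticate, convert to storage, then persist. The cookie value and
// target path are placeholders supplied by the caller.
func exampleCookieAuthentication(ctx context.Context, ia *IFlowAuth, cookie string) error {
	data, err := ia.AuthenticateWithCookie(ctx, cookie)
	if err != nil {
		return err
	}
	storage := ia.CreateCookieTokenStorage(data)
	return storage.SaveTokenToFile("/path/to/auth/iflow-cookie-example.json")
}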
|
||||
|
||||
// fetchAPIKeyInfo retrieves API key information via a GET request authenticated by the cookie
|
||||
func (ia *IFlowAuth) fetchAPIKeyInfo(ctx context.Context, cookie string) (*iFlowKeyData, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, iFlowAPIKeyEndpoint, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow cookie: create GET request failed: %w", err)
|
||||
}
|
||||
|
||||
// Set cookie and other headers to mimic browser
|
||||
req.Header.Set("Cookie", cookie)
|
||||
req.Header.Set("Accept", "application/json, text/plain, */*")
|
||||
req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")
|
||||
req.Header.Set("Accept-Language", "zh-CN,zh;q=0.9,en;q=0.8")
|
||||
req.Header.Set("Accept-Encoding", "gzip, deflate, br")
|
||||
req.Header.Set("Connection", "keep-alive")
|
||||
req.Header.Set("Sec-Fetch-Dest", "empty")
|
||||
req.Header.Set("Sec-Fetch-Mode", "cors")
|
||||
req.Header.Set("Sec-Fetch-Site", "same-origin")
|
||||
|
||||
resp, err := ia.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow cookie: GET request failed: %w", err)
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
// Handle gzip compression
|
||||
var reader io.Reader = resp.Body
|
||||
if resp.Header.Get("Content-Encoding") == "gzip" {
|
||||
gzipReader, err := gzip.NewReader(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow cookie: create gzip reader failed: %w", err)
|
||||
}
|
||||
defer func() { _ = gzipReader.Close() }()
|
||||
reader = gzipReader
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow cookie: read GET response failed: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
log.Debugf("iflow cookie GET request failed: status=%d body=%s", resp.StatusCode, string(body))
|
||||
return nil, fmt.Errorf("iflow cookie: GET request failed with status %d: %s", resp.StatusCode, strings.TrimSpace(string(body)))
|
||||
}
|
||||
|
||||
var keyResp iFlowAPIKeyResponse
|
||||
if err = json.Unmarshal(body, &keyResp); err != nil {
|
||||
return nil, fmt.Errorf("iflow cookie: decode GET response failed: %w", err)
|
||||
}
|
||||
|
||||
if !keyResp.Success {
|
||||
return nil, fmt.Errorf("iflow cookie: GET request not successful: %s", keyResp.Message)
|
||||
}
|
||||
|
||||
// The initial response may leave apiKey empty and only populate apiKeyMask; fall back to the mask in that case
|
||||
if keyResp.Data.APIKey == "" && keyResp.Data.APIKeyMask != "" {
|
||||
keyResp.Data.APIKey = keyResp.Data.APIKeyMask
|
||||
}
|
||||
|
||||
return &keyResp.Data, nil
|
||||
}
|
||||
|
||||
// RefreshAPIKey refreshes the API key with a POST request
|
||||
func (ia *IFlowAuth) RefreshAPIKey(ctx context.Context, cookie, name string) (*iFlowKeyData, error) {
|
||||
if strings.TrimSpace(cookie) == "" {
|
||||
return nil, fmt.Errorf("iflow cookie refresh: cookie is empty")
|
||||
}
|
||||
if strings.TrimSpace(name) == "" {
|
||||
return nil, fmt.Errorf("iflow cookie refresh: name is empty")
|
||||
}
|
||||
|
||||
// Prepare request body
|
||||
refreshReq := iFlowRefreshRequest{
|
||||
Name: name,
|
||||
}
|
||||
|
||||
bodyBytes, err := json.Marshal(refreshReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow cookie refresh: marshal request failed: %w", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, iFlowAPIKeyEndpoint, strings.NewReader(string(bodyBytes)))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow cookie refresh: create POST request failed: %w", err)
|
||||
}
|
||||
|
||||
// Set cookie and other headers to mimic browser
|
||||
req.Header.Set("Cookie", cookie)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "application/json, text/plain, */*")
|
||||
req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")
|
||||
req.Header.Set("Accept-Language", "zh-CN,zh;q=0.9,en;q=0.8")
|
||||
req.Header.Set("Accept-Encoding", "gzip, deflate, br")
|
||||
req.Header.Set("Connection", "keep-alive")
|
||||
req.Header.Set("Origin", "https://platform.iflow.cn")
|
||||
req.Header.Set("Referer", "https://platform.iflow.cn/")
|
||||
|
||||
resp, err := ia.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow cookie refresh: POST request failed: %w", err)
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
// Handle gzip compression
|
||||
var reader io.Reader = resp.Body
|
||||
if resp.Header.Get("Content-Encoding") == "gzip" {
|
||||
gzipReader, err := gzip.NewReader(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow cookie refresh: create gzip reader failed: %w", err)
|
||||
}
|
||||
defer func() { _ = gzipReader.Close() }()
|
||||
reader = gzipReader
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("iflow cookie refresh: read POST response failed: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
log.Debugf("iflow cookie POST request failed: status=%d body=%s", resp.StatusCode, string(body))
|
||||
return nil, fmt.Errorf("iflow cookie refresh: POST request failed with status %d: %s", resp.StatusCode, strings.TrimSpace(string(body)))
|
||||
}
|
||||
|
||||
var keyResp iFlowAPIKeyResponse
|
||||
if err = json.Unmarshal(body, &keyResp); err != nil {
|
||||
return nil, fmt.Errorf("iflow cookie refresh: decode POST response failed: %w", err)
|
||||
}
|
||||
|
||||
if !keyResp.Success {
|
||||
return nil, fmt.Errorf("iflow cookie refresh: POST request not successful: %s", keyResp.Message)
|
||||
}
|
||||
|
||||
return &keyResp.Data, nil
|
||||
}
|
||||
|
||||
// ShouldRefreshAPIKey checks if the API key needs to be refreshed (within 2 days of expiry)
|
||||
func ShouldRefreshAPIKey(expireTime string) (bool, time.Duration, error) {
|
||||
if strings.TrimSpace(expireTime) == "" {
|
||||
return false, 0, fmt.Errorf("iflow cookie: expire time is empty")
|
||||
}
|
||||
|
||||
expire, err := time.Parse("2006-01-02 15:04", expireTime)
|
||||
if err != nil {
|
||||
return false, 0, fmt.Errorf("iflow cookie: parse expire time failed: %w", err)
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
twoDaysFromNow := now.Add(48 * time.Hour)
|
||||
|
||||
needsRefresh := expire.Before(twoDaysFromNow)
|
||||
timeUntilExpiry := expire.Sub(now)
|
||||
|
||||
return needsRefresh, timeUntilExpiry, nil
|
||||
}
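// exampleMaybeRefreshAPIKey is an illustrative sketch of pairing
// ShouldRefreshAPIKey with RefreshAPIKey; it assumes the stored Email holds
// the key name, mirroring how AuthenticateWithCookie fills that field.
func exampleMaybeRefreshAPIKey(ctx context.Context, ia *IFlowAuth, storage *IFlowTokenStorage) error {
	needsRefresh, remaining, err := ShouldRefreshAPIKey(storage.Expire)
	if err != nil {
		return err
	}
	if !needsRefresh {
		log.Debugf("iflow api key still valid for %v", remaining)
		return nil
	}
	keyData, err := ia.RefreshAPIKey(ctx, storage.Cookie, storage.Email)
	if err != nil {
		return err
	}
	ia.UpdateCookieTokenStorage(storage, keyData)
	return nil
}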
|
||||
|
||||
// CreateCookieTokenStorage converts cookie-based token data into persistence storage
|
||||
func (ia *IFlowAuth) CreateCookieTokenStorage(data *IFlowTokenData) *IFlowTokenStorage {
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Only save the BXAuth field from the cookie
|
||||
bxAuth := ExtractBXAuth(data.Cookie)
|
||||
cookieToSave := ""
|
||||
if bxAuth != "" {
|
||||
cookieToSave = "BXAuth=" + bxAuth + ";"
|
||||
}
|
||||
|
||||
return &IFlowTokenStorage{
|
||||
APIKey: data.APIKey,
|
||||
Email: data.Email,
|
||||
Expire: data.Expire,
|
||||
Cookie: cookieToSave,
|
||||
LastRefresh: time.Now().Format(time.RFC3339),
|
||||
Type: "iflow",
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateCookieTokenStorage updates the persisted token storage with refreshed API key data
|
||||
func (ia *IFlowAuth) UpdateCookieTokenStorage(storage *IFlowTokenStorage, keyData *iFlowKeyData) {
|
||||
if storage == nil || keyData == nil {
|
||||
return
|
||||
}
|
||||
|
||||
storage.APIKey = keyData.APIKey
|
||||
storage.Expire = keyData.ExpireTime
|
||||
storage.LastRefresh = time.Now().Format(time.RFC3339)
|
||||
}
|
||||
44
internal/auth/iflow/iflow_token.go
Normal file
@@ -0,0 +1,44 @@
package iflow

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
)

// IFlowTokenStorage persists iFlow OAuth credentials alongside the derived API key.
type IFlowTokenStorage struct {
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`
	LastRefresh  string `json:"last_refresh"`
	Expire       string `json:"expired"`
	APIKey       string `json:"api_key"`
	Email        string `json:"email"`
	TokenType    string `json:"token_type"`
	Scope        string `json:"scope"`
	Cookie       string `json:"cookie"`
	Type         string `json:"type"`
}

// SaveTokenToFile serialises the token storage to disk.
func (ts *IFlowTokenStorage) SaveTokenToFile(authFilePath string) error {
	misc.LogSavingCredentials(authFilePath)
	ts.Type = "iflow"
	if err := os.MkdirAll(filepath.Dir(authFilePath), 0o700); err != nil {
		return fmt.Errorf("iflow token: create directory failed: %w", err)
	}

	f, err := os.Create(authFilePath)
	if err != nil {
		return fmt.Errorf("iflow token: create file failed: %w", err)
	}
	defer func() { _ = f.Close() }()

	if err = json.NewEncoder(f).Encode(ts); err != nil {
		return fmt.Errorf("iflow token: encode token failed: %w", err)
	}
	return nil
}
143
internal/auth/iflow/oauth_server.go
Normal file
@@ -0,0 +1,143 @@
|
||||
package iflow
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const errorRedirectURL = "https://iflow.cn/oauth/error"
|
||||
|
||||
// OAuthResult captures the outcome of the local OAuth callback.
|
||||
type OAuthResult struct {
|
||||
Code string
|
||||
State string
|
||||
Error string
|
||||
}
|
||||
|
||||
// OAuthServer provides a minimal HTTP server for handling the iFlow OAuth callback.
|
||||
type OAuthServer struct {
|
||||
server *http.Server
|
||||
port int
|
||||
result chan *OAuthResult
|
||||
errChan chan error
|
||||
mu sync.Mutex
|
||||
running bool
|
||||
}
|
||||
|
||||
// NewOAuthServer constructs a new OAuthServer bound to the provided port.
|
||||
func NewOAuthServer(port int) *OAuthServer {
|
||||
return &OAuthServer{
|
||||
port: port,
|
||||
result: make(chan *OAuthResult, 1),
|
||||
errChan: make(chan error, 1),
|
||||
}
|
||||
}
|
||||
|
||||
// Start launches the callback listener.
|
||||
func (s *OAuthServer) Start() error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.running {
|
||||
return fmt.Errorf("iflow oauth server already running")
|
||||
}
|
||||
if !s.isPortAvailable() {
|
||||
return fmt.Errorf("port %d is already in use", s.port)
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/oauth2callback", s.handleCallback)
|
||||
|
||||
s.server = &http.Server{
|
||||
Addr: fmt.Sprintf(":%d", s.port),
|
||||
Handler: mux,
|
||||
ReadTimeout: 10 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
s.running = true
|
||||
|
||||
go func() {
|
||||
if err := s.server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||||
s.errChan <- err
|
||||
}
|
||||
}()
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop gracefully terminates the callback listener.
|
||||
func (s *OAuthServer) Stop(ctx context.Context) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if !s.running || s.server == nil {
|
||||
return nil
|
||||
}
|
||||
defer func() {
|
||||
s.running = false
|
||||
s.server = nil
|
||||
}()
|
||||
return s.server.Shutdown(ctx)
|
||||
}
|
||||
|
||||
// WaitForCallback blocks until a callback result, server error, or timeout occurs.
|
||||
func (s *OAuthServer) WaitForCallback(timeout time.Duration) (*OAuthResult, error) {
|
||||
select {
|
||||
case res := <-s.result:
|
||||
return res, nil
|
||||
case err := <-s.errChan:
|
||||
return nil, err
|
||||
case <-time.After(timeout):
|
||||
return nil, fmt.Errorf("timeout waiting for OAuth callback")
|
||||
}
|
||||
}
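// exampleCallbackLifecycle is an illustrative sketch of the server lifecycle:
// start the listener, wait for the browser redirect, then shut down; the
// five-minute timeout is a placeholder.
func exampleCallbackLifecycle(ctx context.Context) (*OAuthResult, error) {
	srv := NewOAuthServer(CallbackPort)
	if err := srv.Start(); err != nil {
		return nil, err
	}
	defer func() { _ = srv.Stop(ctx) }()
	return srv.WaitForCallback(5 * time.Minute)
}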
|
||||
|
||||
func (s *OAuthServer) handleCallback(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodGet {
|
||||
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
query := r.URL.Query()
|
||||
if errParam := strings.TrimSpace(query.Get("error")); errParam != "" {
|
||||
s.sendResult(&OAuthResult{Error: errParam})
|
||||
http.Redirect(w, r, errorRedirectURL, http.StatusFound)
|
||||
return
|
||||
}
|
||||
|
||||
code := strings.TrimSpace(query.Get("code"))
|
||||
if code == "" {
|
||||
s.sendResult(&OAuthResult{Error: "missing_code"})
|
||||
http.Redirect(w, r, errorRedirectURL, http.StatusFound)
|
||||
return
|
||||
}
|
||||
|
||||
state := query.Get("state")
|
||||
s.sendResult(&OAuthResult{Code: code, State: state})
|
||||
http.Redirect(w, r, SuccessRedirectURL, http.StatusFound)
|
||||
}
|
||||
|
||||
func (s *OAuthServer) sendResult(res *OAuthResult) {
|
||||
select {
|
||||
case s.result <- res:
|
||||
default:
|
||||
log.Debug("iflow oauth result channel full, dropping result")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *OAuthServer) isPortAvailable() bool {
|
||||
addr := fmt.Sprintf(":%d", s.port)
|
||||
listener, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
_ = listener.Close()
|
||||
return true
|
||||
}
|
||||
@@ -85,7 +85,7 @@ type QwenAuth struct {
|
||||
// NewQwenAuth creates a new QwenAuth instance with a proxy-configured HTTP client.
|
||||
func NewQwenAuth(cfg *config.Config) *QwenAuth {
|
||||
return &QwenAuth{
|
||||
httpClient: util.SetProxy(cfg, &http.Client{}),
|
||||
httpClient: util.SetProxy(&cfg.SDKConfig, &http.Client{}),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -260,7 +260,7 @@ func (qa *QwenAuth) PollForToken(deviceCode, codeVerifier string) (*QwenTokenDat
|
||||
switch errorType {
|
||||
case "authorization_pending":
|
||||
// User has not yet approved the authorization request. Continue polling.
|
||||
log.Infof("Polling attempt %d/%d...\n", attempt+1, maxAttempts)
|
||||
fmt.Printf("Polling attempt %d/%d...\n\n", attempt+1, maxAttempts)
|
||||
time.Sleep(pollInterval)
|
||||
continue
|
||||
case "slow_down":
|
||||
@@ -269,7 +269,7 @@ func (qa *QwenAuth) PollForToken(deviceCode, codeVerifier string) (*QwenTokenDat
|
||||
if pollInterval > 10*time.Second {
|
||||
pollInterval = 10 * time.Second
|
||||
}
|
||||
log.Infof("Server requested to slow down, increasing poll interval to %v\n", pollInterval)
|
||||
fmt.Printf("Server requested to slow down, increasing poll interval to %v\n\n", pollInterval)
|
||||
time.Sleep(pollInterval)
|
||||
continue
|
||||
case "expired_token":
|
||||
|
||||
208
internal/auth/vertex/keyutil.go
Normal file
@@ -0,0 +1,208 @@
|
||||
package vertex
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NormalizeServiceAccountJSON normalizes the given JSON-encoded service account payload.
|
||||
// It returns the normalized JSON (with sanitized private_key) or, if normalization fails,
|
||||
// the original bytes and the encountered error.
|
||||
func NormalizeServiceAccountJSON(raw []byte) ([]byte, error) {
|
||||
if len(raw) == 0 {
|
||||
return raw, nil
|
||||
}
|
||||
var payload map[string]any
|
||||
if err := json.Unmarshal(raw, &payload); err != nil {
|
||||
return raw, err
|
||||
}
|
||||
normalized, err := NormalizeServiceAccountMap(payload)
|
||||
if err != nil {
|
||||
return raw, err
|
||||
}
|
||||
out, err := json.Marshal(normalized)
|
||||
if err != nil {
|
||||
return raw, err
|
||||
}
|
||||
return out, nil
|
||||
}
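// exampleNormalizeAndInspect is an illustrative sketch of normalizing a parsed
// service-account map and reading a common field afterwards; project_id follows
// the standard service-account JSON layout.
func exampleNormalizeAndInspect(raw []byte) (string, error) {
	var sa map[string]any
	if err := json.Unmarshal(raw, &sa); err != nil {
		return "", err
	}
	clean, err := NormalizeServiceAccountMap(sa)
	if err != nil {
		return "", err
	}
	projectID, _ := clean["project_id"].(string)
	return projectID, nil
}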
|
||||
|
||||
// NormalizeServiceAccountMap returns a copy of the given service account map with
|
||||
// a sanitized private_key field that is guaranteed to contain a valid RSA PRIVATE KEY PEM block.
|
||||
func NormalizeServiceAccountMap(sa map[string]any) (map[string]any, error) {
|
||||
if sa == nil {
|
||||
return nil, fmt.Errorf("service account payload is empty")
|
||||
}
|
||||
pk, _ := sa["private_key"].(string)
|
||||
if strings.TrimSpace(pk) == "" {
|
||||
return nil, fmt.Errorf("service account missing private_key")
|
||||
}
|
||||
normalized, err := sanitizePrivateKey(pk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clone := make(map[string]any, len(sa))
|
||||
for k, v := range sa {
|
||||
clone[k] = v
|
||||
}
|
||||
clone["private_key"] = normalized
|
||||
return clone, nil
|
||||
}
|
||||
|
||||
func sanitizePrivateKey(raw string) (string, error) {
|
||||
pk := strings.ReplaceAll(raw, "\r\n", "\n")
|
||||
pk = strings.ReplaceAll(pk, "\r", "\n")
|
||||
pk = stripANSIEscape(pk)
|
||||
pk = strings.ToValidUTF8(pk, "")
|
||||
pk = strings.TrimSpace(pk)
|
||||
|
||||
normalized := pk
|
||||
if block, _ := pem.Decode([]byte(pk)); block == nil {
|
||||
// Attempt to reconstruct from the textual payload.
|
||||
if reconstructed, err := rebuildPEM(pk); err == nil {
|
||||
normalized = reconstructed
|
||||
} else {
|
||||
return "", fmt.Errorf("private_key is not valid pem: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
block, _ := pem.Decode([]byte(normalized))
|
||||
if block == nil {
|
||||
return "", fmt.Errorf("private_key pem decode failed")
|
||||
}
|
||||
|
||||
rsaBlock, err := ensureRSAPrivateKey(block)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(pem.EncodeToMemory(rsaBlock)), nil
|
||||
}
|
||||
|
||||
func ensureRSAPrivateKey(block *pem.Block) (*pem.Block, error) {
|
||||
if block == nil {
|
||||
return nil, fmt.Errorf("pem block is nil")
|
||||
}
|
||||
|
||||
if block.Type == "RSA PRIVATE KEY" {
|
||||
if _, err := x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
|
||||
return nil, fmt.Errorf("private_key invalid rsa: %w", err)
|
||||
}
|
||||
return block, nil
|
||||
}
|
||||
|
||||
if block.Type == "PRIVATE KEY" {
|
||||
key, err := x509.ParsePKCS8PrivateKey(block.Bytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("private_key invalid pkcs8: %w", err)
|
||||
}
|
||||
rsaKey, ok := key.(*rsa.PrivateKey)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("private_key is not an RSA key")
|
||||
}
|
||||
der := x509.MarshalPKCS1PrivateKey(rsaKey)
|
||||
return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: der}, nil
|
||||
}
|
||||
|
||||
// Attempt auto-detection: try PKCS#1 first, then PKCS#8.
|
||||
if rsaKey, err := x509.ParsePKCS1PrivateKey(block.Bytes); err == nil {
|
||||
der := x509.MarshalPKCS1PrivateKey(rsaKey)
|
||||
return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: der}, nil
|
||||
}
|
||||
if key, err := x509.ParsePKCS8PrivateKey(block.Bytes); err == nil {
|
||||
if rsaKey, ok := key.(*rsa.PrivateKey); ok {
|
||||
der := x509.MarshalPKCS1PrivateKey(rsaKey)
|
||||
return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: der}, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("private_key uses unsupported format")
|
||||
}
|
||||
|
||||
func rebuildPEM(raw string) (string, error) {
|
||||
kind := "PRIVATE KEY"
|
||||
if strings.Contains(raw, "RSA PRIVATE KEY") {
|
||||
kind = "RSA PRIVATE KEY"
|
||||
}
|
||||
header := "-----BEGIN " + kind + "-----"
|
||||
footer := "-----END " + kind + "-----"
|
||||
start := strings.Index(raw, header)
|
||||
end := strings.Index(raw, footer)
|
||||
if start < 0 || end <= start {
|
||||
return "", fmt.Errorf("missing pem markers")
|
||||
}
|
||||
body := raw[start+len(header) : end]
|
||||
payload := filterBase64(body)
|
||||
if payload == "" {
|
||||
return "", fmt.Errorf("private_key base64 payload empty")
|
||||
}
|
||||
der, err := base64.StdEncoding.DecodeString(payload)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("private_key base64 decode failed: %w", err)
|
||||
}
|
||||
block := &pem.Block{Type: kind, Bytes: der}
|
||||
return string(pem.EncodeToMemory(block)), nil
|
||||
}
|
||||
|
||||
func filterBase64(s string) string {
|
||||
var b strings.Builder
|
||||
for _, r := range s {
|
||||
switch {
|
||||
case r >= 'A' && r <= 'Z':
|
||||
b.WriteRune(r)
|
||||
case r >= 'a' && r <= 'z':
|
||||
b.WriteRune(r)
|
||||
case r >= '0' && r <= '9':
|
||||
b.WriteRune(r)
|
||||
case r == '+' || r == '/' || r == '=':
|
||||
b.WriteRune(r)
|
||||
default:
|
||||
// skip
|
||||
}
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func stripANSIEscape(s string) string {
|
||||
in := []rune(s)
|
||||
var out []rune
|
||||
for i := 0; i < len(in); i++ {
|
||||
r := in[i]
|
||||
if r != 0x1b {
|
||||
out = append(out, r)
|
||||
continue
|
||||
}
|
||||
if i+1 >= len(in) {
|
||||
continue
|
||||
}
|
||||
next := in[i+1]
|
||||
switch next {
|
||||
case ']':
|
||||
i += 2
|
||||
for i < len(in) {
|
||||
if in[i] == 0x07 {
|
||||
break
|
||||
}
|
||||
if in[i] == 0x1b && i+1 < len(in) && in[i+1] == '\\' {
|
||||
i++
|
||||
break
|
||||
}
|
||||
i++
|
||||
}
|
||||
case '[':
|
||||
i += 2
|
||||
for i < len(in) {
|
||||
if (in[i] >= 'A' && in[i] <= 'Z') || (in[i] >= 'a' && in[i] <= 'z') {
|
||||
break
|
||||
}
|
||||
i++
|
||||
}
|
||||
default:
|
||||
// skip single ESC
|
||||
}
|
||||
}
|
||||
return string(out)
|
||||
}
|
||||
66
internal/auth/vertex/vertex_credentials.go
Normal file
@@ -0,0 +1,66 @@
|
||||
// Package vertex provides token storage for Google Vertex AI Gemini via service account credentials.
|
||||
// It serialises service account JSON into an auth file that is consumed by the runtime executor.
|
||||
package vertex
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/misc"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// VertexCredentialStorage stores the service account JSON for Vertex AI access.
|
||||
// The content is persisted verbatim under the "service_account" key, together with
|
||||
// helper fields for project, location and email to improve logging and discovery.
|
||||
type VertexCredentialStorage struct {
|
||||
// ServiceAccount holds the parsed service account JSON content.
|
||||
ServiceAccount map[string]any `json:"service_account"`
|
||||
|
||||
// ProjectID is derived from the service account JSON (project_id).
|
||||
ProjectID string `json:"project_id"`
|
||||
|
||||
// Email is the client_email from the service account JSON.
|
||||
Email string `json:"email"`
|
||||
|
||||
// Location optionally sets a default region (e.g., us-central1) for Vertex endpoints.
|
||||
Location string `json:"location,omitempty"`
|
||||
|
||||
// Type is the provider identifier stored alongside credentials. Always "vertex".
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
// SaveTokenToFile writes the credential payload to the given file path in JSON format.
|
||||
// It ensures the parent directory exists and logs the operation for transparency.
|
||||
func (s *VertexCredentialStorage) SaveTokenToFile(authFilePath string) error {
|
||||
misc.LogSavingCredentials(authFilePath)
|
||||
if s == nil {
|
||||
return fmt.Errorf("vertex credential: storage is nil")
|
||||
}
|
||||
if s.ServiceAccount == nil {
|
||||
return fmt.Errorf("vertex credential: service account content is empty")
|
||||
}
|
||||
// Ensure we tag the file with the provider type.
|
||||
s.Type = "vertex"
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(authFilePath), 0o700); err != nil {
|
||||
return fmt.Errorf("vertex credential: create directory failed: %w", err)
|
||||
}
|
||||
f, err := os.Create(authFilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("vertex credential: create file failed: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if errClose := f.Close(); errClose != nil {
|
||||
log.Errorf("vertex credential: failed to close file: %v", errClose)
|
||||
}
|
||||
}()
|
||||
enc := json.NewEncoder(f)
|
||||
enc.SetIndent("", " ")
|
||||
if err = enc.Encode(s); err != nil {
|
||||
return fmt.Errorf("vertex credential: encode failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
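// exampleSaveVertexCredential is an illustrative sketch: a parsed (and ideally
// normalized) service-account map is wrapped in VertexCredentialStorage and
// written to disk. The location default and file path are placeholders.
func exampleSaveVertexCredential(sa map[string]any, authFilePath string) error {
	projectID, _ := sa["project_id"].(string)
	email, _ := sa["client_email"].(string)
	storage := &VertexCredentialStorage{
		ServiceAccount: sa,
		ProjectID:      projectID,
		Email:          email,
		Location:       "us-central1", // placeholder default region
	}
	return storage.SaveTokenToFile(authFilePath)
}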
|
||||
@@ -21,7 +21,7 @@ import (
|
||||
// Returns:
|
||||
// - An error if the URL cannot be opened, otherwise nil.
|
||||
func OpenURL(url string) error {
|
||||
log.Infof("Attempting to open URL in browser: %s", url)
|
||||
fmt.Printf("Attempting to open URL in browser: %s\n", url)
|
||||
|
||||
// Try using the open-golang library first
|
||||
err := open.Run(url)
|
||||
|
||||
15
internal/buildinfo/buildinfo.go
Normal file
@@ -0,0 +1,15 @@
// Package buildinfo exposes compile-time metadata shared across the server.
package buildinfo

// The following variables are overridden via ldflags during release builds.
// Defaults cover local development builds.
var (
	// Version is the semantic version or git describe output of the binary.
	Version = "dev"

	// Commit is the git commit SHA baked into the binary.
	Commit = "none"

	// BuildDate records when the binary was built in UTC.
	BuildDate = "unknown"
)
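// An illustrative release build might inject real values via ldflags; the
// version, commit and date below are placeholders:
//
//	go build -ldflags "\
//	  -X github.com/router-for-me/CLIProxyAPI/v6/internal/buildinfo.Version=v6.1.0 \
//	  -X github.com/router-for-me/CLIProxyAPI/v6/internal/buildinfo.Commit=9299897 \
//	  -X github.com/router-for-me/CLIProxyAPI/v6/internal/buildinfo.BuildDate=2025-01-01T00:00:00Z"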
195
internal/cache/signature_cache.go
vendored
Normal file
@@ -0,0 +1,195 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SignatureEntry holds a cached thinking signature with timestamp
|
||||
type SignatureEntry struct {
|
||||
Signature string
|
||||
Timestamp time.Time
|
||||
}
|
||||
|
||||
const (
|
||||
// SignatureCacheTTL is how long signatures are valid
|
||||
SignatureCacheTTL = 3 * time.Hour
|
||||
|
||||
// SignatureTextHashLen is the length of the hash key (16 hex chars = 64-bit key space)
|
||||
SignatureTextHashLen = 16
|
||||
|
||||
// MinValidSignatureLen is the minimum length for a signature to be considered valid
|
||||
MinValidSignatureLen = 50
|
||||
|
||||
// CacheCleanupInterval controls how often stale entries are purged
|
||||
CacheCleanupInterval = 10 * time.Minute
|
||||
)
|
||||
|
||||
// signatureCache stores signatures by model group -> textHash -> SignatureEntry
|
||||
var signatureCache sync.Map
|
||||
|
||||
// cacheCleanupOnce ensures the background cleanup goroutine starts only once
|
||||
var cacheCleanupOnce sync.Once
|
||||
|
||||
// groupCache is the inner map type
|
||||
type groupCache struct {
|
||||
mu sync.RWMutex
|
||||
entries map[string]SignatureEntry
|
||||
}
|
||||
|
||||
// hashText creates a stable, Unicode-safe key from text content
|
||||
func hashText(text string) string {
|
||||
h := sha256.Sum256([]byte(text))
|
||||
return hex.EncodeToString(h[:])[:SignatureTextHashLen]
|
||||
}
|
||||
|
||||
// getOrCreateGroupCache gets or creates a cache bucket for a model group
|
||||
func getOrCreateGroupCache(groupKey string) *groupCache {
|
||||
// Start background cleanup on first access
|
||||
cacheCleanupOnce.Do(startCacheCleanup)
|
||||
|
||||
if val, ok := signatureCache.Load(groupKey); ok {
|
||||
return val.(*groupCache)
|
||||
}
|
||||
sc := &groupCache{entries: make(map[string]SignatureEntry)}
|
||||
actual, _ := signatureCache.LoadOrStore(groupKey, sc)
|
||||
return actual.(*groupCache)
|
||||
}
|
||||
|
||||
// startCacheCleanup launches a background goroutine that periodically
|
||||
// removes caches where all entries have expired.
|
||||
func startCacheCleanup() {
|
||||
go func() {
|
||||
ticker := time.NewTicker(CacheCleanupInterval)
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
purgeExpiredCaches()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// purgeExpiredCaches removes caches with no valid (non-expired) entries.
|
||||
func purgeExpiredCaches() {
|
||||
now := time.Now()
|
||||
signatureCache.Range(func(key, value any) bool {
|
||||
sc := value.(*groupCache)
|
||||
sc.mu.Lock()
|
||||
// Remove expired entries
|
||||
for k, entry := range sc.entries {
|
||||
if now.Sub(entry.Timestamp) > SignatureCacheTTL {
|
||||
delete(sc.entries, k)
|
||||
}
|
||||
}
|
||||
isEmpty := len(sc.entries) == 0
|
||||
sc.mu.Unlock()
|
||||
// Remove cache bucket if empty
|
||||
if isEmpty {
|
||||
signatureCache.Delete(key)
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// CacheSignature stores a thinking signature for a given model group and text.
|
||||
// Used for Claude models that require signed thinking blocks in multi-turn conversations.
|
||||
func CacheSignature(modelName, text, signature string) {
|
||||
if text == "" || signature == "" {
|
||||
return
|
||||
}
|
||||
if len(signature) < MinValidSignatureLen {
|
||||
return
|
||||
}
|
||||
|
||||
groupKey := GetModelGroup(modelName)
|
||||
textHash := hashText(text)
|
||||
sc := getOrCreateGroupCache(groupKey)
|
||||
sc.mu.Lock()
|
||||
defer sc.mu.Unlock()
|
||||
|
||||
sc.entries[textHash] = SignatureEntry{
|
||||
Signature: signature,
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
// GetCachedSignature retrieves a cached signature for a given model group and text.
|
||||
// Returns empty string if not found or expired.
|
||||
func GetCachedSignature(modelName, text string) string {
|
||||
groupKey := GetModelGroup(modelName)
|
||||
|
||||
if text == "" {
|
||||
if groupKey == "gemini" {
|
||||
return "skip_thought_signature_validator"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
val, ok := signatureCache.Load(groupKey)
|
||||
if !ok {
|
||||
if groupKey == "gemini" {
|
||||
return "skip_thought_signature_validator"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
sc := val.(*groupCache)
|
||||
|
||||
textHash := hashText(text)
|
||||
|
||||
now := time.Now()
|
||||
|
||||
sc.mu.Lock()
|
||||
entry, exists := sc.entries[textHash]
|
||||
if !exists {
|
||||
sc.mu.Unlock()
|
||||
if groupKey == "gemini" {
|
||||
return "skip_thought_signature_validator"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
if now.Sub(entry.Timestamp) > SignatureCacheTTL {
|
||||
delete(sc.entries, textHash)
|
||||
sc.mu.Unlock()
|
||||
if groupKey == "gemini" {
|
||||
return "skip_thought_signature_validator"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Refresh TTL on access (sliding expiration).
|
||||
entry.Timestamp = now
|
||||
sc.entries[textHash] = entry
|
||||
sc.mu.Unlock()
|
||||
|
||||
return entry.Signature
|
||||
}
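// exampleSignatureRoundTrip is an illustrative sketch of the cache round trip:
// store a signature for a thinking block, then look it up on the next turn.
// The model, text and signature literals are placeholders.
func exampleSignatureRoundTrip() string {
	const model = "claude-sonnet-4-5"
	const thinking = "...thinking text from a previous turn..."
	const signature = "signature-with-at-least-fifty-characters-0123456789"

	CacheSignature(model, thinking, signature)
	if sig := GetCachedSignature(model, thinking); HasValidSignature(model, sig) {
		return sig
	}
	return ""
}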
|
||||
|
||||
// ClearSignatureCache clears signature cache for a specific model group or all groups.
|
||||
func ClearSignatureCache(modelName string) {
|
||||
if modelName == "" {
|
||||
signatureCache.Range(func(key, _ any) bool {
|
||||
signatureCache.Delete(key)
|
||||
return true
|
||||
})
|
||||
return
|
||||
}
|
||||
groupKey := GetModelGroup(modelName)
|
||||
signatureCache.Delete(groupKey)
|
||||
}
|
||||
|
||||
// HasValidSignature checks if a signature is valid (non-empty and long enough)
|
||||
func HasValidSignature(modelName, signature string) bool {
|
||||
return (signature != "" && len(signature) >= MinValidSignatureLen) || (signature == "skip_thought_signature_validator" && GetModelGroup(modelName) == "gemini")
|
||||
}
|
||||
|
||||
func GetModelGroup(modelName string) string {
|
||||
if strings.Contains(modelName, "gpt") {
|
||||
return "gpt"
|
||||
} else if strings.Contains(modelName, "claude") {
|
||||
return "claude"
|
||||
} else if strings.Contains(modelName, "gemini") {
|
||||
return "gemini"
|
||||
}
|
||||
return modelName
|
||||
}
|
||||
210
internal/cache/signature_cache_test.go
vendored
Normal file
@@ -0,0 +1,210 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
const testModelName = "claude-sonnet-4-5"
|
||||
|
||||
func TestCacheSignature_BasicStorageAndRetrieval(t *testing.T) {
|
||||
ClearSignatureCache("")
|
||||
|
||||
text := "This is some thinking text content"
|
||||
signature := "abc123validSignature1234567890123456789012345678901234567890"
|
||||
|
||||
// Store signature
|
||||
CacheSignature(testModelName, text, signature)
|
||||
|
||||
// Retrieve signature
|
||||
retrieved := GetCachedSignature(testModelName, text)
|
||||
if retrieved != signature {
|
||||
t.Errorf("Expected signature '%s', got '%s'", signature, retrieved)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheSignature_DifferentModelGroups(t *testing.T) {
|
||||
ClearSignatureCache("")
|
||||
|
||||
text := "Same text across models"
|
||||
sig1 := "signature1_1234567890123456789012345678901234567890123456"
|
||||
sig2 := "signature2_1234567890123456789012345678901234567890123456"
|
||||
|
||||
geminiModel := "gemini-3-pro-preview"
|
||||
CacheSignature(testModelName, text, sig1)
|
||||
CacheSignature(geminiModel, text, sig2)
|
||||
|
||||
if GetCachedSignature(testModelName, text) != sig1 {
|
||||
t.Error("Claude signature mismatch")
|
||||
}
|
||||
if GetCachedSignature(geminiModel, text) != sig2 {
|
||||
t.Error("Gemini signature mismatch")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheSignature_NotFound(t *testing.T) {
|
||||
ClearSignatureCache("")
|
||||
|
||||
// Non-existent session
|
||||
if got := GetCachedSignature(testModelName, "some text"); got != "" {
|
||||
t.Errorf("Expected empty string for nonexistent session, got '%s'", got)
|
||||
}
|
||||
|
||||
// Existing session but different text
|
||||
CacheSignature(testModelName, "text-a", "sigA12345678901234567890123456789012345678901234567890")
|
||||
if got := GetCachedSignature(testModelName, "text-b"); got != "" {
|
||||
t.Errorf("Expected empty string for different text, got '%s'", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheSignature_EmptyInputs(t *testing.T) {
|
||||
ClearSignatureCache("")
|
||||
|
||||
// All empty/invalid inputs should be no-ops
|
||||
CacheSignature(testModelName, "", "sig12345678901234567890123456789012345678901234567890")
|
||||
CacheSignature(testModelName, "text", "")
|
||||
CacheSignature(testModelName, "text", "short") // Too short
|
||||
|
||||
if got := GetCachedSignature(testModelName, "text"); got != "" {
|
||||
t.Errorf("Expected empty after invalid cache attempts, got '%s'", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheSignature_ShortSignatureRejected(t *testing.T) {
|
||||
ClearSignatureCache("")
|
||||
|
||||
text := "Some text"
|
||||
shortSig := "abc123" // Less than 50 chars
|
||||
|
||||
CacheSignature(testModelName, text, shortSig)
|
||||
|
||||
if got := GetCachedSignature(testModelName, text); got != "" {
|
||||
t.Errorf("Short signature should be rejected, got '%s'", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClearSignatureCache_ModelGroup(t *testing.T) {
|
||||
ClearSignatureCache("")
|
||||
|
||||
sig := "validSig1234567890123456789012345678901234567890123456"
|
||||
CacheSignature(testModelName, "text", sig)
|
||||
CacheSignature(testModelName, "text-2", sig)
|
||||
|
||||
ClearSignatureCache("session-1")
|
||||
|
||||
if got := GetCachedSignature(testModelName, "text"); got != sig {
|
||||
t.Error("signature should remain when clearing unknown session")
|
||||
}
|
||||
}
|
||||
|
||||
func TestClearSignatureCache_AllSessions(t *testing.T) {
|
||||
ClearSignatureCache("")
|
||||
|
||||
sig := "validSig1234567890123456789012345678901234567890123456"
|
||||
CacheSignature(testModelName, "text", sig)
|
||||
CacheSignature(testModelName, "text-2", sig)
|
||||
|
||||
ClearSignatureCache("")
|
||||
|
||||
if got := GetCachedSignature(testModelName, "text"); got != "" {
|
||||
t.Error("text should be cleared")
|
||||
}
|
||||
if got := GetCachedSignature(testModelName, "text-2"); got != "" {
|
||||
t.Error("text-2 should be cleared")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasValidSignature(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
modelName string
|
||||
signature string
|
||||
expected bool
|
||||
}{
|
||||
{"valid long signature", testModelName, "abc123validSignature1234567890123456789012345678901234567890", true},
|
||||
{"exactly 50 chars", testModelName, "12345678901234567890123456789012345678901234567890", true},
|
||||
{"49 chars - invalid", testModelName, "1234567890123456789012345678901234567890123456789", false},
|
||||
{"empty string", testModelName, "", false},
|
||||
{"short signature", testModelName, "abc", false},
|
||||
{"gemini sentinel", "gemini-3-pro-preview", "skip_thought_signature_validator", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := HasValidSignature(tt.modelName, tt.signature)
|
||||
if result != tt.expected {
|
||||
t.Errorf("HasValidSignature(%q) = %v, expected %v", tt.signature, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheSignature_TextHashCollisionResistance(t *testing.T) {
|
||||
ClearSignatureCache("")
|
||||
|
||||
// Different texts should produce different hashes
|
||||
text1 := "First thinking text"
|
||||
text2 := "Second thinking text"
|
||||
sig1 := "signature1_1234567890123456789012345678901234567890123456"
|
||||
sig2 := "signature2_1234567890123456789012345678901234567890123456"
|
||||
|
||||
CacheSignature(testModelName, text1, sig1)
|
||||
CacheSignature(testModelName, text2, sig2)
|
||||
|
||||
if GetCachedSignature(testModelName, text1) != sig1 {
|
||||
t.Error("text1 signature mismatch")
|
||||
}
|
||||
if GetCachedSignature(testModelName, text2) != sig2 {
|
||||
t.Error("text2 signature mismatch")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheSignature_UnicodeText(t *testing.T) {
|
||||
ClearSignatureCache("")
|
||||
|
||||
text := "한글 텍스트와 이모지 🎉 그리고 特殊文字"
|
||||
sig := "unicodeSig123456789012345678901234567890123456789012345"
|
||||
|
||||
CacheSignature(testModelName, text, sig)
|
||||
|
||||
if got := GetCachedSignature(testModelName, text); got != sig {
|
||||
t.Errorf("Unicode text signature retrieval failed, got '%s'", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheSignature_Overwrite(t *testing.T) {
|
||||
ClearSignatureCache("")
|
||||
|
||||
text := "Same text"
|
||||
sig1 := "firstSignature12345678901234567890123456789012345678901"
|
||||
sig2 := "secondSignature1234567890123456789012345678901234567890"
|
||||
|
||||
CacheSignature(testModelName, text, sig1)
|
||||
CacheSignature(testModelName, text, sig2) // Overwrite
|
||||
|
||||
if got := GetCachedSignature(testModelName, text); got != sig2 {
|
||||
t.Errorf("Expected overwritten signature '%s', got '%s'", sig2, got)
|
||||
}
|
||||
}
|
||||
|
||||
// Note: TTL expiration is tricky to test without mocking time.
// We only verify the logic path exists; actual expiration would require time manipulation.
|
||||
func TestCacheSignature_ExpirationLogic(t *testing.T) {
|
||||
ClearSignatureCache("")
|
||||
|
||||
// This test verifies the expiration check exists
|
||||
// In a real scenario, we'd mock time.Now()
|
||||
text := "text"
|
||||
sig := "validSig1234567890123456789012345678901234567890123456"
|
||||
|
||||
CacheSignature(testModelName, text, sig)
|
||||
|
||||
// Fresh entry should be retrievable
|
||||
if got := GetCachedSignature(testModelName, text); got != sig {
|
||||
t.Errorf("Fresh entry should be retrievable, got '%s'", got)
|
||||
}
|
||||
|
||||
// We can't easily test actual expiration without time mocking
|
||||
// but the logic is verified by the implementation
|
||||
_ = time.Now() // Acknowledge we're not testing time passage
|
||||
}
|
||||
@@ -24,12 +24,18 @@ func DoClaudeLogin(cfg *config.Config, options *LoginOptions) {
|
||||
options = &LoginOptions{}
|
||||
}
|
||||
|
||||
promptFn := options.Prompt
|
||||
if promptFn == nil {
|
||||
promptFn = defaultProjectPrompt()
|
||||
}
|
||||
|
||||
manager := newAuthManager()
|
||||
|
||||
authOpts := &sdkAuth.LoginOptions{
|
||||
NoBrowser: options.NoBrowser,
|
||||
CallbackPort: options.CallbackPort,
|
||||
Metadata: map[string]string{},
|
||||
Prompt: options.Prompt,
|
||||
Prompt: promptFn,
|
||||
}
|
||||
|
||||
_, savedPath, err := manager.Login(context.Background(), "claude", cfg, authOpts)
|
||||
|
||||
44
internal/cmd/antigravity_login.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// DoAntigravityLogin triggers the OAuth flow for the antigravity provider and saves tokens.
|
||||
func DoAntigravityLogin(cfg *config.Config, options *LoginOptions) {
|
||||
if options == nil {
|
||||
options = &LoginOptions{}
|
||||
}
|
||||
|
||||
promptFn := options.Prompt
|
||||
if promptFn == nil {
|
||||
promptFn = defaultProjectPrompt()
|
||||
}
|
||||
|
||||
manager := newAuthManager()
|
||||
authOpts := &sdkAuth.LoginOptions{
|
||||
NoBrowser: options.NoBrowser,
|
||||
CallbackPort: options.CallbackPort,
|
||||
Metadata: map[string]string{},
|
||||
Prompt: promptFn,
|
||||
}
|
||||
|
||||
record, savedPath, err := manager.Login(context.Background(), "antigravity", cfg, authOpts)
|
||||
if err != nil {
|
||||
log.Errorf("Antigravity authentication failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if savedPath != "" {
|
||||
fmt.Printf("Authentication saved to %s\n", savedPath)
|
||||
}
|
||||
if record != nil && record.Label != "" {
|
||||
fmt.Printf("Authenticated as %s\n", record.Label)
|
||||
}
|
||||
fmt.Println("Antigravity authentication successful!")
|
||||
}
|
||||
@@ -17,6 +17,8 @@ func newAuthManager() *sdkAuth.Manager {
|
||||
sdkAuth.NewCodexAuthenticator(),
|
||||
sdkAuth.NewClaudeAuthenticator(),
|
||||
sdkAuth.NewQwenAuthenticator(),
|
||||
sdkAuth.NewIFlowAuthenticator(),
|
||||
sdkAuth.NewAntigravityAuthenticator(),
|
||||
)
|
||||
return manager
|
||||
}
|
||||
|
||||
@@ -1,65 +0,0 @@
|
||||
// Package cmd provides command-line interface functionality for the CLI Proxy API.
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/gemini"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// DoGeminiWebAuth handles the process of creating a Gemini Web token file.
|
||||
// It prompts the user for their cookie values and saves them to a JSON file.
|
||||
func DoGeminiWebAuth(cfg *config.Config) {
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
|
||||
fmt.Print("Enter your __Secure-1PSID cookie value: ")
|
||||
secure1psid, _ := reader.ReadString('\n')
|
||||
secure1psid = strings.TrimSpace(secure1psid)
|
||||
|
||||
if secure1psid == "" {
|
||||
log.Fatal("The __Secure-1PSID value cannot be empty.")
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Print("Enter your __Secure-1PSIDTS cookie value: ")
|
||||
secure1psidts, _ := reader.ReadString('\n')
|
||||
secure1psidts = strings.TrimSpace(secure1psidts)
|
||||
|
||||
if secure1psidts == "" {
|
||||
fmt.Println("The __Secure-1PSIDTS value cannot be empty.")
|
||||
return
|
||||
}
|
||||
|
||||
tokenStorage := &gemini.GeminiWebTokenStorage{
|
||||
Secure1PSID: secure1psid,
|
||||
Secure1PSIDTS: secure1psidts,
|
||||
}
|
||||
|
||||
// Generate a filename based on the SHA256 hash of the PSID
|
||||
hasher := sha256.New()
|
||||
hasher.Write([]byte(secure1psid))
|
||||
hash := hex.EncodeToString(hasher.Sum(nil))
|
||||
fileName := fmt.Sprintf("gemini-web-%s.json", hash[:16])
|
||||
record := &sdkAuth.TokenRecord{
|
||||
Provider: "gemini-web",
|
||||
FileName: fileName,
|
||||
Storage: tokenStorage,
|
||||
}
|
||||
store := sdkAuth.GetTokenStore()
|
||||
savedPath, err := store.Save(context.Background(), cfg, record)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to save Gemini Web token to file: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully saved Gemini Web token to: %s\n", savedPath)
|
||||
}
|
||||
98
internal/cmd/iflow_cookie.go
Normal file
@@ -0,0 +1,98 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/auth/iflow"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
|
||||
)
|
||||
|
||||
// DoIFlowCookieAuth performs the iFlow cookie-based authentication.
|
||||
func DoIFlowCookieAuth(cfg *config.Config, options *LoginOptions) {
|
||||
if options == nil {
|
||||
options = &LoginOptions{}
|
||||
}
|
||||
|
||||
promptFn := options.Prompt
|
||||
if promptFn == nil {
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
promptFn = func(prompt string) (string, error) {
|
||||
fmt.Print(prompt)
|
||||
value, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(value), nil
|
||||
}
|
||||
}
|
||||
|
||||
// Prompt user for cookie
|
||||
cookie, err := promptForCookie(promptFn)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to get cookie: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Check for duplicate BXAuth before authentication
|
||||
bxAuth := iflow.ExtractBXAuth(cookie)
|
||||
if existingFile, err := iflow.CheckDuplicateBXAuth(cfg.AuthDir, bxAuth); err != nil {
|
||||
fmt.Printf("Failed to check duplicate: %v\n", err)
|
||||
return
|
||||
} else if existingFile != "" {
|
||||
fmt.Printf("Duplicate BXAuth found, authentication already exists: %s\n", filepath.Base(existingFile))
|
||||
return
|
||||
}
|
||||
|
||||
// Authenticate with cookie
|
||||
auth := iflow.NewIFlowAuth(cfg)
|
||||
ctx := context.Background()
|
||||
|
||||
tokenData, err := auth.AuthenticateWithCookie(ctx, cookie)
|
||||
if err != nil {
|
||||
fmt.Printf("iFlow cookie authentication failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Create token storage
|
||||
tokenStorage := auth.CreateCookieTokenStorage(tokenData)
|
||||
|
||||
// Get auth file path using email in filename
|
||||
authFilePath := getAuthFilePath(cfg, "iflow", tokenData.Email)
|
||||
|
||||
// Save token to file
|
||||
if err := tokenStorage.SaveTokenToFile(authFilePath); err != nil {
|
||||
fmt.Printf("Failed to save authentication: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("Authentication successful! API key: %s\n", tokenData.APIKey)
|
||||
fmt.Printf("Expires at: %s\n", tokenData.Expire)
|
||||
fmt.Printf("Authentication saved to: %s\n", authFilePath)
|
||||
}
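// exampleNonInteractiveCookieLogin is an illustrative sketch of driving
// DoIFlowCookieAuth without stdin by supplying a custom Prompt; reading the
// cookie from an IFLOW_COOKIE environment variable is an assumption, not
// upstream behaviour.
func exampleNonInteractiveCookieLogin(cfg *config.Config) {
	DoIFlowCookieAuth(cfg, &LoginOptions{
		Prompt: func(prompt string) (string, error) {
			return os.Getenv("IFLOW_COOKIE"), nil
		},
	})
}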
|
||||
|
||||
// promptForCookie prompts the user to enter their iFlow cookie
|
||||
func promptForCookie(promptFn func(string) (string, error)) (string, error) {
|
||||
line, err := promptFn("Enter iFlow Cookie (from browser cookies): ")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read cookie: %w", err)
|
||||
}
|
||||
|
||||
cookie, err := iflow.NormalizeCookie(line)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return cookie, nil
|
||||
}
|
||||
|
||||
// getAuthFilePath returns the auth file path for the given provider and email
|
||||
func getAuthFilePath(cfg *config.Config, provider, email string) string {
|
||||
fileName := iflow.SanitizeIFlowFileName(email)
|
||||
return fmt.Sprintf("%s/%s-%s-%d.json", cfg.AuthDir, provider, fileName, time.Now().Unix())
|
||||
}
|
||||
49
internal/cmd/iflow_login.go
Normal file
@@ -0,0 +1,49 @@
|
||||
package cmd

import (
    "context"
    "errors"
    "fmt"

    "github.com/router-for-me/CLIProxyAPI/v6/internal/config"
    sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
    log "github.com/sirupsen/logrus"
)

// DoIFlowLogin performs the iFlow OAuth login via the shared authentication manager.
func DoIFlowLogin(cfg *config.Config, options *LoginOptions) {
    if options == nil {
        options = &LoginOptions{}
    }

    manager := newAuthManager()

    promptFn := options.Prompt
    if promptFn == nil {
        promptFn = defaultProjectPrompt()
    }

    authOpts := &sdkAuth.LoginOptions{
        NoBrowser: options.NoBrowser,
        CallbackPort: options.CallbackPort,
        Metadata: map[string]string{},
        Prompt: promptFn,
    }

    _, savedPath, err := manager.Login(context.Background(), "iflow", cfg, authOpts)
    if err != nil {
        var emailErr *sdkAuth.EmailRequiredError
        if errors.As(err, &emailErr) {
            log.Error(emailErr.Error())
            return
        }
        fmt.Printf("iFlow authentication failed: %v\n", err)
        return
    }

    if savedPath != "" {
        fmt.Printf("Authentication saved to %s\n", savedPath)
    }

    fmt.Println("iFlow authentication successful!")
}
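A minimal caller sketch for the flow above (illustrative only; this helper and its parameters are assumptions, not part of the diff):

    // Hypothetical wrapper showing how a CLI command could invoke the new iFlow login.
    func runIFlowLoginExample(cfg *config.Config, noBrowser bool, callbackPort int) {
        DoIFlowLogin(cfg, &LoginOptions{
            NoBrowser:    noBrowser,
            CallbackPort: callbackPort,
        })
    }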
@@ -4,18 +4,45 @@
package cmd

import (
    "bufio"
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net/http"
    "os"
    "strconv"
    "strings"
    "time"

    "github.com/router-for-me/CLIProxyAPI/v6/internal/auth/gemini"
    "github.com/router-for-me/CLIProxyAPI/v6/internal/config"
    "github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
    sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
    cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
    log "github.com/sirupsen/logrus"
    "github.com/tidwall/gjson"
)

const (
    geminiCLIEndpoint       = "https://cloudcode-pa.googleapis.com"
    geminiCLIVersion        = "v1internal"
    geminiCLIUserAgent      = "google-api-nodejs-client/9.15.1"
    geminiCLIApiClient      = "gl-node/22.17.0"
    geminiCLIClientMetadata = "ideType=IDE_UNSPECIFIED,platform=PLATFORM_UNSPECIFIED,pluginType=GEMINI"
)
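// projectSelectionRequiredError indicates that onboarding cannot proceed because no
// Cloud AI Companion project ID was supplied or discovered for the account.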
type projectSelectionRequiredError struct{}

func (e *projectSelectionRequiredError) Error() string {
    return "gemini cli: project selection required"
}

// DoLogin handles Google Gemini authentication using the shared authentication manager.
// It initiates the OAuth flow for Google Gemini services and saves the authentication
// tokens to the configured auth directory.
// It initiates the OAuth flow for Google Gemini services, performs the legacy CLI user setup,
// and saves the authentication tokens to the configured auth directory.
//
// Parameters:
// - cfg: The application configuration
@@ -26,26 +53,490 @@ func DoLogin(cfg *config.Config, projectID string, options *LoginOptions) {
        options = &LoginOptions{}
    }

    manager := newAuthManager()
    ctx := context.Background()

    metadata := map[string]string{}
    if projectID != "" {
        metadata["project_id"] = projectID
    promptFn := options.Prompt
    if promptFn == nil {
        promptFn = defaultProjectPrompt()
    }

    authOpts := &sdkAuth.LoginOptions{
    trimmedProjectID := strings.TrimSpace(projectID)
    callbackPrompt := promptFn
    if trimmedProjectID == "" {
        callbackPrompt = nil
    }

    loginOpts := &sdkAuth.LoginOptions{
        NoBrowser: options.NoBrowser,
        ProjectID: projectID,
        Metadata: metadata,
        Prompt: options.Prompt,
        ProjectID: trimmedProjectID,
        CallbackPort: options.CallbackPort,
        Metadata: map[string]string{},
        Prompt: callbackPrompt,
    }

    authenticator := sdkAuth.NewGeminiAuthenticator()
    record, errLogin := authenticator.Login(ctx, cfg, loginOpts)
    if errLogin != nil {
        log.Errorf("Gemini authentication failed: %v", errLogin)
        return
    }

    storage, okStorage := record.Storage.(*gemini.GeminiTokenStorage)
    if !okStorage || storage == nil {
        log.Error("Gemini authentication failed: unsupported token storage")
        return
    }

    geminiAuth := gemini.NewGeminiAuth()
    httpClient, errClient := geminiAuth.GetAuthenticatedClient(ctx, storage, cfg, &gemini.WebLoginOptions{
        NoBrowser: options.NoBrowser,
        CallbackPort: options.CallbackPort,
        Prompt: callbackPrompt,
    })
    if errClient != nil {
        log.Errorf("Gemini authentication failed: %v", errClient)
        return
    }

    log.Info("Authentication successful.")

    projects, errProjects := fetchGCPProjects(ctx, httpClient)
    if errProjects != nil {
        log.Errorf("Failed to get project list: %v", errProjects)
        return
    }

    selectedProjectID := promptForProjectSelection(projects, trimmedProjectID, promptFn)
    projectSelections, errSelection := resolveProjectSelections(selectedProjectID, projects)
    if errSelection != nil {
        log.Errorf("Invalid project selection: %v", errSelection)
        return
    }
    if len(projectSelections) == 0 {
        log.Error("No project selected; aborting login.")
        return
    }

    activatedProjects := make([]string, 0, len(projectSelections))
    seenProjects := make(map[string]bool)
    for _, candidateID := range projectSelections {
        log.Infof("Activating project %s", candidateID)
        if errSetup := performGeminiCLISetup(ctx, httpClient, storage, candidateID); errSetup != nil {
            var projectErr *projectSelectionRequiredError
            if errors.As(errSetup, &projectErr) {
                log.Error("Failed to start user onboarding: A project ID is required.")
                showProjectSelectionHelp(storage.Email, projects)
                return
            }
            log.Errorf("Failed to complete user setup: %v", errSetup)
            return
        }
        finalID := strings.TrimSpace(storage.ProjectID)
        if finalID == "" {
            finalID = candidateID
        }

        // Skip duplicates
        if seenProjects[finalID] {
            log.Infof("Project %s already activated, skipping", finalID)
            continue
        }
        seenProjects[finalID] = true
        activatedProjects = append(activatedProjects, finalID)
    }

    storage.Auto = false
    storage.ProjectID = strings.Join(activatedProjects, ",")

    if !storage.Auto && !storage.Checked {
        for _, pid := range activatedProjects {
            isChecked, errCheck := checkCloudAPIIsEnabled(ctx, httpClient, pid)
            if errCheck != nil {
                log.Errorf("Failed to check if Cloud AI API is enabled for %s: %v", pid, errCheck)
                return
            }
            if !isChecked {
                log.Errorf("Failed to check if Cloud AI API is enabled for project %s. If you encounter an error message, please create an issue.", pid)
                return
            }
        }
        storage.Checked = true
    }

    updateAuthRecord(record, storage)

    store := sdkAuth.GetTokenStore()
    if setter, okSetter := store.(interface{ SetBaseDir(string) }); okSetter && cfg != nil {
        setter.SetBaseDir(cfg.AuthDir)
    }

    savedPath, errSave := store.Save(ctx, record)
    if errSave != nil {
        log.Errorf("Failed to save token to file: %v", errSave)
        return
    }

    if savedPath != "" {
        fmt.Printf("Authentication saved to %s\n", savedPath)
    }

    fmt.Println("Gemini authentication successful!")
}
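// performGeminiCLISetup runs the legacy Gemini CLI onboarding flow (loadCodeAssist followed by
// onboardUser) for the requested project and records the resulting project ID on the token storage.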
func performGeminiCLISetup(ctx context.Context, httpClient *http.Client, storage *gemini.GeminiTokenStorage, requestedProject string) error {
    metadata := map[string]string{
        "ideType": "IDE_UNSPECIFIED",
        "platform": "PLATFORM_UNSPECIFIED",
        "pluginType": "GEMINI",
    }

    trimmedRequest := strings.TrimSpace(requestedProject)
    explicitProject := trimmedRequest != ""

    loadReqBody := map[string]any{
        "metadata": metadata,
    }
    if explicitProject {
        loadReqBody["cloudaicompanionProject"] = trimmedRequest
    }

    var loadResp map[string]any
    if errLoad := callGeminiCLI(ctx, httpClient, "loadCodeAssist", loadReqBody, &loadResp); errLoad != nil {
        return fmt.Errorf("load code assist: %w", errLoad)
    }

    tierID := "legacy-tier"
    if tiers, okTiers := loadResp["allowedTiers"].([]any); okTiers {
        for _, rawTier := range tiers {
            tier, okTier := rawTier.(map[string]any)
            if !okTier {
                continue
            }
            if isDefault, okDefault := tier["isDefault"].(bool); okDefault && isDefault {
                if id, okID := tier["id"].(string); okID && strings.TrimSpace(id) != "" {
                    tierID = strings.TrimSpace(id)
                    break
                }
            }
        }
    }

    projectID := trimmedRequest
    if projectID == "" {
        if id, okProject := loadResp["cloudaicompanionProject"].(string); okProject {
            projectID = strings.TrimSpace(id)
        }
        if projectID == "" {
            if projectMap, okProject := loadResp["cloudaicompanionProject"].(map[string]any); okProject {
                if id, okID := projectMap["id"].(string); okID {
                    projectID = strings.TrimSpace(id)
                }
            }
        }
    }
    if projectID == "" {
        return &projectSelectionRequiredError{}
    }

    onboardReqBody := map[string]any{
        "tierId": tierID,
        "metadata": metadata,
        "cloudaicompanionProject": projectID,
    }

    // Store the requested project as a fallback in case the response omits it.
    storage.ProjectID = projectID

    for {
        var onboardResp map[string]any
        if errOnboard := callGeminiCLI(ctx, httpClient, "onboardUser", onboardReqBody, &onboardResp); errOnboard != nil {
            return fmt.Errorf("onboard user: %w", errOnboard)
        }

        if done, okDone := onboardResp["done"].(bool); okDone && done {
            responseProjectID := ""
            if resp, okResp := onboardResp["response"].(map[string]any); okResp {
                switch projectValue := resp["cloudaicompanionProject"].(type) {
                case map[string]any:
                    if id, okID := projectValue["id"].(string); okID {
                        responseProjectID = strings.TrimSpace(id)
                    }
                case string:
                    responseProjectID = strings.TrimSpace(projectValue)
                }
            }

            finalProjectID := projectID
            if responseProjectID != "" {
                if explicitProject && !strings.EqualFold(responseProjectID, projectID) {
                    // Check if this is a free user (gen-lang-client projects or free/legacy tier)
                    isFreeUser := strings.HasPrefix(projectID, "gen-lang-client-") ||
                        strings.EqualFold(tierID, "FREE") ||
                        strings.EqualFold(tierID, "LEGACY")

                    if isFreeUser {
                        // Interactive prompt for free users
                        fmt.Printf("\nGoogle returned a different project ID:\n")
                        fmt.Printf(" Requested (frontend): %s\n", projectID)
                        fmt.Printf(" Returned (backend): %s\n\n", responseProjectID)
                        fmt.Printf(" Backend project IDs have access to preview models (gemini-3-*).\n")
                        fmt.Printf(" This is normal for free tier users.\n\n")
                        fmt.Printf("Which project ID would you like to use?\n")
                        fmt.Printf(" [1] Backend (recommended): %s\n", responseProjectID)
                        fmt.Printf(" [2] Frontend: %s\n\n", projectID)
                        fmt.Printf("Enter choice [1]: ")

                        reader := bufio.NewReader(os.Stdin)
                        choice, _ := reader.ReadString('\n')
                        choice = strings.TrimSpace(choice)

                        if choice == "2" {
                            log.Infof("Using frontend project ID: %s", projectID)
                            fmt.Println(". Warning: Frontend project IDs may not have access to preview models.")
                            finalProjectID = projectID
                        } else {
                            log.Infof("Using backend project ID: %s (recommended)", responseProjectID)
                            finalProjectID = responseProjectID
                        }
                    } else {
                        // Pro users: keep requested project ID (original behavior)
                        log.Warnf("Gemini onboarding returned project %s instead of requested %s; keeping requested project ID.", responseProjectID, projectID)
                    }
                } else {
                    finalProjectID = responseProjectID
                }
            }

            storage.ProjectID = strings.TrimSpace(finalProjectID)
            if storage.ProjectID == "" {
                storage.ProjectID = strings.TrimSpace(projectID)
            }
            if storage.ProjectID == "" {
                return fmt.Errorf("onboard user completed without project id")
            }
            log.Infof("Onboarding complete. Using Project ID: %s", storage.ProjectID)
            return nil
        }

        log.Println("Onboarding in progress, waiting 5 seconds...")
        time.Sleep(5 * time.Second)
    }
}
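// callGeminiCLI POSTs a JSON body to the Cloud Code companion endpoint and, when result is
// non-nil, decodes the JSON response into it.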
func callGeminiCLI(ctx context.Context, httpClient *http.Client, endpoint string, body any, result any) error {
    url := fmt.Sprintf("%s/%s:%s", geminiCLIEndpoint, geminiCLIVersion, endpoint)
    if strings.HasPrefix(endpoint, "operations/") {
        url = fmt.Sprintf("%s/%s", geminiCLIEndpoint, endpoint)
    }

    var reader io.Reader
    if body != nil {
        rawBody, errMarshal := json.Marshal(body)
        if errMarshal != nil {
            return fmt.Errorf("marshal request body: %w", errMarshal)
        }
        reader = bytes.NewReader(rawBody)
    }

    req, errRequest := http.NewRequestWithContext(ctx, http.MethodPost, url, reader)
    if errRequest != nil {
        return fmt.Errorf("create request: %w", errRequest)
    }
    req.Header.Set("Content-Type", "application/json")
    req.Header.Set("User-Agent", geminiCLIUserAgent)
    req.Header.Set("X-Goog-Api-Client", geminiCLIApiClient)
    req.Header.Set("Client-Metadata", geminiCLIClientMetadata)

    resp, errDo := httpClient.Do(req)
    if errDo != nil {
        return fmt.Errorf("execute request: %w", errDo)
    }
    defer func() {
        if errClose := resp.Body.Close(); errClose != nil {
            log.Errorf("response body close error: %v", errClose)
        }
    }()

    if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
        bodyBytes, _ := io.ReadAll(resp.Body)
        return fmt.Errorf("api request failed with status %d: %s", resp.StatusCode, strings.TrimSpace(string(bodyBytes)))
    }

    if result == nil {
        _, _ = io.Copy(io.Discard, resp.Body)
        return nil
    }

    if errDecode := json.NewDecoder(resp.Body).Decode(result); errDecode != nil {
        return fmt.Errorf("decode response body: %w", errDecode)
    }

    return nil
}
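// fetchGCPProjects lists the Google Cloud projects visible to the authenticated client via the
// Cloud Resource Manager API.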
func fetchGCPProjects(ctx context.Context, httpClient *http.Client) ([]interfaces.GCPProjectProjects, error) {
    req, errRequest := http.NewRequestWithContext(ctx, http.MethodGet, "https://cloudresourcemanager.googleapis.com/v1/projects", nil)
    if errRequest != nil {
        return nil, fmt.Errorf("could not create project list request: %w", errRequest)
    }

    resp, errDo := httpClient.Do(req)
    if errDo != nil {
        return nil, fmt.Errorf("failed to execute project list request: %w", errDo)
    }
    defer func() {
        if errClose := resp.Body.Close(); errClose != nil {
            log.Errorf("response body close error: %v", errClose)
        }
    }()

    if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
        bodyBytes, _ := io.ReadAll(resp.Body)
        return nil, fmt.Errorf("project list request failed with status %d: %s", resp.StatusCode, strings.TrimSpace(string(bodyBytes)))
    }

    var projects interfaces.GCPProject
    if errDecode := json.NewDecoder(resp.Body).Decode(&projects); errDecode != nil {
        return nil, fmt.Errorf("failed to unmarshal project list: %w", errDecode)
    }

    return projects.Projects, nil
}

// promptForProjectSelection prints available projects and returns the chosen project ID.
func promptForProjectSelection(projects []interfaces.GCPProjectProjects, presetID string, promptFn func(string) (string, error)) string {
    trimmedPreset := strings.TrimSpace(presetID)
    if len(projects) == 0 {
        if trimmedPreset != "" {
            return trimmedPreset
        }
        fmt.Println("No Google Cloud projects are available for selection.")
        return ""
    }

    fmt.Println("Available Google Cloud projects:")
    defaultIndex := 0
    for idx, project := range projects {
        fmt.Printf("[%d] %s (%s)\n", idx+1, project.ProjectID, project.Name)
        if trimmedPreset != "" && project.ProjectID == trimmedPreset {
            defaultIndex = idx
        }
    }
    fmt.Println("Type 'ALL' to onboard every listed project.")

    defaultID := projects[defaultIndex].ProjectID

    if trimmedPreset != "" {
        if strings.EqualFold(trimmedPreset, "ALL") {
            return "ALL"
        }
        for _, project := range projects {
            if project.ProjectID == trimmedPreset {
                return trimmedPreset
            }
        }
        log.Warnf("Provided project ID %s not found in available projects; please choose from the list.", trimmedPreset)
    }

    for {
        promptMsg := fmt.Sprintf("Enter project ID [%s] or ALL: ", defaultID)
        answer, errPrompt := promptFn(promptMsg)
        if errPrompt != nil {
            log.Errorf("Project selection prompt failed: %v", errPrompt)
            return defaultID
        }
        answer = strings.TrimSpace(answer)
        if strings.EqualFold(answer, "ALL") {
            return "ALL"
        }
        if answer == "" {
            return defaultID
        }

        for _, project := range projects {
            if project.ProjectID == answer {
                return project.ProjectID
            }
        }

        if idx, errAtoi := strconv.Atoi(answer); errAtoi == nil {
            if idx >= 1 && idx <= len(projects) {
                return projects[idx-1].ProjectID
            }
        }

        fmt.Println("Invalid selection, enter a project ID or a number from the list.")
    }
}
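// resolveProjectSelections expands a selection ("ALL", a single ID, or a comma-separated list)
// into a de-duplicated slice of project IDs, validating each entry against the available projects.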
func resolveProjectSelections(selection string, projects []interfaces.GCPProjectProjects) ([]string, error) {
    trimmed := strings.TrimSpace(selection)
    if trimmed == "" {
        return nil, nil
    }
    available := make(map[string]struct{}, len(projects))
    ordered := make([]string, 0, len(projects))
    for _, project := range projects {
        id := strings.TrimSpace(project.ProjectID)
        if id == "" {
            continue
        }
        if _, exists := available[id]; exists {
            continue
        }
        available[id] = struct{}{}
        ordered = append(ordered, id)
    }
    if strings.EqualFold(trimmed, "ALL") {
        if len(ordered) == 0 {
            return nil, fmt.Errorf("no projects available for ALL selection")
        }
        return append([]string(nil), ordered...), nil
    }
    parts := strings.Split(trimmed, ",")
    selections := make([]string, 0, len(parts))
    seen := make(map[string]struct{}, len(parts))
    for _, part := range parts {
        id := strings.TrimSpace(part)
        if id == "" {
            continue
        }
        if _, dup := seen[id]; dup {
            continue
        }
        if len(available) > 0 {
            if _, ok := available[id]; !ok {
                return nil, fmt.Errorf("project %s not found in available projects", id)
            }
        }
        seen[id] = struct{}{}
        selections = append(selections, id)
    }
    return selections, nil
}
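// defaultProjectPrompt returns a prompt function that reads one trimmed line from stdin,
// treating EOF as the end of the answer.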
func defaultProjectPrompt() func(string) (string, error) {
    reader := bufio.NewReader(os.Stdin)
    return func(prompt string) (string, error) {
        fmt.Print(prompt)
        line, errRead := reader.ReadString('\n')
        if errRead != nil {
            if errors.Is(errRead, io.EOF) {
                return strings.TrimSpace(line), nil
            }
            return "", errRead
        }
        return strings.TrimSpace(line), nil
    }
}
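// showProjectSelectionHelp tells the user which account needs an explicit project ID and how to
// rerun the login with --project_id.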
func showProjectSelectionHelp(email string, projects []interfaces.GCPProjectProjects) {
    if email != "" {
        log.Infof("Your account %s needs to specify a project ID.", email)
    } else {
        log.Info("You need to specify a project ID.")
    }

    _, savedPath, err := manager.Login(context.Background(), "gemini", cfg, authOpts)
    if err != nil {
        var selectionErr *sdkAuth.ProjectSelectionError
        if errors.As(err, &selectionErr) {
            fmt.Println(selectionErr.Error())
            projects := selectionErr.ProjectsDisplay()
            if len(projects) > 0 {
                fmt.Println("========================================================================")
                for _, p := range projects {
@@ -53,17 +544,90 @@ func DoLogin(cfg *config.Config, projectID string, options *LoginOptions) {
                    fmt.Printf("Project Name: %s\n", p.Name)
                    fmt.Println("------------------------------------------------------------------------")
                }
                fmt.Println("Please rerun the login command with --project_id <project_id>.")
            } else {
                fmt.Println("No active projects were returned for this account.")
            }
            return

    fmt.Printf("Please run this command to login again with a specific project:\n\n%s --login --project_id <project_id>\n", os.Args[0])
}
    log.Fatalf("Gemini authentication failed: %v", err)
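// checkCloudAPIIsEnabled verifies that the required Gemini for Google Cloud service is enabled
// for the project, attempting to enable it when it is not.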
func checkCloudAPIIsEnabled(ctx context.Context, httpClient *http.Client, projectID string) (bool, error) {
    serviceUsageURL := "https://serviceusage.googleapis.com"
    requiredServices := []string{
        // "geminicloudassist.googleapis.com", // Gemini Cloud Assist API
        "cloudaicompanion.googleapis.com", // Gemini for Google Cloud API
    }
    for _, service := range requiredServices {
        checkUrl := fmt.Sprintf("%s/v1/projects/%s/services/%s", serviceUsageURL, projectID, service)
        req, errRequest := http.NewRequestWithContext(ctx, http.MethodGet, checkUrl, nil)
        if errRequest != nil {
            return false, fmt.Errorf("failed to create request: %w", errRequest)
        }
        req.Header.Set("Content-Type", "application/json")
        req.Header.Set("User-Agent", geminiCLIUserAgent)
        resp, errDo := httpClient.Do(req)
        if errDo != nil {
            return false, fmt.Errorf("failed to execute request: %w", errDo)
        }

        if resp.StatusCode == http.StatusOK {
            bodyBytes, _ := io.ReadAll(resp.Body)
            if gjson.GetBytes(bodyBytes, "state").String() == "ENABLED" {
                _ = resp.Body.Close()
                continue
            }
        }
        _ = resp.Body.Close()

        enableUrl := fmt.Sprintf("%s/v1/projects/%s/services/%s:enable", serviceUsageURL, projectID, service)
        req, errRequest = http.NewRequestWithContext(ctx, http.MethodPost, enableUrl, strings.NewReader("{}"))
        if errRequest != nil {
            return false, fmt.Errorf("failed to create request: %w", errRequest)
        }
        req.Header.Set("Content-Type", "application/json")
        req.Header.Set("User-Agent", geminiCLIUserAgent)
        resp, errDo = httpClient.Do(req)
        if errDo != nil {
            return false, fmt.Errorf("failed to execute request: %w", errDo)
        }

        bodyBytes, _ := io.ReadAll(resp.Body)
        errMessage := string(bodyBytes)
        errMessageResult := gjson.GetBytes(bodyBytes, "error.message")
        if errMessageResult.Exists() {
            errMessage = errMessageResult.String()
        }
        if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusCreated {
            _ = resp.Body.Close()
            continue
        } else if resp.StatusCode == http.StatusBadRequest {
            _ = resp.Body.Close()
            if strings.Contains(strings.ToLower(errMessage), "already enabled") {
                continue
            }
        }
        _ = resp.Body.Close()
        return false, fmt.Errorf("project activation required: %s", errMessage)
    }
    return true, nil
}
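// updateAuthRecord copies the finalized Gemini token storage and its metadata (email, project ID,
// auto/checked flags) onto the auth record and renames the record to the derived credential file name.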
func updateAuthRecord(record *cliproxyauth.Auth, storage *gemini.GeminiTokenStorage) {
    if record == nil || storage == nil {
        return
    }

    if savedPath != "" {
        log.Infof("Authentication saved to %s", savedPath)
    }
    finalName := gemini.CredentialFileName(storage.Email, storage.ProjectID, false)

    log.Info("Gemini authentication successful!")
    if record.Metadata == nil {
        record.Metadata = make(map[string]any)
    }
    record.Metadata["email"] = storage.Email
    record.Metadata["project_id"] = storage.ProjectID
    record.Metadata["auto"] = storage.Auto
    record.Metadata["checked"] = storage.Checked

    record.ID = finalName
    record.FileName = finalName
    record.Storage = storage
}
@@ -19,6 +19,9 @@ type LoginOptions struct {
    // NoBrowser indicates whether to skip opening the browser automatically.
    NoBrowser bool

    // CallbackPort overrides the local OAuth callback port when set (>0).
    CallbackPort int

    // Prompt allows the caller to provide interactive input when needed.
    Prompt func(prompt string) (string, error)
}
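Illustrative only (not part of the diff): a sketch of how a caller might thread the new CallbackPort field through; the helper name and parameters are assumptions.

    // Hypothetical wrapper demonstrating the CallbackPort option.
    func runCodexLoginExample(cfg *config.Config, callbackPort int) {
        DoCodexLogin(cfg, &LoginOptions{
            CallbackPort: callbackPort, // 0 keeps the provider's default callback port
        })
    }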
@@ -35,12 +38,18 @@ func DoCodexLogin(cfg *config.Config, options *LoginOptions) {
        options = &LoginOptions{}
    }

    promptFn := options.Prompt
    if promptFn == nil {
        promptFn = defaultProjectPrompt()
    }

    manager := newAuthManager()

    authOpts := &sdkAuth.LoginOptions{
        NoBrowser: options.NoBrowser,
        CallbackPort: options.CallbackPort,
        Metadata: map[string]string{},
        Prompt: options.Prompt,
        Prompt: promptFn,
    }

    _, savedPath, err := manager.Login(context.Background(), "codex", cfg, authOpts)

@@ -37,6 +37,7 @@ func DoQwenLogin(cfg *config.Config, options *LoginOptions) {

    authOpts := &sdkAuth.LoginOptions{
        NoBrowser: options.NoBrowser,
        CallbackPort: options.CallbackPort,
        Metadata: map[string]string{},
        Prompt: promptFn,
    }
Some files were not shown because too many files have changed in this diff.