mirror of https://github.com/router-for-me/CLIProxyAPI.git
synced 2026-02-03 04:50:52 +08:00
refactor(auth): simplify file handling logic and remove redundant comparison functions
feat(auth): fetch and update Antigravity project ID from metadata during filestore operations

- Added support to retrieve and update `project_id` using the access token if missing in metadata.
- Integrated HTTP client to fetch project ID dynamically.
- Enhanced metadata persistence logic.
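The feature described above boils down to a small backfill step: if the stored metadata has no project_id but does carry an access token, fetch the ID over HTTP and write it back. The sketch below illustrates that flow in isolation; it is not the repository's code, and the fetchProjectID callback is a stand-in for the real FetchAntigravityProjectID helper that appears in the diffs that follow.

package main

import (
	"context"
	"fmt"
	"net/http"
	"strings"
)

// fetchProjectID stands in for the repository's FetchAntigravityProjectID.
type fetchProjectID func(ctx context.Context, accessToken string, client *http.Client) (string, error)

// ensureProjectID fills metadata["project_id"] when it is missing and an
// access token is available. It mirrors the intent of the commit, not its exact code.
func ensureProjectID(ctx context.Context, metadata map[string]any, fetch fetchProjectID) error {
	if _, ok := metadata["project_id"].(string); ok {
		return nil // already present, nothing to do
	}
	token, _ := metadata["access_token"].(string)
	if token = strings.TrimSpace(token); token == "" {
		return nil // no token, cannot fetch
	}
	id, err := fetch(ctx, token, http.DefaultClient)
	if err != nil {
		return err
	}
	if id = strings.TrimSpace(id); id != "" {
		metadata["project_id"] = id
	}
	return nil
}

func main() {
	metadata := map[string]any{"access_token": "fake-token"}
	// Canned fetcher instead of a real HTTP call, purely for illustration.
	fake := func(_ context.Context, _ string, _ *http.Client) (string, error) {
		return "projects/example-123", nil
	}
	if err := ensureProjectID(context.Background(), metadata, fake); err != nil {
		panic(err)
	}
	fmt.Println(metadata["project_id"]) // projects/example-123
}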
@@ -25,6 +25,7 @@ import (
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
 	"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
+	sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
 	cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
 	cliproxyexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
 	sdktranslator "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
@@ -1104,12 +1105,49 @@ func (e *AntigravityExecutor) refreshToken(ctx context.Context, auth *cliproxyau
 		auth.Metadata["refresh_token"] = tokenResp.RefreshToken
 	}
 	auth.Metadata["expires_in"] = tokenResp.ExpiresIn
-	auth.Metadata["timestamp"] = time.Now().UnixMilli()
-	auth.Metadata["expired"] = time.Now().Add(time.Duration(tokenResp.ExpiresIn) * time.Second).Format(time.RFC3339)
+	now := time.Now()
+	auth.Metadata["timestamp"] = now.UnixMilli()
+	auth.Metadata["expired"] = now.Add(time.Duration(tokenResp.ExpiresIn) * time.Second).Format(time.RFC3339)
 	auth.Metadata["type"] = antigravityAuthType
+	if errProject := e.ensureAntigravityProjectID(ctx, auth, tokenResp.AccessToken); errProject != nil {
+		log.Warnf("antigravity executor: ensure project id failed: %v", errProject)
+	}
 	return auth, nil
 }

+func (e *AntigravityExecutor) ensureAntigravityProjectID(ctx context.Context, auth *cliproxyauth.Auth, accessToken string) error {
+	if auth == nil {
+		return nil
+	}
+
+	if auth.Metadata["project_id"] != nil {
+		return nil
+	}
+
+	token := strings.TrimSpace(accessToken)
+	if token == "" {
+		token = metaStringValue(auth.Metadata, "access_token")
+	}
+	if token == "" {
+		return nil
+	}
+
+	httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
+	projectID, errFetch := sdkAuth.FetchAntigravityProjectID(ctx, token, httpClient)
+	if errFetch != nil {
+		return errFetch
+	}
+	if strings.TrimSpace(projectID) == "" {
+		return nil
+	}
+	if auth.Metadata == nil {
+		auth.Metadata = make(map[string]any)
+	}
+	auth.Metadata["project_id"] = strings.TrimSpace(projectID)
+
+	return nil
+}
+
 func (e *AntigravityExecutor) buildRequest(ctx context.Context, auth *cliproxyauth.Auth, token, modelName string, payload []byte, stream bool, alt, baseURL string) (*http.Request, error) {
 	if token == "" {
 		return nil, statusErr{code: http.StatusUnauthorized, msg: "missing access token"}
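The new ensureAntigravityProjectID above falls back to metaStringValue to pull the access token out of the metadata map, but that helper sits outside this diff. A minimal sketch of what such an accessor typically looks like, assuming it simply does a comma-ok assertion and trims whitespace (the repository's actual implementation may differ):

package main

import (
	"fmt"
	"strings"
)

// metadataString is a hypothetical stand-in for metaStringValue: pull a string
// out of a map[string]any metadata blob, trim whitespace, and return "" when
// the key is absent or holds a non-string value.
func metadataString(metadata map[string]any, key string) string {
	if metadata == nil {
		return ""
	}
	if value, ok := metadata[key].(string); ok {
		return strings.TrimSpace(value)
	}
	return ""
}

func main() {
	metadata := map[string]any{"access_token": "  ya29.example  ", "expires_in": 3600}
	fmt.Printf("%q\n", metadataString(metadata, "access_token")) // "ya29.example"
	fmt.Printf("%q\n", metadataString(metadata, "expires_in"))   // "" (not a string)
	fmt.Printf("%q\n", metadataString(nil, "access_token"))      // ""
}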
@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io/fs"
+	"net/http"
 	"os"
 	"path/filepath"
 	"strings"
@@ -71,21 +72,26 @@ func (s *FileTokenStore) Save(ctx context.Context, auth *cliproxyauth.Auth) (str
 	if errMarshal != nil {
 		return "", fmt.Errorf("auth filestore: marshal metadata failed: %w", errMarshal)
 	}
-	if existing, errRead := os.ReadFile(path); errRead == nil {
-		// Use metadataEqualIgnoringTimestamps to skip writes when only timestamp fields change.
-		// This prevents the token refresh loop caused by timestamp/expired/expires_in changes.
-		if metadataEqualIgnoringTimestamps(existing, raw) {
-			return path, nil
-		}
-	} else if errRead != nil && !os.IsNotExist(errRead) {
+	if _, errRead := os.ReadFile(path); errRead == nil {
+		file, errOpen := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0o600)
+		if errOpen != nil {
+			return "", fmt.Errorf("auth filestore: open existing failed: %w", errOpen)
+		}
+		if _, errWrite := file.Write(raw); errWrite != nil {
+			_ = file.Close()
+			return "", fmt.Errorf("auth filestore: write existing failed: %w", errWrite)
+		}
+		if errClose := file.Close(); errClose != nil {
+			return "", fmt.Errorf("auth filestore: close existing failed: %w", errClose)
+		}
+		return path, nil
+	} else if !os.IsNotExist(errRead) {
 		return "", fmt.Errorf("auth filestore: read existing failed: %w", errRead)
 	}
-	tmp := path + ".tmp"
-	if errWrite := os.WriteFile(tmp, raw, 0o600); errWrite != nil {
-		return "", fmt.Errorf("auth filestore: write temp failed: %w", errWrite)
-	}
-	if errRename := os.Rename(tmp, path); errRename != nil {
-		return "", fmt.Errorf("auth filestore: rename failed: %w", errRename)
+	if errWrite := os.WriteFile(path, raw, 0o600); errWrite != nil {
+		return "", fmt.Errorf("auth filestore: write file failed: %w", errWrite)
 	}
 default:
 	return "", fmt.Errorf("auth filestore: nothing to persist for %s", auth.ID)
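For context on the Save simplification above: the removed path wrote to a temporary sibling file and renamed it into place, which is atomic on POSIX filesystems, while the retained path truncates and rewrites the file directly, which is simpler but can leave a partially written file if the process dies mid-write. A minimal standalone sketch of both patterns (paths and permissions are illustrative only):

package main

import "os"

// writeDirect mirrors the simplified path kept by the commit: truncate and
// rewrite the target file in place.
func writeDirect(path string, data []byte) error {
	return os.WriteFile(path, data, 0o600)
}

// writeViaRename mirrors the removed path: write a sibling temp file first,
// then swap it into place atomically with os.Rename.
func writeViaRename(path string, data []byte) error {
	tmp := path + ".tmp"
	if err := os.WriteFile(tmp, data, 0o600); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}

func main() {
	_ = writeDirect("auth.json", []byte(`{"type":"antigravity"}`))
	_ = writeViaRename("auth.json", []byte(`{"type":"antigravity"}`))
}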
@@ -178,6 +184,30 @@ func (s *FileTokenStore) readAuthFile(path, baseDir string) (*cliproxyauth.Auth,
 	if provider == "" {
 		provider = "unknown"
 	}
+	if provider == "antigravity" {
+		projectID := ""
+		if pid, ok := metadata["project_id"].(string); ok {
+			projectID = strings.TrimSpace(pid)
+		}
+		if projectID == "" {
+			accessToken := ""
+			if token, ok := metadata["access_token"].(string); ok {
+				accessToken = strings.TrimSpace(token)
+			}
+			if accessToken != "" {
+				fetchedProjectID, errFetch := FetchAntigravityProjectID(context.Background(), accessToken, http.DefaultClient)
+				if errFetch == nil && strings.TrimSpace(fetchedProjectID) != "" {
+					metadata["project_id"] = strings.TrimSpace(fetchedProjectID)
+					if raw, errMarshal := json.Marshal(metadata); errMarshal == nil {
+						if file, errOpen := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0o600); errOpen == nil {
+							_, _ = file.Write(raw)
+							_ = file.Close()
+						}
+					}
+				}
+			}
+		}
+	}
 	info, err := os.Stat(path)
 	if err != nil {
 		return nil, fmt.Errorf("stat file: %w", err)
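The readAuthFile hunk above leans on Go's comma-ok type assertion to read optional string fields out of a freshly unmarshalled map[string]any, so missing keys or non-string values fall back to "" instead of panicking. A small standalone sketch of that pattern (the file payload here is illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Illustrative auth-file payload; real files carry more fields.
	raw := []byte(`{"type":"antigravity","access_token":" abc123 "}`)

	var metadata map[string]any
	if err := json.Unmarshal(raw, &metadata); err != nil {
		panic(err)
	}

	// Comma-ok assertions keep missing or non-string values from panicking.
	projectID := ""
	if pid, ok := metadata["project_id"].(string); ok {
		projectID = strings.TrimSpace(pid)
	}
	accessToken := ""
	if token, ok := metadata["access_token"].(string); ok {
		accessToken = strings.TrimSpace(token)
	}
	fmt.Printf("project_id=%q access_token=%q\n", projectID, accessToken)
}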
@@ -265,93 +295,3 @@ func (s *FileTokenStore) baseDirSnapshot() string {
 	defer s.dirLock.RUnlock()
 	return s.baseDir
 }
-
-// DEPRECATED: Use metadataEqualIgnoringTimestamps for comparing auth metadata.
-// This function is kept for backward compatibility but can cause refresh loops.
-func jsonEqual(a, b []byte) bool {
-	var objA any
-	var objB any
-	if err := json.Unmarshal(a, &objA); err != nil {
-		return false
-	}
-	if err := json.Unmarshal(b, &objB); err != nil {
-		return false
-	}
-	return deepEqualJSON(objA, objB)
-}
-
-// metadataEqualIgnoringTimestamps compares two metadata JSON blobs,
-// ignoring fields that change on every refresh but don't affect functionality.
-// This prevents unnecessary file writes that would trigger watcher events and
-// create refresh loops.
-func metadataEqualIgnoringTimestamps(a, b []byte) bool {
-	var objA, objB map[string]any
-	if err := json.Unmarshal(a, &objA); err != nil {
-		return false
-	}
-	if err := json.Unmarshal(b, &objB); err != nil {
-		return false
-	}
-
-	// Fields to ignore: these change on every refresh but don't affect authentication logic.
-	// - timestamp, expired, expires_in, last_refresh: time-related fields that change on refresh
-	// - access_token: Google OAuth returns a new access_token on each refresh, this is expected
-	//   and shouldn't trigger file writes (the new token will be fetched again when needed)
-	ignoredFields := []string{"timestamp", "expired", "expires_in", "last_refresh", "access_token"}
-	for _, field := range ignoredFields {
-		delete(objA, field)
-		delete(objB, field)
-	}
-
-	return deepEqualJSON(objA, objB)
-}
-
-func deepEqualJSON(a, b any) bool {
-	switch valA := a.(type) {
-	case map[string]any:
-		valB, ok := b.(map[string]any)
-		if !ok || len(valA) != len(valB) {
-			return false
-		}
-		for key, subA := range valA {
-			subB, ok1 := valB[key]
-			if !ok1 || !deepEqualJSON(subA, subB) {
-				return false
-			}
-		}
-		return true
-	case []any:
-		sliceB, ok := b.([]any)
-		if !ok || len(valA) != len(sliceB) {
-			return false
-		}
-		for i := range valA {
-			if !deepEqualJSON(valA[i], sliceB[i]) {
-				return false
-			}
-		}
-		return true
-	case float64:
-		valB, ok := b.(float64)
-		if !ok {
-			return false
-		}
-		return valA == valB
-	case string:
-		valB, ok := b.(string)
-		if !ok {
-			return false
-		}
-		return valA == valB
-	case bool:
-		valB, ok := b.(bool)
-		if !ok {
-			return false
-		}
-		return valA == valB
-	case nil:
-		return b == nil
-	default:
-		return false
-	}
-}
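The removed helpers reimplement a structural comparison that, for values produced by json.Unmarshal into any (maps, slices, float64, string, bool, nil), the standard library's reflect.DeepEqual already provides; the commit drops them because the simplified Save no longer compares old and new contents at all. For reference, a minimal equivalent using only the standard library, assuming both inputs are valid JSON:

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

// jsonEqual compares two JSON blobs structurally, much like the removed
// jsonEqual/deepEqualJSON pair, by decoding into any and using reflect.DeepEqual.
func jsonEqual(a, b []byte) bool {
	var objA, objB any
	if json.Unmarshal(a, &objA) != nil || json.Unmarshal(b, &objB) != nil {
		return false
	}
	return reflect.DeepEqual(objA, objB)
}

func main() {
	fmt.Println(jsonEqual([]byte(`{"a":1,"b":[true]}`), []byte(`{"b":[true],"a":1}`))) // true
	fmt.Println(jsonEqual([]byte(`{"a":1}`), []byte(`{"a":2}`)))                       // false
}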