Mirror of https://github.com/router-for-me/CLIProxyAPI.git (synced 2026-02-18 20:30:51 +08:00)
v6 version first commit
sdk/cliproxy/auth/errors.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package auth

// Error describes an authentication related failure in a provider agnostic format.
type Error struct {
	// Code is a short machine readable identifier.
	Code string `json:"code,omitempty"`
	// Message is a human readable description of the failure.
	Message string `json:"message"`
	// Retryable indicates whether a retry might fix the issue automatically.
	Retryable bool `json:"retryable"`
	// HTTPStatus optionally records an HTTP-like status code for the error.
	HTTPStatus int `json:"http_status,omitempty"`
}

// Error implements the error interface.
func (e *Error) Error() string {
	if e == nil {
		return ""
	}
	if e.Code == "" {
		return e.Message
	}
	return e.Code + ": " + e.Message
}

// StatusCode implements optional status accessor for manager decision making.
func (e *Error) StatusCode() int {
	if e == nil {
		return 0
	}
	return e.HTTPStatus
}
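A minimal sketch of how calling code might inspect one of these errors; the surrounding `err` value and the use of the standard `errors` and `log` packages are illustrative, not part of this commit:

	var authErr *auth.Error
	if errors.As(err, &authErr) && authErr != nil {
		// Code/StatusCode drive retry and backoff decisions in the manager.
		log.Printf("auth failure code=%q status=%d retryable=%v",
			authErr.Code, authErr.StatusCode(), authErr.Retryable)
	}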
sdk/cliproxy/auth/filestore.go (new file, 247 lines)
@@ -0,0 +1,247 @@
package auth

import (
	"context"
	"encoding/json"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"
)

// FileStore implements Store backed by JSON files in a directory.
type FileStore struct {
	dir string
	mu  sync.Mutex
}

// NewFileStore builds a file-backed store rooted at dir.
func NewFileStore(dir string) *FileStore {
	return &FileStore{dir: dir}
}

// List enumerates all auth JSON files under the store directory.
func (s *FileStore) List(ctx context.Context) ([]*Auth, error) {
	if s.dir == "" {
		return nil, fmt.Errorf("auth filestore: directory not configured")
	}
	entries := make([]*Auth, 0)
	err := filepath.WalkDir(s.dir, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		if !strings.HasSuffix(strings.ToLower(d.Name()), ".json") {
			return nil
		}
		auth, err := s.readFile(path)
		if err != nil {
			// Skip unreadable files but keep scanning to surface remaining auths.
			return nil
		}
		if auth != nil {
			entries = append(entries, auth)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return entries, nil
}

// Save writes the auth metadata back to its source file location.
func (s *FileStore) Save(ctx context.Context, auth *Auth) error {
	if auth == nil {
		return fmt.Errorf("auth filestore: auth is nil")
	}
	path := s.resolvePath(auth)
	if path == "" {
		return fmt.Errorf("auth filestore: missing file path attribute for %s", auth.ID)
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
		return fmt.Errorf("auth filestore: create dir failed: %w", err)
	}
	raw, err := json.Marshal(auth.Metadata)
	if err != nil {
		return fmt.Errorf("auth filestore: marshal metadata failed: %w", err)
	}
	if existing, err := os.ReadFile(path); err == nil {
		if jsonEqual(existing, raw) {
			return nil
		}
	}
	tmp := path + ".tmp"
	if err = os.WriteFile(tmp, raw, 0o600); err != nil {
		return fmt.Errorf("auth filestore: write temp failed: %w", err)
	}
	if err = os.Rename(tmp, path); err != nil {
		return fmt.Errorf("auth filestore: rename failed: %w", err)
	}
	return nil
}

func jsonEqual(a, b []byte) bool {
	var objA any
	var objB any
	if err := json.Unmarshal(a, &objA); err != nil {
		return false
	}
	if err := json.Unmarshal(b, &objB); err != nil {
		return false
	}
	return deepEqualJSON(objA, objB)
}

func deepEqualJSON(a, b any) bool {
	switch valA := a.(type) {
	case map[string]any:
		valB, ok := b.(map[string]any)
		if !ok || len(valA) != len(valB) {
			return false
		}
		for key, subA := range valA {
			subB, ok := valB[key]
			if !ok || !deepEqualJSON(subA, subB) {
				return false
			}
		}
		return true
	case []any:
		sliceB, ok := b.([]any)
		if !ok || len(valA) != len(sliceB) {
			return false
		}
		for i := range valA {
			if !deepEqualJSON(valA[i], sliceB[i]) {
				return false
			}
		}
		return true
	case float64:
		valB, ok := b.(float64)
		if !ok {
			return false
		}
		return valA == valB
	case string:
		valB, ok := b.(string)
		if !ok {
			return false
		}
		return valA == valB
	case bool:
		valB, ok := b.(bool)
		if !ok {
			return false
		}
		return valA == valB
	case nil:
		return b == nil
	default:
		return false
	}
}

// Delete removes the auth file.
func (s *FileStore) Delete(ctx context.Context, id string) error {
	if id == "" {
		return fmt.Errorf("auth filestore: id is empty")
	}
	path := filepath.Join(s.dir, id)
	if strings.ContainsRune(id, os.PathSeparator) {
		path = id
	}
	if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("auth filestore: delete failed: %w", err)
	}
	return nil
}

func (s *FileStore) readFile(path string) (*Auth, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("read file: %w", err)
	}
	if len(data) == 0 {
		return nil, nil
	}
	metadata := make(map[string]any)
	if err = json.Unmarshal(data, &metadata); err != nil {
		return nil, fmt.Errorf("unmarshal auth json: %w", err)
	}
	provider, _ := metadata["type"].(string)
	if provider == "" {
		provider = "unknown"
	}
	info, err := os.Stat(path)
	if err != nil {
		return nil, fmt.Errorf("stat file: %w", err)
	}
	id := s.idFor(path)
	auth := &Auth{
		ID:               id,
		Provider:         provider,
		Label:            s.labelFor(metadata),
		Status:           StatusActive,
		Attributes:       map[string]string{"path": path},
		Metadata:         metadata,
		CreatedAt:        info.ModTime(),
		UpdatedAt:        info.ModTime(),
		LastRefreshedAt:  time.Time{},
		NextRefreshAfter: time.Time{},
	}
	if email, ok := metadata["email"].(string); ok && email != "" {
		auth.Attributes["email"] = email
	}
	return auth, nil
}

func (s *FileStore) idFor(path string) string {
	rel, err := filepath.Rel(s.dir, path)
	if err != nil {
		return path
	}
	return rel
}

func (s *FileStore) resolvePath(auth *Auth) string {
	if auth == nil {
		return ""
	}
	if auth.Attributes != nil {
		if p := auth.Attributes["path"]; p != "" {
			return p
		}
	}
	if filepath.IsAbs(auth.ID) {
		return auth.ID
	}
	if auth.ID == "" {
		return ""
	}
	return filepath.Join(s.dir, auth.ID)
}

func (s *FileStore) labelFor(metadata map[string]any) string {
	if metadata == nil {
		return ""
	}
	if v, ok := metadata["label"].(string); ok && v != "" {
		return v
	}
	if v, ok := metadata["email"].(string); ok && v != "" {
		return v
	}
	if project, ok := metadata["project_id"].(string); ok && project != "" {
		return project
	}
	return ""
}
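A small usage sketch for the file store, assuming the `auth` package above is imported along with `context` and `fmt`; the directory path is illustrative:

	func listCredentials(dir string) error {
		store := auth.NewFileStore(dir)
		auths, err := store.List(context.Background())
		if err != nil {
			return err
		}
		for _, a := range auths {
			// ID is the JSON file path relative to dir; Label falls back to email or project_id.
			fmt.Println(a.ID, a.Provider, a.Label)
		}
		return nil
	}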
sdk/cliproxy/auth/manager.go (new file, 908 lines)
@@ -0,0 +1,908 @@
package auth

import (
	"context"
	"encoding/json"
	"errors"
	"net/http"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/google/uuid"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/util"
	cliproxyexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
	log "github.com/sirupsen/logrus"
)

// ProviderExecutor defines the contract required by Manager to execute provider calls.
type ProviderExecutor interface {
	// Identifier returns the provider key handled by this executor.
	Identifier() string
	// Execute handles non-streaming execution and returns the provider response payload.
	Execute(ctx context.Context, auth *Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (cliproxyexecutor.Response, error)
	// ExecuteStream handles streaming execution and returns a channel of provider chunks.
	ExecuteStream(ctx context.Context, auth *Auth, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (<-chan cliproxyexecutor.StreamChunk, error)
	// Refresh attempts to refresh provider credentials and returns the updated auth state.
	Refresh(ctx context.Context, auth *Auth) (*Auth, error)
}

// RefreshEvaluator allows runtime state to override refresh decisions.
type RefreshEvaluator interface {
	ShouldRefresh(now time.Time, auth *Auth) bool
}

const (
	refreshCheckInterval  = 5 * time.Second
	refreshPendingBackoff = time.Minute
	refreshFailureBackoff = 5 * time.Minute
)

// Result captures execution outcome used to adjust auth state.
type Result struct {
	// AuthID references the auth that produced this result.
	AuthID string
	// Provider is copied for convenience when emitting hooks.
	Provider string
	// Model is the upstream model identifier used for the request.
	Model string
	// Success marks whether the execution succeeded.
	Success bool
	// Error describes the failure when Success is false.
	Error *Error
}

// Selector chooses an auth candidate for execution.
type Selector interface {
	Pick(ctx context.Context, provider, model string, opts cliproxyexecutor.Options, auths []*Auth) (*Auth, error)
}

// Hook captures lifecycle callbacks for observing auth changes.
type Hook interface {
	// OnAuthRegistered fires when a new auth is registered.
	OnAuthRegistered(ctx context.Context, auth *Auth)
	// OnAuthUpdated fires when an existing auth changes state.
	OnAuthUpdated(ctx context.Context, auth *Auth)
	// OnResult fires when an execution result is recorded.
	OnResult(ctx context.Context, result Result)
}

// NoopHook provides optional hook defaults.
type NoopHook struct{}

// OnAuthRegistered implements Hook.
func (NoopHook) OnAuthRegistered(context.Context, *Auth) {}

// OnAuthUpdated implements Hook.
func (NoopHook) OnAuthUpdated(context.Context, *Auth) {}

// OnResult implements Hook.
func (NoopHook) OnResult(context.Context, Result) {}

// Manager orchestrates auth lifecycle, selection, execution, and persistence.
type Manager struct {
	store     Store
	executors map[string]ProviderExecutor
	selector  Selector
	hook      Hook
	mu        sync.RWMutex
	auths     map[string]*Auth
	// providerOffsets tracks per-model provider rotation state for multi-provider routing.
	providerOffsets map[string]int

	// Optional HTTP RoundTripper provider injected by host.
	rtProvider RoundTripperProvider

	// Auto refresh state
	refreshCancel context.CancelFunc
}

// NewManager constructs a manager with optional custom selector and hook.
func NewManager(store Store, selector Selector, hook Hook) *Manager {
	if selector == nil {
		selector = &RoundRobinSelector{}
	}
	if hook == nil {
		hook = NoopHook{}
	}
	return &Manager{
		store:           store,
		executors:       make(map[string]ProviderExecutor),
		selector:        selector,
		hook:            hook,
		auths:           make(map[string]*Auth),
		providerOffsets: make(map[string]int),
	}
}

// SetStore swaps the underlying persistence store.
func (m *Manager) SetStore(store Store) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.store = store
}

// SetRoundTripperProvider registers a provider that returns a per-auth RoundTripper.
func (m *Manager) SetRoundTripperProvider(p RoundTripperProvider) {
	m.mu.Lock()
	m.rtProvider = p
	m.mu.Unlock()
}

// RegisterExecutor registers a provider executor with the manager.
func (m *Manager) RegisterExecutor(executor ProviderExecutor) {
	if executor == nil {
		return
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	m.executors[executor.Identifier()] = executor
}

// Register inserts a new auth entry into the manager.
func (m *Manager) Register(ctx context.Context, auth *Auth) (*Auth, error) {
	if auth == nil {
		return nil, nil
	}
	if auth.ID == "" {
		auth.ID = uuid.NewString()
	}
	m.mu.Lock()
	m.auths[auth.ID] = auth.Clone()
	m.mu.Unlock()
	_ = m.persist(ctx, auth)
	m.hook.OnAuthRegistered(ctx, auth.Clone())
	return auth.Clone(), nil
}

// Update replaces an existing auth entry and notifies hooks.
func (m *Manager) Update(ctx context.Context, auth *Auth) (*Auth, error) {
	if auth == nil || auth.ID == "" {
		return nil, nil
	}
	m.mu.Lock()
	m.auths[auth.ID] = auth.Clone()
	m.mu.Unlock()
	_ = m.persist(ctx, auth)
	m.hook.OnAuthUpdated(ctx, auth.Clone())
	return auth.Clone(), nil
}

// Load resets manager state from the backing store.
func (m *Manager) Load(ctx context.Context) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.store == nil {
		return nil
	}
	items, err := m.store.List(ctx)
	if err != nil {
		return err
	}
	m.auths = make(map[string]*Auth, len(items))
	for _, auth := range items {
		if auth == nil || auth.ID == "" {
			continue
		}
		m.auths[auth.ID] = auth.Clone()
	}
	return nil
}

// Execute performs a non-streaming execution using the configured selector and executor.
// It supports multiple providers for the same model and round-robins the starting provider per model.
func (m *Manager) Execute(ctx context.Context, providers []string, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (cliproxyexecutor.Response, error) {
	normalized := m.normalizeProviders(providers)
	if len(normalized) == 0 {
		return cliproxyexecutor.Response{}, &Error{Code: "provider_not_found", Message: "no provider supplied"}
	}
	rotated := m.rotateProviders(req.Model, normalized)
	defer m.advanceProviderCursor(req.Model, normalized)

	var lastErr error
	for _, provider := range rotated {
		resp, errExec := m.executeWithProvider(ctx, provider, req, opts)
		if errExec == nil {
			return resp, nil
		}
		lastErr = errExec
	}
	if lastErr != nil {
		return cliproxyexecutor.Response{}, lastErr
	}
	return cliproxyexecutor.Response{}, &Error{Code: "auth_not_found", Message: "no auth available"}
}

// ExecuteStream performs a streaming execution using the configured selector and executor.
// It supports multiple providers for the same model and round-robins the starting provider per model.
func (m *Manager) ExecuteStream(ctx context.Context, providers []string, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (<-chan cliproxyexecutor.StreamChunk, error) {
	normalized := m.normalizeProviders(providers)
	if len(normalized) == 0 {
		return nil, &Error{Code: "provider_not_found", Message: "no provider supplied"}
	}
	rotated := m.rotateProviders(req.Model, normalized)
	defer m.advanceProviderCursor(req.Model, normalized)

	var lastErr error
	for _, provider := range rotated {
		chunks, errStream := m.executeStreamWithProvider(ctx, provider, req, opts)
		if errStream == nil {
			return chunks, nil
		}
		lastErr = errStream
	}
	if lastErr != nil {
		return nil, lastErr
	}
	return nil, &Error{Code: "auth_not_found", Message: "no auth available"}
}

func (m *Manager) executeWithProvider(ctx context.Context, provider string, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (cliproxyexecutor.Response, error) {
	if provider == "" {
		return cliproxyexecutor.Response{}, &Error{Code: "provider_not_found", Message: "provider identifier is empty"}
	}
	tried := make(map[string]struct{})
	var lastErr error
	for {
		auth, executor, errPick := m.pickNext(ctx, provider, req.Model, opts, tried)
		if errPick != nil {
			if lastErr != nil {
				return cliproxyexecutor.Response{}, lastErr
			}
			return cliproxyexecutor.Response{}, errPick
		}

		if isAPIKey, info := auth.AccountInfo(); isAPIKey {
			log.Debugf("Use API key %s for model %s", util.HideAPIKey(info), req.Model)
		} else {
			log.Debugf("Use OAuth %s for model %s", info, req.Model)
		}

		tried[auth.ID] = struct{}{}
		execCtx := ctx
		if rt := m.roundTripperFor(auth); rt != nil {
			execCtx = context.WithValue(execCtx, roundTripperContextKey{}, rt)
			execCtx = context.WithValue(execCtx, "cliproxy.roundtripper", rt)
		}
		resp, errExec := executor.Execute(execCtx, auth, req, opts)
		result := Result{AuthID: auth.ID, Provider: provider, Model: req.Model, Success: errExec == nil}
		if errExec != nil {
			result.Error = &Error{Message: errExec.Error()}
			var se cliproxyexecutor.StatusError
			if errors.As(errExec, &se) && se != nil {
				result.Error.HTTPStatus = se.StatusCode()
			}
			m.MarkResult(execCtx, result)
			lastErr = errExec
			continue
		}
		m.MarkResult(execCtx, result)
		return resp, nil
	}
}

func (m *Manager) executeStreamWithProvider(ctx context.Context, provider string, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) (<-chan cliproxyexecutor.StreamChunk, error) {
	if provider == "" {
		return nil, &Error{Code: "provider_not_found", Message: "provider identifier is empty"}
	}
	tried := make(map[string]struct{})
	var lastErr error
	for {
		auth, executor, errPick := m.pickNext(ctx, provider, req.Model, opts, tried)
		if errPick != nil {
			if lastErr != nil {
				return nil, lastErr
			}
			return nil, errPick
		}

		if isAPIKey, info := auth.AccountInfo(); isAPIKey {
			log.Debugf("Use API key %s for model %s", util.HideAPIKey(info), req.Model)
		} else {
			log.Debugf("Use OAuth %s for model %s", info, req.Model)
		}

		tried[auth.ID] = struct{}{}
		execCtx := ctx
		if rt := m.roundTripperFor(auth); rt != nil {
			execCtx = context.WithValue(execCtx, roundTripperContextKey{}, rt)
			execCtx = context.WithValue(execCtx, "cliproxy.roundtripper", rt)
		}
		chunks, errStream := executor.ExecuteStream(execCtx, auth, req, opts)
		if errStream != nil {
			rerr := &Error{Message: errStream.Error()}
			var se cliproxyexecutor.StatusError
			if errors.As(errStream, &se) && se != nil {
				rerr.HTTPStatus = se.StatusCode()
			}
			result := Result{AuthID: auth.ID, Provider: provider, Model: req.Model, Success: false, Error: rerr}
			m.MarkResult(execCtx, result)
			lastErr = errStream
			continue
		}
		out := make(chan cliproxyexecutor.StreamChunk)
		go func(streamCtx context.Context, streamAuth *Auth, streamProvider string, streamChunks <-chan cliproxyexecutor.StreamChunk) {
			defer close(out)
			var failed bool
			for chunk := range streamChunks {
				if chunk.Err != nil && !failed {
					failed = true
					rerr := &Error{Message: chunk.Err.Error()}
					var se cliproxyexecutor.StatusError
					if errors.As(chunk.Err, &se) && se != nil {
						rerr.HTTPStatus = se.StatusCode()
					}
					m.MarkResult(streamCtx, Result{AuthID: streamAuth.ID, Provider: streamProvider, Model: req.Model, Success: false, Error: rerr})
				}
				out <- chunk
			}
			if !failed {
				m.MarkResult(streamCtx, Result{AuthID: streamAuth.ID, Provider: streamProvider, Model: req.Model, Success: true})
			}
		}(execCtx, auth.Clone(), provider, chunks)
		return out, nil
	}
}

func (m *Manager) normalizeProviders(providers []string) []string {
	if len(providers) == 0 {
		return nil
	}
	result := make([]string, 0, len(providers))
	seen := make(map[string]struct{}, len(providers))
	for _, provider := range providers {
		p := strings.TrimSpace(strings.ToLower(provider))
		if p == "" {
			continue
		}
		if _, ok := seen[p]; ok {
			continue
		}
		seen[p] = struct{}{}
		result = append(result, p)
	}
	return result
}

func (m *Manager) rotateProviders(model string, providers []string) []string {
	if len(providers) == 0 {
		return nil
	}
	m.mu.RLock()
	offset := m.providerOffsets[model]
	m.mu.RUnlock()
	if len(providers) > 0 {
		offset %= len(providers)
	}
	if offset < 0 {
		offset = 0
	}
	if offset == 0 {
		return providers
	}
	rotated := make([]string, 0, len(providers))
	rotated = append(rotated, providers[offset:]...)
	rotated = append(rotated, providers[:offset]...)
	return rotated
}

func (m *Manager) advanceProviderCursor(model string, providers []string) {
	if len(providers) == 0 {
		m.mu.Lock()
		delete(m.providerOffsets, model)
		m.mu.Unlock()
		return
	}
	m.mu.Lock()
	current := m.providerOffsets[model]
	m.providerOffsets[model] = (current + 1) % len(providers)
	m.mu.Unlock()
}

// MarkResult records an execution result and notifies hooks.
func (m *Manager) MarkResult(ctx context.Context, result Result) {
	if result.AuthID == "" {
		return
	}
	// Update in-memory auth status based on result.
	m.mu.Lock()
	if auth, ok := m.auths[result.AuthID]; ok && auth != nil {
		now := time.Now()
		if result.Success {
			// Clear transient error/quota flags on success.
			auth.Unavailable = false
			auth.Status = StatusActive
			auth.StatusMessage = ""
			auth.Quota.Exceeded = false
			auth.Quota.Reason = ""
			auth.Quota.NextRecoverAt = time.Time{}
			auth.LastError = nil
			auth.UpdatedAt = now
			if result.Model != "" {
				registry.GetGlobalRegistry().ClearModelQuotaExceeded(auth.ID, result.Model)
			}
		} else {
			// Default transient error state.
			auth.Unavailable = true
			auth.Status = StatusError
			auth.UpdatedAt = now
			if result.Error != nil {
				auth.LastError = &Error{Code: result.Error.Code, Message: result.Error.Message, Retryable: result.Error.Retryable}
			}
			// If the error carries a status code, adjust backoff/quota accordingly.
			// 401 -> auth issue; 402/429 -> quota; 5xx -> transient.
			var statusCode int
			if se, isOk := any(result.Error).(interface{ StatusCode() int }); isOk && se != nil {
				statusCode = se.StatusCode()
			}
			switch statusCode {
			case 401:
				auth.StatusMessage = "unauthorized"
				auth.NextRefreshAfter = now.Add(5 * time.Minute)
			case 402, 429:
				auth.StatusMessage = "quota exhausted"
				auth.Quota.Exceeded = true
				auth.Quota.Reason = "quota"
				auth.Quota.NextRecoverAt = now.Add(10 * time.Minute)
				auth.NextRefreshAfter = auth.Quota.NextRecoverAt
				if result.Model != "" {
					registry.GetGlobalRegistry().SetModelQuotaExceeded(auth.ID, result.Model)
				}
			case 403, 408, 500, 502, 503, 504:
				auth.StatusMessage = "transient upstream error"
				auth.NextRefreshAfter = now.Add(1 * time.Minute)
			default:
				// keep generic
				if auth.StatusMessage == "" {
					auth.StatusMessage = "request failed"
				}
			}
		}
		// Persist best-effort (only metadata is stored for file store).
		_ = m.persist(ctx, auth)
	}
	m.mu.Unlock()

	m.hook.OnResult(ctx, result)
}

// List returns all auth entries currently known by the manager.
func (m *Manager) List() []*Auth {
	m.mu.RLock()
	defer m.mu.RUnlock()
	list := make([]*Auth, 0, len(m.auths))
	for _, auth := range m.auths {
		list = append(list, auth.Clone())
	}
	return list
}

// GetByID retrieves an auth entry by its ID.
func (m *Manager) GetByID(id string) (*Auth, bool) {
	if id == "" {
		return nil, false
	}
	m.mu.RLock()
	defer m.mu.RUnlock()
	auth, ok := m.auths[id]
	if !ok {
		return nil, false
	}
	return auth.Clone(), true
}

func (m *Manager) pickNext(ctx context.Context, provider, model string, opts cliproxyexecutor.Options, tried map[string]struct{}) (*Auth, ProviderExecutor, error) {
	m.mu.RLock()
	executor, okExecutor := m.executors[provider]
	if !okExecutor {
		m.mu.RUnlock()
		return nil, nil, &Error{Code: "executor_not_found", Message: "executor not registered"}
	}
	candidates := make([]*Auth, 0, len(m.auths))
	for _, auth := range m.auths {
		if auth.Provider != provider || auth.Disabled {
			continue
		}
		if _, used := tried[auth.ID]; used {
			continue
		}
		candidates = append(candidates, auth.Clone())
	}
	m.mu.RUnlock()
	if len(candidates) == 0 {
		return nil, nil, &Error{Code: "auth_not_found", Message: "no auth available"}
	}
	auth, errPick := m.selector.Pick(ctx, provider, model, opts, candidates)
	if errPick != nil {
		return nil, nil, errPick
	}
	if auth == nil {
		return nil, nil, &Error{Code: "auth_not_found", Message: "selector returned no auth"}
	}
	return auth, executor, nil
}

func (m *Manager) persist(ctx context.Context, auth *Auth) error {
	if m.store == nil || auth == nil {
		return nil
	}
	// Skip persistence when metadata is absent (e.g., runtime-only auths).
	if auth.Metadata == nil {
		return nil
	}
	return m.store.Save(ctx, auth)
}

// StartAutoRefresh launches a background loop that evaluates auth freshness
// every few seconds and triggers refresh operations when required.
// Only one loop is kept alive; starting a new one cancels the previous run.
func (m *Manager) StartAutoRefresh(parent context.Context, interval time.Duration) {
	if interval <= 0 || interval > refreshCheckInterval {
		interval = refreshCheckInterval
	} else {
		interval = refreshCheckInterval
	}
	if m.refreshCancel != nil {
		m.refreshCancel()
		m.refreshCancel = nil
	}
	ctx, cancel := context.WithCancel(parent)
	m.refreshCancel = cancel
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		m.checkRefreshes(ctx)
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				m.checkRefreshes(ctx)
			}
		}
	}()
}

// StopAutoRefresh cancels the background refresh loop, if running.
func (m *Manager) StopAutoRefresh() {
	if m.refreshCancel != nil {
		m.refreshCancel()
		m.refreshCancel = nil
	}
}

func (m *Manager) checkRefreshes(ctx context.Context) {
	now := time.Now()
	snapshot := m.snapshotAuths()
	for _, a := range snapshot {
		if !m.shouldRefresh(a, now) {
			continue
		}
		if exec := m.executorFor(a.Provider); exec == nil {
			continue
		}
		if !m.markRefreshPending(a.ID, now) {
			continue
		}
		go m.refreshAuth(ctx, a.ID)
	}
}

func (m *Manager) snapshotAuths() []*Auth {
	m.mu.RLock()
	defer m.mu.RUnlock()
	out := make([]*Auth, 0, len(m.auths))
	for _, a := range m.auths {
		out = append(out, a.Clone())
	}
	return out
}

func (m *Manager) shouldRefresh(a *Auth, now time.Time) bool {
	if a == nil || a.Disabled {
		return false
	}
	if !a.NextRefreshAfter.IsZero() && now.Before(a.NextRefreshAfter) {
		return false
	}
	if evaluator, ok := a.Runtime.(RefreshEvaluator); ok && evaluator != nil {
		return evaluator.ShouldRefresh(now, a)
	}

	lastRefresh := a.LastRefreshedAt
	if lastRefresh.IsZero() {
		if ts, ok := authLastRefreshTimestamp(a); ok {
			lastRefresh = ts
		}
	}

	expiry, hasExpiry := a.ExpirationTime()

	if interval := authPreferredInterval(a); interval > 0 {
		if hasExpiry && !expiry.IsZero() {
			if !expiry.After(now) {
				return true
			}
			if expiry.Sub(now) <= interval {
				return true
			}
		}
		if lastRefresh.IsZero() {
			return true
		}
		return now.Sub(lastRefresh) >= interval
	}

	provider := strings.ToLower(a.Provider)
	lead := ProviderRefreshLead(provider, a.Runtime)
	if lead <= 0 {
		if hasExpiry && !expiry.IsZero() {
			return now.After(expiry)
		}
		return false
	}
	if hasExpiry && !expiry.IsZero() {
		return time.Until(expiry) <= lead
	}
	if !lastRefresh.IsZero() {
		return now.Sub(lastRefresh) >= lead
	}
	return true
}

func authPreferredInterval(a *Auth) time.Duration {
	if a == nil {
		return 0
	}
	if d := durationFromMetadata(a.Metadata, "refresh_interval_seconds", "refreshIntervalSeconds", "refresh_interval", "refreshInterval"); d > 0 {
		return d
	}
	if d := durationFromAttributes(a.Attributes, "refresh_interval_seconds", "refreshIntervalSeconds", "refresh_interval", "refreshInterval"); d > 0 {
		return d
	}
	return 0
}

func durationFromMetadata(meta map[string]any, keys ...string) time.Duration {
	if len(meta) == 0 {
		return 0
	}
	for _, key := range keys {
		if val, ok := meta[key]; ok {
			if dur := parseDurationValue(val); dur > 0 {
				return dur
			}
		}
	}
	return 0
}

func durationFromAttributes(attrs map[string]string, keys ...string) time.Duration {
	if len(attrs) == 0 {
		return 0
	}
	for _, key := range keys {
		if val, ok := attrs[key]; ok {
			if dur := parseDurationString(val); dur > 0 {
				return dur
			}
		}
	}
	return 0
}

func parseDurationValue(val any) time.Duration {
	switch v := val.(type) {
	case time.Duration:
		if v <= 0 {
			return 0
		}
		return v
	case int:
		if v <= 0 {
			return 0
		}
		return time.Duration(v) * time.Second
	case int32:
		if v <= 0 {
			return 0
		}
		return time.Duration(v) * time.Second
	case int64:
		if v <= 0 {
			return 0
		}
		return time.Duration(v) * time.Second
	case uint:
		if v == 0 {
			return 0
		}
		return time.Duration(v) * time.Second
	case uint32:
		if v == 0 {
			return 0
		}
		return time.Duration(v) * time.Second
	case uint64:
		if v == 0 {
			return 0
		}
		return time.Duration(v) * time.Second
	case float32:
		if v <= 0 {
			return 0
		}
		return time.Duration(float64(v) * float64(time.Second))
	case float64:
		if v <= 0 {
			return 0
		}
		return time.Duration(v * float64(time.Second))
	case json.Number:
		if i, err := v.Int64(); err == nil {
			if i <= 0 {
				return 0
			}
			return time.Duration(i) * time.Second
		}
		if f, err := v.Float64(); err == nil && f > 0 {
			return time.Duration(f * float64(time.Second))
		}
	case string:
		return parseDurationString(v)
	}
	return 0
}

func parseDurationString(raw string) time.Duration {
	s := strings.TrimSpace(raw)
	if s == "" {
		return 0
	}
	if dur, err := time.ParseDuration(s); err == nil && dur > 0 {
		return dur
	}
	if secs, err := strconv.ParseFloat(s, 64); err == nil && secs > 0 {
		return time.Duration(secs * float64(time.Second))
	}
	return 0
}

func authLastRefreshTimestamp(a *Auth) (time.Time, bool) {
	if a == nil {
		return time.Time{}, false
	}
	if a.Metadata != nil {
		if ts, ok := lookupMetadataTime(a.Metadata, "last_refresh", "lastRefresh", "last_refreshed_at", "lastRefreshedAt"); ok {
			return ts, true
		}
	}
	if a.Attributes != nil {
		for _, key := range []string{"last_refresh", "lastRefresh", "last_refreshed_at", "lastRefreshedAt"} {
			if val := strings.TrimSpace(a.Attributes[key]); val != "" {
				if ts, ok := parseTimeValue(val); ok {
					return ts, true
				}
			}
		}
	}
	return time.Time{}, false
}

func lookupMetadataTime(meta map[string]any, keys ...string) (time.Time, bool) {
	for _, key := range keys {
		if val, ok := meta[key]; ok {
			if ts, ok := parseTimeValue(val); ok {
				return ts, true
			}
		}
	}
	return time.Time{}, false
}

func (m *Manager) markRefreshPending(id string, now time.Time) bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	auth, ok := m.auths[id]
	if !ok || auth == nil || auth.Disabled {
		return false
	}
	if !auth.NextRefreshAfter.IsZero() && now.Before(auth.NextRefreshAfter) {
		return false
	}
	auth.NextRefreshAfter = now.Add(refreshPendingBackoff)
	m.auths[id] = auth
	return true
}

func (m *Manager) refreshAuth(ctx context.Context, id string) {
	m.mu.RLock()
	auth := m.auths[id]
	var exec ProviderExecutor
	if auth != nil {
		exec = m.executors[auth.Provider]
	}
	m.mu.RUnlock()
	if auth == nil || exec == nil {
		return
	}
	cloned := auth.Clone()
	updated, err := exec.Refresh(ctx, cloned)
	now := time.Now()
	if err != nil {
		m.mu.Lock()
		if current := m.auths[id]; current != nil {
			current.NextRefreshAfter = now.Add(refreshFailureBackoff)
			current.LastError = &Error{Message: err.Error()}
			m.auths[id] = current
		}
		m.mu.Unlock()
		return
	}
	if updated == nil {
		updated = cloned
	}
	updated.Runtime = auth.Runtime
	updated.LastRefreshedAt = now
	updated.NextRefreshAfter = time.Time{}
	updated.LastError = nil
	updated.UpdatedAt = now
	_, _ = m.Update(ctx, updated)
}

func (m *Manager) executorFor(provider string) ProviderExecutor {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.executors[provider]
}

// roundTripperContextKey is an unexported context key type to avoid collisions.
type roundTripperContextKey struct{}

// roundTripperFor retrieves an HTTP RoundTripper for the given auth if a provider is registered.
func (m *Manager) roundTripperFor(auth *Auth) http.RoundTripper {
	m.mu.RLock()
	p := m.rtProvider
	m.mu.RUnlock()
	if p == nil || auth == nil {
		return nil
	}
	return p.RoundTripperFor(auth)
}

// RoundTripperProvider defines a minimal provider of per-auth HTTP transports.
type RoundTripperProvider interface {
	RoundTripperFor(auth *Auth) http.RoundTripper
}

// RequestPreparer is an optional interface that provider executors can implement
// to mutate outbound HTTP requests with provider credentials.
type RequestPreparer interface {
	PrepareRequest(req *http.Request, auth *Auth) error
}

// InjectCredentials delegates per-provider HTTP request preparation when supported.
// If the registered executor for the auth provider implements RequestPreparer,
// it will be invoked to modify the request (e.g., add headers).
func (m *Manager) InjectCredentials(req *http.Request, authID string) error {
	if req == nil || authID == "" {
		return nil
	}
	m.mu.RLock()
	a := m.auths[authID]
	var exec ProviderExecutor
	if a != nil {
		exec = m.executors[a.Provider]
	}
	m.mu.RUnlock()
	if a == nil || exec == nil {
		return nil
	}
	if p, ok := exec.(RequestPreparer); ok && p != nil {
		return p.PrepareRequest(req, a)
	}
	return nil
}
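A rough wiring sketch for the manager, assuming an `executor` value that implements ProviderExecutor and `req`/`opts` built by the host application; the auth directory path is illustrative:

	func runOnce(ctx context.Context, executor auth.ProviderExecutor, req cliproxyexecutor.Request, opts cliproxyexecutor.Options) error {
		// nil selector/hook fall back to RoundRobinSelector and NoopHook.
		m := auth.NewManager(auth.NewFileStore("./auths"), nil, nil)
		m.RegisterExecutor(executor)
		if err := m.Load(ctx); err != nil { // hydrate in-memory state from the store
			return err
		}
		m.StartAutoRefresh(ctx, 0) // the loop ticks at the fixed 5s check interval
		defer m.StopAutoRefresh()

		resp, err := m.Execute(ctx, []string{executor.Identifier()}, req, opts)
		if err != nil {
			return err
		}
		_ = resp.Payload // provider response bytes
		return nil
	}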
sdk/cliproxy/auth/selector.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package auth

import (
	"context"
	"sync"
	"time"

	cliproxyexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
)

// RoundRobinSelector provides a simple provider scoped round-robin selection strategy.
type RoundRobinSelector struct {
	mu      sync.Mutex
	cursors map[string]int
}

// Pick selects the next available auth for the provider in a round-robin manner.
func (s *RoundRobinSelector) Pick(ctx context.Context, provider, model string, opts cliproxyexecutor.Options, auths []*Auth) (*Auth, error) {
	_ = ctx
	_ = opts
	if len(auths) == 0 {
		return nil, &Error{Code: "auth_not_found", Message: "no auth candidates"}
	}
	if s.cursors == nil {
		s.cursors = make(map[string]int)
	}
	available := make([]*Auth, 0, len(auths))
	now := time.Now()
	for i := range auths {
		candidate := auths[i]
		if candidate.Unavailable && candidate.Quota.NextRecoverAt.After(now) {
			continue
		}
		if candidate.Status == StatusDisabled || candidate.Disabled {
			continue
		}
		available = append(available, candidate)
	}
	if len(available) == 0 {
		return nil, &Error{Code: "auth_unavailable", Message: "no auth available"}
	}
	key := provider + ":" + model
	s.mu.Lock()
	index := s.cursors[key]
	s.cursors[key] = (index + 1) % len(available)
	s.mu.Unlock()
	return available[index%len(available)], nil
}
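Because the Manager accepts any Selector, hosts can swap in their own policy. A minimal illustrative alternative (not part of the commit) that always prefers the least recently updated credential:

	type oldestFirstSelector struct{}

	func (oldestFirstSelector) Pick(_ context.Context, _, _ string, _ cliproxyexecutor.Options, auths []*auth.Auth) (*auth.Auth, error) {
		if len(auths) == 0 {
			return nil, &auth.Error{Code: "auth_not_found", Message: "no auth candidates"}
		}
		best := auths[0]
		for _, a := range auths[1:] {
			if a.UpdatedAt.Before(best.UpdatedAt) {
				best = a
			}
		}
		return best, nil
	}

	// plugged in via: auth.NewManager(store, oldestFirstSelector{}, nil)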
sdk/cliproxy/auth/status.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package auth

// Status represents the lifecycle state of an Auth entry.
type Status string

const (
	// StatusUnknown means the auth state could not be determined.
	StatusUnknown Status = "unknown"
	// StatusActive indicates the auth is valid and ready for execution.
	StatusActive Status = "active"
	// StatusPending indicates the auth is waiting for an external action, such as MFA.
	StatusPending Status = "pending"
	// StatusRefreshing indicates the auth is undergoing a refresh flow.
	StatusRefreshing Status = "refreshing"
	// StatusError indicates the auth is temporarily unavailable due to errors.
	StatusError Status = "error"
	// StatusDisabled marks the auth as intentionally disabled.
	StatusDisabled Status = "disabled"
)
sdk/cliproxy/auth/store.go (new file, 13 lines)
@@ -0,0 +1,13 @@
package auth

import "context"

// Store abstracts persistence of Auth state across restarts.
type Store interface {
	// List returns all auth records stored in the backend.
	List(ctx context.Context) ([]*Auth, error)
	// Save persists the provided auth record, replacing any existing one with same ID.
	Save(ctx context.Context, auth *Auth) error
	// Delete removes the auth record identified by id.
	Delete(ctx context.Context, id string) error
}
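Any backend that satisfies these three methods can replace the file store. A throwaway in-memory implementation, useful in tests, might look like this (illustrative, assuming the `auth`, `context`, and `sync` imports):

	type memStore struct {
		mu    sync.Mutex
		items map[string]*auth.Auth
	}

	func (s *memStore) List(context.Context) ([]*auth.Auth, error) {
		s.mu.Lock()
		defer s.mu.Unlock()
		out := make([]*auth.Auth, 0, len(s.items))
		for _, a := range s.items {
			out = append(out, a.Clone())
		}
		return out, nil
	}

	func (s *memStore) Save(_ context.Context, a *auth.Auth) error {
		s.mu.Lock()
		defer s.mu.Unlock()
		if s.items == nil {
			s.items = map[string]*auth.Auth{}
		}
		s.items[a.ID] = a.Clone()
		return nil
	}

	func (s *memStore) Delete(_ context.Context, id string) error {
		s.mu.Lock()
		defer s.mu.Unlock()
		delete(s.items, id)
		return nil
	}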
sdk/cliproxy/auth/types.go (new file, 218 lines)
@@ -0,0 +1,218 @@
package auth

import (
	"encoding/json"
	"strconv"
	"strings"
	"time"

	clipauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
)

// Auth encapsulates the runtime state and metadata associated with a single credential.
type Auth struct {
	// ID uniquely identifies the auth record across restarts.
	ID string `json:"id"`
	// Provider is the upstream provider key (e.g. "gemini", "claude").
	Provider string `json:"provider"`
	// Label is an optional human readable label for logging.
	Label string `json:"label,omitempty"`
	// Status is the lifecycle status managed by the AuthManager.
	Status Status `json:"status"`
	// StatusMessage holds a short description for the current status.
	StatusMessage string `json:"status_message,omitempty"`
	// Disabled indicates the auth is intentionally disabled by operator.
	Disabled bool `json:"disabled"`
	// Unavailable flags transient provider unavailability (e.g. quota exceeded).
	Unavailable bool `json:"unavailable"`
	// ProxyURL overrides the global proxy setting for this auth if provided.
	ProxyURL string `json:"proxy_url,omitempty"`
	// Attributes stores provider specific metadata needed by executors (immutable configuration).
	Attributes map[string]string `json:"attributes,omitempty"`
	// Metadata stores runtime mutable provider state (e.g. tokens, cookies).
	Metadata map[string]any `json:"metadata,omitempty"`
	// Quota captures recent quota information for load balancers.
	Quota QuotaState `json:"quota"`
	// LastError stores the last failure encountered while executing or refreshing.
	LastError *Error `json:"last_error,omitempty"`
	// CreatedAt is the creation timestamp in UTC.
	CreatedAt time.Time `json:"created_at"`
	// UpdatedAt is the last modification timestamp in UTC.
	UpdatedAt time.Time `json:"updated_at"`
	// LastRefreshedAt records the last successful refresh time in UTC.
	LastRefreshedAt time.Time `json:"last_refreshed_at"`
	// NextRefreshAfter is the earliest time a refresh should retrigger.
	NextRefreshAfter time.Time `json:"next_refresh_after"`

	// Runtime carries non-serialisable data used during execution (in-memory only).
	Runtime any `json:"-"`
}

// QuotaState contains limiter tracking data for a credential.
type QuotaState struct {
	// Exceeded indicates the credential recently hit a quota error.
	Exceeded bool `json:"exceeded"`
	// Reason provides an optional provider specific human readable description.
	Reason string `json:"reason,omitempty"`
	// NextRecoverAt is when the credential may become available again.
	NextRecoverAt time.Time `json:"next_recover_at"`
}

// Clone shallow copies the Auth structure, duplicating maps to avoid accidental mutation.
func (a *Auth) Clone() *Auth {
	if a == nil {
		return nil
	}
	copyAuth := *a
	if len(a.Attributes) > 0 {
		copyAuth.Attributes = make(map[string]string, len(a.Attributes))
		for key, value := range a.Attributes {
			copyAuth.Attributes[key] = value
		}
	}
	if len(a.Metadata) > 0 {
		copyAuth.Metadata = make(map[string]any, len(a.Metadata))
		for key, value := range a.Metadata {
			copyAuth.Metadata[key] = value
		}
	}
	copyAuth.Runtime = a.Runtime
	return &copyAuth
}

// AccountInfo reports whether the auth is backed by an API key and returns the
// identifying detail: the API key itself, or the account email for OAuth-style auths.
func (a *Auth) AccountInfo() (bool, string) {
	if a == nil {
		return false, ""
	}
	if a.Metadata != nil {
		if v, ok := a.Metadata["email"].(string); ok {
			return false, v
		}
	} else if a.Attributes != nil {
		if v := a.Attributes["api_key"]; v != "" {
			return true, v
		}
	}
	return false, ""
}

// ExpirationTime attempts to extract the credential expiration timestamp from metadata.
// It inspects common keys such as "expired", "expire", "expires_at", and also
// nested "token" objects to remain compatible with legacy auth file formats.
func (a *Auth) ExpirationTime() (time.Time, bool) {
	if a == nil {
		return time.Time{}, false
	}
	if ts, ok := expirationFromMap(a.Metadata); ok {
		return ts, true
	}
	return time.Time{}, false
}

var defaultAuthenticatorFactories = map[string]func() clipauth.Authenticator{
	"codex":      func() clipauth.Authenticator { return clipauth.NewCodexAuthenticator() },
	"claude":     func() clipauth.Authenticator { return clipauth.NewClaudeAuthenticator() },
	"qwen":       func() clipauth.Authenticator { return clipauth.NewQwenAuthenticator() },
	"gemini":     func() clipauth.Authenticator { return clipauth.NewGeminiAuthenticator() },
	"gemini-cli": func() clipauth.Authenticator { return clipauth.NewGeminiAuthenticator() },
}

var expireKeys = [...]string{"expired", "expire", "expires_at", "expiresAt", "expiry", "expires"}

func expirationFromMap(meta map[string]any) (time.Time, bool) {
	if meta == nil {
		return time.Time{}, false
	}
	for _, key := range expireKeys {
		if v, ok := meta[key]; ok {
			if ts, ok := parseTimeValue(v); ok {
				return ts, true
			}
		}
	}
	for _, nestedKey := range []string{"token", "Token"} {
		if nested, ok := meta[nestedKey]; ok {
			switch val := nested.(type) {
			case map[string]any:
				if ts, ok := expirationFromMap(val); ok {
					return ts, true
				}
			case map[string]string:
				temp := make(map[string]any, len(val))
				for k, v := range val {
					temp[k] = v
				}
				if ts, ok := expirationFromMap(temp); ok {
					return ts, true
				}
			}
		}
	}
	return time.Time{}, false
}

// ProviderRefreshLead returns how far ahead of expiry a provider's credentials
// should be refreshed, preferring a runtime override over the provider default.
func ProviderRefreshLead(provider string, runtime any) time.Duration {
	provider = strings.ToLower(provider)
	if runtime != nil {
		if eval, ok := runtime.(interface{ RefreshLead() *time.Duration }); ok {
			if lead := eval.RefreshLead(); lead != nil && *lead > 0 {
				return *lead
			}
		}
	}
	if factory, ok := defaultAuthenticatorFactories[provider]; ok {
		if auth := factory(); auth != nil {
			if lead := auth.RefreshLead(); lead != nil && *lead > 0 {
				return *lead
			}
		}
	}
	return 0
}

func parseTimeValue(v any) (time.Time, bool) {
	switch value := v.(type) {
	case string:
		s := strings.TrimSpace(value)
		if s == "" {
			return time.Time{}, false
		}
		layouts := []string{
			time.RFC3339,
			time.RFC3339Nano,
			"2006-01-02 15:04:05",
			"2006-01-02T15:04:05Z07:00",
		}
		for _, layout := range layouts {
			if ts, err := time.Parse(layout, s); err == nil {
				return ts, true
			}
		}
		if unix, err := strconv.ParseInt(s, 10, 64); err == nil {
			return normaliseUnix(unix), true
		}
	case float64:
		return normaliseUnix(int64(value)), true
	case int64:
		return normaliseUnix(value), true
	case json.Number:
		if i, err := value.Int64(); err == nil {
			return normaliseUnix(i), true
		}
		if f, err := value.Float64(); err == nil {
			return normaliseUnix(int64(f)), true
		}
	}
	return time.Time{}, false
}

func normaliseUnix(raw int64) time.Time {
	if raw <= 0 {
		return time.Time{}
	}
	// Heuristic: treat values with millisecond precision (>1e12) accordingly.
	if raw > 1_000_000_000_000 {
		return time.UnixMilli(raw)
	}
	return time.Unix(raw, 0)
}
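A small sketch of how metadata feeds these helpers; the ID, email, and timestamp below are illustrative values, and `fmt` is assumed imported:

	a := &auth.Auth{
		ID:       "gemini-demo.json",
		Provider: "gemini",
		Status:   auth.StatusActive,
		Metadata: map[string]any{
			"type":    "gemini",
			"email":   "user@example.com",
			"expired": "2026-01-02T15:04:05Z", // matched by ExpirationTime via the expireKeys list
		},
	}
	if exp, ok := a.ExpirationTime(); ok {
		fmt.Println("credential expires at", exp) // drives the manager's shouldRefresh decision
	}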
sdk/cliproxy/builder.go (new file, 138 lines)
@@ -0,0 +1,138 @@
package cliproxy

import (
	"fmt"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/api"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
	coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)

// Builder constructs a Service instance with customizable providers.
type Builder struct {
	cfg            *config.Config
	configPath     string
	tokenProvider  TokenClientProvider
	apiKeyProvider APIKeyClientProvider
	watcherFactory WatcherFactory
	hooks          Hooks
	authManager    *sdkAuth.Manager
	coreManager    *coreauth.Manager
	serverOptions  []api.ServerOption
}

// Hooks allows callers to plug into service lifecycle stages.
type Hooks struct {
	OnBeforeStart func(*config.Config)
	OnAfterStart  func(*Service)
}

// NewBuilder creates a Builder with default dependencies left unset.
func NewBuilder() *Builder {
	return &Builder{}
}

// WithConfig sets the configuration instance used by the service.
func (b *Builder) WithConfig(cfg *config.Config) *Builder {
	b.cfg = cfg
	return b
}

// WithConfigPath sets the absolute configuration file path used for reload watching.
func (b *Builder) WithConfigPath(path string) *Builder {
	b.configPath = path
	return b
}

// WithTokenClientProvider overrides the provider responsible for token-backed clients.
func (b *Builder) WithTokenClientProvider(provider TokenClientProvider) *Builder {
	b.tokenProvider = provider
	return b
}

// WithAPIKeyClientProvider overrides the provider responsible for API key-backed clients.
func (b *Builder) WithAPIKeyClientProvider(provider APIKeyClientProvider) *Builder {
	b.apiKeyProvider = provider
	return b
}

// WithWatcherFactory allows customizing the watcher factory that handles reloads.
func (b *Builder) WithWatcherFactory(factory WatcherFactory) *Builder {
	b.watcherFactory = factory
	return b
}

// WithHooks registers lifecycle hooks executed around service startup.
func (b *Builder) WithHooks(h Hooks) *Builder {
	b.hooks = h
	return b
}

// WithAuthManager overrides the authentication manager used for token lifecycle operations.
func (b *Builder) WithAuthManager(mgr *sdkAuth.Manager) *Builder {
	b.authManager = mgr
	return b
}

// WithCoreAuthManager overrides the runtime auth manager responsible for request execution.
func (b *Builder) WithCoreAuthManager(mgr *coreauth.Manager) *Builder {
	b.coreManager = mgr
	return b
}

// WithServerOptions appends server configuration options used during construction.
func (b *Builder) WithServerOptions(opts ...api.ServerOption) *Builder {
	b.serverOptions = append(b.serverOptions, opts...)
	return b
}

// Build validates inputs, applies defaults, and returns a ready-to-run service.
func (b *Builder) Build() (*Service, error) {
	if b.cfg == nil {
		return nil, fmt.Errorf("cliproxy: configuration is required")
	}
	if b.configPath == "" {
		return nil, fmt.Errorf("cliproxy: configuration path is required")
	}

	tokenProvider := b.tokenProvider
	if tokenProvider == nil {
		tokenProvider = NewFileTokenClientProvider()
	}

	apiKeyProvider := b.apiKeyProvider
	if apiKeyProvider == nil {
		apiKeyProvider = NewAPIKeyClientProvider()
	}

	watcherFactory := b.watcherFactory
	if watcherFactory == nil {
		watcherFactory = defaultWatcherFactory
	}

	authManager := b.authManager
	if authManager == nil {
		authManager = newDefaultAuthManager()
	}

	coreManager := b.coreManager
	if coreManager == nil {
		coreManager = coreauth.NewManager(coreauth.NewFileStore(b.cfg.AuthDir), nil, nil)
	}
	// Attach a default RoundTripper provider so providers can opt-in per-auth transports.
	coreManager.SetRoundTripperProvider(newDefaultRoundTripperProvider())

	service := &Service{
		cfg:            b.cfg,
		configPath:     b.configPath,
		tokenProvider:  tokenProvider,
		apiKeyProvider: apiKeyProvider,
		watcherFactory: watcherFactory,
		hooks:          b.hooks,
		authManager:    authManager,
		coreManager:    coreManager,
		serverOptions:  append([]api.ServerOption(nil), b.serverOptions...),
	}
	return service, nil
}
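A typical host wiring for the builder might look like the following sketch, assuming `cfg` is a *config.Config the host has already loaded; the config path and hook body are illustrative:

	svc, err := cliproxy.NewBuilder().
		WithConfig(cfg).
		WithConfigPath("/etc/cliproxy/config.yaml").
		WithHooks(cliproxy.Hooks{OnBeforeStart: func(c *config.Config) { log.Println("starting proxy") }}).
		Build()
	if err != nil {
		log.Fatal(err)
	}
	_ = svc // the returned *Service is then started by the host application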
60
sdk/cliproxy/executor/types.go
Normal file
60
sdk/cliproxy/executor/types.go
Normal file
@@ -0,0 +1,60 @@
package executor

import (
	"net/http"
	"net/url"

	sdktranslator "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
)

// Request encapsulates the translated payload that will be sent to a provider executor.
type Request struct {
	// Model is the upstream model identifier after translation.
	Model string
	// Payload is the provider specific JSON payload.
	Payload []byte
	// Format represents the provider payload schema.
	Format sdktranslator.Format
	// Metadata carries optional provider specific execution hints.
	Metadata map[string]any
}

// Options controls execution behavior for both streaming and non-streaming calls.
type Options struct {
	// Stream toggles streaming mode.
	Stream bool
	// Alt carries an optional alternate format hint (e.g. SSE JSON key).
	Alt string
	// Headers are forwarded to the provider request builder.
	Headers http.Header
	// Query contains optional query string parameters.
	Query url.Values
	// OriginalRequest preserves the inbound request bytes prior to translation.
	OriginalRequest []byte
	// SourceFormat identifies the inbound schema.
	SourceFormat sdktranslator.Format
}

// Response wraps either a full provider response or metadata for streaming flows.
type Response struct {
	// Payload is the provider response in the executor format.
	Payload []byte
	// Metadata exposes optional structured data for translators.
	Metadata map[string]any
}

// StreamChunk represents a single streaming payload unit emitted by provider executors.
type StreamChunk struct {
	// Payload is the raw provider chunk payload.
	Payload []byte
	// Err reports any terminal error encountered while producing chunks.
	Err error
}

// StatusError represents an error that carries an HTTP-like status code.
// Provider executors should implement this when possible to enable
// better auth state updates on failures (e.g., 401/402/429).
type StatusError interface {
	error
	StatusCode() int
}
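Since StatusError is the contract that lets the auth manager react to upstream failures, a provider executor can surface HTTP statuses with a small error type. A minimal sketch; the quotaError name and message are illustrative and not part of this commit:

package executor

import "fmt"

// quotaError is an illustrative StatusError implementation carrying the
// upstream HTTP status so callers can distinguish quota and auth failures.
type quotaError struct {
	status  int
	message string
}

func (e *quotaError) Error() string   { return fmt.Sprintf("%d: %s", e.status, e.message) }
func (e *quotaError) StatusCode() int { return e.status }

// An executor could return &quotaError{status: 429, message: "rate limited"}
// so that code checking for StatusError sees the 429.
var _ StatusError = (*quotaError)(nil)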
20
sdk/cliproxy/model_registry.go
Normal file
@@ -0,0 +1,20 @@
package cliproxy

import "github.com/router-for-me/CLIProxyAPI/v6/internal/registry"

// ModelInfo re-exports the registry model info structure.
type ModelInfo = registry.ModelInfo

// ModelRegistry describes registry operations consumed by external callers.
type ModelRegistry interface {
	RegisterClient(clientID, clientProvider string, models []*ModelInfo)
	UnregisterClient(clientID string)
	SetModelQuotaExceeded(clientID, modelID string)
	ClearModelQuotaExceeded(clientID, modelID string)
	GetAvailableModels(handlerType string) []map[string]any
}

// GlobalModelRegistry returns the shared registry instance.
func GlobalModelRegistry() ModelRegistry {
	return registry.GetGlobalRegistry()
}
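A hedged sketch of how an embedder might publish models through the shared registry using only the interface above. The client ID, provider name, and field values are illustrative, and registry.ModelInfo may carry more fields than the ones shown here:

package cliproxy

import "time"

// registerExampleModels binds a single illustrative model to a hypothetical
// client ID so GetAvailableModels can report it for the matching handler type.
func registerExampleModels() {
	models := []*ModelInfo{{
		ID:          "example-model", // identifier exposed to callers
		Object:      "model",
		Created:     time.Now().Unix(),
		OwnedBy:     "example-provider",
		Type:        "openai-compatibility",
		DisplayName: "Example Model",
	}}
	GlobalModelRegistry().RegisterClient("example-client-id", "example-provider", models)
	// When the backing credential disappears:
	// GlobalModelRegistry().UnregisterClient("example-client-id")
}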
64
sdk/cliproxy/pipeline/context.go
Normal file
@@ -0,0 +1,64 @@
package pipeline

import (
	"context"
	"net/http"

	cliproxyauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
	cliproxyexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
	sdktranslator "github.com/router-for-me/CLIProxyAPI/v6/sdk/translator"
)

// Context encapsulates execution state shared across middleware, translators, and executors.
type Context struct {
	// Request encapsulates the provider facing request payload.
	Request cliproxyexecutor.Request
	// Options carries execution flags (streaming, headers, etc.).
	Options cliproxyexecutor.Options
	// Auth references the credential selected for execution.
	Auth *cliproxyauth.Auth
	// Translator represents the pipeline responsible for schema adaptation.
	Translator *sdktranslator.Pipeline
	// HTTPClient allows middleware to customise the outbound transport per request.
	HTTPClient *http.Client
}

// Hook captures middleware callbacks around execution.
type Hook interface {
	BeforeExecute(ctx context.Context, execCtx *Context)
	AfterExecute(ctx context.Context, execCtx *Context, resp cliproxyexecutor.Response, err error)
	OnStreamChunk(ctx context.Context, execCtx *Context, chunk cliproxyexecutor.StreamChunk)
}

// HookFunc aggregates optional hook implementations.
type HookFunc struct {
	Before func(context.Context, *Context)
	After  func(context.Context, *Context, cliproxyexecutor.Response, error)
	Stream func(context.Context, *Context, cliproxyexecutor.StreamChunk)
}

// BeforeExecute implements Hook.
func (h HookFunc) BeforeExecute(ctx context.Context, execCtx *Context) {
	if h.Before != nil {
		h.Before(ctx, execCtx)
	}
}

// AfterExecute implements Hook.
func (h HookFunc) AfterExecute(ctx context.Context, execCtx *Context, resp cliproxyexecutor.Response, err error) {
	if h.After != nil {
		h.After(ctx, execCtx, resp, err)
	}
}

// OnStreamChunk implements Hook.
func (h HookFunc) OnStreamChunk(ctx context.Context, execCtx *Context, chunk cliproxyexecutor.StreamChunk) {
	if h.Stream != nil {
		h.Stream(ctx, execCtx, chunk)
	}
}

// RoundTripperProvider allows injection of custom HTTP transports per auth entry.
type RoundTripperProvider interface {
	RoundTripperFor(auth *cliproxyauth.Auth) http.RoundTripper
}
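HookFunc lets middleware implement only the phases it cares about; nil callbacks are skipped. A minimal sketch of a partial hook, with the logging and field access chosen purely for illustration:

package pipeline

import (
	"context"
	"log"

	cliproxyexecutor "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/executor"
)

// newLoggingHook returns a Hook that only implements the before/after phases;
// the Stream callback stays nil and HookFunc ignores it safely.
func newLoggingHook() Hook {
	return HookFunc{
		Before: func(ctx context.Context, execCtx *Context) {
			log.Printf("executing model %s (stream=%v)", execCtx.Request.Model, execCtx.Options.Stream)
		},
		After: func(ctx context.Context, execCtx *Context, resp cliproxyexecutor.Response, err error) {
			if err != nil {
				log.Printf("execution failed: %v", err)
				return
			}
			log.Printf("received %d response bytes", len(resp.Payload))
		},
	}
}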
46
sdk/cliproxy/providers.go
Normal file
@@ -0,0 +1,46 @@
package cliproxy

import (
	"context"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/watcher"
)

// NewFileTokenClientProvider returns the default token-backed client loader.
func NewFileTokenClientProvider() TokenClientProvider {
	return &fileTokenClientProvider{}
}

type fileTokenClientProvider struct{}

func (p *fileTokenClientProvider) Load(ctx context.Context, cfg *config.Config) (*TokenClientResult, error) {
	// Stateless executors handle tokens
	_ = ctx
	_ = cfg
	return &TokenClientResult{SuccessfulAuthed: 0}, nil
}

// NewAPIKeyClientProvider returns the default API key client loader that reuses existing logic.
func NewAPIKeyClientProvider() APIKeyClientProvider {
	return &apiKeyClientProvider{}
}

type apiKeyClientProvider struct{}

func (p *apiKeyClientProvider) Load(ctx context.Context, cfg *config.Config) (*APIKeyClientResult, error) {
	glCount, claudeCount, codexCount, openAICompat := watcher.BuildAPIKeyClients(cfg)
	if ctx != nil {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
		}
	}
	return &APIKeyClientResult{
		GeminiKeyCount:    glCount,
		ClaudeKeyCount:    claudeCount,
		CodexKeyCount:     codexCount,
		OpenAICompatCount: openAICompat,
	}, nil
}
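The default providers above are intentionally thin; an embedder can swap in its own loader by implementing the TokenClientProvider interface declared in sdk/cliproxy/types.go. A hedged sketch, where the fixed count is illustrative and token handling is assumed to happen elsewhere, as in the default:

package cliproxy

import (
	"context"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
)

// staticTokenClientProvider is an illustrative TokenClientProvider that simply
// reports a fixed number of successfully authenticated token clients.
type staticTokenClientProvider struct {
	count int
}

func (p *staticTokenClientProvider) Load(ctx context.Context, cfg *config.Config) (*TokenClientResult, error) {
	if ctx != nil {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
		}
	}
	_ = cfg // this sketch does not inspect the configuration
	return &TokenClientResult{SuccessfulAuthed: p.count}, nil
}

var _ TokenClientProvider = (*staticTokenClientProvider)(nil)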
51
sdk/cliproxy/rtprovider.go
Normal file
@@ -0,0 +1,51 @@
package cliproxy

import (
	"net/http"
	"net/url"
	"strings"
	"sync"

	coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)

// defaultRoundTripperProvider returns a per-auth HTTP RoundTripper based on
// the Auth.ProxyURL value. It caches transports per proxy URL string.
type defaultRoundTripperProvider struct {
	mu    sync.RWMutex
	cache map[string]http.RoundTripper
}

func newDefaultRoundTripperProvider() *defaultRoundTripperProvider {
	return &defaultRoundTripperProvider{cache: make(map[string]http.RoundTripper)}
}

// RoundTripperFor implements coreauth.RoundTripperProvider.
func (p *defaultRoundTripperProvider) RoundTripperFor(auth *coreauth.Auth) http.RoundTripper {
	if auth == nil {
		return nil
	}
	proxy := strings.TrimSpace(auth.ProxyURL)
	if proxy == "" {
		return nil
	}
	p.mu.RLock()
	rt := p.cache[proxy]
	p.mu.RUnlock()
	if rt != nil {
		return rt
	}
	// Build HTTP/HTTPS proxy transport; ignore SOCKS for simplicity here.
	u, err := url.Parse(proxy)
	if err != nil {
		return nil
	}
	if u.Scheme != "http" && u.Scheme != "https" {
		return nil
	}
	transport := &http.Transport{Proxy: http.ProxyURL(u)}
	p.mu.Lock()
	p.cache[proxy] = transport
	p.mu.Unlock()
	return transport
}
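Because the default provider only understands http/https proxies, a deployment that needs different transport behaviour can register its own provider through SetRoundTripperProvider, as Build does. A hedged sketch that tags outbound requests with the auth ID; the header name is illustrative, and returning nil is assumed to mean the manager falls back to its default transport:

package cliproxy

import (
	"net/http"

	coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)

// headerRoundTripper wraps a base transport and stamps each request with the
// auth ID; the header name below is purely illustrative.
type headerRoundTripper struct {
	authID string
	base   http.RoundTripper
}

func (h *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	clone := req.Clone(req.Context())
	clone.Header.Set("X-CLIProxy-Auth-ID", h.authID)
	return h.base.RoundTrip(clone)
}

// taggingRoundTripperProvider is an illustrative alternative to the default
// provider above; a nil return is assumed to select the default transport.
type taggingRoundTripperProvider struct{}

func (p *taggingRoundTripperProvider) RoundTripperFor(auth *coreauth.Auth) http.RoundTripper {
	if auth == nil {
		return nil
	}
	return &headerRoundTripper{authID: auth.ID, base: http.DefaultTransport}
}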
406
sdk/cliproxy/service.go
Normal file
@@ -0,0 +1,406 @@
package cliproxy

import (
	"context"
	"errors"
	"fmt"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/api"
	baseauth "github.com/router-for-me/CLIProxyAPI/v6/internal/auth"
	geminiwebclient "github.com/router-for-me/CLIProxyAPI/v6/internal/client/gemini-web"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/runtime/executor"
	sdkAuth "github.com/router-for-me/CLIProxyAPI/v6/sdk/auth"
	coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
	log "github.com/sirupsen/logrus"
)

// Service wraps the proxy server lifecycle so external programs can embed the CLI proxy.
type Service struct {
	cfg        *config.Config
	cfgMu      sync.RWMutex
	configPath string

	tokenProvider  TokenClientProvider
	apiKeyProvider APIKeyClientProvider
	watcherFactory WatcherFactory
	hooks          Hooks
	serverOptions  []api.ServerOption

	server    *api.Server
	serverErr chan error

	watcher       *WatcherWrapper
	watcherCancel context.CancelFunc

	// legacy client caches removed
	authManager *sdkAuth.Manager
	coreManager *coreauth.Manager

	shutdownOnce sync.Once
}

func newDefaultAuthManager() *sdkAuth.Manager {
	return sdkAuth.NewManager(
		sdkAuth.NewFileTokenStore(),
		sdkAuth.NewGeminiAuthenticator(),
		sdkAuth.NewCodexAuthenticator(),
		sdkAuth.NewClaudeAuthenticator(),
		sdkAuth.NewQwenAuthenticator(),
	)
}

// Run starts the service and blocks until the context is cancelled or the server stops.
func (s *Service) Run(ctx context.Context) error {
	if s == nil {
		return fmt.Errorf("cliproxy: service is nil")
	}
	if ctx == nil {
		ctx = context.Background()
	}

	shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer shutdownCancel()
	defer func() {
		if err := s.Shutdown(shutdownCtx); err != nil {
			log.Errorf("service shutdown returned error: %v", err)
		}
	}()

	if err := s.ensureAuthDir(); err != nil {
		return err
	}

	if s.coreManager != nil {
		if errLoad := s.coreManager.Load(ctx); errLoad != nil {
			log.Warnf("failed to load auth store: %v", errLoad)
		}
	}

	tokenResult, err := s.tokenProvider.Load(ctx, s.cfg)
	if err != nil && !errors.Is(err, context.Canceled) {
		return err
	}
	if tokenResult == nil {
		tokenResult = &TokenClientResult{}
	}

	apiKeyResult, err := s.apiKeyProvider.Load(ctx, s.cfg)
	if err != nil && !errors.Is(err, context.Canceled) {
		return err
	}
	if apiKeyResult == nil {
		apiKeyResult = &APIKeyClientResult{}
	}

	// legacy clients removed; no caches to refresh

	// handlers no longer depend on legacy clients; pass nil slice initially
	s.server = api.NewServer(s.cfg, s.coreManager, s.configPath, s.serverOptions...)

	if s.authManager == nil {
		s.authManager = newDefaultAuthManager()
	}

	if s.hooks.OnBeforeStart != nil {
		s.hooks.OnBeforeStart(s.cfg)
	}

	s.serverErr = make(chan error, 1)
	go func() {
		if errStart := s.server.Start(); errStart != nil {
			s.serverErr <- errStart
		} else {
			s.serverErr <- nil
		}
	}()

	time.Sleep(100 * time.Millisecond)
	log.Info("API server started successfully")

	if s.hooks.OnAfterStart != nil {
		s.hooks.OnAfterStart(s)
	}

	var watcherWrapper *WatcherWrapper
	reloadCallback := func(newCfg *config.Config) {
		if newCfg == nil {
			s.cfgMu.RLock()
			newCfg = s.cfg
			s.cfgMu.RUnlock()
		}

		// Pull the latest auth snapshot and sync
		auths := watcherWrapper.SnapshotAuths()
		s.syncCoreAuthFromAuths(ctx, auths)
		if s.server != nil {
			s.server.UpdateClients(newCfg)
		}

		s.cfgMu.Lock()
		s.cfg = newCfg
		s.cfgMu.Unlock()
	}

	watcherWrapper, err = s.watcherFactory(s.configPath, s.cfg.AuthDir, reloadCallback)
	if err != nil {
		return fmt.Errorf("cliproxy: failed to create watcher: %w", err)
	}
	s.watcher = watcherWrapper
	watcherWrapper.SetConfig(s.cfg)

	watcherCtx, watcherCancel := context.WithCancel(context.Background())
	s.watcherCancel = watcherCancel
	if err = watcherWrapper.Start(watcherCtx); err != nil {
		return fmt.Errorf("cliproxy: failed to start watcher: %w", err)
	}
	log.Info("file watcher started for config and auth directory changes")

	// Prefer core auth manager auto refresh if available.
	if s.coreManager != nil {
		interval := 15 * time.Minute
		if sec := s.cfg.GeminiWeb.TokenRefreshSeconds; sec > 0 {
			interval = time.Duration(sec) * time.Second
		}
		s.coreManager.StartAutoRefresh(context.Background(), interval)
		log.Infof("core auth auto-refresh started (interval=%s)", interval)
	}

	totalNewClients := tokenResult.SuccessfulAuthed + apiKeyResult.GeminiKeyCount + apiKeyResult.ClaudeKeyCount + apiKeyResult.CodexKeyCount + apiKeyResult.OpenAICompatCount
	log.Infof("full client load complete - %d clients (%d auth files + %d GL API keys + %d Claude API keys + %d Codex keys + %d OpenAI-compat)",
		totalNewClients,
		tokenResult.SuccessfulAuthed,
		apiKeyResult.GeminiKeyCount,
		apiKeyResult.ClaudeKeyCount,
		apiKeyResult.CodexKeyCount,
		apiKeyResult.OpenAICompatCount,
	)

	select {
	case <-ctx.Done():
		log.Debug("service context cancelled, shutting down...")
		return ctx.Err()
	case err = <-s.serverErr:
		return err
	}
}

// Shutdown gracefully stops background workers and the HTTP server.
func (s *Service) Shutdown(ctx context.Context) error {
	if s == nil {
		return nil
	}
	var shutdownErr error
	s.shutdownOnce.Do(func() {
		if ctx == nil {
			ctx = context.Background()
		}

		// legacy refresh loop removed; only stopping core auth manager below

		if s.watcherCancel != nil {
			s.watcherCancel()
		}
		if s.coreManager != nil {
			s.coreManager.StopAutoRefresh()
		}
		if s.watcher != nil {
			if err := s.watcher.Stop(); err != nil {
				log.Errorf("failed to stop file watcher: %v", err)
				shutdownErr = err
			}
		}

		// no legacy clients to persist

		if s.server != nil {
			shutdownCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
			defer cancel()
			if err := s.server.Stop(shutdownCtx); err != nil {
				log.Errorf("error stopping API server: %v", err)
				if shutdownErr == nil {
					shutdownErr = err
				}
			}
		}
	})
	return shutdownErr
}

func (s *Service) ensureAuthDir() error {
	info, err := os.Stat(s.cfg.AuthDir)
	if err != nil {
		if os.IsNotExist(err) {
			if mkErr := os.MkdirAll(s.cfg.AuthDir, 0o755); mkErr != nil {
				return fmt.Errorf("cliproxy: failed to create auth directory %s: %w", s.cfg.AuthDir, mkErr)
			}
			log.Infof("created missing auth directory: %s", s.cfg.AuthDir)
			return nil
		}
		return fmt.Errorf("cliproxy: error checking auth directory %s: %w", s.cfg.AuthDir, err)
	}
	if !info.IsDir() {
		return fmt.Errorf("cliproxy: auth path exists but is not a directory: %s", s.cfg.AuthDir)
	}
	return nil
}

func (s *Service) syncCoreAuthFromClients(ctx context.Context, _ map[string]any) { _ = ctx }

func (s *Service) startRefreshLoop() {
	// legacy refresh loop disabled; core auth manager handles auto refresh
}

func (s *Service) refreshTokens(ctx context.Context) { _ = ctx /* no-op */ }

func (s *Service) snapshotFileClients() map[string]any { return nil }

// persistClients deprecated: no legacy clients remain
func (s *Service) persistClients() {}

// refreshCachesFromCombined deprecated: no legacy clients remain
func (s *Service) refreshCachesFromCombined(_ map[string]any) {}

// combineClients deprecated

func (s *Service) refreshWithManager(ctx context.Context, provider, filePath string, storage baseauth.TokenStorage, metadata map[string]string) {
	_ = ctx
	_ = provider
	_ = filePath
	_ = storage
	_ = metadata
	// legacy file-backed refresh was replaced by core auth manager auto refresh
}

// syncCoreAuthFromAuths registers or updates core auths and disables missing ones.
func (s *Service) syncCoreAuthFromAuths(ctx context.Context, auths []*coreauth.Auth) {
	if s.coreManager == nil {
		return
	}
	seen := make(map[string]struct{}, len(auths))
	for _, a := range auths {
		if a == nil || a.ID == "" {
			continue
		}
		seen[a.ID] = struct{}{}
		// Ensure executors registered per provider: prefer stateless where available.
		switch strings.ToLower(a.Provider) {
		case "gemini":
			s.coreManager.RegisterExecutor(executor.NewGeminiExecutor())
		case "gemini-cli":
			s.coreManager.RegisterExecutor(executor.NewGeminiCLIExecutor())
		case "gemini-web":
			s.coreManager.RegisterExecutor(executor.NewGeminiWebExecutor(s.cfg))
		case "claude":
			s.coreManager.RegisterExecutor(executor.NewClaudeExecutor())
		case "codex":
			s.coreManager.RegisterExecutor(executor.NewCodexExecutor())
		case "qwen":
			s.coreManager.RegisterExecutor(executor.NewQwenExecutor())
		default:
			s.coreManager.RegisterExecutor(executor.NewOpenAICompatExecutor("openai-compatibility"))
		}

		// Preserve existing temporal fields
		if existing, ok := s.coreManager.GetByID(a.ID); ok && existing != nil {
			a.CreatedAt = existing.CreatedAt
			a.LastRefreshedAt = existing.LastRefreshedAt
			a.NextRefreshAfter = existing.NextRefreshAfter
		}
		// Ensure model registry reflects core auth identity
		s.registerModelsForAuth(a)
		if _, ok := s.coreManager.GetByID(a.ID); ok {
			s.coreManager.Update(ctx, a)
		} else {
			s.coreManager.Register(ctx, a)
		}
	}
	// Disable removed auths
	for _, stored := range s.coreManager.List() {
		if stored == nil {
			continue
		}
		if _, ok := seen[stored.ID]; ok {
			continue
		}
		stored.Disabled = true
		stored.Status = coreauth.StatusDisabled
		// Unregister from model registry when disabled
		GlobalModelRegistry().UnregisterClient(stored.ID)
		s.coreManager.Update(ctx, stored)
	}
}

// registerModelsForAuth (re)binds provider models in the global registry using the core auth ID as client identifier.
func (s *Service) registerModelsForAuth(a *coreauth.Auth) {
	if a == nil || a.ID == "" {
		return
	}
	// Unregister legacy client ID (if present) to avoid double counting
	if a.Runtime != nil {
		if idGetter, ok := a.Runtime.(interface{ GetClientID() string }); ok {
			if rid := idGetter.GetClientID(); rid != "" && rid != a.ID {
				GlobalModelRegistry().UnregisterClient(rid)
			}
		}
	}
	provider := strings.ToLower(a.Provider)
	var models []*ModelInfo
	switch provider {
	case "gemini":
		models = registry.GetGeminiModels()
	case "gemini-cli":
		models = registry.GetGeminiCLIModels()
	case "gemini-web":
		models = geminiwebclient.GetGeminiWebAliasedModels()
	case "claude":
		models = registry.GetClaudeModels()
	case "codex":
		models = registry.GetOpenAIModels()
	case "qwen":
		models = registry.GetQwenModels()
	default:
		// Handle OpenAI-compatibility providers by name using config
		if s.cfg != nil {
			// When provider is normalized to "openai-compatibility", read the original name from attributes.
			compatName := a.Provider
			if strings.EqualFold(compatName, "openai-compatibility") {
				if a.Attributes != nil && a.Attributes["compat_name"] != "" {
					compatName = a.Attributes["compat_name"]
				}
			}
			for i := range s.cfg.OpenAICompatibility {
				compat := &s.cfg.OpenAICompatibility[i]
				if strings.EqualFold(compat.Name, compatName) {
					// Convert compatibility models to registry models
					ms := make([]*ModelInfo, 0, len(compat.Models))
					for j := range compat.Models {
						m := compat.Models[j]
						ms = append(ms, &ModelInfo{
							ID:          m.Alias,
						Object:      "model",
						Created:     time.Now().Unix(),
						OwnedBy:     compat.Name,
						Type:        "openai-compatibility",
						DisplayName: m.Name,
					})
					}
					// Register and return
					if len(ms) > 0 {
						GlobalModelRegistry().RegisterClient(a.ID, a.Provider, ms)
					}
					return
				}
			}
		}
	}
	if len(models) > 0 {
		GlobalModelRegistry().RegisterClient(a.ID, a.Provider, models)
	}
}
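Run blocks until the context is cancelled or the HTTP server exits, and its deferred Shutdown handles cleanup, so embedding the service is mostly a matter of wiring OS signals into the context. A hedged sketch, assuming svc is a *Service obtained from the builder shown earlier:

package main

import (
	"context"
	"errors"
	"os"
	"os/signal"
	"syscall"

	"github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy"
	log "github.com/sirupsen/logrus"
)

// runService drives an already-built service until SIGINT/SIGTERM arrives.
// Run defers its own Shutdown, so no extra cleanup call is needed here.
func runService(svc *cliproxy.Service) {
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()

	if err := svc.Run(ctx); err != nil && !errors.Is(err, context.Canceled) {
		log.Fatalf("cliproxy service exited with error: %v", err)
	}
}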
82
sdk/cliproxy/types.go
Normal file
@@ -0,0 +1,82 @@
package cliproxy

import (
	"context"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)

// TokenClientProvider loads clients backed by stored authentication tokens.
type TokenClientProvider interface {
	Load(ctx context.Context, cfg *config.Config) (*TokenClientResult, error)
}

// TokenClientResult represents clients generated from persisted tokens.
type TokenClientResult struct {
	SuccessfulAuthed int
}

// APIKeyClientProvider loads clients backed directly by configured API keys.
type APIKeyClientProvider interface {
	Load(ctx context.Context, cfg *config.Config) (*APIKeyClientResult, error)
}

// APIKeyClientResult contains API key based clients along with type counts.
type APIKeyClientResult struct {
	GeminiKeyCount    int
	ClaudeKeyCount    int
	CodexKeyCount     int
	OpenAICompatCount int
}

// WatcherFactory creates a watcher for configuration and token changes.
// The reload callback now only receives the updated configuration.
type WatcherFactory func(configPath, authDir string, reload func(*config.Config)) (*WatcherWrapper, error)

// WatcherWrapper exposes the subset of watcher methods required by the SDK.
type WatcherWrapper struct {
	start func(ctx context.Context) error
	stop  func() error

	setConfig     func(cfg *config.Config)
	snapshotAuths func() []*coreauth.Auth
}

// Start proxies to the underlying watcher Start implementation.
func (w *WatcherWrapper) Start(ctx context.Context) error {
	if w == nil || w.start == nil {
		return nil
	}
	return w.start(ctx)
}

// Stop proxies to the underlying watcher Stop implementation.
func (w *WatcherWrapper) Stop() error {
	if w == nil || w.stop == nil {
		return nil
	}
	return w.stop()
}

// SetConfig updates the watcher configuration cache.
func (w *WatcherWrapper) SetConfig(cfg *config.Config) {
	if w == nil || w.setConfig == nil {
		return
	}
	w.setConfig(cfg)
}

// SetClients and SetAPIKeyClients were removed; the watcher manages its own caches.

// SnapshotClients was removed; use SnapshotAuths instead.

// SnapshotAuths returns the current auth entries derived from legacy clients.
func (w *WatcherWrapper) SnapshotAuths() []*coreauth.Auth {
	if w == nil || w.snapshotAuths == nil {
		return nil
	}
	return w.snapshotAuths()
}
29
sdk/cliproxy/watcher.go
Normal file
@@ -0,0 +1,29 @@
package cliproxy

import (
	"context"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/watcher"
	coreauth "github.com/router-for-me/CLIProxyAPI/v6/sdk/cliproxy/auth"
)

func defaultWatcherFactory(configPath, authDir string, reload func(*config.Config)) (*WatcherWrapper, error) {
	w, err := watcher.NewWatcher(configPath, authDir, reload)
	if err != nil {
		return nil, err
	}

	return &WatcherWrapper{
		start: func(ctx context.Context) error {
			return w.Start(ctx)
		},
		stop: func() error {
			return w.Stop()
		},
		setConfig: func(cfg *config.Config) {
			w.SetConfig(cfg)
		},
		snapshotAuths: func() []*coreauth.Auth { return w.SnapshotCoreAuths() },
	}, nil
}