Mirror of https://github.com/router-for-me/CLIProxyAPI.git, synced 2026-02-02 20:40:52 +08:00.
Refactor model management to include an optional `prefix` field for model credentials, enabling better namespace handling. Update affected configuration files, APIs, and handlers to support prefix normalization and routing. Remove unused OpenAI compatibility provider logic to simplify processing.
1408 lines · 45 KiB · Go
// Package config provides configuration management for the CLI Proxy API server.
// It handles loading and parsing YAML configuration files, and provides structured
// access to application settings including server port, authentication directory,
// debug settings, proxy configuration, and API keys.
package config

import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"strings"
	"syscall"

	"github.com/router-for-me/CLIProxyAPI/v6/sdk/config"
	"golang.org/x/crypto/bcrypt"
	"gopkg.in/yaml.v3"
)

const DefaultPanelGitHubRepository = "https://github.com/router-for-me/Cli-Proxy-API-Management-Center"

// Config represents the application's configuration, loaded from a YAML file.
type Config struct {
	config.SDKConfig `yaml:",inline"`
	// Host is the network host/interface on which the API server will bind.
	// Default is empty ("") to bind all interfaces (IPv4 + IPv6). Use "127.0.0.1" or "localhost" for local-only access.
	Host string `yaml:"host" json:"-"`
	// Port is the network port on which the API server will listen.
	Port int `yaml:"port" json:"-"`

	// TLS config controls HTTPS server settings.
	TLS TLSConfig `yaml:"tls" json:"tls"`

	// RemoteManagement nests management-related options under 'remote-management'.
	RemoteManagement RemoteManagement `yaml:"remote-management" json:"-"`

	// AuthDir is the directory where authentication token files are stored.
	AuthDir string `yaml:"auth-dir" json:"-"`

	// Debug enables or disables debug-level logging and other debug features.
	Debug bool `yaml:"debug" json:"debug"`

	// LoggingToFile controls whether application logs are written to rotating files or stdout.
	LoggingToFile bool `yaml:"logging-to-file" json:"logging-to-file"`

	// UsageStatisticsEnabled toggles in-memory usage aggregation; when false, usage data is discarded.
	UsageStatisticsEnabled bool `yaml:"usage-statistics-enabled" json:"usage-statistics-enabled"`

	// DisableCooling disables quota cooldown scheduling when true.
	DisableCooling bool `yaml:"disable-cooling" json:"disable-cooling"`

	// RequestRetry defines the number of retry attempts when a request fails.
	RequestRetry int `yaml:"request-retry" json:"request-retry"`
	// MaxRetryInterval defines the maximum wait time in seconds before retrying a cooled-down credential.
	MaxRetryInterval int `yaml:"max-retry-interval" json:"max-retry-interval"`

	// QuotaExceeded defines the behavior when a quota is exceeded.
	QuotaExceeded QuotaExceeded `yaml:"quota-exceeded" json:"quota-exceeded"`

	// WebsocketAuth enables or disables authentication for the WebSocket API.
	WebsocketAuth bool `yaml:"ws-auth" json:"ws-auth"`

	// GeminiKey defines Gemini API key configurations with optional routing overrides.
	GeminiKey []GeminiKey `yaml:"gemini-api-key" json:"gemini-api-key"`

	// CodexKey defines a list of Codex API key configurations as specified in the YAML configuration file.
	CodexKey []CodexKey `yaml:"codex-api-key" json:"codex-api-key"`

	// ClaudeKey defines a list of Claude API key configurations as specified in the YAML configuration file.
	ClaudeKey []ClaudeKey `yaml:"claude-api-key" json:"claude-api-key"`

	// OpenAICompatibility defines OpenAI API compatibility configurations for external providers.
	OpenAICompatibility []OpenAICompatibility `yaml:"openai-compatibility" json:"openai-compatibility"`

	// VertexCompatAPIKey defines Vertex AI-compatible API key configurations for third-party providers.
	// Used for services that use Vertex AI-style paths but with simple API key authentication.
	VertexCompatAPIKey []VertexCompatKey `yaml:"vertex-api-key" json:"vertex-api-key"`

	// AmpCode contains Amp CLI upstream configuration, management restrictions, and model mappings.
	AmpCode AmpCode `yaml:"ampcode" json:"ampcode"`

	// OAuthExcludedModels defines per-provider global model exclusions applied to OAuth/file-backed auth entries.
	OAuthExcludedModels map[string][]string `yaml:"oauth-excluded-models,omitempty" json:"oauth-excluded-models,omitempty"`

	// Payload defines default and override rules for provider payload parameters.
	Payload PayloadConfig `yaml:"payload" json:"payload"`

	legacyMigrationPending bool `yaml:"-" json:"-"`
}
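
// Example (illustrative, not exhaustive): a minimal config.yaml exercising the fields
// above, including the optional per-credential prefix. All values are placeholders.
//
//	host: ""
//	port: 8317
//	auth-dir: "~/.cli-proxy-api"
//	debug: false
//	request-retry: 3
//	gemini-api-key:
//	  - api-key: "AIza-example"
//	    prefix: teamA
//	    excluded-models:
//	      - gemini-1.0-pro
//	claude-api-key:
//	  - api-key: "sk-ant-example"
//	    base-url: "https://api.anthropic.com"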

// TLSConfig holds HTTPS server settings.
type TLSConfig struct {
	// Enable toggles HTTPS server mode.
	Enable bool `yaml:"enable" json:"enable"`
	// Cert is the path to the TLS certificate file.
	Cert string `yaml:"cert" json:"cert"`
	// Key is the path to the TLS private key file.
	Key string `yaml:"key" json:"key"`
}

// RemoteManagement holds management API configuration under 'remote-management'.
type RemoteManagement struct {
	// AllowRemote toggles remote (non-localhost) access to the management API.
	AllowRemote bool `yaml:"allow-remote"`
	// SecretKey is the management key (plaintext or bcrypt hashed). The YAML key is intentionally 'secret-key'.
	SecretKey string `yaml:"secret-key"`
	// DisableControlPanel skips serving and syncing the bundled management UI when true.
	DisableControlPanel bool `yaml:"disable-control-panel"`
	// PanelGitHubRepository overrides the GitHub repository used to fetch the management panel asset.
	// Accepts either a repository URL (https://github.com/org/repo) or an API releases endpoint.
	PanelGitHubRepository string `yaml:"panel-github-repository"`
}

// QuotaExceeded defines the behavior when API quota limits are exceeded.
// It provides configuration options for automatic failover mechanisms.
type QuotaExceeded struct {
	// SwitchProject indicates whether to automatically switch to another project when a quota is exceeded.
	SwitchProject bool `yaml:"switch-project" json:"switch-project"`

	// SwitchPreviewModel indicates whether to automatically switch to a preview model when a quota is exceeded.
	SwitchPreviewModel bool `yaml:"switch-preview-model" json:"switch-preview-model"`
}

// AmpModelMapping defines a model name mapping for Amp CLI requests.
// When Amp requests a model that isn't available locally, this mapping
// allows routing to an alternative model that IS available.
type AmpModelMapping struct {
	// From is the model name that Amp CLI requests (e.g., "claude-opus-4.5").
	From string `yaml:"from" json:"from"`

	// To is the target model name to route to (e.g., "claude-sonnet-4").
	// The target model must have available providers in the registry.
	To string `yaml:"to" json:"to"`
}

// AmpCode groups Amp CLI integration settings including upstream routing,
// optional overrides, management route restrictions, and model fallback mappings.
type AmpCode struct {
	// UpstreamURL defines the upstream Amp control plane used for non-provider calls.
	UpstreamURL string `yaml:"upstream-url" json:"upstream-url"`

	// UpstreamAPIKey optionally overrides the Authorization header when proxying Amp upstream calls.
	UpstreamAPIKey string `yaml:"upstream-api-key" json:"upstream-api-key"`

	// RestrictManagementToLocalhost restricts Amp management routes (/api/user, /api/threads, etc.)
	// to accept connections only from localhost (127.0.0.1, ::1). When true, it prevents drive-by
	// browser attacks and remote access to management endpoints. Default: false (API key auth is sufficient).
	RestrictManagementToLocalhost bool `yaml:"restrict-management-to-localhost" json:"restrict-management-to-localhost"`

	// ModelMappings defines model name mappings for Amp CLI requests.
	// When Amp requests a model that isn't available locally, these mappings
	// allow routing to an alternative model that IS available.
	ModelMappings []AmpModelMapping `yaml:"model-mappings" json:"model-mappings"`

	// ForceModelMappings, when true, makes model mappings take precedence over local API keys.
	// When false (default), local API keys are used first if available.
	ForceModelMappings bool `yaml:"force-model-mappings" json:"force-model-mappings"`
}
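
// Example (illustrative): routing Amp requests for a model that is not available
// locally to one that is. All values are placeholders.
//
//	ampcode:
//	  upstream-url: "https://amp.example.com"
//	  restrict-management-to-localhost: false
//	  force-model-mappings: false
//	  model-mappings:
//	    - from: "claude-opus-4.5"
//	      to: "claude-sonnet-4"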

// PayloadConfig defines default and override parameter rules applied to provider payloads.
type PayloadConfig struct {
	// Default defines rules that only set parameters when they are missing in the payload.
	Default []PayloadRule `yaml:"default" json:"default"`
	// Override defines rules that always set parameters, overwriting any existing values.
	Override []PayloadRule `yaml:"override" json:"override"`
}

// PayloadRule describes a single rule targeting a list of models with parameter updates.
type PayloadRule struct {
	// Models lists model entries with name pattern and protocol constraint.
	Models []PayloadModelRule `yaml:"models" json:"models"`
	// Params maps JSON paths (gjson/sjson syntax) to values written into the payload.
	Params map[string]any `yaml:"params" json:"params"`
}

// PayloadModelRule ties a model name pattern to a specific translator protocol.
type PayloadModelRule struct {
	// Name is the model name or wildcard pattern (e.g., "gpt-*", "*-5", "gemini-*-pro").
	Name string `yaml:"name" json:"name"`
	// Protocol restricts the rule to a specific translator format (e.g., "gemini", "responses").
	Protocol string `yaml:"protocol" json:"protocol"`
}
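
// Example (illustrative): apply a default parameter to all GPT models using the
// "responses" translator format, and force a parameter on Gemini Pro models. The
// parameter paths and values are placeholders written in gjson/sjson path syntax.
//
//	payload:
//	  default:
//	    - models:
//	        - name: "gpt-*"
//	          protocol: "responses"
//	      params:
//	        temperature: 1.0
//	  override:
//	    - models:
//	        - name: "gemini-*-pro"
//	          protocol: "gemini"
//	      params:
//	        generationConfig.thinkingConfig.thinkingBudget: 32768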

// ClaudeKey represents the configuration for a Claude API key,
// including the API key itself and an optional base URL for the API endpoint.
type ClaudeKey struct {
	// APIKey is the authentication key for accessing Claude API services.
	APIKey string `yaml:"api-key" json:"api-key"`

	// Prefix optionally namespaces models for this credential (e.g., "teamA/claude-sonnet-4").
	Prefix string `yaml:"prefix,omitempty" json:"prefix,omitempty"`

	// BaseURL is the base URL for the Claude API endpoint.
	// If empty, the default Claude API URL will be used.
	BaseURL string `yaml:"base-url" json:"base-url"`

	// ProxyURL overrides the global proxy setting for this API key if provided.
	ProxyURL string `yaml:"proxy-url" json:"proxy-url"`

	// Models defines upstream model names and aliases for request routing.
	Models []ClaudeModel `yaml:"models" json:"models"`

	// Headers optionally adds extra HTTP headers for requests sent with this key.
	Headers map[string]string `yaml:"headers,omitempty" json:"headers,omitempty"`

	// ExcludedModels lists model IDs that should be excluded for this provider.
	ExcludedModels []string `yaml:"excluded-models,omitempty" json:"excluded-models,omitempty"`
}

// ClaudeModel describes a mapping between an alias and the actual upstream model name.
type ClaudeModel struct {
	// Name is the upstream model identifier used when issuing requests.
	Name string `yaml:"name" json:"name"`

	// Alias is the client-facing model name that maps to Name.
	Alias string `yaml:"alias" json:"alias"`
}

// CodexKey represents the configuration for a Codex API key,
// including the API key itself and an optional base URL for the API endpoint.
type CodexKey struct {
	// APIKey is the authentication key for accessing Codex API services.
	APIKey string `yaml:"api-key" json:"api-key"`

	// Prefix optionally namespaces models for this credential (e.g., "teamA/gpt-5-codex").
	Prefix string `yaml:"prefix,omitempty" json:"prefix,omitempty"`

	// BaseURL is the base URL for the Codex API endpoint.
	// If empty, the default Codex API URL will be used.
	BaseURL string `yaml:"base-url" json:"base-url"`

	// ProxyURL overrides the global proxy setting for this API key if provided.
	ProxyURL string `yaml:"proxy-url" json:"proxy-url"`

	// Headers optionally adds extra HTTP headers for requests sent with this key.
	Headers map[string]string `yaml:"headers,omitempty" json:"headers,omitempty"`

	// ExcludedModels lists model IDs that should be excluded for this provider.
	ExcludedModels []string `yaml:"excluded-models,omitempty" json:"excluded-models,omitempty"`
}

// GeminiKey represents the configuration for a Gemini API key,
// including optional overrides for upstream base URL, proxy routing, and headers.
type GeminiKey struct {
	// APIKey is the authentication key for accessing Gemini API services.
	APIKey string `yaml:"api-key" json:"api-key"`

	// Prefix optionally namespaces models for this credential (e.g., "teamA/gemini-3-pro-preview").
	Prefix string `yaml:"prefix,omitempty" json:"prefix,omitempty"`

	// BaseURL optionally overrides the Gemini API endpoint.
	BaseURL string `yaml:"base-url,omitempty" json:"base-url,omitempty"`

	// ProxyURL optionally overrides the global proxy for this API key.
	ProxyURL string `yaml:"proxy-url,omitempty" json:"proxy-url,omitempty"`

	// Headers optionally adds extra HTTP headers for requests sent with this key.
	Headers map[string]string `yaml:"headers,omitempty" json:"headers,omitempty"`

	// ExcludedModels lists model IDs that should be excluded for this provider.
	ExcludedModels []string `yaml:"excluded-models,omitempty" json:"excluded-models,omitempty"`
}

// OpenAICompatibility represents the configuration for OpenAI API compatibility
// with external providers, allowing model aliases to be routed through the OpenAI API format.
type OpenAICompatibility struct {
	// Name is the identifier for this OpenAI compatibility configuration.
	Name string `yaml:"name" json:"name"`

	// Prefix optionally namespaces model aliases for this provider (e.g., "teamA/kimi-k2").
	Prefix string `yaml:"prefix,omitempty" json:"prefix,omitempty"`

	// BaseURL is the base URL for the external OpenAI-compatible API endpoint.
	BaseURL string `yaml:"base-url" json:"base-url"`

	// APIKeyEntries defines API keys with optional per-key proxy configuration.
	APIKeyEntries []OpenAICompatibilityAPIKey `yaml:"api-key-entries,omitempty" json:"api-key-entries,omitempty"`

	// Models defines the model configurations including aliases for routing.
	Models []OpenAICompatibilityModel `yaml:"models" json:"models"`

	// Headers optionally adds extra HTTP headers for requests sent to this provider.
	Headers map[string]string `yaml:"headers,omitempty" json:"headers,omitempty"`
}

// OpenAICompatibilityAPIKey represents an API key configuration with an optional proxy setting.
type OpenAICompatibilityAPIKey struct {
	// APIKey is the authentication key for accessing the external API services.
	APIKey string `yaml:"api-key" json:"api-key"`

	// ProxyURL overrides the global proxy setting for this API key if provided.
	ProxyURL string `yaml:"proxy-url,omitempty" json:"proxy-url,omitempty"`
}

// OpenAICompatibilityModel represents a model configuration for OpenAI compatibility,
// including the actual model name and its alias for API routing.
type OpenAICompatibilityModel struct {
	// Name is the actual model name used by the external provider.
	Name string `yaml:"name" json:"name"`

	// Alias is the model name alias that clients will use to reference this model.
	Alias string `yaml:"alias" json:"alias"`
}
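
// Example (illustrative): exposing an external OpenAI-compatible provider under an
// optional prefix, with client-facing aliases mapped to upstream model names. The
// provider name, key, and URLs are placeholders.
//
//	openai-compatibility:
//	  - name: "example-provider"
//	    prefix: teamA
//	    base-url: "https://api.example.com/v1"
//	    api-key-entries:
//	      - api-key: "sk-example"
//	        proxy-url: "socks5://127.0.0.1:1080"
//	    models:
//	      - name: "kimi-k2"
//	        alias: "kimi"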

// LoadConfig reads a YAML configuration file from the given path,
// unmarshals it into a Config struct, applies environment variable overrides,
// and returns it.
//
// Parameters:
//   - configFile: The path to the YAML configuration file
//
// Returns:
//   - *Config: The loaded configuration
//   - error: An error if the configuration could not be loaded
func LoadConfig(configFile string) (*Config, error) {
	return LoadConfigOptional(configFile, false)
}

// LoadConfigOptional reads YAML from configFile.
// If optional is true and the file is missing, it returns an empty Config.
// If optional is true and the file is empty or invalid, it also returns an empty Config.
func LoadConfigOptional(configFile string, optional bool) (*Config, error) {
	// Read the entire configuration file into memory.
	data, err := os.ReadFile(configFile)
	if err != nil {
		if optional {
			if os.IsNotExist(err) || errors.Is(err, syscall.EISDIR) {
				// Missing and optional: return empty config (cloud deploy standby).
				return &Config{}, nil
			}
		}
		return nil, fmt.Errorf("failed to read config file: %w", err)
	}

	// In cloud deploy mode (optional=true), if the file is empty, return an empty config.
	if optional && len(data) == 0 {
		return &Config{}, nil
	}

	// Unmarshal the YAML data into the Config struct.
	var cfg Config
	// Set defaults before unmarshal so that absent keys keep defaults.
	cfg.Host = "" // Default empty: binds to all interfaces (IPv4 + IPv6)
	cfg.LoggingToFile = false
	cfg.UsageStatisticsEnabled = false
	cfg.DisableCooling = false
	cfg.AmpCode.RestrictManagementToLocalhost = false // Default to false: API key auth is sufficient
	cfg.RemoteManagement.PanelGitHubRepository = DefaultPanelGitHubRepository
	if err = yaml.Unmarshal(data, &cfg); err != nil {
		if optional {
			// In cloud deploy mode, if YAML parsing fails, return an empty config instead of an error.
			return &Config{}, nil
		}
		return nil, fmt.Errorf("failed to parse config file: %w", err)
	}

	var legacy legacyConfigData
	if errLegacy := yaml.Unmarshal(data, &legacy); errLegacy == nil {
		if cfg.migrateLegacyGeminiKeys(legacy.LegacyGeminiKeys) {
			cfg.legacyMigrationPending = true
		}
		if cfg.migrateLegacyOpenAICompatibilityKeys(legacy.OpenAICompat) {
			cfg.legacyMigrationPending = true
		}
		if cfg.migrateLegacyAmpConfig(&legacy) {
			cfg.legacyMigrationPending = true
		}
	}

	// Hash the remote management key if plaintext is detected (nested).
	// A value is considered already hashed if it looks like a bcrypt hash ($2a$, $2b$, or $2y$ prefix).
	if cfg.RemoteManagement.SecretKey != "" && !looksLikeBcrypt(cfg.RemoteManagement.SecretKey) {
		hashed, errHash := hashSecret(cfg.RemoteManagement.SecretKey)
		if errHash != nil {
			return nil, fmt.Errorf("failed to hash remote management key: %w", errHash)
		}
		cfg.RemoteManagement.SecretKey = hashed

		// Persist the hashed value back to the config file to avoid re-hashing on next startup.
		// Preserve YAML comments and ordering; update only the nested key.
		_ = SaveConfigPreserveCommentsUpdateNestedScalar(configFile, []string{"remote-management", "secret-key"}, hashed)
	}

	cfg.RemoteManagement.PanelGitHubRepository = strings.TrimSpace(cfg.RemoteManagement.PanelGitHubRepository)
	if cfg.RemoteManagement.PanelGitHubRepository == "" {
		cfg.RemoteManagement.PanelGitHubRepository = DefaultPanelGitHubRepository
	}

	// Sync request authentication providers with inline API keys for backwards compatibility.
	syncInlineAccessProvider(&cfg)

	// Sanitize Gemini API key configuration and migrate legacy entries.
	cfg.SanitizeGeminiKeys()

	// Sanitize Vertex-compatible API keys: drop entries without a base-url.
	cfg.SanitizeVertexCompatKeys()

	// Sanitize Codex keys: drop entries without a base-url.
	cfg.SanitizeCodexKeys()

	// Sanitize Claude key headers.
	cfg.SanitizeClaudeKeys()

	// Sanitize OpenAI compatibility providers: drop entries without a base-url.
	cfg.SanitizeOpenAICompatibility()

	// Normalize the OAuth provider model exclusion map.
	cfg.OAuthExcludedModels = NormalizeOAuthExcludedModels(cfg.OAuthExcludedModels)

	if cfg.legacyMigrationPending {
		fmt.Println("Detected legacy configuration keys, attempting to persist the normalized config...")
		if !optional && configFile != "" {
			if err := SaveConfigPreserveComments(configFile, &cfg); err != nil {
				return nil, fmt.Errorf("failed to persist migrated legacy config: %w", err)
			}
			fmt.Println("Legacy configuration normalized and persisted.")
		} else {
			fmt.Println("Legacy configuration normalized in memory; persistence skipped.")
		}
	}

	// Return the populated configuration struct.
	return &cfg, nil
}
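
// Usage sketch (illustrative): loading the configuration at startup. The caller's
// package, file path, and error handling are assumptions, not part of this file.
//
//	cfg, err := config.LoadConfig("config.yaml")
//	if err != nil {
//		log.Fatalf("load config: %v", err)
//	}
//	fmt.Printf("listening on %s:%d\n", cfg.Host, cfg.Port)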

// SanitizeOpenAICompatibility removes OpenAI-compatibility provider entries that are
// not actionable, specifically those missing a BaseURL. It trims whitespace before
// evaluation and preserves the relative order of remaining entries.
func (cfg *Config) SanitizeOpenAICompatibility() {
	if cfg == nil || len(cfg.OpenAICompatibility) == 0 {
		return
	}
	out := make([]OpenAICompatibility, 0, len(cfg.OpenAICompatibility))
	for i := range cfg.OpenAICompatibility {
		e := cfg.OpenAICompatibility[i]
		e.Name = strings.TrimSpace(e.Name)
		e.Prefix = normalizeModelPrefix(e.Prefix)
		e.BaseURL = strings.TrimSpace(e.BaseURL)
		e.Headers = NormalizeHeaders(e.Headers)
		if e.BaseURL == "" {
			// Skip providers with no base-url; treated as removed.
			continue
		}
		out = append(out, e)
	}
	cfg.OpenAICompatibility = out
}

// SanitizeCodexKeys removes Codex API key entries missing a BaseURL.
// It trims whitespace and preserves order for remaining entries.
func (cfg *Config) SanitizeCodexKeys() {
	if cfg == nil || len(cfg.CodexKey) == 0 {
		return
	}
	out := make([]CodexKey, 0, len(cfg.CodexKey))
	for i := range cfg.CodexKey {
		e := cfg.CodexKey[i]
		e.Prefix = normalizeModelPrefix(e.Prefix)
		e.BaseURL = strings.TrimSpace(e.BaseURL)
		e.Headers = NormalizeHeaders(e.Headers)
		e.ExcludedModels = NormalizeExcludedModels(e.ExcludedModels)
		if e.BaseURL == "" {
			continue
		}
		out = append(out, e)
	}
	cfg.CodexKey = out
}

// SanitizeClaudeKeys normalizes headers for Claude credentials.
func (cfg *Config) SanitizeClaudeKeys() {
	if cfg == nil || len(cfg.ClaudeKey) == 0 {
		return
	}
	for i := range cfg.ClaudeKey {
		entry := &cfg.ClaudeKey[i]
		entry.Prefix = normalizeModelPrefix(entry.Prefix)
		entry.Headers = NormalizeHeaders(entry.Headers)
		entry.ExcludedModels = NormalizeExcludedModels(entry.ExcludedModels)
	}
}

// SanitizeGeminiKeys deduplicates and normalizes Gemini credentials.
func (cfg *Config) SanitizeGeminiKeys() {
	if cfg == nil {
		return
	}

	seen := make(map[string]struct{}, len(cfg.GeminiKey))
	out := cfg.GeminiKey[:0]
	for i := range cfg.GeminiKey {
		entry := cfg.GeminiKey[i]
		entry.APIKey = strings.TrimSpace(entry.APIKey)
		if entry.APIKey == "" {
			continue
		}
		entry.Prefix = normalizeModelPrefix(entry.Prefix)
		entry.BaseURL = strings.TrimSpace(entry.BaseURL)
		entry.ProxyURL = strings.TrimSpace(entry.ProxyURL)
		entry.Headers = NormalizeHeaders(entry.Headers)
		entry.ExcludedModels = NormalizeExcludedModels(entry.ExcludedModels)
		if _, exists := seen[entry.APIKey]; exists {
			continue
		}
		seen[entry.APIKey] = struct{}{}
		out = append(out, entry)
	}
	cfg.GeminiKey = out
}

// normalizeModelPrefix trims whitespace and surrounding slashes from a configured
// prefix and rejects values that still contain a slash, returning "" in that case.
func normalizeModelPrefix(prefix string) string {
	trimmed := strings.TrimSpace(prefix)
	trimmed = strings.Trim(trimmed, "/")
	if trimmed == "" {
		return ""
	}
	if strings.Contains(trimmed, "/") {
		return ""
	}
	return trimmed
}
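
// Behavior sketch (values illustrative):
//
//	normalizeModelPrefix("  teamA/ ") // "teamA"  (whitespace and trailing slash trimmed)
//	normalizeModelPrefix("/teamA")    // "teamA"
//	normalizeModelPrefix("a/b")       // ""       (embedded slashes are rejected)
//	normalizeModelPrefix("   ")       // ""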

// syncInlineAccessProvider backfills the inline APIKeys list from the config-backed
// access provider when no inline keys are set, then clears Access.Providers so the
// inline keys remain the single source of truth.
func syncInlineAccessProvider(cfg *Config) {
	if cfg == nil {
		return
	}
	if len(cfg.APIKeys) == 0 {
		if provider := cfg.ConfigAPIKeyProvider(); provider != nil && len(provider.APIKeys) > 0 {
			cfg.APIKeys = append([]string(nil), provider.APIKeys...)
		}
	}
	cfg.Access.Providers = nil
}

// looksLikeBcrypt returns true if the provided string appears to be a bcrypt hash.
func looksLikeBcrypt(s string) bool {
	return len(s) > 4 && (s[:4] == "$2a$" || s[:4] == "$2b$" || s[:4] == "$2y$")
}

// NormalizeHeaders trims header keys and values and removes empty pairs.
func NormalizeHeaders(headers map[string]string) map[string]string {
	if len(headers) == 0 {
		return nil
	}
	clean := make(map[string]string, len(headers))
	for k, v := range headers {
		key := strings.TrimSpace(k)
		val := strings.TrimSpace(v)
		if key == "" || val == "" {
			continue
		}
		clean[key] = val
	}
	if len(clean) == 0 {
		return nil
	}
	return clean
}

// NormalizeExcludedModels trims, lowercases, and deduplicates model exclusion patterns.
// It preserves the order of first occurrences and drops empty entries.
func NormalizeExcludedModels(models []string) []string {
	if len(models) == 0 {
		return nil
	}
	seen := make(map[string]struct{}, len(models))
	out := make([]string, 0, len(models))
	for _, raw := range models {
		trimmed := strings.ToLower(strings.TrimSpace(raw))
		if trimmed == "" {
			continue
		}
		if _, exists := seen[trimmed]; exists {
			continue
		}
		seen[trimmed] = struct{}{}
		out = append(out, trimmed)
	}
	if len(out) == 0 {
		return nil
	}
	return out
}
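
// Behavior sketch (values illustrative):
//
//	NormalizeExcludedModels([]string{" Gemini-1.0-Pro ", "gemini-1.0-pro", "", "GPT-*"})
//	// -> []string{"gemini-1.0-pro", "gpt-*"}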

// NormalizeOAuthExcludedModels cleans provider -> excluded models mappings by normalizing provider keys
// and applying model exclusion normalization to each entry.
func NormalizeOAuthExcludedModels(entries map[string][]string) map[string][]string {
	if len(entries) == 0 {
		return nil
	}
	out := make(map[string][]string, len(entries))
	for provider, models := range entries {
		key := strings.ToLower(strings.TrimSpace(provider))
		if key == "" {
			continue
		}
		normalized := NormalizeExcludedModels(models)
		if len(normalized) == 0 {
			continue
		}
		out[key] = normalized
	}
	if len(out) == 0 {
		return nil
	}
	return out
}

// hashSecret hashes the given secret using bcrypt.
func hashSecret(secret string) (string, error) {
	// Use the default cost for simplicity.
	hashedBytes, err := bcrypt.GenerateFromPassword([]byte(secret), bcrypt.DefaultCost)
	if err != nil {
		return "", err
	}
	return string(hashedBytes), nil
}

// SaveConfigPreserveComments writes the config back to YAML while preserving existing comments
// and key ordering by loading the original file into a yaml.Node tree and updating values in-place.
func SaveConfigPreserveComments(configFile string, cfg *Config) error {
	persistCfg := sanitizeConfigForPersist(cfg)
	// Load the original YAML as a node tree to preserve comments and ordering.
	data, err := os.ReadFile(configFile)
	if err != nil {
		return err
	}

	var original yaml.Node
	if err = yaml.Unmarshal(data, &original); err != nil {
		return err
	}
	if original.Kind != yaml.DocumentNode || len(original.Content) == 0 {
		return fmt.Errorf("invalid yaml document structure")
	}
	if original.Content[0] == nil || original.Content[0].Kind != yaml.MappingNode {
		return fmt.Errorf("expected root mapping node")
	}

	// Marshal the current cfg to YAML, then unmarshal into a yaml.Node we can merge from.
	rendered, err := yaml.Marshal(persistCfg)
	if err != nil {
		return err
	}
	var generated yaml.Node
	if err = yaml.Unmarshal(rendered, &generated); err != nil {
		return err
	}
	if generated.Kind != yaml.DocumentNode || len(generated.Content) == 0 || generated.Content[0] == nil {
		return fmt.Errorf("invalid generated yaml structure")
	}
	if generated.Content[0].Kind != yaml.MappingNode {
		return fmt.Errorf("expected generated root mapping node")
	}

	// Remove deprecated sections before merging back the sanitized config.
	removeLegacyAuthBlock(original.Content[0])
	removeLegacyOpenAICompatAPIKeys(original.Content[0])
	removeLegacyAmpKeys(original.Content[0])
	removeLegacyGenerativeLanguageKeys(original.Content[0])

	pruneMappingToGeneratedKeys(original.Content[0], generated.Content[0], "oauth-excluded-models")

	// Merge generated into original in-place, preserving comments/order of existing nodes.
	mergeMappingPreserve(original.Content[0], generated.Content[0])
	normalizeCollectionNodeStyles(original.Content[0])

	// Write back.
	f, err := os.Create(configFile)
	if err != nil {
		return err
	}
	defer func() { _ = f.Close() }()
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	enc.SetIndent(2)
	if err = enc.Encode(&original); err != nil {
		_ = enc.Close()
		return err
	}
	if err = enc.Close(); err != nil {
		return err
	}
	data = NormalizeCommentIndentation(buf.Bytes())
	_, err = f.Write(data)
	return err
}

// sanitizeConfigForPersist returns a copy of cfg with runtime-only access state cleared
// so it is not written back to disk.
func sanitizeConfigForPersist(cfg *Config) *Config {
	if cfg == nil {
		return nil
	}
	clone := *cfg
	clone.SDKConfig = cfg.SDKConfig
	clone.SDKConfig.Access = config.AccessConfig{}
	return &clone
}

// SaveConfigPreserveCommentsUpdateNestedScalar updates a nested scalar key path like ["a","b"]
// while preserving comments and positions.
func SaveConfigPreserveCommentsUpdateNestedScalar(configFile string, path []string, value string) error {
	data, err := os.ReadFile(configFile)
	if err != nil {
		return err
	}
	var root yaml.Node
	if err = yaml.Unmarshal(data, &root); err != nil {
		return err
	}
	if root.Kind != yaml.DocumentNode || len(root.Content) == 0 {
		return fmt.Errorf("invalid yaml document structure")
	}
	node := root.Content[0]
	// Descend mapping nodes following the path.
	for i, key := range path {
		if i == len(path)-1 {
			// Set the final scalar.
			v := getOrCreateMapValue(node, key)
			v.Kind = yaml.ScalarNode
			v.Tag = "!!str"
			v.Value = value
		} else {
			next := getOrCreateMapValue(node, key)
			if next.Kind != yaml.MappingNode {
				next.Kind = yaml.MappingNode
				next.Tag = "!!map"
			}
			node = next
		}
	}
	f, err := os.Create(configFile)
	if err != nil {
		return err
	}
	defer func() { _ = f.Close() }()
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	enc.SetIndent(2)
	if err = enc.Encode(&root); err != nil {
		_ = enc.Close()
		return err
	}
	if err = enc.Close(); err != nil {
		return err
	}
	data = NormalizeCommentIndentation(buf.Bytes())
	_, err = f.Write(data)
	return err
}

// NormalizeCommentIndentation removes indentation from standalone YAML comment lines to keep them left aligned.
func NormalizeCommentIndentation(data []byte) []byte {
	lines := bytes.Split(data, []byte("\n"))
	changed := false
	for i, line := range lines {
		trimmed := bytes.TrimLeft(line, " \t")
		if len(trimmed) == 0 || trimmed[0] != '#' {
			continue
		}
		if len(trimmed) == len(line) {
			continue
		}
		lines[i] = append([]byte(nil), trimmed...)
		changed = true
	}
	if !changed {
		return data
	}
	return bytes.Join(lines, []byte("\n"))
}
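
// Behavior sketch (illustrative): full-line comments lose their leading indentation,
// while inline comments that follow a value are left untouched.
//
//	"    # nested comment"  ->  "# nested comment"
//	"  child: 1  # inline"  ->  unchanged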

// getOrCreateMapValue finds the value node for a given key in a mapping node.
// If not found, it appends a new key/value pair and returns the new value node.
func getOrCreateMapValue(mapNode *yaml.Node, key string) *yaml.Node {
	if mapNode.Kind != yaml.MappingNode {
		mapNode.Kind = yaml.MappingNode
		mapNode.Tag = "!!map"
		mapNode.Content = nil
	}
	for i := 0; i+1 < len(mapNode.Content); i += 2 {
		k := mapNode.Content[i]
		if k.Value == key {
			return mapNode.Content[i+1]
		}
	}
	// Append a new key/value pair.
	mapNode.Content = append(mapNode.Content, &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!str", Value: key})
	val := &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!str", Value: ""}
	mapNode.Content = append(mapNode.Content, val)
	return val
}

// mergeMappingPreserve merges keys from src into the dst mapping node while preserving
// key order and comments of existing keys in dst. Unknown keys from src are appended
// to dst at the end, copying their node structure from src.
func mergeMappingPreserve(dst, src *yaml.Node) {
	if dst == nil || src == nil {
		return
	}
	if dst.Kind != yaml.MappingNode || src.Kind != yaml.MappingNode {
		// If kinds do not match, prefer replacing dst with src semantics in-place
		// but keep the dst node object to preserve any attached comments at the parent level.
		copyNodeShallow(dst, src)
		return
	}
	// Walk src keys and merge each one into dst.
	for i := 0; i+1 < len(src.Content); i += 2 {
		sk := src.Content[i]
		sv := src.Content[i+1]
		idx := findMapKeyIndex(dst, sk.Value)
		if idx >= 0 {
			// Merge into the existing value node.
			dv := dst.Content[idx+1]
			mergeNodePreserve(dv, sv)
		} else {
			if shouldSkipEmptyCollectionOnPersist(sk.Value, sv) {
				continue
			}
			// Append a new key/value pair by deep-copying from src.
			dst.Content = append(dst.Content, deepCopyNode(sk), deepCopyNode(sv))
		}
	}
}
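
// Merge sketch (illustrative): given an on-disk document
//
//	# listen port
//	port: 8317
//
// and a generated config containing port: 8080 and debug: true, the merged result keeps
// the comment and key order, updates the existing value, and appends the new key:
//
//	# listen port
//	port: 8080
//	debug: true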

// mergeNodePreserve merges src into dst for scalars, mappings and sequences while
// reusing destination nodes to keep comments and anchors. For sequences, it updates
// in-place by index.
func mergeNodePreserve(dst, src *yaml.Node) {
	if dst == nil || src == nil {
		return
	}
	switch src.Kind {
	case yaml.MappingNode:
		if dst.Kind != yaml.MappingNode {
			copyNodeShallow(dst, src)
		}
		mergeMappingPreserve(dst, src)
	case yaml.SequenceNode:
		// Preserve explicit null style if dst was null and src is an empty sequence.
		if dst.Kind == yaml.ScalarNode && dst.Tag == "!!null" && len(src.Content) == 0 {
			// Keep as null to preserve the original style.
			return
		}
		if dst.Kind != yaml.SequenceNode {
			dst.Kind = yaml.SequenceNode
			dst.Tag = "!!seq"
			dst.Content = nil
		}
		reorderSequenceForMerge(dst, src)
		// Update elements in place.
		minContent := len(dst.Content)
		if len(src.Content) < minContent {
			minContent = len(src.Content)
		}
		for i := 0; i < minContent; i++ {
			if dst.Content[i] == nil {
				dst.Content[i] = deepCopyNode(src.Content[i])
				continue
			}
			mergeNodePreserve(dst.Content[i], src.Content[i])
			if dst.Content[i] != nil && src.Content[i] != nil &&
				dst.Content[i].Kind == yaml.MappingNode && src.Content[i].Kind == yaml.MappingNode {
				pruneMissingMapKeys(dst.Content[i], src.Content[i])
			}
		}
		// Append any extra items from src.
		for i := len(dst.Content); i < len(src.Content); i++ {
			dst.Content = append(dst.Content, deepCopyNode(src.Content[i]))
		}
		// Truncate if dst has extra items not in src.
		if len(src.Content) < len(dst.Content) {
			dst.Content = dst.Content[:len(src.Content)]
		}
	case yaml.ScalarNode, yaml.AliasNode:
		// For scalars, update Tag and Value but keep Style from dst to preserve quoting.
		dst.Kind = src.Kind
		dst.Tag = src.Tag
		dst.Value = src.Value
		// Keep dst.Style as-is intentionally.
	case 0:
		// Unknown/empty kind; do nothing.
	default:
		// Fallback: replace shallowly.
		copyNodeShallow(dst, src)
	}
}

// findMapKeyIndex returns the index of the key node in a mapping (index of key, not value).
// Returns -1 when not found.
func findMapKeyIndex(mapNode *yaml.Node, key string) int {
	if mapNode == nil || mapNode.Kind != yaml.MappingNode {
		return -1
	}
	for i := 0; i+1 < len(mapNode.Content); i += 2 {
		if mapNode.Content[i] != nil && mapNode.Content[i].Value == key {
			return i
		}
	}
	return -1
}

// shouldSkipEmptyCollectionOnPersist reports whether an empty credential list should be
// omitted when persisting, so empty provider sections are not added to the file.
func shouldSkipEmptyCollectionOnPersist(key string, node *yaml.Node) bool {
	switch key {
	case "generative-language-api-key",
		"gemini-api-key",
		"vertex-api-key",
		"claude-api-key",
		"codex-api-key",
		"openai-compatibility":
		return isEmptyCollectionNode(node)
	default:
		return false
	}
}

// isEmptyCollectionNode reports whether node is nil, an empty sequence, or an explicit null scalar.
func isEmptyCollectionNode(node *yaml.Node) bool {
	if node == nil {
		return true
	}
	switch node.Kind {
	case yaml.SequenceNode:
		return len(node.Content) == 0
	case yaml.ScalarNode:
		return node.Tag == "!!null"
	default:
		return false
	}
}

// deepCopyNode creates a deep copy of a yaml.Node graph.
func deepCopyNode(n *yaml.Node) *yaml.Node {
	if n == nil {
		return nil
	}
	cp := *n
	if len(n.Content) > 0 {
		cp.Content = make([]*yaml.Node, len(n.Content))
		for i := range n.Content {
			cp.Content[i] = deepCopyNode(n.Content[i])
		}
	}
	return &cp
}

// copyNodeShallow copies type/tag/value and resets content to match src, but
// keeps the same destination node pointer to preserve parent relations/comments.
func copyNodeShallow(dst, src *yaml.Node) {
	if dst == nil || src == nil {
		return
	}
	dst.Kind = src.Kind
	dst.Tag = src.Tag
	dst.Value = src.Value
	// Replace content with a deep copy from src.
	if len(src.Content) > 0 {
		dst.Content = make([]*yaml.Node, len(src.Content))
		for i := range src.Content {
			dst.Content[i] = deepCopyNode(src.Content[i])
		}
	} else {
		dst.Content = nil
	}
}

// reorderSequenceForMerge aligns dst sequence elements with src by identity so that
// in-place merging pairs each generated entry with its original node (and comments).
func reorderSequenceForMerge(dst, src *yaml.Node) {
	if dst == nil || src == nil {
		return
	}
	if len(dst.Content) == 0 {
		return
	}
	if len(src.Content) == 0 {
		return
	}
	original := append([]*yaml.Node(nil), dst.Content...)
	used := make([]bool, len(original))
	ordered := make([]*yaml.Node, len(src.Content))
	for i := range src.Content {
		if idx := matchSequenceElement(original, used, src.Content[i]); idx >= 0 {
			ordered[i] = original[idx]
			used[idx] = true
		}
	}
	dst.Content = ordered
}

// matchSequenceElement finds the unused element of original that corresponds to target,
// matching mappings by identity key, scalars by trimmed value, and anything else by
// structural equality. It returns -1 when no match is found.
func matchSequenceElement(original []*yaml.Node, used []bool, target *yaml.Node) int {
	if target == nil {
		return -1
	}
	switch target.Kind {
	case yaml.MappingNode:
		id := sequenceElementIdentity(target)
		if id != "" {
			for i := range original {
				if used[i] || original[i] == nil || original[i].Kind != yaml.MappingNode {
					continue
				}
				if sequenceElementIdentity(original[i]) == id {
					return i
				}
			}
		}
	case yaml.ScalarNode:
		val := strings.TrimSpace(target.Value)
		if val != "" {
			for i := range original {
				if used[i] || original[i] == nil || original[i].Kind != yaml.ScalarNode {
					continue
				}
				if strings.TrimSpace(original[i].Value) == val {
					return i
				}
			}
		}
	default:
	}
	// Fall back to structural equality to preserve nodes lacking explicit identifiers.
	for i := range original {
		if used[i] || original[i] == nil {
			continue
		}
		if nodesStructurallyEqual(original[i], target) {
			return i
		}
	}
	return -1
}

// sequenceElementIdentity derives a stable identity string for a mapping element,
// preferring well-known identifier keys and otherwise falling back to the first
// non-empty scalar value in the mapping.
func sequenceElementIdentity(node *yaml.Node) string {
	if node == nil || node.Kind != yaml.MappingNode {
		return ""
	}
	identityKeys := []string{"id", "name", "alias", "api-key", "api_key", "apikey", "key", "provider", "model"}
	for _, k := range identityKeys {
		if v := mappingScalarValue(node, k); v != "" {
			return k + "=" + v
		}
	}
	for i := 0; i+1 < len(node.Content); i += 2 {
		keyNode := node.Content[i]
		valNode := node.Content[i+1]
		if keyNode == nil || valNode == nil || valNode.Kind != yaml.ScalarNode {
			continue
		}
		val := strings.TrimSpace(valNode.Value)
		if val != "" {
			return strings.ToLower(strings.TrimSpace(keyNode.Value)) + "=" + val
		}
	}
	return ""
}
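
// Identity sketch (illustrative): for a sequence element such as
//
//	- name: "example-provider"
//	  base-url: "https://api.example.com/v1"
//
// the identity is "name=example-provider", so editing other fields or reordering the
// generated list still pairs the element with its original node and comments.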

// mappingScalarValue returns the trimmed scalar value stored under key (case-insensitive)
// in a mapping node, or "" when the key is absent or its value is not a scalar.
func mappingScalarValue(node *yaml.Node, key string) string {
	if node == nil || node.Kind != yaml.MappingNode {
		return ""
	}
	lowerKey := strings.ToLower(key)
	for i := 0; i+1 < len(node.Content); i += 2 {
		keyNode := node.Content[i]
		valNode := node.Content[i+1]
		if keyNode == nil || valNode == nil || valNode.Kind != yaml.ScalarNode {
			continue
		}
		if strings.ToLower(strings.TrimSpace(keyNode.Value)) == lowerKey {
			return strings.TrimSpace(valNode.Value)
		}
	}
	return ""
}

// nodesStructurallyEqual reports whether two yaml.Node trees have the same shape and
// (whitespace-trimmed) scalar values, ignoring styles and comments.
func nodesStructurallyEqual(a, b *yaml.Node) bool {
	if a == nil || b == nil {
		return a == b
	}
	if a.Kind != b.Kind {
		return false
	}
	switch a.Kind {
	case yaml.MappingNode:
		if len(a.Content) != len(b.Content) {
			return false
		}
		for i := 0; i+1 < len(a.Content); i += 2 {
			if !nodesStructurallyEqual(a.Content[i], b.Content[i]) {
				return false
			}
			if !nodesStructurallyEqual(a.Content[i+1], b.Content[i+1]) {
				return false
			}
		}
		return true
	case yaml.SequenceNode:
		if len(a.Content) != len(b.Content) {
			return false
		}
		for i := range a.Content {
			if !nodesStructurallyEqual(a.Content[i], b.Content[i]) {
				return false
			}
		}
		return true
	case yaml.ScalarNode:
		return strings.TrimSpace(a.Value) == strings.TrimSpace(b.Value)
	case yaml.AliasNode:
		return nodesStructurallyEqual(a.Alias, b.Alias)
	default:
		return strings.TrimSpace(a.Value) == strings.TrimSpace(b.Value)
	}
}

// removeMapKey deletes key (and its value) from a mapping node if present.
func removeMapKey(mapNode *yaml.Node, key string) {
	if mapNode == nil || mapNode.Kind != yaml.MappingNode || key == "" {
		return
	}
	for i := 0; i+1 < len(mapNode.Content); i += 2 {
		if mapNode.Content[i] != nil && mapNode.Content[i].Value == key {
			mapNode.Content = append(mapNode.Content[:i], mapNode.Content[i+2:]...)
			return
		}
	}
}

// pruneMappingToGeneratedKeys restricts the mapping stored under key in dstRoot to the
// keys present in the generated config (srcRoot), removing the whole entry when the
// generated config no longer contains it.
func pruneMappingToGeneratedKeys(dstRoot, srcRoot *yaml.Node, key string) {
	if key == "" || dstRoot == nil || srcRoot == nil {
		return
	}
	if dstRoot.Kind != yaml.MappingNode || srcRoot.Kind != yaml.MappingNode {
		return
	}
	dstIdx := findMapKeyIndex(dstRoot, key)
	if dstIdx < 0 || dstIdx+1 >= len(dstRoot.Content) {
		return
	}
	srcIdx := findMapKeyIndex(srcRoot, key)
	if srcIdx < 0 {
		removeMapKey(dstRoot, key)
		return
	}
	if srcIdx+1 >= len(srcRoot.Content) {
		return
	}
	srcVal := srcRoot.Content[srcIdx+1]
	dstVal := dstRoot.Content[dstIdx+1]
	if srcVal == nil {
		dstRoot.Content[dstIdx+1] = nil
		return
	}
	if srcVal.Kind != yaml.MappingNode {
		dstRoot.Content[dstIdx+1] = deepCopyNode(srcVal)
		return
	}
	if dstVal == nil || dstVal.Kind != yaml.MappingNode {
		dstRoot.Content[dstIdx+1] = deepCopyNode(srcVal)
		return
	}
	pruneMissingMapKeys(dstVal, srcVal)
}

// pruneMissingMapKeys removes keys from dstMap that no longer exist in srcMap.
func pruneMissingMapKeys(dstMap, srcMap *yaml.Node) {
	if dstMap == nil || srcMap == nil || dstMap.Kind != yaml.MappingNode || srcMap.Kind != yaml.MappingNode {
		return
	}
	keep := make(map[string]struct{}, len(srcMap.Content)/2)
	for i := 0; i+1 < len(srcMap.Content); i += 2 {
		keyNode := srcMap.Content[i]
		if keyNode == nil {
			continue
		}
		key := strings.TrimSpace(keyNode.Value)
		if key == "" {
			continue
		}
		keep[key] = struct{}{}
	}
	for i := 0; i+1 < len(dstMap.Content); {
		keyNode := dstMap.Content[i]
		if keyNode == nil {
			i += 2
			continue
		}
		key := strings.TrimSpace(keyNode.Value)
		if _, ok := keep[key]; !ok {
			dstMap.Content = append(dstMap.Content[:i], dstMap.Content[i+2:]...)
			continue
		}
		i += 2
	}
}

// normalizeCollectionNodeStyles forces YAML collections to use block notation, keeping
// lists and maps readable. Empty sequences retain flow style ([]) so empty list markers
// remain compact.
func normalizeCollectionNodeStyles(node *yaml.Node) {
	if node == nil {
		return
	}
	switch node.Kind {
	case yaml.MappingNode:
		node.Style = 0
		for i := range node.Content {
			normalizeCollectionNodeStyles(node.Content[i])
		}
	case yaml.SequenceNode:
		if len(node.Content) == 0 {
			node.Style = yaml.FlowStyle
		} else {
			node.Style = 0
		}
		for i := range node.Content {
			normalizeCollectionNodeStyles(node.Content[i])
		}
	default:
		// Scalars keep their existing style to preserve quoting.
	}
}

// Legacy migration helpers (move deprecated config keys into structured fields).
type legacyConfigData struct {
	LegacyGeminiKeys      []string                    `yaml:"generative-language-api-key"`
	OpenAICompat          []legacyOpenAICompatibility `yaml:"openai-compatibility"`
	AmpUpstreamURL        string                      `yaml:"amp-upstream-url"`
	AmpUpstreamAPIKey     string                      `yaml:"amp-upstream-api-key"`
	AmpRestrictManagement *bool                       `yaml:"amp-restrict-management-to-localhost"`
	AmpModelMappings      []AmpModelMapping           `yaml:"amp-model-mappings"`
}

type legacyOpenAICompatibility struct {
	Name    string   `yaml:"name"`
	BaseURL string   `yaml:"base-url"`
	APIKeys []string `yaml:"api-keys"`
}
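
// Migration sketch (illustrative): deprecated top-level keys are folded into the
// structured fields when the config is loaded and re-persisted. For example,
//
//	generative-language-api-key:
//	  - "AIza-example"
//	amp-upstream-url: "https://amp.example.com"
//
// becomes
//
//	gemini-api-key:
//	  - api-key: "AIza-example"
//	ampcode:
//	  upstream-url: "https://amp.example.com"
//
// All values shown are placeholders.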

// migrateLegacyGeminiKeys appends legacy generative-language-api-key entries to
// GeminiKey, skipping blanks and keys that are already present. It reports whether
// anything changed.
func (cfg *Config) migrateLegacyGeminiKeys(legacy []string) bool {
	if cfg == nil || len(legacy) == 0 {
		return false
	}
	changed := false
	seen := make(map[string]struct{}, len(cfg.GeminiKey))
	for i := range cfg.GeminiKey {
		key := strings.TrimSpace(cfg.GeminiKey[i].APIKey)
		if key == "" {
			continue
		}
		seen[key] = struct{}{}
	}
	for _, raw := range legacy {
		key := strings.TrimSpace(raw)
		if key == "" {
			continue
		}
		if _, exists := seen[key]; exists {
			continue
		}
		cfg.GeminiKey = append(cfg.GeminiKey, GeminiKey{APIKey: key})
		seen[key] = struct{}{}
		changed = true
	}
	return changed
}

// migrateLegacyOpenAICompatibilityKeys merges legacy flat 'api-keys' lists into the
// matching openai-compatibility entries. It reports whether anything changed.
func (cfg *Config) migrateLegacyOpenAICompatibilityKeys(legacy []legacyOpenAICompatibility) bool {
	if cfg == nil || len(cfg.OpenAICompatibility) == 0 || len(legacy) == 0 {
		return false
	}
	changed := false
	for _, legacyEntry := range legacy {
		if len(legacyEntry.APIKeys) == 0 {
			continue
		}
		target := findOpenAICompatTarget(cfg.OpenAICompatibility, legacyEntry.Name, legacyEntry.BaseURL)
		if target == nil {
			continue
		}
		if mergeLegacyOpenAICompatAPIKeys(target, legacyEntry.APIKeys) {
			changed = true
		}
	}
	return changed
}

// mergeLegacyOpenAICompatAPIKeys appends legacy API keys to entry.APIKeyEntries,
// skipping blanks and duplicates. It reports whether anything changed.
func mergeLegacyOpenAICompatAPIKeys(entry *OpenAICompatibility, keys []string) bool {
	if entry == nil || len(keys) == 0 {
		return false
	}
	changed := false
	existing := make(map[string]struct{}, len(entry.APIKeyEntries))
	for i := range entry.APIKeyEntries {
		key := strings.TrimSpace(entry.APIKeyEntries[i].APIKey)
		if key == "" {
			continue
		}
		existing[key] = struct{}{}
	}
	for _, raw := range keys {
		key := strings.TrimSpace(raw)
		if key == "" {
			continue
		}
		if _, ok := existing[key]; ok {
			continue
		}
		entry.APIKeyEntries = append(entry.APIKeyEntries, OpenAICompatibilityAPIKey{APIKey: key})
		existing[key] = struct{}{}
		changed = true
	}
	return changed
}

// findOpenAICompatTarget locates the openai-compatibility entry matching a legacy
// record, preferring a name+base-url match, then base-url only, then name only.
func findOpenAICompatTarget(entries []OpenAICompatibility, legacyName, legacyBase string) *OpenAICompatibility {
	nameKey := strings.ToLower(strings.TrimSpace(legacyName))
	baseKey := strings.ToLower(strings.TrimSpace(legacyBase))
	if nameKey != "" && baseKey != "" {
		for i := range entries {
			if strings.ToLower(strings.TrimSpace(entries[i].Name)) == nameKey &&
				strings.ToLower(strings.TrimSpace(entries[i].BaseURL)) == baseKey {
				return &entries[i]
			}
		}
	}
	if baseKey != "" {
		for i := range entries {
			if strings.ToLower(strings.TrimSpace(entries[i].BaseURL)) == baseKey {
				return &entries[i]
			}
		}
	}
	if nameKey != "" {
		for i := range entries {
			if strings.ToLower(strings.TrimSpace(entries[i].Name)) == nameKey {
				return &entries[i]
			}
		}
	}
	return nil
}

// migrateLegacyAmpConfig moves deprecated top-level amp-* keys into the AmpCode block,
// without overwriting values that are already set. It reports whether anything changed.
func (cfg *Config) migrateLegacyAmpConfig(legacy *legacyConfigData) bool {
	if cfg == nil || legacy == nil {
		return false
	}
	changed := false
	if cfg.AmpCode.UpstreamURL == "" {
		if val := strings.TrimSpace(legacy.AmpUpstreamURL); val != "" {
			cfg.AmpCode.UpstreamURL = val
			changed = true
		}
	}
	if cfg.AmpCode.UpstreamAPIKey == "" {
		if val := strings.TrimSpace(legacy.AmpUpstreamAPIKey); val != "" {
			cfg.AmpCode.UpstreamAPIKey = val
			changed = true
		}
	}
	if legacy.AmpRestrictManagement != nil {
		cfg.AmpCode.RestrictManagementToLocalhost = *legacy.AmpRestrictManagement
		changed = true
	}
	if len(cfg.AmpCode.ModelMappings) == 0 && len(legacy.AmpModelMappings) > 0 {
		cfg.AmpCode.ModelMappings = append([]AmpModelMapping(nil), legacy.AmpModelMappings...)
		changed = true
	}
	return changed
}

// removeLegacyOpenAICompatAPIKeys strips the deprecated 'api-keys' field from every
// openai-compatibility entry in the YAML node tree before persisting.
func removeLegacyOpenAICompatAPIKeys(root *yaml.Node) {
	if root == nil || root.Kind != yaml.MappingNode {
		return
	}
	idx := findMapKeyIndex(root, "openai-compatibility")
	if idx < 0 || idx+1 >= len(root.Content) {
		return
	}
	seq := root.Content[idx+1]
	if seq == nil || seq.Kind != yaml.SequenceNode {
		return
	}
	for i := range seq.Content {
		if seq.Content[i] != nil && seq.Content[i].Kind == yaml.MappingNode {
			removeMapKey(seq.Content[i], "api-keys")
		}
	}
}

// removeLegacyAmpKeys drops the deprecated top-level amp-* keys from the YAML node tree.
func removeLegacyAmpKeys(root *yaml.Node) {
	if root == nil || root.Kind != yaml.MappingNode {
		return
	}
	removeMapKey(root, "amp-upstream-url")
	removeMapKey(root, "amp-upstream-api-key")
	removeMapKey(root, "amp-restrict-management-to-localhost")
	removeMapKey(root, "amp-model-mappings")
}

// removeLegacyGenerativeLanguageKeys drops the deprecated generative-language-api-key list.
func removeLegacyGenerativeLanguageKeys(root *yaml.Node) {
	if root == nil || root.Kind != yaml.MappingNode {
		return
	}
	removeMapKey(root, "generative-language-api-key")
}

// removeLegacyAuthBlock drops the deprecated top-level 'auth' block.
func removeLegacyAuthBlock(root *yaml.Node) {
	if root == nil || root.Kind != yaml.MappingNode {
		return
	}
	removeMapKey(root, "auth")
}