Mirror of https://github.com/router-for-me/CLIProxyAPI.git (synced 2026-02-03 04:50:52 +08:00)
feat(api): add management model definitions endpoint
internal/api/handlers/management/model_definitions.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package management

import (
    "net/http"
    "strings"

    "github.com/gin-gonic/gin"
    "github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
)

// GetStaticModelDefinitions returns static model metadata for a given channel.
// Channel is provided via path param (:channel) or query param (?channel=...).
func (h *Handler) GetStaticModelDefinitions(c *gin.Context) {
    channel := strings.TrimSpace(c.Param("channel"))
    if channel == "" {
        channel = strings.TrimSpace(c.Query("channel"))
    }
    if channel == "" {
        c.JSON(http.StatusBadRequest, gin.H{"error": "channel is required"})
        return
    }

    models := registry.GetStaticModelDefinitionsByChannel(channel)
    if models == nil {
        c.JSON(http.StatusBadRequest, gin.H{"error": "unknown channel", "channel": channel})
        return
    }

    c.JSON(http.StatusOK, gin.H{
        "channel": strings.ToLower(strings.TrimSpace(channel)),
        "models":  models,
    })
}
@@ -607,6 +607,7 @@ func (s *Server) registerManagementRoutes() {
     mgmt.GET("/auth-files", s.mgmt.ListAuthFiles)
     mgmt.GET("/auth-files/models", s.mgmt.GetAuthFileModels)
+    mgmt.GET("/model-definitions/:channel", s.mgmt.GetStaticModelDefinitions)
     mgmt.GET("/auth-files/download", s.mgmt.DownloadAuthFile)
     mgmt.POST("/auth-files", s.mgmt.UploadAuthFile)
     mgmt.DELETE("/auth-files", s.mgmt.DeleteAuthFile)
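Together, the new handler and this route expose the static catalog over the management API as GET /model-definitions/:channel (with ?channel=... as a fallback). The sketch below is a minimal Go client for the endpoint; the listen address, the management path prefix, and the omission of any management authentication are assumptions made for illustration, not details taken from this commit.

package main

import (
    "fmt"
    "io"
    "net/http"
)

func main() {
    // Hypothetical base URL: adjust host, port, and management prefix to the actual
    // deployment, and add whatever auth the management API requires.
    base := "http://localhost:8317/v0/management"

    // Channel supplied as a path parameter, matching the registered route
    // /model-definitions/:channel; a ?channel= query parameter also works.
    resp, err := http.Get(base + "/model-definitions/claude")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    // 200 -> {"channel":"claude","models":[...]}
    // 400 -> {"error":"channel is required"} or {"error":"unknown channel","channel":"..."}
    fmt.Println(resp.Status)
    fmt.Println(string(body))
}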
@@ -1,848 +1,69 @@
// Package registry provides model definitions and lookup helpers for various AI providers.
// Static model metadata is stored in model_definitions_static_data.go.
package registry

import (
    "sort"
    "strings"
)

// GetStaticModelDefinitionsByChannel returns static model definitions for a given channel/provider.
// It returns nil when the channel is unknown.
//
// Supported channels:
// - claude
// - gemini
// - vertex
// - gemini-cli
// - aistudio
// - codex
// - qwen
// - iflow
// - antigravity (returns static overrides only)
func GetStaticModelDefinitionsByChannel(channel string) []*ModelInfo {
    key := strings.ToLower(strings.TrimSpace(channel))
    switch key {
    case "claude":
        return GetClaudeModels()
    case "gemini":
        return GetGeminiModels()
    case "vertex":
        return GetGeminiVertexModels()
    case "gemini-cli":
        return GetGeminiCLIModels()
    case "aistudio":
        return GetAIStudioModels()
    case "codex":
        return GetOpenAIModels()
    case "qwen":
        return GetQwenModels()
    case "iflow":
        return GetIFlowModels()
    case "antigravity":
        cfg := GetAntigravityModelConfig()
        if len(cfg) == 0 {
            return nil
        }
        models := make([]*ModelInfo, 0, len(cfg))
        for modelID, entry := range cfg {
            if modelID == "" || entry == nil {
                continue
            }
            models = append(models, &ModelInfo{
                ID:                  modelID,
                Object:              "model",
                OwnedBy:             "antigravity",
                Type:                "antigravity",
                Thinking:            entry.Thinking,
                MaxCompletionTokens: entry.MaxCompletionTokens,
            })
        }
        sort.Slice(models, func(i, j int) bool {
            return strings.ToLower(models[i].ID) < strings.ToLower(models[j].ID)
        })
        return models
    default:
        return nil
    }
}

(The removed lines were the static catalog previously defined alongside these helpers: GetClaudeModels, GetGeminiModels, GetGeminiVertexModels, GetGeminiCLIModels, GetAIStudioModels, GetOpenAIModels, GetQwenModels, iFlowThinkingSupport, GetIFlowModels, AntigravityModelConfig, and GetAntigravityModelConfig. That metadata now lives in internal/registry/model_definitions_static_data.go, the new file below.)
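Inside the module itself, the same lookup is available without going through HTTP. A small sketch using only what the function above defines (channel keys are trimmed and lower-cased, and an unknown channel yields nil, which the management handler maps to a 400 response):

package main // illustration only: internal/registry is importable only from within this module

import (
    "fmt"

    "github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
)

func main() {
    for _, channel := range []string{"claude", "gemini-cli", " Codex ", "unknown"} {
        models := registry.GetStaticModelDefinitionsByChannel(channel)
        if models == nil {
            // Unknown channels return nil rather than an empty slice.
            fmt.Printf("%q: unknown channel\n", channel)
            continue
        }
        fmt.Printf("%q: %d static model definitions\n", channel, len(models))
    }
}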
846
internal/registry/model_definitions_static_data.go
Normal file
846
internal/registry/model_definitions_static_data.go
Normal file
@@ -0,0 +1,846 @@
|
|||||||
|
// Package registry provides model definitions for various AI service providers.
|
||||||
|
// This file stores the static model metadata catalog.
|
||||||
|
package registry
|
||||||
|
|
||||||
|
// GetClaudeModels returns the standard Claude model definitions
|
||||||
|
func GetClaudeModels() []*ModelInfo {
|
||||||
|
return []*ModelInfo{
|
||||||
|
|
||||||
|
{
|
||||||
|
ID: "claude-haiku-4-5-20251001",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1759276800, // 2025-10-01
|
||||||
|
OwnedBy: "anthropic",
|
||||||
|
Type: "claude",
|
||||||
|
DisplayName: "Claude 4.5 Haiku",
|
||||||
|
ContextLength: 200000,
|
||||||
|
MaxCompletionTokens: 64000,
|
||||||
|
// Thinking: not supported for Haiku models
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "claude-sonnet-4-5-20250929",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1759104000, // 2025-09-29
|
||||||
|
OwnedBy: "anthropic",
|
||||||
|
Type: "claude",
|
||||||
|
DisplayName: "Claude 4.5 Sonnet",
|
||||||
|
ContextLength: 200000,
|
||||||
|
MaxCompletionTokens: 64000,
|
||||||
|
Thinking: &ThinkingSupport{Min: 1024, Max: 128000, ZeroAllowed: true, DynamicAllowed: false},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "claude-opus-4-5-20251101",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1761955200, // 2025-11-01
|
||||||
|
OwnedBy: "anthropic",
|
||||||
|
Type: "claude",
|
||||||
|
DisplayName: "Claude 4.5 Opus",
|
||||||
|
Description: "Premium model combining maximum intelligence with practical performance",
|
||||||
|
ContextLength: 200000,
|
||||||
|
MaxCompletionTokens: 64000,
|
||||||
|
Thinking: &ThinkingSupport{Min: 1024, Max: 128000, ZeroAllowed: true, DynamicAllowed: false},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "claude-opus-4-1-20250805",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1722945600, // 2025-08-05
|
||||||
|
OwnedBy: "anthropic",
|
||||||
|
Type: "claude",
|
||||||
|
DisplayName: "Claude 4.1 Opus",
|
||||||
|
ContextLength: 200000,
|
||||||
|
MaxCompletionTokens: 32000,
|
||||||
|
Thinking: &ThinkingSupport{Min: 1024, Max: 128000, ZeroAllowed: false, DynamicAllowed: false},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "claude-opus-4-20250514",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1715644800, // 2025-05-14
|
||||||
|
OwnedBy: "anthropic",
|
||||||
|
Type: "claude",
|
||||||
|
DisplayName: "Claude 4 Opus",
|
||||||
|
ContextLength: 200000,
|
||||||
|
MaxCompletionTokens: 32000,
|
||||||
|
Thinking: &ThinkingSupport{Min: 1024, Max: 128000, ZeroAllowed: false, DynamicAllowed: false},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "claude-sonnet-4-20250514",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1715644800, // 2025-05-14
|
||||||
|
OwnedBy: "anthropic",
|
||||||
|
Type: "claude",
|
||||||
|
DisplayName: "Claude 4 Sonnet",
|
||||||
|
ContextLength: 200000,
|
||||||
|
MaxCompletionTokens: 64000,
|
||||||
|
Thinking: &ThinkingSupport{Min: 1024, Max: 128000, ZeroAllowed: false, DynamicAllowed: false},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "claude-3-7-sonnet-20250219",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1708300800, // 2025-02-19
|
||||||
|
OwnedBy: "anthropic",
|
||||||
|
Type: "claude",
|
||||||
|
DisplayName: "Claude 3.7 Sonnet",
|
||||||
|
ContextLength: 128000,
|
||||||
|
MaxCompletionTokens: 8192,
|
||||||
|
Thinking: &ThinkingSupport{Min: 1024, Max: 128000, ZeroAllowed: false, DynamicAllowed: false},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "claude-3-5-haiku-20241022",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1729555200, // 2024-10-22
|
||||||
|
OwnedBy: "anthropic",
|
||||||
|
Type: "claude",
|
||||||
|
DisplayName: "Claude 3.5 Haiku",
|
||||||
|
ContextLength: 128000,
|
||||||
|
MaxCompletionTokens: 8192,
|
||||||
|
// Thinking: not supported for Haiku models
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetGeminiModels returns the standard Gemini model definitions
|
||||||
|
func GetGeminiModels() []*ModelInfo {
|
||||||
|
return []*ModelInfo{
|
||||||
|
{
|
||||||
|
ID: "gemini-2.5-pro",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1750118400,
|
||||||
|
OwnedBy: "google",
|
||||||
|
Type: "gemini",
|
||||||
|
Name: "models/gemini-2.5-pro",
|
||||||
|
Version: "2.5",
|
||||||
|
DisplayName: "Gemini 2.5 Pro",
|
||||||
|
Description: "Stable release (June 17th, 2025) of Gemini 2.5 Pro",
|
||||||
|
InputTokenLimit: 1048576,
|
||||||
|
OutputTokenLimit: 65536,
|
||||||
|
SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
|
||||||
|
Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "gemini-2.5-flash",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1750118400,
|
||||||
|
OwnedBy: "google",
|
||||||
|
Type: "gemini",
|
||||||
|
Name: "models/gemini-2.5-flash",
|
||||||
|
Version: "001",
|
||||||
|
DisplayName: "Gemini 2.5 Flash",
|
||||||
|
Description: "Stable version of Gemini 2.5 Flash, our mid-size multimodal model that supports up to 1 million tokens, released in June of 2025.",
|
||||||
|
InputTokenLimit: 1048576,
|
||||||
|
OutputTokenLimit: 65536,
|
||||||
|
SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
|
||||||
|
Thinking: &ThinkingSupport{Min: 0, Max: 24576, ZeroAllowed: true, DynamicAllowed: true},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "gemini-2.5-flash-lite",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1753142400,
|
||||||
|
OwnedBy: "google",
|
||||||
|
Type: "gemini",
|
||||||
|
Name: "models/gemini-2.5-flash-lite",
|
||||||
|
Version: "2.5",
|
||||||
|
DisplayName: "Gemini 2.5 Flash Lite",
|
||||||
|
Description: "Our smallest and most cost effective model, built for at scale usage.",
|
||||||
|
InputTokenLimit: 1048576,
|
||||||
|
OutputTokenLimit: 65536,
|
||||||
|
SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
|
||||||
|
Thinking: &ThinkingSupport{Min: 0, Max: 24576, ZeroAllowed: true, DynamicAllowed: true},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "gemini-3-pro-preview",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1737158400,
|
||||||
|
OwnedBy: "google",
|
||||||
|
Type: "gemini",
|
||||||
|
Name: "models/gemini-3-pro-preview",
|
||||||
|
Version: "3.0",
|
||||||
|
DisplayName: "Gemini 3 Pro Preview",
|
||||||
|
Description: "Gemini 3 Pro Preview",
|
||||||
|
InputTokenLimit: 1048576,
|
||||||
|
OutputTokenLimit: 65536,
|
||||||
|
SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
|
||||||
|
Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true, Levels: []string{"low", "high"}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "gemini-3-flash-preview",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1765929600,
|
||||||
|
OwnedBy: "google",
|
||||||
|
Type: "gemini",
|
||||||
|
Name: "models/gemini-3-flash-preview",
|
||||||
|
Version: "3.0",
|
||||||
|
DisplayName: "Gemini 3 Flash Preview",
|
||||||
|
Description: "Gemini 3 Flash Preview",
|
||||||
|
InputTokenLimit: 1048576,
|
||||||
|
OutputTokenLimit: 65536,
|
||||||
|
SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
|
||||||
|
Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true, Levels: []string{"minimal", "low", "medium", "high"}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "gemini-3-pro-image-preview",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1737158400,
|
||||||
|
OwnedBy: "google",
|
||||||
|
Type: "gemini",
|
||||||
|
Name: "models/gemini-3-pro-image-preview",
|
||||||
|
Version: "3.0",
|
||||||
|
DisplayName: "Gemini 3 Pro Image Preview",
|
||||||
|
Description: "Gemini 3 Pro Image Preview",
|
||||||
|
InputTokenLimit: 1048576,
|
||||||
|
OutputTokenLimit: 65536,
|
||||||
|
SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
|
||||||
|
Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true, Levels: []string{"low", "high"}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetGeminiVertexModels() []*ModelInfo {
|
||||||
|
return []*ModelInfo{
|
||||||
|
{
|
||||||
|
ID: "gemini-2.5-pro",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1750118400,
|
||||||
|
OwnedBy: "google",
|
||||||
|
Type: "gemini",
|
||||||
|
Name: "models/gemini-2.5-pro",
|
||||||
|
Version: "2.5",
|
||||||
|
DisplayName: "Gemini 2.5 Pro",
|
||||||
|
Description: "Stable release (June 17th, 2025) of Gemini 2.5 Pro",
|
||||||
|
InputTokenLimit: 1048576,
|
||||||
|
OutputTokenLimit: 65536,
|
||||||
|
SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
|
||||||
|
Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "gemini-2.5-flash",
|
||||||
|
Object: "model",
|
||||||
|
Created: 1750118400,
|
||||||
|
OwnedBy: "google",
|
||||||
|
Type: "gemini",
|
||||||
|
Name: "models/gemini-2.5-flash",
|
||||||
|
Version: "001",
|
||||||
|
DisplayName: "Gemini 2.5 Flash",
|
||||||
|
Description: "Stable version of Gemini 2.5 Flash, our mid-size multimodal model that supports up to 1 million tokens, released in June of 2025.",
|
||||||
|
InputTokenLimit: 1048576,
|
||||||
|
OutputTokenLimit: 65536,
|
||||||
|
SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
|
||||||
|
			Thinking: &ThinkingSupport{Min: 0, Max: 24576, ZeroAllowed: true, DynamicAllowed: true},
		},
		{
			ID: "gemini-2.5-flash-lite",
			Object: "model",
			Created: 1753142400,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-2.5-flash-lite",
			Version: "2.5",
			DisplayName: "Gemini 2.5 Flash Lite",
			Description: "Our smallest and most cost effective model, built for at scale usage.",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 0, Max: 24576, ZeroAllowed: true, DynamicAllowed: true},
		},
		{
			ID: "gemini-3-pro-preview",
			Object: "model",
			Created: 1737158400,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-3-pro-preview",
			Version: "3.0",
			DisplayName: "Gemini 3 Pro Preview",
			Description: "Gemini 3 Pro Preview",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true, Levels: []string{"low", "high"}},
		},
		{
			ID: "gemini-3-flash-preview",
			Object: "model",
			Created: 1765929600,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-3-flash-preview",
			Version: "3.0",
			DisplayName: "Gemini 3 Flash Preview",
			Description: "Our most intelligent model built for speed, combining frontier intelligence with superior search and grounding.",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true, Levels: []string{"minimal", "low", "medium", "high"}},
		},
		{
			ID: "gemini-3-pro-image-preview",
			Object: "model",
			Created: 1737158400,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-3-pro-image-preview",
			Version: "3.0",
			DisplayName: "Gemini 3 Pro Image Preview",
			Description: "Gemini 3 Pro Image Preview",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true, Levels: []string{"low", "high"}},
		},
		// Imagen image generation models - use :predict action
		{
			ID: "imagen-4.0-generate-001",
			Object: "model",
			Created: 1750000000,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/imagen-4.0-generate-001",
			Version: "4.0",
			DisplayName: "Imagen 4.0 Generate",
			Description: "Imagen 4.0 image generation model",
			SupportedGenerationMethods: []string{"predict"},
		},
		{
			ID: "imagen-4.0-ultra-generate-001",
			Object: "model",
			Created: 1750000000,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/imagen-4.0-ultra-generate-001",
			Version: "4.0",
			DisplayName: "Imagen 4.0 Ultra Generate",
			Description: "Imagen 4.0 Ultra high-quality image generation model",
			SupportedGenerationMethods: []string{"predict"},
		},
		{
			ID: "imagen-3.0-generate-002",
			Object: "model",
			Created: 1740000000,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/imagen-3.0-generate-002",
			Version: "3.0",
			DisplayName: "Imagen 3.0 Generate",
			Description: "Imagen 3.0 image generation model",
			SupportedGenerationMethods: []string{"predict"},
		},
		{
			ID: "imagen-3.0-fast-generate-001",
			Object: "model",
			Created: 1740000000,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/imagen-3.0-fast-generate-001",
			Version: "3.0",
			DisplayName: "Imagen 3.0 Fast Generate",
			Description: "Imagen 3.0 fast image generation model",
			SupportedGenerationMethods: []string{"predict"},
		},
		{
			ID: "imagen-4.0-fast-generate-001",
			Object: "model",
			Created: 1750000000,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/imagen-4.0-fast-generate-001",
			Version: "4.0",
			DisplayName: "Imagen 4.0 Fast Generate",
			Description: "Imagen 4.0 fast image generation model",
			SupportedGenerationMethods: []string{"predict"},
		},
	}
}
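
// Illustrative sketch (hypothetical helper, not part of this commit): select
// the entries from a definition list that must be called through the :predict
// action (the Imagen models above) rather than generateContent.
func predictOnlyModels(models []*ModelInfo) []*ModelInfo {
	var out []*ModelInfo
	for _, m := range models {
		for _, method := range m.SupportedGenerationMethods {
			if method == "predict" {
				out = append(out, m)
				break
			}
		}
	}
	return out
}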

// GetGeminiCLIModels returns the standard Gemini model definitions
func GetGeminiCLIModels() []*ModelInfo {
	return []*ModelInfo{
		{
			ID: "gemini-2.5-pro",
			Object: "model",
			Created: 1750118400,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-2.5-pro",
			Version: "2.5",
			DisplayName: "Gemini 2.5 Pro",
			Description: "Stable release (June 17th, 2025) of Gemini 2.5 Pro",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true},
		},
		{
			ID: "gemini-2.5-flash",
			Object: "model",
			Created: 1750118400,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-2.5-flash",
			Version: "001",
			DisplayName: "Gemini 2.5 Flash",
			Description: "Stable version of Gemini 2.5 Flash, our mid-size multimodal model that supports up to 1 million tokens, released in June of 2025.",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 0, Max: 24576, ZeroAllowed: true, DynamicAllowed: true},
		},
		{
			ID: "gemini-2.5-flash-lite",
			Object: "model",
			Created: 1753142400,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-2.5-flash-lite",
			Version: "2.5",
			DisplayName: "Gemini 2.5 Flash Lite",
			Description: "Our smallest and most cost effective model, built for at scale usage.",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 0, Max: 24576, ZeroAllowed: true, DynamicAllowed: true},
		},
		{
			ID: "gemini-3-pro-preview",
			Object: "model",
			Created: 1737158400,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-3-pro-preview",
			Version: "3.0",
			DisplayName: "Gemini 3 Pro Preview",
			Description: "Our most intelligent model with SOTA reasoning and multimodal understanding, and powerful agentic and vibe coding capabilities",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true, Levels: []string{"low", "high"}},
		},
		{
			ID: "gemini-3-flash-preview",
			Object: "model",
			Created: 1765929600,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-3-flash-preview",
			Version: "3.0",
			DisplayName: "Gemini 3 Flash Preview",
			Description: "Our most intelligent model built for speed, combining frontier intelligence with superior search and grounding.",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true, Levels: []string{"minimal", "low", "medium", "high"}},
		},
	}
}
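
// Illustrative sketch (hypothetical helper, not part of this commit): look up
// a Gemini CLI model by ID and report the thinking-budget range it advertises.
// A nil Thinking entry means the model does not expose thinkingConfig.
func geminiCLIThinkingRange(id string) (min, max int, ok bool) {
	for _, m := range GetGeminiCLIModels() {
		if m.ID == id && m.Thinking != nil {
			return m.Thinking.Min, m.Thinking.Max, true
		}
	}
	return 0, 0, false
}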

// GetAIStudioModels returns the Gemini model definitions for AI Studio integrations
func GetAIStudioModels() []*ModelInfo {
	return []*ModelInfo{
		{
			ID: "gemini-2.5-pro",
			Object: "model",
			Created: 1750118400,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-2.5-pro",
			Version: "2.5",
			DisplayName: "Gemini 2.5 Pro",
			Description: "Stable release (June 17th, 2025) of Gemini 2.5 Pro",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true},
		},
		{
			ID: "gemini-2.5-flash",
			Object: "model",
			Created: 1750118400,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-2.5-flash",
			Version: "001",
			DisplayName: "Gemini 2.5 Flash",
			Description: "Stable version of Gemini 2.5 Flash, our mid-size multimodal model that supports up to 1 million tokens, released in June of 2025.",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 0, Max: 24576, ZeroAllowed: true, DynamicAllowed: true},
		},
		{
			ID: "gemini-2.5-flash-lite",
			Object: "model",
			Created: 1753142400,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-2.5-flash-lite",
			Version: "2.5",
			DisplayName: "Gemini 2.5 Flash Lite",
			Description: "Our smallest and most cost effective model, built for at scale usage.",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 0, Max: 24576, ZeroAllowed: true, DynamicAllowed: true},
		},
		{
			ID: "gemini-3-pro-preview",
			Object: "model",
			Created: 1737158400,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-3-pro-preview",
			Version: "3.0",
			DisplayName: "Gemini 3 Pro Preview",
			Description: "Gemini 3 Pro Preview",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true},
		},
		{
			ID: "gemini-3-flash-preview",
			Object: "model",
			Created: 1765929600,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-3-flash-preview",
			Version: "3.0",
			DisplayName: "Gemini 3 Flash Preview",
			Description: "Our most intelligent model built for speed, combining frontier intelligence with superior search and grounding.",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true},
		},
		{
			ID: "gemini-pro-latest",
			Object: "model",
			Created: 1750118400,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-pro-latest",
			Version: "2.5",
			DisplayName: "Gemini Pro Latest",
			Description: "Latest release of Gemini Pro",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true},
		},
		{
			ID: "gemini-flash-latest",
			Object: "model",
			Created: 1750118400,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-flash-latest",
			Version: "2.5",
			DisplayName: "Gemini Flash Latest",
			Description: "Latest release of Gemini Flash",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 0, Max: 24576, ZeroAllowed: true, DynamicAllowed: true},
		},
		{
			ID: "gemini-flash-lite-latest",
			Object: "model",
			Created: 1753142400,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-flash-lite-latest",
			Version: "2.5",
			DisplayName: "Gemini Flash-Lite Latest",
			Description: "Latest release of Gemini Flash-Lite",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 65536,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			Thinking: &ThinkingSupport{Min: 512, Max: 24576, ZeroAllowed: true, DynamicAllowed: true},
		},
		{
			ID: "gemini-2.5-flash-image-preview",
			Object: "model",
			Created: 1756166400,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-2.5-flash-image-preview",
			Version: "2.5",
			DisplayName: "Gemini 2.5 Flash Image Preview",
			Description: "State-of-the-art image generation and editing model.",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 8192,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			// image models don't support thinkingConfig; leave Thinking nil
		},
		{
			ID: "gemini-2.5-flash-image",
			Object: "model",
			Created: 1759363200,
			OwnedBy: "google",
			Type: "gemini",
			Name: "models/gemini-2.5-flash-image",
			Version: "2.5",
			DisplayName: "Gemini 2.5 Flash Image",
			Description: "State-of-the-art image generation and editing model.",
			InputTokenLimit: 1048576,
			OutputTokenLimit: 8192,
			SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
			// image models don't support thinkingConfig; leave Thinking nil
		},
	}
}

// GetOpenAIModels returns the standard OpenAI model definitions
func GetOpenAIModels() []*ModelInfo {
	return []*ModelInfo{
		{
			ID: "gpt-5",
			Object: "model",
			Created: 1754524800,
			OwnedBy: "openai",
			Type: "openai",
			Version: "gpt-5-2025-08-07",
			DisplayName: "GPT 5",
			Description: "Stable version of GPT 5, the best model for coding and agentic tasks across domains.",
			ContextLength: 400000,
			MaxCompletionTokens: 128000,
			SupportedParameters: []string{"tools"},
			Thinking: &ThinkingSupport{Levels: []string{"minimal", "low", "medium", "high"}},
		},
		{
			ID: "gpt-5-codex",
			Object: "model",
			Created: 1757894400,
			OwnedBy: "openai",
			Type: "openai",
			Version: "gpt-5-2025-09-15",
			DisplayName: "GPT 5 Codex",
			Description: "Stable version of GPT 5 Codex, the best model for coding and agentic tasks across domains.",
			ContextLength: 400000,
			MaxCompletionTokens: 128000,
			SupportedParameters: []string{"tools"},
			Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high"}},
		},
		{
			ID: "gpt-5-codex-mini",
			Object: "model",
			Created: 1762473600,
			OwnedBy: "openai",
			Type: "openai",
			Version: "gpt-5-2025-11-07",
			DisplayName: "GPT 5 Codex Mini",
			Description: "Stable version of GPT 5 Codex Mini: a cheaper, faster, but less capable version of GPT 5 Codex.",
			ContextLength: 400000,
			MaxCompletionTokens: 128000,
			SupportedParameters: []string{"tools"},
			Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high"}},
		},
		{
			ID: "gpt-5.1",
			Object: "model",
			Created: 1762905600,
			OwnedBy: "openai",
			Type: "openai",
			Version: "gpt-5.1-2025-11-12",
			DisplayName: "GPT 5.1",
			Description: "Stable version of GPT 5.1, the best model for coding and agentic tasks across domains.",
			ContextLength: 400000,
			MaxCompletionTokens: 128000,
			SupportedParameters: []string{"tools"},
			Thinking: &ThinkingSupport{Levels: []string{"none", "low", "medium", "high"}},
		},
		{
			ID: "gpt-5.1-codex",
			Object: "model",
			Created: 1762905600,
			OwnedBy: "openai",
			Type: "openai",
			Version: "gpt-5.1-2025-11-12",
			DisplayName: "GPT 5.1 Codex",
			Description: "Stable version of GPT 5.1 Codex, the best model for coding and agentic tasks across domains.",
			ContextLength: 400000,
			MaxCompletionTokens: 128000,
			SupportedParameters: []string{"tools"},
			Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high"}},
		},
		{
			ID: "gpt-5.1-codex-mini",
			Object: "model",
			Created: 1762905600,
			OwnedBy: "openai",
			Type: "openai",
			Version: "gpt-5.1-2025-11-12",
			DisplayName: "GPT 5.1 Codex Mini",
			Description: "Stable version of GPT 5.1 Codex Mini: a cheaper, faster, but less capable version of GPT 5.1 Codex.",
			ContextLength: 400000,
			MaxCompletionTokens: 128000,
			SupportedParameters: []string{"tools"},
			Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high"}},
		},
		{
			ID: "gpt-5.1-codex-max",
			Object: "model",
			Created: 1763424000,
			OwnedBy: "openai",
			Type: "openai",
			Version: "gpt-5.1-max",
			DisplayName: "GPT 5.1 Codex Max",
			Description: "Stable version of GPT 5.1 Codex Max",
			ContextLength: 400000,
			MaxCompletionTokens: 128000,
			SupportedParameters: []string{"tools"},
			Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high", "xhigh"}},
		},
		{
			ID: "gpt-5.2",
			Object: "model",
			Created: 1765440000,
			OwnedBy: "openai",
			Type: "openai",
			Version: "gpt-5.2",
			DisplayName: "GPT 5.2",
			Description: "Stable version of GPT 5.2",
			ContextLength: 400000,
			MaxCompletionTokens: 128000,
			SupportedParameters: []string{"tools"},
			Thinking: &ThinkingSupport{Levels: []string{"none", "low", "medium", "high", "xhigh"}},
		},
		{
			ID: "gpt-5.2-codex",
			Object: "model",
			Created: 1765440000,
			OwnedBy: "openai",
			Type: "openai",
			Version: "gpt-5.2",
			DisplayName: "GPT 5.2 Codex",
			Description: "Stable version of GPT 5.2 Codex, the best model for coding and agentic tasks across domains.",
			ContextLength: 400000,
			MaxCompletionTokens: 128000,
			SupportedParameters: []string{"tools"},
			Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high", "xhigh"}},
		},
	}
}
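
// Illustrative sketch (hypothetical helper, not part of this commit): list the
// OpenAI model IDs that advertise a given reasoning-effort level in their
// ThinkingSupport.Levels metadata.
func openAIModelsWithThinkingLevel(level string) []string {
	var ids []string
	for _, m := range GetOpenAIModels() {
		if m.Thinking == nil {
			continue
		}
		for _, l := range m.Thinking.Levels {
			if l == level {
				ids = append(ids, m.ID)
				break
			}
		}
	}
	return ids
}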

// GetQwenModels returns the standard Qwen model definitions
func GetQwenModels() []*ModelInfo {
	return []*ModelInfo{
		{
			ID: "qwen3-coder-plus",
			Object: "model",
			Created: 1753228800,
			OwnedBy: "qwen",
			Type: "qwen",
			Version: "3.0",
			DisplayName: "Qwen3 Coder Plus",
			Description: "Advanced code generation and understanding model",
			ContextLength: 32768,
			MaxCompletionTokens: 8192,
			SupportedParameters: []string{"temperature", "top_p", "max_tokens", "stream", "stop"},
		},
		{
			ID: "qwen3-coder-flash",
			Object: "model",
			Created: 1753228800,
			OwnedBy: "qwen",
			Type: "qwen",
			Version: "3.0",
			DisplayName: "Qwen3 Coder Flash",
			Description: "Fast code generation model",
			ContextLength: 8192,
			MaxCompletionTokens: 2048,
			SupportedParameters: []string{"temperature", "top_p", "max_tokens", "stream", "stop"},
		},
		{
			ID: "vision-model",
			Object: "model",
			Created: 1758672000,
			OwnedBy: "qwen",
			Type: "qwen",
			Version: "3.0",
			DisplayName: "Qwen3 Vision Model",
			Description: "Vision model",
			ContextLength: 32768,
			MaxCompletionTokens: 2048,
			SupportedParameters: []string{"temperature", "top_p", "max_tokens", "stream", "stop"},
		},
	}
}

// iFlowThinkingSupport is a shared ThinkingSupport configuration for iFlow models
// that support thinking mode via chat_template_kwargs.enable_thinking (boolean toggle).
// Uses level-based configuration so standard normalization flows apply before conversion.
var iFlowThinkingSupport = &ThinkingSupport{
	Levels: []string{"none", "auto", "minimal", "low", "medium", "high", "xhigh"},
}
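
// Illustrative sketch (hypothetical helper, not part of this commit): one way
// a normalized thinking level could be mapped onto the boolean
// chat_template_kwargs.enable_thinking toggle that these iFlow models accept.
// The exact mapping used elsewhere in the proxy may differ.
func iFlowEnableThinking(level string) bool {
	switch level {
	case "", "none":
		return false
	default:
		return true
	}
}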

// GetIFlowModels returns supported models for iFlow OAuth accounts.
func GetIFlowModels() []*ModelInfo {
	entries := []struct {
		ID          string
		DisplayName string
		Description string
		Created     int64
		Thinking    *ThinkingSupport
	}{
		{ID: "tstars2.0", DisplayName: "TStars-2.0", Description: "iFlow TStars-2.0 multimodal assistant", Created: 1746489600},
		{ID: "qwen3-coder-plus", DisplayName: "Qwen3-Coder-Plus", Description: "Qwen3 Coder Plus code generation", Created: 1753228800},
		{ID: "qwen3-max", DisplayName: "Qwen3-Max", Description: "Qwen3 flagship model", Created: 1758672000},
		{ID: "qwen3-vl-plus", DisplayName: "Qwen3-VL-Plus", Description: "Qwen3 multimodal vision-language", Created: 1758672000},
		{ID: "qwen3-max-preview", DisplayName: "Qwen3-Max-Preview", Description: "Qwen3 Max preview build", Created: 1757030400},
		{ID: "kimi-k2-0905", DisplayName: "Kimi-K2-Instruct-0905", Description: "Moonshot Kimi K2 instruct 0905", Created: 1757030400},
		{ID: "glm-4.6", DisplayName: "GLM-4.6", Description: "Zhipu GLM 4.6 general model", Created: 1759190400, Thinking: iFlowThinkingSupport},
		{ID: "glm-4.7", DisplayName: "GLM-4.7", Description: "Zhipu GLM 4.7 general model", Created: 1766448000, Thinking: iFlowThinkingSupport},
		{ID: "kimi-k2", DisplayName: "Kimi-K2", Description: "Moonshot Kimi K2 general model", Created: 1752192000},
		{ID: "kimi-k2-thinking", DisplayName: "Kimi-K2-Thinking", Description: "Moonshot Kimi K2 thinking model", Created: 1762387200},
		{ID: "deepseek-v3.2-chat", DisplayName: "DeepSeek-V3.2", Description: "DeepSeek V3.2 Chat", Created: 1764576000},
		{ID: "deepseek-v3.2-reasoner", DisplayName: "DeepSeek-V3.2", Description: "DeepSeek V3.2 Reasoner", Created: 1764576000},
		{ID: "deepseek-v3.2", DisplayName: "DeepSeek-V3.2-Exp", Description: "DeepSeek V3.2 experimental", Created: 1759104000},
		{ID: "deepseek-v3.1", DisplayName: "DeepSeek-V3.1-Terminus", Description: "DeepSeek V3.1 Terminus", Created: 1756339200},
		{ID: "deepseek-r1", DisplayName: "DeepSeek-R1", Description: "DeepSeek reasoning model R1", Created: 1737331200},
		{ID: "deepseek-v3", DisplayName: "DeepSeek-V3-671B", Description: "DeepSeek V3 671B", Created: 1734307200},
		{ID: "qwen3-32b", DisplayName: "Qwen3-32B", Description: "Qwen3 32B", Created: 1747094400},
		{ID: "qwen3-235b-a22b-thinking-2507", DisplayName: "Qwen3-235B-A22B-Thinking", Description: "Qwen3 235B A22B Thinking (2507)", Created: 1753401600},
		{ID: "qwen3-235b-a22b-instruct", DisplayName: "Qwen3-235B-A22B-Instruct", Description: "Qwen3 235B A22B Instruct", Created: 1753401600},
		{ID: "qwen3-235b", DisplayName: "Qwen3-235B-A22B", Description: "Qwen3 235B A22B", Created: 1753401600},
		{ID: "minimax-m2", DisplayName: "MiniMax-M2", Description: "MiniMax M2", Created: 1758672000, Thinking: iFlowThinkingSupport},
		{ID: "minimax-m2.1", DisplayName: "MiniMax-M2.1", Description: "MiniMax M2.1", Created: 1766448000, Thinking: iFlowThinkingSupport},
		{ID: "iflow-rome-30ba3b", DisplayName: "iFlow-ROME", Description: "iFlow Rome 30BA3B model", Created: 1736899200},
	}
	models := make([]*ModelInfo, 0, len(entries))
	for _, entry := range entries {
		models = append(models, &ModelInfo{
			ID: entry.ID,
			Object: "model",
			Created: entry.Created,
			OwnedBy: "iflow",
			Type: "iflow",
			DisplayName: entry.DisplayName,
			Description: entry.Description,
			Thinking: entry.Thinking,
		})
	}
	return models
}

// AntigravityModelConfig captures static antigravity model overrides, including
// Thinking budget limits and provider max completion tokens.
type AntigravityModelConfig struct {
	Thinking            *ThinkingSupport
	MaxCompletionTokens int
}

// GetAntigravityModelConfig returns static configuration for antigravity models.
// Keys use upstream model names returned by the Antigravity models endpoint.
func GetAntigravityModelConfig() map[string]*AntigravityModelConfig {
	return map[string]*AntigravityModelConfig{
		"gemini-2.5-flash": {Thinking: &ThinkingSupport{Min: 0, Max: 24576, ZeroAllowed: true, DynamicAllowed: true}},
		"gemini-2.5-flash-lite": {Thinking: &ThinkingSupport{Min: 0, Max: 24576, ZeroAllowed: true, DynamicAllowed: true}},
		"rev19-uic3-1p": {Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true}},
		"gemini-3-pro-high": {Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true, Levels: []string{"low", "high"}}},
		"gemini-3-pro-image": {Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true, Levels: []string{"low", "high"}}},
		"gemini-3-flash": {Thinking: &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true, Levels: []string{"minimal", "low", "medium", "high"}}},
		"claude-sonnet-4-5-thinking": {Thinking: &ThinkingSupport{Min: 1024, Max: 128000, ZeroAllowed: true, DynamicAllowed: true}, MaxCompletionTokens: 64000},
		"claude-opus-4-5-thinking": {Thinking: &ThinkingSupport{Min: 1024, Max: 128000, ZeroAllowed: true, DynamicAllowed: true}, MaxCompletionTokens: 64000},
		"claude-sonnet-4-5": {MaxCompletionTokens: 64000},
		"gpt-oss-120b-medium": {},
		"tab_flash_lite_preview": {},
	}
}
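
// Illustrative sketch (hypothetical helper, not part of this commit): clamp a
// requested thinking budget to the static limits declared for an Antigravity
// model. Models without a Thinking entry pass the request through unchanged.
func clampAntigravityThinkingBudget(model string, requested int) int {
	cfg, ok := GetAntigravityModelConfig()[model]
	if !ok || cfg.Thinking == nil {
		return requested
	}
	if requested == 0 && cfg.Thinking.ZeroAllowed {
		return 0
	}
	if requested < cfg.Thinking.Min {
		return cfg.Thinking.Min
	}
	if requested > cfg.Thinking.Max {
		return cfg.Thinking.Max
	}
	return requested
}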