Mirror of https://github.com/router-for-me/CLIProxyAPI.git (synced 2026-02-03 13:00:52 +08:00)
Refactor Gemini API handlers to standardize response field names and improve model descriptions
@@ -42,20 +42,19 @@ func NewGeminiAPIHandlers(apiHandlers *handlers.APIHandlers) *GeminiAPIHandlers
 // It returns a JSON response containing available Gemini models and their specifications.
 func (h *GeminiAPIHandlers) GeminiModels(c *gin.Context) {
 	c.JSON(http.StatusOK, gin.H{
-		"data": []map[string]any{
+		"models": []map[string]any{
 			{
-				"id": "gemini-2.5-flash",
-				"object": "model",
-				"version": "001",
-				"name": "Gemini 2.5 Flash",
-				"description": "Stable version of Gemini 2.5 Flash, our mid-size multimodal model that supports up to 1 million tokens, released in June of 2025.",
-				"context_length": 1_048_576,
-				"max_completion_tokens": 65_536,
-				"supported_parameters": []string{
-					"tools",
-					"temperature",
-					"top_p",
-					"top_k",
+				"name": "models/gemini-2.5-flash",
+				"version": "001",
+				"displayName": "Gemini 2.5 Flash",
+				"description": "Stable version of Gemini 2.5 Flash, our mid-size multimodal model that supports up to 1 million tokens, released in June of 2025.",
+				"inputTokenLimit": 1048576,
+				"outputTokenLimit": 65536,
+				"supportedGenerationMethods": []string{
+					"generateContent",
+					"countTokens",
+					"createCachedContent",
+					"batchGenerateContent",
 				},
 				"temperature": 1,
 				"topP": 0.95,
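This hunk swaps the OpenAI-style list envelope (a "data" array of entries keyed by "id", "object", "context_length", "max_completion_tokens", and "supported_parameters") for the Gemini API's native shape: a "models" array whose entries carry "name", "version", "displayName", "description", the token limits, and "supportedGenerationMethods". A minimal client-side sketch of decoding the new payload follows; the listen address and the /v1beta/models route are assumptions for illustration, not part of this diff.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// ModelInfo mirrors the fields the refactored handler now emits per model.
type ModelInfo struct {
	Name                       string   `json:"name"`        // e.g. "models/gemini-2.5-flash"
	Version                    string   `json:"version"`     // e.g. "001"
	DisplayName                string   `json:"displayName"` // e.g. "Gemini 2.5 Flash"
	Description                string   `json:"description"`
	InputTokenLimit            int      `json:"inputTokenLimit"`
	OutputTokenLimit           int      `json:"outputTokenLimit"`
	SupportedGenerationMethods []string `json:"supportedGenerationMethods"`
}

func main() {
	// Assumed local address and route; adjust to your deployment.
	resp, err := http.Get("http://localhost:8317/v1beta/models")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The list is now wrapped in "models" rather than the old "data" key.
	var payload struct {
		Models []ModelInfo `json:"models"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		panic(err)
	}
	for _, m := range payload.Models {
		fmt.Printf("%s (%s): in=%d out=%d methods=%v\n",
			m.Name, m.DisplayName, m.InputTokenLimit, m.OutputTokenLimit, m.SupportedGenerationMethods)
	}
}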
@@ -64,18 +63,17 @@ func (h *GeminiAPIHandlers) GeminiModels(c *gin.Context) {
 				"thinking": true,
 			},
 			{
-				"id": "gemini-2.5-pro",
-				"object": "model",
-				"version": "2.5",
-				"name": "Gemini 2.5 Pro",
-				"description": "Stable release (June 17th, 2025) of Gemini 2.5 Pro",
-				"context_length": 1_048_576,
-				"max_completion_tokens": 65_536,
-				"supported_parameters": []string{
-					"tools",
-					"temperature",
-					"top_p",
-					"top_k",
+				"name": "models/gemini-2.5-pro",
+				"version": "2.5",
+				"displayName": "Gemini 2.5 Pro",
+				"description": "Stable release (June 17th, 2025) of Gemini 2.5 Pro",
+				"inputTokenLimit": 1048576,
+				"outputTokenLimit": 65536,
+				"supportedGenerationMethods": []string{
+					"generateContent",
+					"countTokens",
+					"createCachedContent",
+					"batchGenerateContent",
 				},
 				"temperature": 1,
 				"topP": 0.95,
@@ -84,15 +82,14 @@ func (h *GeminiAPIHandlers) GeminiModels(c *gin.Context) {
 				"thinking": true,
 			},
 			{
-				"id": "gpt-5",
-				"object": "model",
-				"version": "gpt-5-2025-08-07",
-				"name": "GPT 5",
-				"description": "Stable version of GPT 5, the best model for coding and agentic tasks across domains.",
-				"context_length": 400_000,
-				"max_completion_tokens": 128_000,
-				"supported_parameters": []string{
-					"tools",
+				"name": "gpt-5",
+				"version": "001",
+				"displayName": "GPT 5",
+				"description": "Stable version of GPT 5, the best model for coding and agentic tasks across domains.",
+				"inputTokenLimit": 400000,
+				"outputTokenLimit": 128000,
+				"supportedGenerationMethods": []string{
+					"generateContent",
 				},
 				"temperature": 1,
 				"topP": 0.95,
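Across all three entries the rename is mechanical, with two value changes worth noting: Gemini model ids gain a "models/" prefix in "name" (gpt-5 stays bare), and gpt-5's "version" moves from the dated "gpt-5-2025-08-07" to "001". The helper below is illustrative only (the commit inlines the literals in each handler; toGeminiEntry and its parameters are invented names) and just collects the old-to-new key mapping in one place.

package main

import "fmt"

// toGeminiEntry shows the key mapping this refactor applies per entry.
func toGeminiEntry(name, version, display, desc string, in, out int, methods []string) map[string]any {
	return map[string]any{
		"name":                       name,    // was "id"; Gemini ids also gain a "models/" prefix
		"version":                    version, // key kept; the old "object": "model" field is dropped
		"displayName":                display, // was "name"
		"description":                desc,    // unchanged
		"inputTokenLimit":            in,      // was "context_length"
		"outputTokenLimit":           out,     // was "max_completion_tokens"
		"supportedGenerationMethods": methods, // was "supported_parameters"; now lists RPC methods, not sampling knobs
	}
}

func main() {
	fmt.Println(toGeminiEntry(
		"models/gemini-2.5-flash", "001", "Gemini 2.5 Flash",
		"Stable version of Gemini 2.5 Flash, our mid-size multimodal model that supports up to 1 million tokens, released in June of 2025.",
		1048576, 65536,
		[]string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
	))
}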
@@ -122,39 +119,38 @@ func (h *GeminiAPIHandlers) GeminiGetHandler(c *gin.Context) {
 	switch request.Action {
 	case "gemini-2.5-pro":
 		c.JSON(http.StatusOK, gin.H{
-			"id": "gemini-2.5-pro",
-			"object": "model",
-			"version": "2.5",
-			"name": "Gemini 2.5 Pro",
-			"description": "Stable release (June 17th, 2025) of Gemini 2.5 Pro",
-			"context_length": 1_048_576,
-			"max_completion_tokens": 65_536,
-			"supported_parameters": []string{
-				"tools",
-				"temperature",
-				"top_p",
-				"top_k",
+			"name": "models/gemini-2.5-pro",
+			"version": "2.5",
+			"displayName": "Gemini 2.5 Pro",
+			"description": "Stable release (June 17th, 2025) of Gemini 2.5 Pro",
+			"inputTokenLimit": 1048576,
+			"outputTokenLimit": 65536,
+			"supportedGenerationMethods": []string{
+				"generateContent",
+				"countTokens",
+				"createCachedContent",
+				"batchGenerateContent",
 			},
 			"temperature": 1,
 			"topP": 0.95,
 			"topK": 64,
 			"maxTemperature": 2,
 			"thinking": true,
-		})
+		},
+		)
 	case "gemini-2.5-flash":
 		c.JSON(http.StatusOK, gin.H{
-			"id": "gemini-2.5-flash",
-			"object": "model",
-			"version": "001",
-			"name": "Gemini 2.5 Flash",
-			"description": "Stable version of Gemini 2.5 Flash, our mid-size multimodal model that supports up to 1 million tokens, released in June of 2025.",
-			"context_length": 1_048_576,
-			"max_completion_tokens": 65_536,
-			"supported_parameters": []string{
-				"tools",
-				"temperature",
-				"top_p",
-				"top_k",
+			"name": "models/gemini-2.5-flash",
+			"version": "001",
+			"displayName": "Gemini 2.5 Flash",
+			"description": "Stable version of Gemini 2.5 Flash, our mid-size multimodal model that supports up to 1 million tokens, released in June of 2025.",
+			"inputTokenLimit": 1048576,
+			"outputTokenLimit": 65536,
+			"supportedGenerationMethods": []string{
+				"generateContent",
+				"countTokens",
+				"createCachedContent",
+				"batchGenerateContent",
 			},
 			"temperature": 1,
 			"topP": 0.95,
@@ -164,15 +160,14 @@ func (h *GeminiAPIHandlers) GeminiGetHandler(c *gin.Context) {
 		})
 	case "gpt-5":
 		c.JSON(http.StatusOK, gin.H{
-			"id": "gpt-5",
-			"object": "model",
-			"version": "gpt-5-2025-08-07",
-			"name": "GPT 5",
-			"description": "Stable version of GPT 5, the best model for coding and agentic tasks across domains.",
-			"context_length": 400_000,
-			"max_completion_tokens": 128_000,
-			"supported_parameters": []string{
-				"tools",
+			"name": "gpt-5",
+			"version": "001",
+			"displayName": "GPT 5",
+			"description": "Stable version of GPT 5, the best model for coding and agentic tasks across domains.",
+			"inputTokenLimit": 400000,
+			"outputTokenLimit": 128000,
+			"supportedGenerationMethods": []string{
+				"generateContent",
 			},
 			"temperature": 1,
 			"topP": 0.95,
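GeminiGetHandler repeats per case the same literals that GeminiModels emits in its list, which is why the rename had to be applied in both handlers. One way to keep a single source of truth is sketched below; the route shape, port, and getModel helper are assumptions for illustration, not the project's actual wiring, and the table entries are abbreviated to the fields shown renamed above.

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// modelTable keys the per-model metadata by the short id the switch
// matches on, so the list and get handlers could share one definition.
// Entries are abbreviated; the full field set appears in the hunks above.
var modelTable = map[string]gin.H{
	"gemini-2.5-pro": {
		"name":        "models/gemini-2.5-pro",
		"version":     "2.5",
		"displayName": "Gemini 2.5 Pro",
	},
	"gemini-2.5-flash": {
		"name":        "models/gemini-2.5-flash",
		"version":     "001",
		"displayName": "Gemini 2.5 Flash",
	},
	"gpt-5": {
		"name":        "gpt-5",
		"version":     "001",
		"displayName": "GPT 5",
	},
}

func getModel(c *gin.Context) {
	if m, ok := modelTable[c.Param("model")]; ok {
		c.JSON(http.StatusOK, m)
		return
	}
	c.JSON(http.StatusNotFound, gin.H{
		"error": gin.H{"message": "model not found"},
	})
}

func main() {
	r := gin.Default()
	r.GET("/v1beta/models/:model", getModel) // assumed route shape
	_ = r.Run(":8317")                       // assumed listen address
}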