feat(runtime): add model alias support and enhance payload rule matching

- Introduced `payloadModelAliases` and `payloadModelCandidates` functions so payload rules can match a request under its model aliases as well as its literal ID (sketched below).
- Updated rule matching logic to handle multiple model candidates.
- Refactored variable naming in executor to improve code clarity and consistency.
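
The `payloadModelAliases` and `payloadModelCandidates` helpers themselves are not part of the hunks shown below. A minimal sketch of how alias-aware candidate matching could look, assuming a simple alias table; the package name, signatures, and example alias are illustrative guesses rather than the commit's actual code:

// Sketch only: assumed shapes for the helpers named in the commit message.
package runtime // package name guessed from the commit scope

// payloadModelAliases returns alternate IDs a payload rule may use for the
// requested model. The alias table here is a placeholder for illustration.
func payloadModelAliases(model string) []string {
	aliases := map[string][]string{
		"gemini-2.5-pro": {"gemini-2.5-pro-latest"}, // hypothetical alias
	}
	return aliases[model]
}

// payloadModelCandidates returns the requested model first, then its aliases,
// so rule matching can try every candidate instead of only the literal ID.
func payloadModelCandidates(model string) []string {
	return append([]string{model}, payloadModelAliases(model)...)
}

// ruleMatchesModel illustrates matching a rule's model pattern against all
// candidates rather than a single name.
func ruleMatchesModel(ruleModel, requested string) bool {
	for _, candidate := range payloadModelCandidates(requested) {
		if candidate == ruleModel {
			return true
		}
	}
	return false
}
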
Author: Luis Pater
Date:   2026-01-17 05:05:24 +08:00
parent 384578a88c
commit bc7167e9fe
2 changed files with 86 additions and 15 deletions


@@ -517,8 +517,8 @@ func (e *AntigravityExecutor) convertStreamToNonStream(stream []byte) []byte {
 	}
 	if usageResult := responseNode.Get("usageMetadata"); usageResult.Exists() {
 		usageRaw = usageResult.Raw
-	} else if usageResult := root.Get("usageMetadata"); usageResult.Exists() {
-		usageRaw = usageResult.Raw
+	} else if usageMetadataResult := root.Get("usageMetadata"); usageMetadataResult.Exists() {
+		usageRaw = usageMetadataResult.Raw
 	}
 	if partsResult := responseNode.Get("candidates.0.content.parts"); partsResult.IsArray() {
@@ -642,7 +642,6 @@ func (e *AntigravityExecutor) ExecuteStream(ctx context.Context, auth *cliproxya
 		err = errReq
 		return nil, err
 	}
-
 	httpResp, errDo := httpClient.Do(httpReq)
 	if errDo != nil {
 		recordAPIResponseError(ctx, e.cfg, errDo)
@@ -1004,10 +1003,10 @@ func FetchAntigravityModels(ctx context.Context, auth *cliproxyauth.Auth, cfg *c
case "chat_20706", "chat_23310", "gemini-2.5-flash-thinking", "gemini-3-pro-low", "gemini-2.5-pro":
continue
}
cfg := modelConfig[modelID]
modelCfg := modelConfig[modelID]
modelName := modelID
if cfg != nil && cfg.Name != "" {
modelName = cfg.Name
if modelCfg != nil && modelCfg.Name != "" {
modelName = modelCfg.Name
}
modelInfo := &registry.ModelInfo{
ID: modelID,
@@ -1021,12 +1020,12 @@ func FetchAntigravityModels(ctx context.Context, auth *cliproxyauth.Auth, cfg *c
 			Type: antigravityAuthType,
 		}
 		// Look up Thinking support from static config using upstream model name.
-		if cfg != nil {
-			if cfg.Thinking != nil {
-				modelInfo.Thinking = cfg.Thinking
+		if modelCfg != nil {
+			if modelCfg.Thinking != nil {
+				modelInfo.Thinking = modelCfg.Thinking
 			}
-			if cfg.MaxCompletionTokens > 0 {
-				modelInfo.MaxCompletionTokens = cfg.MaxCompletionTokens
+			if modelCfg.MaxCompletionTokens > 0 {
+				modelInfo.MaxCompletionTokens = modelCfg.MaxCompletionTokens
 			}
 		}
 		models = append(models, modelInfo)