Add reverse mappings for original tool names and improve error logging

- Introduced reverse-mapping logic for tool names in the translators so that shortened names can be restored to their originals (a sketch of the shorten/restore idea follows the commit metadata below).
- Enhanced error handling by logging API response errors consistently across handlers.
- Refactored the request and response loggers to include API error details, improving debugging.
- Integrated tool-name shortening and uniqueness mechanisms for OpenAI, Gemini, and Claude requests.
- Improved handler retry logic so the final error is captured and reported to the client.
Luis Pater
2025-09-04 02:39:56 +08:00
parent 7209fa233f
commit ad943b2d4d
14 changed files with 644 additions and 25 deletions
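
The translator changes that implement the shorten/restore behaviour live in files whose hunks are not reproduced below. The following is only a minimal sketch of the idea, with hypothetical names (`toolNameMapper`, `Shorten`, `Restore`), not the code from this commit:

```go
// Hypothetical illustration of the shorten/restore idea: when an upstream API
// limits tool-name length, the translator shortens the name, keeps a reverse
// mapping, and restores the original name when translating the response back.
package main

import "fmt"

type toolNameMapper struct {
	maxLen  int
	toShort map[string]string // original -> shortened
	toOrig  map[string]string // shortened -> original
}

func newToolNameMapper(maxLen int) *toolNameMapper {
	return &toolNameMapper{maxLen: maxLen, toShort: map[string]string{}, toOrig: map[string]string{}}
}

// Shorten truncates a tool name that exceeds maxLen, appends a numeric suffix
// until the result is unique, and records the reverse mapping.
func (m *toolNameMapper) Shorten(name string) string {
	if len(name) <= m.maxLen {
		return name
	}
	if short, ok := m.toShort[name]; ok {
		return short
	}
	base := name[:m.maxLen]
	short := base
	for i := 1; ; i++ {
		if _, taken := m.toOrig[short]; !taken {
			break
		}
		suffix := fmt.Sprintf("_%d", i)
		short = base[:m.maxLen-len(suffix)] + suffix
	}
	m.toShort[name] = short
	m.toOrig[short] = name
	return short
}

// Restore returns the original tool name for a shortened one, if known.
func (m *toolNameMapper) Restore(short string) string {
	if orig, ok := m.toOrig[short]; ok {
		return orig
	}
	return short
}
```

The key point is that uniqueness is enforced on the shortened side and the reverse map is consulted when translating responses back, so callers always see the tool name they originally registered.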

View File

@@ -139,12 +139,13 @@ func (h *ClaudeCodeAPIHandler) handleStreamingResponse(c *gin.Context, rawJSON [
}
}
}()
var errorResponse *interfaces.ErrorMessage
retryCount := 0
// Main client rotation loop with quota management
// This loop implements a sophisticated load balancing and failover mechanism
outLoop:
for retryCount <= h.Cfg.RequestRetry {
var errorResponse *interfaces.ErrorMessage
cliClient, errorResponse = h.GetClient(modelName)
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
@@ -185,6 +186,8 @@ outLoop:
// This manages various error conditions and implements retry logic
case errInfo, okError := <-errChan:
if okError {
errorResponse = errInfo
h.LoggingAPIResponseError(cliCtx, errInfo)
// Special handling for quota exceeded errors
// If configured, attempt to switch to a different project/client
switch errInfo.StatusCode {
@@ -221,4 +224,12 @@ outLoop:
}
}
}
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
_, _ = fmt.Fprint(c.Writer, errorResponse.Error.Error())
flusher.Flush()
cliCancel(errorResponse.Error)
return
}
}
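
The hunk above, and the matching hunks in the handlers below, all make the same change: `errorResponse` is hoisted out of the retry loop, every backend failure is passed to `LoggingAPIResponseError`, and once retries are exhausted the last error is written to the client instead of being silently dropped. A condensed, illustrative sketch of that control flow (not the handler code itself):

```go
// retrySketch condenses the control flow the handlers now share: keep the last
// error outside the loop, log every failure, and surface the final error to the
// caller only after all retries are spent. Names are illustrative.
func retrySketch(maxRetry int, send func() error) error {
	var lastErr error
	for attempt := 0; attempt <= maxRetry; attempt++ {
		if err := send(); err != nil {
			lastErr = err // remember the most recent failure
			// a real handler would also log it here (LoggingAPIResponseError)
			continue // rotate to the next client and retry
		}
		return nil // success: nothing left to report
	}
	return lastErr // retries exhausted: report the last error to the client
}
```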

View File

@@ -169,10 +169,10 @@ func (h *GeminiCLIAPIHandler) handleInternalStreamGenerateContent(c *gin.Context
}
}()
var errorResponse *interfaces.ErrorMessage
retryCount := 0
outLoop:
for retryCount <= h.Cfg.RequestRetry {
var errorResponse *interfaces.ErrorMessage
cliClient, errorResponse = h.GetClient(modelName)
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
@@ -208,6 +208,9 @@ outLoop:
// Handle errors from the backend.
case err, okError := <-errChan:
if okError {
errorResponse = err
h.LoggingAPIResponseError(cliCtx, err)
switch err.StatusCode {
case 429:
if h.Cfg.QuotaExceeded.SwitchProject {
@@ -232,6 +235,13 @@ outLoop:
}
}
}
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
_, _ = fmt.Fprint(c.Writer, errorResponse.Error.Error())
flusher.Flush()
cliCancel(errorResponse.Error)
return
}
}
// handleInternalGenerateContent handles non-streaming content generation requests.
@@ -252,9 +262,9 @@ func (h *GeminiCLIAPIHandler) handleInternalGenerateContent(c *gin.Context, rawJ
}
}()
var errorResponse *interfaces.ErrorMessage
retryCount := 0
for retryCount <= h.Cfg.RequestRetry {
var errorResponse *interfaces.ErrorMessage
cliClient, errorResponse = h.GetClient(modelName)
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
@@ -265,6 +275,9 @@ func (h *GeminiCLIAPIHandler) handleInternalGenerateContent(c *gin.Context, rawJ
resp, err := cliClient.SendRawMessage(cliCtx, modelName, rawJSON, "")
if err != nil {
errorResponse = err
h.LoggingAPIResponseError(cliCtx, err)
switch err.StatusCode {
case 429:
if h.Cfg.QuotaExceeded.SwitchProject {
@@ -296,4 +309,11 @@ func (h *GeminiCLIAPIHandler) handleInternalGenerateContent(c *gin.Context, rawJ
break
}
}
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
_, _ = c.Writer.Write([]byte(errorResponse.Error.Error()))
cliCancel(errorResponse.Error)
return
}
}

View File

@@ -221,10 +221,10 @@ func (h *GeminiAPIHandler) handleStreamGenerateContent(c *gin.Context, modelName
}
}()
var errorResponse *interfaces.ErrorMessage
retryCount := 0
outLoop:
for retryCount <= h.Cfg.RequestRetry {
var errorResponse *interfaces.ErrorMessage
cliClient, errorResponse = h.GetClient(modelName)
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
@@ -263,6 +263,9 @@ outLoop:
// Handle errors from the backend.
case err, okError := <-errChan:
if okError {
errorResponse = err
h.LoggingAPIResponseError(cliCtx, err)
switch err.StatusCode {
case 429:
if h.Cfg.QuotaExceeded.SwitchProject {
@@ -287,6 +290,13 @@ outLoop:
}
}
}
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
_, _ = fmt.Fprint(c.Writer, errorResponse.Error.Error())
flusher.Flush()
cliCancel(errorResponse.Error)
return
}
}
// handleCountTokens handles token counting requests for Gemini models.
@@ -365,9 +375,9 @@ func (h *GeminiAPIHandler) handleGenerateContent(c *gin.Context, modelName strin
}
}()
var errorResponse *interfaces.ErrorMessage
retryCount := 0
for retryCount <= h.Cfg.RequestRetry {
var errorResponse *interfaces.ErrorMessage
cliClient, errorResponse = h.GetClient(modelName)
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
@@ -378,6 +388,9 @@ func (h *GeminiAPIHandler) handleGenerateContent(c *gin.Context, modelName strin
resp, err := cliClient.SendRawMessage(cliCtx, modelName, rawJSON, alt)
if err != nil {
errorResponse = err
h.LoggingAPIResponseError(cliCtx, err)
switch err.StatusCode {
case 429:
if h.Cfg.QuotaExceeded.SwitchProject {
@@ -409,4 +422,10 @@ func (h *GeminiAPIHandler) handleGenerateContent(c *gin.Context, modelName strin
break
}
}
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
_, _ = c.Writer.Write([]byte(errorResponse.Error.Error()))
cliCancel(errorResponse.Error)
return
}
}

View File

@@ -235,6 +235,22 @@ func (h *BaseAPIHandler) GetContextWithCancel(handler interfaces.APIHandler, c *
}
}
func (h *BaseAPIHandler) LoggingAPIResponseError(ctx context.Context, err *interfaces.ErrorMessage) {
	if h.Cfg.RequestLog {
		if ginContext, ok := ctx.Value("gin").(*gin.Context); ok {
			if apiResponseErrors, isExist := ginContext.Get("API_RESPONSE_ERROR"); isExist {
				if slicesAPIResponseError, isOk := apiResponseErrors.([]*interfaces.ErrorMessage); isOk {
					slicesAPIResponseError = append(slicesAPIResponseError, err)
					ginContext.Set("API_RESPONSE_ERROR", slicesAPIResponseError)
				}
			} else {
				// Create new response data entry
				ginContext.Set("API_RESPONSE_ERROR", []*interfaces.ErrorMessage{err})
			}
		}
	}
}
// APIHandlerCancelFunc is a function type for canceling an API handler's context.
// It can optionally accept parameters, which are used for logging the response.
type APIHandlerCancelFunc func(params ...interface{})
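
`LoggingAPIResponseError` only accumulates errors under the `API_RESPONSE_ERROR` key on the gin context; the request/response loggers changed elsewhere in this commit are what read them back when writing the log entry. A hypothetical reader, shown only to make that contract concrete (the function name and placement are assumptions, not code from this commit):

```go
// drainAPIResponseErrors is a hypothetical counterpart to LoggingAPIResponseError:
// it shows how a request logger could read back the errors accumulated under the
// "API_RESPONSE_ERROR" key before emitting its log entry.
func drainAPIResponseErrors(ginContext *gin.Context) []*interfaces.ErrorMessage {
	raw, exists := ginContext.Get("API_RESPONSE_ERROR")
	if !exists {
		return nil
	}
	if errs, ok := raw.([]*interfaces.ErrorMessage); ok {
		return errs
	}
	return nil
}
```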

View File

@@ -387,9 +387,9 @@ func (h *OpenAIAPIHandler) handleNonStreamingResponse(c *gin.Context, rawJSON []
}
}()
var errorResponse *interfaces.ErrorMessage
retryCount := 0
for retryCount <= h.Cfg.RequestRetry {
var errorResponse *interfaces.ErrorMessage
cliClient, errorResponse = h.GetClient(modelName)
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
@@ -400,6 +400,9 @@ func (h *OpenAIAPIHandler) handleNonStreamingResponse(c *gin.Context, rawJSON []
resp, err := cliClient.SendRawMessage(cliCtx, modelName, rawJSON, "")
if err != nil {
errorResponse = err
h.LoggingAPIResponseError(cliCtx, err)
switch err.StatusCode {
case 429:
if h.Cfg.QuotaExceeded.SwitchProject {
@@ -431,6 +434,12 @@ func (h *OpenAIAPIHandler) handleNonStreamingResponse(c *gin.Context, rawJSON []
break
}
}
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
_, _ = c.Writer.Write([]byte(errorResponse.Error.Error()))
cliCancel(errorResponse.Error)
return
}
}
// handleStreamingResponse handles streaming responses for Gemini models.
@@ -471,10 +480,10 @@ func (h *OpenAIAPIHandler) handleStreamingResponse(c *gin.Context, rawJSON []byt
}
}()
var errorResponse *interfaces.ErrorMessage
retryCount := 0
outLoop:
for retryCount <= h.Cfg.RequestRetry {
var errorResponse *interfaces.ErrorMessage
cliClient, errorResponse = h.GetClient(modelName)
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
@@ -511,6 +520,9 @@ outLoop:
// Handle errors from the backend.
case err, okError := <-errChan:
if okError {
errorResponse = err
h.LoggingAPIResponseError(cliCtx, err)
switch err.StatusCode {
case 429:
if h.Cfg.QuotaExceeded.SwitchProject {
@@ -535,6 +547,13 @@ outLoop:
}
}
}
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
_, _ = fmt.Fprint(c.Writer, errorResponse.Error.Error())
flusher.Flush()
cliCancel(errorResponse.Error)
return
}
}
// handleCompletionsNonStreamingResponse handles non-streaming completions responses.
@@ -562,9 +581,9 @@ func (h *OpenAIAPIHandler) handleCompletionsNonStreamingResponse(c *gin.Context,
}
}()
var errorResponse *interfaces.ErrorMessage
retryCount := 0
for retryCount <= h.Cfg.RequestRetry {
var errorResponse *interfaces.ErrorMessage
cliClient, errorResponse = h.GetClient(modelName)
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
@@ -576,6 +595,9 @@ func (h *OpenAIAPIHandler) handleCompletionsNonStreamingResponse(c *gin.Context,
// Send the converted chat completions request
resp, err := cliClient.SendRawMessage(cliCtx, modelName, chatCompletionsJSON, "")
if err != nil {
errorResponse = err
h.LoggingAPIResponseError(cliCtx, err)
switch err.StatusCode {
case 429:
if h.Cfg.QuotaExceeded.SwitchProject {
@@ -601,6 +623,13 @@ func (h *OpenAIAPIHandler) handleCompletionsNonStreamingResponse(c *gin.Context,
break
}
}
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
_, _ = c.Writer.Write([]byte(errorResponse.Error.Error()))
cliCancel(errorResponse.Error)
return
}
}
// handleCompletionsStreamingResponse handles streaming completions responses.
@@ -644,10 +673,10 @@ func (h *OpenAIAPIHandler) handleCompletionsStreamingResponse(c *gin.Context, ra
}
}()
var errorResponse *interfaces.ErrorMessage
retryCount := 0
outLoop:
for retryCount <= h.Cfg.RequestRetry {
var errorResponse *interfaces.ErrorMessage
cliClient, errorResponse = h.GetClient(modelName)
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
@@ -689,6 +718,9 @@ outLoop:
// Handle errors from the backend.
case err, okError := <-errChan:
if okError {
errorResponse = err
h.LoggingAPIResponseError(cliCtx, err)
switch err.StatusCode {
case 429:
if h.Cfg.QuotaExceeded.SwitchProject {
@@ -713,4 +745,11 @@ outLoop:
}
}
}
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
_, _ = fmt.Fprint(c.Writer, errorResponse.Error.Error())
flusher.Flush()
cliCancel(errorResponse.Error)
return
}
}

View File

@@ -115,9 +115,9 @@ func (h *OpenAIResponsesAPIHandler) handleNonStreamingResponse(c *gin.Context, r
}
}()
var errorResponse *interfaces.ErrorMessage
retryCount := 0
for retryCount <= h.Cfg.RequestRetry {
var errorResponse *interfaces.ErrorMessage
cliClient, errorResponse = h.GetClient(modelName)
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
@@ -128,6 +128,9 @@ func (h *OpenAIResponsesAPIHandler) handleNonStreamingResponse(c *gin.Context, r
resp, err := cliClient.SendRawMessage(cliCtx, modelName, rawJSON, "")
if err != nil {
errorResponse = err
h.LoggingAPIResponseError(cliCtx, err)
switch err.StatusCode {
case 429:
if h.Cfg.QuotaExceeded.SwitchProject {
@@ -159,6 +162,13 @@ func (h *OpenAIResponsesAPIHandler) handleNonStreamingResponse(c *gin.Context, r
break
}
}
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
_, _ = c.Writer.Write([]byte(errorResponse.Error.Error()))
cliCancel(errorResponse.Error)
return
}
}
// handleStreamingResponse handles streaming responses for Gemini models.
@@ -199,10 +209,10 @@ func (h *OpenAIResponsesAPIHandler) handleStreamingResponse(c *gin.Context, rawJ
}
}()
var errorResponse *interfaces.ErrorMessage
retryCount := 0
outLoop:
for retryCount <= h.Cfg.RequestRetry {
var errorResponse *interfaces.ErrorMessage
cliClient, errorResponse = h.GetClient(modelName)
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
@@ -238,6 +248,8 @@ outLoop:
// Handle errors from the backend.
case err, okError := <-errChan:
if okError {
errorResponse = err
h.LoggingAPIResponseError(cliCtx, err)
switch err.StatusCode {
case 429:
if h.Cfg.QuotaExceeded.SwitchProject {
@@ -262,4 +274,12 @@ outLoop:
}
}
}
if errorResponse != nil {
c.Status(errorResponse.StatusCode)
_, _ = fmt.Fprint(c.Writer, errorResponse.Error.Error())
flusher.Flush()
cliCancel(errorResponse.Error)
return
}
}