Refactor error handling and variable declarations in browser and logging modules

- Simplified variable initialization in `browser.go` for readability.
- Updated error handling in `request_logger.go`, using deferred anonymous functions for more reliable resource cleanup (a sketch of the pattern follows).
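A minimal sketch of that deferred-cleanup pattern, assuming the logger appends to a file; the `RequestLogger` type, its `path` field, and `writeEntry` are illustrative stand-ins, not the actual API in `request_logger.go`:

```go
package logging

import "os"

// RequestLogger is a stand-in type for illustration only.
type RequestLogger struct {
	path string
}

// writeEntry appends one log entry. The deferred anonymous function closes
// the file on every exit path and surfaces a Close error instead of
// silently dropping it when the write itself succeeded.
func (l *RequestLogger) writeEntry(entry []byte) (err error) {
	f, err := os.OpenFile(l.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	defer func() {
		if closeErr := f.Close(); closeErr != nil && err == nil {
			err = closeErr
		}
	}()
	_, err = f.Write(entry)
	return err
}
```

Capturing the `Close` error in the named return is the usual motivation for this pattern: without it, a failed close after a successful write would go unreported.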

Refactor API handlers to use `GetContextWithCancel` for streamlined context creation and response handling

- Replaced the redundant `context.WithCancel` and `context.WithValue` boilerplate with the new `GetContextWithCancel` utility in all handlers (see the sketch after this list).
- Centralized API response storage on the request context at cancellation time.
- Updated the associated cancellation calls for consistency and tighter resource management.
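Judging from the call sites in the diff below, `GetContextWithCancel` plausibly has a shape like the following; the signature and internals are inferred from usage, not copied from the repository:

```go
package handlers

import (
	"context"

	"github.com/gin-gonic/gin"
)

// OpenAIAPIHandlers is a stub; the real type lives in handlers.go.
type OpenAIAPIHandlers struct{}

// GetContextWithCancel bundles the context.WithValue("gin", c) and
// context.WithCancel calls the handlers used to repeat, and returns a
// cancel function that can record the final API response (or error) on
// the gin context before cancelling the backend request.
func (h *OpenAIAPIHandlers) GetContextWithCancel(c *gin.Context, ctx context.Context) (context.Context, func(params ...interface{})) {
	cliCtx, cancel := context.WithCancel(context.WithValue(ctx, "gin", c))
	return cliCtx, func(params ...interface{}) {
		if len(params) > 0 {
			// Mirrors the removed c.Set("API_RESPONSE", ...) calls.
			c.Set("API_RESPONSE", params[0])
		}
		cancel()
	}
}
```

Under this reading, `cliCancel()` simply cancels the backend request, while `cliCancel(resp)` or `cliCancel(err.Error)` also records the payload for the request logger, matching the calls in the handlers below.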

- Replaced the `apiResponseData` bookkeeping with `AddAPIResponseData` for centralized response recording (see the sketch after this list).
- Simplified cancellation logic: `cliCancel` now accepts the final response or error directly, replacing the separate `c.Set("API_RESPONSE", ...)` calls.
- Removed the now-redundant `apiResponseData` slices across handlers to reduce per-request allocations.
- Updated `handlers.go` to store unified response data on each request context.
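A sketch of what the accumulator could look like, reusing the illustrative `OpenAIAPIHandlers` stub from the previous sketch; the `"API_RESPONSE"` key mirrors the removed `c.Set` calls, but the real storage layout is an assumption:

```go
package handlers

import "github.com/gin-gonic/gin"

// OpenAIAPIHandlers is the same illustrative stub as in the sketch above.
type OpenAIAPIHandlers struct{}

// AddAPIResponseData appends a streamed chunk to the response buffer kept
// on the request's gin context, replacing the local apiResponseData slices
// the handlers used to maintain.
func (h *OpenAIAPIHandlers) AddAPIResponseData(c *gin.Context, chunk []byte) {
	if v, ok := c.Get("API_RESPONSE"); ok {
		if data, ok := v.([]byte); ok {
			c.Set("API_RESPONSE", append(data, chunk...))
			return
		}
	}
	c.Set("API_RESPONSE", append([]byte(nil), chunk...))
}
```
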
Author: Luis Pater
Date: 2025-08-17 18:34:15 +08:00
Parent: 6bbdf67f96
Commit: c5cc238308
7 changed files with 115 additions and 118 deletions


@@ -160,8 +160,7 @@ func (h *OpenAIAPIHandlers) handleGeminiNonStreamingResponse(c *gin.Context, raw
c.Header("Content-Type", "application/json")
modelName, systemInstruction, contents, tools := translatorOpenAIToGeminiCli.ConvertOpenAIChatRequestToCli(rawJSON)
-backgroundCtx, cliCancel := context.WithCancel(context.Background())
-cliCtx := context.WithValue(backgroundCtx, "gin", c)
+cliCtx, cliCancel := h.GetContextWithCancel(c, context.Background())
var cliClient client.Client
defer func() {
@@ -195,8 +194,7 @@ func (h *OpenAIAPIHandlers) handleGeminiNonStreamingResponse(c *gin.Context, raw
} else {
c.Status(err.StatusCode)
_, _ = c.Writer.Write([]byte(err.Error.Error()))
c.Set("API_RESPONSE", []byte(err.Error.Error()))
cliCancel()
cliCancel(err.Error)
}
break
} else {
@@ -204,8 +202,7 @@ func (h *OpenAIAPIHandlers) handleGeminiNonStreamingResponse(c *gin.Context, raw
if openAIFormat != "" {
_, _ = c.Writer.Write([]byte(openAIFormat))
}
c.Set("API_RESPONSE", resp)
cliCancel()
cliCancel(resp)
break
}
}
@@ -238,8 +235,7 @@ func (h *OpenAIAPIHandlers) handleGeminiStreamingResponse(c *gin.Context, rawJSO
// Prepare the request for the backend client.
modelName, systemInstruction, contents, tools := translatorOpenAIToGeminiCli.ConvertOpenAIChatRequestToCli(rawJSON)
-backgroundCtx, cliCancel := context.WithCancel(context.Background())
-cliCtx := context.WithValue(backgroundCtx, "gin", c)
+cliCtx, cliCancel := h.GetContextWithCancel(c, context.Background())
var cliClient client.Client
defer func() {
@@ -270,7 +266,6 @@ outLoop:
}
// Send the message and receive response chunks and errors via channels.
respChan, errChan := cliClient.SendMessageStream(cliCtx, rawJSON, modelName, systemInstruction, contents, tools)
-apiResponseData := make([]byte, 0)
hasFirstResponse := false
for {
@@ -279,7 +274,6 @@ outLoop:
case <-c.Request.Context().Done():
if c.Request.Context().Err().Error() == "context canceled" {
log.Debugf("GeminiClient disconnected: %v", c.Request.Context().Err())
c.Set("API_RESPONSE", apiResponseData)
cliCancel() // Cancel the backend request.
return
}
@@ -289,11 +283,12 @@ outLoop:
// Stream is closed, send the final [DONE] message.
_, _ = fmt.Fprintf(c.Writer, "data: [DONE]\n\n")
flusher.Flush()
c.Set("API_RESPONSE", apiResponseData)
cliCancel()
return
}
-apiResponseData = append(apiResponseData, chunk...)
+h.AddAPIResponseData(c, chunk)
// Convert the chunk to OpenAI format and send it to the client.
hasFirstResponse = true
openAIFormat := translatorOpenAIToGeminiCli.ConvertCliResponseToOpenAIChat(chunk, time.Now().Unix(), isGlAPIKey)
@@ -310,8 +305,7 @@ outLoop:
c.Status(err.StatusCode)
_, _ = fmt.Fprint(c.Writer, err.Error.Error())
flusher.Flush()
c.Set("API_RESPONSE", []byte(err.Error.Error()))
cliCancel()
cliCancel(err.Error)
}
return
}
@@ -338,8 +332,8 @@ func (h *OpenAIAPIHandlers) handleCodexNonStreamingResponse(c *gin.Context, rawJ
newRequestJSON := translatorOpenAIToCodex.ConvertOpenAIChatRequestToCodex(rawJSON)
modelName := gjson.GetBytes(rawJSON, "model")
-backgroundCtx, cliCancel := context.WithCancel(context.Background())
-cliCtx := context.WithValue(backgroundCtx, "gin", c)
+cliCtx, cliCancel := h.GetContextWithCancel(c, context.Background())
var cliClient client.Client
defer func() {
@@ -363,25 +357,24 @@ outLoop:
// Send the message and receive response chunks and errors via channels.
respChan, errChan := cliClient.SendRawMessageStream(cliCtx, []byte(newRequestJSON), "")
-apiResponseData := make([]byte, 0)
for {
select {
// Handle client disconnection.
case <-c.Request.Context().Done():
if c.Request.Context().Err().Error() == "context canceled" {
log.Debugf("CodexClient disconnected: %v", c.Request.Context().Err())
c.Set("API_RESPONSE", apiResponseData)
cliCancel() // Cancel the backend request.
return
}
// Process incoming response chunks.
case chunk, okStream := <-respChan:
if !okStream {
c.Set("API_RESPONSE", apiResponseData)
cliCancel()
return
}
-apiResponseData = append(apiResponseData, chunk...)
+h.AddAPIResponseData(c, chunk)
if bytes.HasPrefix(chunk, []byte("data: ")) {
jsonData := chunk[6:]
data := gjson.ParseBytes(jsonData)
@@ -400,8 +393,7 @@ outLoop:
} else {
c.Status(err.StatusCode)
_, _ = c.Writer.Write([]byte(err.Error.Error()))
c.Set("API_RESPONSE", []byte(err.Error.Error()))
cliCancel()
cliCancel(err.Error)
}
return
}
@@ -443,8 +435,7 @@ func (h *OpenAIAPIHandlers) handleCodexStreamingResponse(c *gin.Context, rawJSON
modelName := gjson.GetBytes(rawJSON, "model")
-backgroundCtx, cliCancel := context.WithCancel(context.Background())
-cliCtx := context.WithValue(backgroundCtx, "gin", c)
+cliCtx, cliCancel := h.GetContextWithCancel(c, context.Background())
var cliClient client.Client
defer func() {
@@ -471,14 +462,12 @@ outLoop:
// Send the message and receive response chunks and errors via channels.
var params *translatorOpenAIToCodex.ConvertCliToOpenAIParams
respChan, errChan := cliClient.SendRawMessageStream(cliCtx, []byte(newRequestJSON), "")
-apiResponseData := make([]byte, 0)
for {
select {
// Handle client disconnection.
case <-c.Request.Context().Done():
if c.Request.Context().Err().Error() == "context canceled" {
log.Debugf("CodexClient disconnected: %v", c.Request.Context().Err())
c.Set("API_RESPONSE", apiResponseData)
cliCancel() // Cancel the backend request.
return
}
@@ -487,11 +476,12 @@ outLoop:
if !okStream {
_, _ = c.Writer.Write([]byte("[done]\n\n"))
flusher.Flush()
c.Set("API_RESPONSE", apiResponseData)
cliCancel()
return
}
-apiResponseData = append(apiResponseData, chunk...)
+h.AddAPIResponseData(c, chunk)
// log.Debugf("Response: %s\n", string(chunk))
// Convert the chunk to OpenAI format and send it to the client.
if bytes.HasPrefix(chunk, []byte("data: ")) {
@@ -518,9 +508,8 @@ outLoop:
} else {
c.Status(err.StatusCode)
_, _ = fmt.Fprint(c.Writer, err.Error.Error())
c.Set("API_RESPONSE", []byte(err.Error.Error()))
flusher.Flush()
-cliCancel()
+cliCancel(err.Error)
}
return
}