mirror of
https://github.com/router-for-me/CLIProxyAPI.git
synced 2026-02-03 04:50:52 +08:00
feat: Add Amp CLI integration with comprehensive documentation
Add full Amp CLI support to enable routing AI model requests through the proxy
while maintaining Amp-specific features like thread management, user info, and
telemetry. Includes complete documentation and pull bot configuration.
Features:
- Modular architecture with RouteModule interface for clean integration
- Reverse proxy for Amp management routes (thread/user/meta/ads/telemetry)
- Provider-specific route aliases (/api/provider/{provider}/*)
- Secret management with precedence: config > env > file
- 5-minute secret caching to reduce file I/O
- Automatic gzip decompression for responses
- Proper connection cleanup to prevent leaks
- Localhost-only restriction for management routes (configurable)
- CORS protection for management endpoints
Documentation:
- Complete setup guide (USING_WITH_FACTORY_AND_AMP.md)
- OAuth setup for OpenAI (ChatGPT Plus/Pro) and Anthropic (Claude Pro/Max)
- Factory CLI config examples with all model variants
- Amp CLI/IDE configuration examples
- tmux setup for remote server deployment
- Screenshots and diagrams
Configuration:
- Pull bot disabled for this repo (manual rebase workflow)
- Config fields: AmpUpstreamURL, AmpUpstreamAPIKey, AmpRestrictManagementToLocalhost
- Compatible with upstream DisableCooling and other features
Technical details:
- internal/api/modules/amp/: Complete Amp routing module
- sdk/api/httpx/: HTTP utilities for gzip/transport
- 94.6% test coverage with 34 comprehensive test cases
- Clean integration minimizes merge conflict risk
Security:
- Management routes restricted to localhost by default
- Configurable via amp-restrict-management-to-localhost
- Prevents drive-by browser attacks on user data
This provides a production-ready foundation for Amp CLI integration while
maintaining clean separation from upstream code for easy rebasing.
Amp-Thread-ID: https://ampcode.com/threads/T-9e2befc5-f969-41c6-890c-5b779d58cf18
This commit is contained in:
@@ -8,9 +8,12 @@ package claude
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
@@ -19,6 +22,7 @@ import (
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/interfaces"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
|
||||
"github.com/router-for-me/CLIProxyAPI/v6/sdk/api/handlers"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/tidwall/gjson"
|
||||
)
|
||||
|
||||
@@ -153,6 +157,23 @@ func (h *ClaudeCodeAPIHandler) handleNonStreamingResponse(c *gin.Context, rawJSO
|
||||
cliCancel(errMsg.Error)
|
||||
return
|
||||
}
|
||||
|
||||
// Decompress gzipped responses - Claude API sometimes returns gzip without Content-Encoding header
|
||||
// This fixes title generation and other non-streaming responses that arrive compressed
|
||||
if len(resp) >= 2 && resp[0] == 0x1f && resp[1] == 0x8b {
|
||||
gzReader, err := gzip.NewReader(bytes.NewReader(resp))
|
||||
if err != nil {
|
||||
log.Warnf("failed to decompress gzipped Claude response: %v", err)
|
||||
} else {
|
||||
defer gzReader.Close()
|
||||
if decompressed, err := io.ReadAll(gzReader); err != nil {
|
||||
log.Warnf("failed to read decompressed Claude response: %v", err)
|
||||
} else {
|
||||
resp = decompressed
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_, _ = c.Writer.Write(resp)
|
||||
cliCancel()
|
||||
}
|
||||
|
||||
33
sdk/api/httpx/gzip.go
Normal file
33
sdk/api/httpx/gzip.go
Normal file
@@ -0,0 +1,33 @@
|
||||
// Package httpx provides HTTP transport utilities for SDK clients,
|
||||
// including automatic gzip decompression for misconfigured upstreams.
|
||||
package httpx
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
)
|
||||
|
||||
// DecodePossibleGzip inspects the raw response body and transparently
|
||||
// decompresses it when the payload is gzip compressed. Some upstream
|
||||
// providers return gzip data without a Content-Encoding header, which
|
||||
// confuses clients expecting JSON. This helper restores the original
|
||||
// JSON bytes while leaving plain responses untouched.
|
||||
//
|
||||
// This function is preserved for backward compatibility but new code
|
||||
// should use GzipFixupTransport instead.
|
||||
func DecodePossibleGzip(raw []byte) ([]byte, error) {
|
||||
if len(raw) >= 2 && raw[0] == 0x1f && raw[1] == 0x8b {
|
||||
reader, err := gzip.NewReader(bytes.NewReader(raw))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
decompressed, err := io.ReadAll(reader)
|
||||
_ = reader.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return decompressed, nil
|
||||
}
|
||||
return raw, nil
|
||||
}
|
||||
177
sdk/api/httpx/transport.go
Normal file
177
sdk/api/httpx/transport.go
Normal file
@@ -0,0 +1,177 @@
|
||||
package httpx
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// GzipFixupTransport wraps an http.RoundTripper to auto-decode gzip responses
// that don't properly set Content-Encoding header.
//
// Some upstream providers (especially when proxied) return gzip-compressed
// responses without setting the Content-Encoding: gzip header, which causes
// Go's http client to pass the compressed bytes directly to the application.
//
// This transport detects gzip magic bytes and transparently decompresses
// the response while preserving streaming behavior for SSE and chunked responses.
//
// The zero value is ready to use: a nil Base falls back to
// http.DefaultTransport in RoundTrip.
type GzipFixupTransport struct {
	// Base is the underlying transport. If nil, http.DefaultTransport is used.
	Base http.RoundTripper
}
|
||||
|
||||
// RoundTrip implements http.RoundTripper
|
||||
func (t *GzipFixupTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
base := t.Base
|
||||
if base == nil {
|
||||
base = http.DefaultTransport
|
||||
}
|
||||
|
||||
resp, err := base.RoundTrip(req)
|
||||
if err != nil || resp == nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Skip if Go already decompressed it
|
||||
if resp.Uncompressed {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Skip if Content-Encoding is already set (properly configured upstream)
|
||||
if resp.Header.Get("Content-Encoding") != "" {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Skip streaming responses - they need different handling
|
||||
if isStreamingResponse(resp) {
|
||||
// For streaming responses, wrap with a streaming gzip detector
|
||||
// that can handle chunked gzip data
|
||||
resp.Body = &streamingGzipDetector{
|
||||
inner: resp.Body,
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// For non-streaming responses, peek and decompress if needed
|
||||
resp.Body = &gzipDetectingReader{
|
||||
inner: resp.Body,
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// isStreamingResponse checks if response is SSE or chunked
|
||||
func isStreamingResponse(resp *http.Response) bool {
|
||||
contentType := resp.Header.Get("Content-Type")
|
||||
|
||||
// Check for Server-Sent Events
|
||||
if strings.Contains(contentType, "text/event-stream") {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check for chunked transfer encoding
|
||||
if strings.Contains(strings.ToLower(resp.Header.Get("Transfer-Encoding")), "chunked") {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// gzipDetectingReader is an io.ReadCloser that detects gzip magic bytes
// on first read and switches to gzip decompression if detected.
// This is used for non-streaming responses.
type gzipDetectingReader struct {
	inner io.ReadCloser // original response body
	reader io.Reader // effective reader chosen after the first-read sniff (gzip or passthrough)
	once bool // set after the magic-byte sniff has run; subsequent reads skip it
}
|
||||
|
||||
func (g *gzipDetectingReader) Read(p []byte) (int, error) {
|
||||
if !g.once {
|
||||
g.once = true
|
||||
|
||||
// Peek at first 2 bytes to detect gzip magic bytes
|
||||
buf := make([]byte, 2)
|
||||
n, err := io.ReadFull(g.inner, buf)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
// Can't peek, use original reader
|
||||
g.reader = io.MultiReader(bytes.NewReader(buf[:n]), g.inner)
|
||||
return g.reader.Read(p)
|
||||
}
|
||||
|
||||
if n >= 2 && buf[0] == 0x1f && buf[1] == 0x8b {
|
||||
// It's gzipped, create gzip reader
|
||||
multiReader := io.MultiReader(bytes.NewReader(buf[:n]), g.inner)
|
||||
gzipReader, err := gzip.NewReader(multiReader)
|
||||
if err != nil {
|
||||
log.Warnf("gzip header detected but reader creation failed: %v", err)
|
||||
g.reader = multiReader
|
||||
} else {
|
||||
g.reader = gzipReader
|
||||
}
|
||||
} else {
|
||||
// Not gzipped, combine peeked bytes with rest
|
||||
g.reader = io.MultiReader(bytes.NewReader(buf[:n]), g.inner)
|
||||
}
|
||||
}
|
||||
|
||||
return g.reader.Read(p)
|
||||
}
|
||||
|
||||
func (g *gzipDetectingReader) Close() error {
|
||||
if closer, ok := g.reader.(io.Closer); ok {
|
||||
_ = closer.Close()
|
||||
}
|
||||
return g.inner.Close()
|
||||
}
|
||||
|
||||
// streamingGzipDetector is similar to gzipDetectingReader but designed for
// streaming responses. It doesn't buffer; it wraps with a streaming gzip reader.
type streamingGzipDetector struct {
	inner io.ReadCloser // original streaming response body
	reader io.Reader // effective reader after the first-read sniff (gzip or passthrough)
	once bool // set after the magic-byte sniff has run
}
|
||||
|
||||
func (s *streamingGzipDetector) Read(p []byte) (int, error) {
|
||||
if !s.once {
|
||||
s.once = true
|
||||
|
||||
// Peek at first 2 bytes
|
||||
buf := make([]byte, 2)
|
||||
n, err := io.ReadFull(s.inner, buf)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
s.reader = io.MultiReader(bytes.NewReader(buf[:n]), s.inner)
|
||||
return s.reader.Read(p)
|
||||
}
|
||||
|
||||
if n >= 2 && buf[0] == 0x1f && buf[1] == 0x8b {
|
||||
// It's gzipped - wrap with streaming gzip reader
|
||||
multiReader := io.MultiReader(bytes.NewReader(buf[:n]), s.inner)
|
||||
gzipReader, err := gzip.NewReader(multiReader)
|
||||
if err != nil {
|
||||
log.Warnf("streaming gzip header detected but reader creation failed: %v", err)
|
||||
s.reader = multiReader
|
||||
} else {
|
||||
s.reader = gzipReader
|
||||
log.Debug("streaming gzip decompression enabled")
|
||||
}
|
||||
} else {
|
||||
// Not gzipped
|
||||
s.reader = io.MultiReader(bytes.NewReader(buf[:n]), s.inner)
|
||||
}
|
||||
}
|
||||
|
||||
return s.reader.Read(p)
|
||||
}
|
||||
|
||||
func (s *streamingGzipDetector) Close() error {
|
||||
if closer, ok := s.reader.(io.Closer); ok {
|
||||
_ = closer.Close()
|
||||
}
|
||||
return s.inner.Close()
|
||||
}
|
||||
Reference in New Issue
Block a user