Mirror of https://github.com/router-for-me/CLIProxyAPI.git (synced 2026-02-15 10:50:51 +08:00)

Compare commits (3 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 352a67857b | |
| | 644a3ad220 | |
| | 19c32f58b2 | |
```diff
@@ -24,10 +24,10 @@ api-keys:
 debug: false
 
 # When true, write application logs to rotating files instead of stdout
-logging-to-file: true
+logging-to-file: false
 
 # When false, disable in-memory usage statistics aggregation
-usage-statistics-enabled: true
+usage-statistics-enabled: false
 
 # Proxy URL. Supports socks5/http/https protocols. Example: socks5://user:pass@192.168.1.1:1080/
 proxy-url: ""
```
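For context, the two flags flipped in this hunk map straightforwardly onto a Go struct when the config is unmarshalled. The sketch below is illustrative only: the struct and field names are assumptions, not CLIProxyAPI's actual config types.

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v3"
)

// Config is a hypothetical mirror of the keys touched above; the real
// CLIProxyAPI config struct may differ in naming and shape.
type Config struct {
	Debug                  bool   `yaml:"debug"`
	LoggingToFile          bool   `yaml:"logging-to-file"`
	UsageStatisticsEnabled bool   `yaml:"usage-statistics-enabled"`
	ProxyURL               string `yaml:"proxy-url"`
}

func main() {
	raw, err := os.ReadFile("config.yaml")
	if err != nil {
		panic(err)
	}
	var cfg Config
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("logging-to-file=%v usage-statistics-enabled=%v\n",
		cfg.LoggingToFile, cfg.UsageStatisticsEnabled)
}
```

With the new example values both booleans are false, which also matches Go's zero value, so omitting either key would behave the same as the updated example (assuming no extra defaulting in the loader).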
```diff
@@ -41,48 +41,48 @@ quota-exceeded:
   switch-preview-model: true # Whether to automatically switch to a preview model when a quota is exceeded
 
 # API keys for official Generative Language API
-generative-language-api-key:
-  - "AIzaSy...01"
-  - "AIzaSy...02"
-  - "AIzaSy...03"
-  - "AIzaSy...04"
+#generative-language-api-key:
+#  - "AIzaSy...01"
+#  - "AIzaSy...02"
+#  - "AIzaSy...03"
+#  - "AIzaSy...04"
 
 # Codex API keys
-codex-api-key:
-  - api-key: "sk-atSM..."
-    base-url: "https://www.example.com" # use the custom codex API endpoint
+#codex-api-key:
+#  - api-key: "sk-atSM..."
+#    base-url: "https://www.example.com" # use the custom codex API endpoint
 
 # Claude API keys
-claude-api-key:
-  - api-key: "sk-atSM..." # use the official claude API key, no need to set the base url
-  - api-key: "sk-atSM..."
-    base-url: "https://www.example.com" # use the custom claude API endpoint
+#claude-api-key:
+#  - api-key: "sk-atSM..." # use the official claude API key, no need to set the base url
+#  - api-key: "sk-atSM..."
+#    base-url: "https://www.example.com" # use the custom claude API endpoint
 
 # OpenAI compatibility providers
-openai-compatibility:
-  - name: "openrouter" # The name of the provider; it will be used in the user agent and other places.
-    base-url: "https://openrouter.ai/api/v1" # The base URL of the provider.
-    api-keys: # The API keys for the provider. Add multiple keys if needed. Omit if unauthenticated access is allowed.
-      - "sk-or-v1-...b780"
-      - "sk-or-v1-...b781"
-    models: # The models supported by the provider.
-      - name: "moonshotai/kimi-k2:free" # The actual model name.
-        alias: "kimi-k2" # The alias used in the API.
+#openai-compatibility:
+#  - name: "openrouter" # The name of the provider; it will be used in the user agent and other places.
+#    base-url: "https://openrouter.ai/api/v1" # The base URL of the provider.
+#    api-keys: # The API keys for the provider. Add multiple keys if needed. Omit if unauthenticated access is allowed.
+#      - "sk-or-v1-...b780"
+#      - "sk-or-v1-...b781"
+#    models: # The models supported by the provider.
+#      - name: "moonshotai/kimi-k2:free" # The actual model name.
+#        alias: "kimi-k2" # The alias used in the API.
 
 # Gemini Web settings
-gemini-web:
-  # Conversation reuse: set to true to enable (default), false to disable.
-  context: true
-  # Maximum characters per single request to Gemini Web. Requests exceeding this
-  # size split into chunks. Only the last chunk carries files and yields the final answer.
-  max-chars-per-request: 1000000
-  # Disable the short continuation hint appended to intermediate chunks
-  # when splitting long prompts. Default is false (hint enabled by default).
-  disable-continuation-hint: false
-  # Code mode:
-  # - true: enable XML wrapping hint and attach the coding-partner Gem.
-  #   Thought merging (<think> into visible content) applies to STREAMING only;
-  #   non-stream responses keep reasoning/thought parts separate for clients
-  #   that expect explicit reasoning fields.
-  # - false: disable XML hint and keep <think> separate
-  code-mode: false
+#gemini-web:
+#  # Conversation reuse: set to true to enable (default), false to disable.
+#  context: true
+#  # Maximum characters per single request to Gemini Web. Requests exceeding this
+#  # size split into chunks. Only the last chunk carries files and yields the final answer.
+#  max-chars-per-request: 1000000
+#  # Disable the short continuation hint appended to intermediate chunks
+#  # when splitting long prompts. Default is false (hint enabled by default).
+#  disable-continuation-hint: false
+#  # Code mode:
+#  # - true: enable XML wrapping hint and attach the coding-partner Gem.
+#  #   Thought merging (<think> into visible content) applies to STREAMING only;
+#  #   non-stream responses keep reasoning/thought parts separate for clients
+#  #   that expect explicit reasoning fields.
+#  # - false: disable XML hint and keep <think> separate
+#  code-mode: false
```
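The max-chars-per-request and disable-continuation-hint comments describe a splitting scheme: an oversized prompt goes out as several requests, and every chunk except the last carries a short hint telling the model that more input follows. The sketch below only illustrates that idea; the function name, the hint text, and the chunking details are assumptions, not the proxy's actual implementation.

```go
package main

import "fmt"

// splitPrompt illustrates the chunking described for gemini-web: the prompt is
// cut into pieces of at most maxChars characters, and every piece except the
// last gets a short continuation hint unless the hint is disabled. In the real
// proxy only the last chunk carries files and yields the final answer.
func splitPrompt(prompt string, maxChars int, disableHint bool) []string {
	const hint = "\n[to be continued; reply with OK]" // placeholder hint text
	runes := []rune(prompt)
	var chunks []string
	for len(runes) > 0 {
		budget := maxChars
		if len(runes) > maxChars && !disableHint {
			budget = maxChars - len([]rune(hint)) // leave room for the hint
		}
		if budget < 1 {
			budget = 1
		}
		if budget > len(runes) {
			budget = len(runes)
		}
		chunk := string(runes[:budget])
		runes = runes[budget:]
		if len(runes) > 0 && !disableHint {
			chunk += hint
		}
		chunks = append(chunks, chunk)
	}
	return chunks
}

func main() {
	parts := splitPrompt("an oversized prompt that exceeds the per-request limit", 50, false)
	fmt.Printf("%d chunks, first: %q\n", len(parts), parts[0])
}
```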
```diff
@@ -284,6 +284,7 @@ func hasZSTDEcoding(contentEncoding string) bool {
 func applyClaudeHeaders(r *http.Request, apiKey string, stream bool) {
 	r.Header.Set("Authorization", "Bearer "+apiKey)
 	r.Header.Set("Content-Type", "application/json")
+	r.Header.Set("Anthropic-Beta", "claude-code-20250219,oauth-2025-04-20,interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14")
 
 	var ginHeaders http.Header
 	if ginCtx, ok := r.Context().Value("gin").(*gin.Context); ok && ginCtx != nil && ginCtx.Request != nil {
```
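applyClaudeHeaders pulls the client's gin.Context back out of the outbound request's context via the "gin" key. A minimal sketch of how that value could be attached upstream is shown below; the package, function name, and wiring are assumptions about CLIProxyAPI rather than its actual handler code.

```go
package handlers // hypothetical package name

import (
	"context"
	"net/http"

	"github.com/gin-gonic/gin"
)

// withGinContext stashes the gin.Context under the plain string key "gin",
// matching the lookup in applyClaudeHeaders, so downstream helpers can read
// the client's original headers when building the upstream request.
func withGinContext(c *gin.Context, outbound *http.Request) *http.Request {
	return outbound.WithContext(context.WithValue(outbound.Context(), "gin", c))
}
```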
```diff
@@ -292,7 +293,6 @@ func applyClaudeHeaders(r *http.Request, apiKey string, stream bool) {
 
 	misc.EnsureHeader(r.Header, ginHeaders, "Anthropic-Version", "2023-06-01")
 	misc.EnsureHeader(r.Header, ginHeaders, "Anthropic-Dangerous-Direct-Browser-Access", "true")
-	misc.EnsureHeader(r.Header, ginHeaders, "Anthropic-Beta", "claude-code-20250219,oauth-2025-04-20,interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14")
 	misc.EnsureHeader(r.Header, ginHeaders, "X-App", "cli")
 	misc.EnsureHeader(r.Header, ginHeaders, "X-Stainless-Helper-Method", "stream")
 	misc.EnsureHeader(r.Header, ginHeaders, "X-Stainless-Retry-Count", "0")
```
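Dropping the Anthropic-Beta line from the EnsureHeader calls, after adding the unconditional Set above, reads as intended if EnsureHeader only fills in a header that is not already present, preferring a value supplied by the incoming gin request over the fallback. That contract is an assumption about the helper; a minimal sketch of it:

```go
package misc

import "net/http"

// EnsureHeader is a guess at the helper's contract as used above: if the
// outgoing request already has the header (e.g. the unconditional
// Anthropic-Beta Set), leave it alone; otherwise forward the value from the
// incoming request if present, and only then fall back to the default.
// The real CLIProxyAPI implementation may differ.
func EnsureHeader(dst http.Header, incoming http.Header, key, fallback string) {
	if dst.Get(key) != "" {
		return // already set explicitly on the outgoing request
	}
	if incoming != nil {
		if v := incoming.Get(key); v != "" {
			dst.Set(key, v)
			return
		}
	}
	dst.Set(key, fallback)
}
```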
```diff
@@ -78,12 +78,21 @@ func ConvertGeminiResponseToOpenAIResponses(_ context.Context, modelName string,
 		textDone, _ = sjson.Set(textDone, "output_index", st.ReasoningIndex)
 		textDone, _ = sjson.Set(textDone, "text", full)
 		out = append(out, emitEvent("response.reasoning_summary_text.done", textDone))
 
 		partDone := `{"type":"response.reasoning_summary_part.done","sequence_number":0,"item_id":"","output_index":0,"summary_index":0,"part":{"type":"summary_text","text":""}}`
 		partDone, _ = sjson.Set(partDone, "sequence_number", nextSeq())
 		partDone, _ = sjson.Set(partDone, "item_id", st.ReasoningItemID)
 		partDone, _ = sjson.Set(partDone, "output_index", st.ReasoningIndex)
 		partDone, _ = sjson.Set(partDone, "part.text", full)
 		out = append(out, emitEvent("response.reasoning_summary_part.done", partDone))
+
+		itemDone := `{"type":"response.output_item.done","sequence_number":0,"output_index":0,"item":{"id":"","type":"reasoning","encrypted_content":"","summary":[{"type":"summary_text","text":""}]}}`
+		itemDone, _ = sjson.Set(itemDone, "sequence_number", nextSeq())
+		itemDone, _ = sjson.Set(itemDone, "item.id", st.ReasoningItemID)
+		itemDone, _ = sjson.Set(itemDone, "output_index", st.ReasoningIndex)
+		itemDone, _ = sjson.Set(itemDone, "item.summary.0.text", full)
+		out = append(out, emitEvent("response.output_item.done", itemDone))
 
 		st.ReasoningClosed = true
 	}
```
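The converter builds each Responses event by patching a JSON skeleton with sjson path updates and handing it to emitEvent. The standalone sketch below shows the same sjson pattern and how such an event might look as a server-sent-events frame; the IDs and sequence number are placeholders, and the proxy's actual emitEvent framing may differ.

```go
package main

import (
	"fmt"

	"github.com/tidwall/sjson"
)

func main() {
	// Start from the same kind of JSON skeleton the converter uses and fill
	// the dynamic fields with sjson path updates (array elements are addressed
	// by index, e.g. "item.summary.0.text").
	itemDone := `{"type":"response.output_item.done","sequence_number":0,"output_index":0,"item":{"id":"","type":"reasoning","encrypted_content":"","summary":[{"type":"summary_text","text":""}]}}`
	itemDone, _ = sjson.Set(itemDone, "sequence_number", 7)          // placeholder sequence number
	itemDone, _ = sjson.Set(itemDone, "item.id", "rs_123")           // placeholder reasoning item id
	itemDone, _ = sjson.Set(itemDone, "output_index", 0)             // placeholder output index
	itemDone, _ = sjson.Set(itemDone, "item.summary.0.text", "model reasoning summary")

	// A Responses-style SSE frame pairs the event name with the JSON payload.
	fmt.Printf("event: %s\ndata: %s\n\n", "response.output_item.done", itemDone)
}
```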