chore: initial public snapshot for github upload

This commit is contained in:
Your Name
2026-03-26 20:06:14 +08:00
commit 0e5ecd930e
3497 changed files with 1586236 additions and 0 deletions

View File

@@ -0,0 +1,391 @@
# =============================================================================
# Sub2API Docker Environment Configuration
# =============================================================================
# Copy this file to .env and modify as needed:
# cp .env.example .env
# nano .env
#
# Then start with: docker-compose up -d
# =============================================================================
# -----------------------------------------------------------------------------
# Server Configuration
# -----------------------------------------------------------------------------
# Bind address for host port mapping
BIND_HOST=0.0.0.0
# Server port (exposed on host)
SERVER_PORT=8080
# Server mode: release or debug
SERVER_MODE=release
# -----------------------------------------------------------------------------
# Logging Configuration
# 日志配置
# -----------------------------------------------------------------------------
# 日志级别:debug/info/warn/error
LOG_LEVEL=info
# 日志格式:json/console
LOG_FORMAT=json
# 每条日志附带的 service 字段
LOG_SERVICE_NAME=sub2api
# 每条日志附带的 env 字段
LOG_ENV=production
# 是否输出调用方位置信息
LOG_CALLER=true
# 堆栈输出阈值:none/error/fatal
LOG_STACKTRACE_LEVEL=error
# 输出开关(建议容器内保持双输出)
# 是否输出到 stdout/stderr
LOG_OUTPUT_TO_STDOUT=true
# 是否输出到文件
LOG_OUTPUT_TO_FILE=true
# 日志文件路径(留空自动推导):
# - 设置 DATA_DIR:${DATA_DIR}/logs/sub2api.log
# - 未设置 DATA_DIR:/app/data/logs/sub2api.log
LOG_OUTPUT_FILE_PATH=
# 滚动配置
# 单文件最大体积(MB)
LOG_ROTATION_MAX_SIZE_MB=100
# 保留历史文件数量(0 表示不限制)
LOG_ROTATION_MAX_BACKUPS=10
# 历史日志保留天数(0 表示不限制)
LOG_ROTATION_MAX_AGE_DAYS=7
# 是否压缩历史日志
LOG_ROTATION_COMPRESS=true
# 滚动文件时间戳是否使用本地时间
LOG_ROTATION_LOCAL_TIME=true
# 采样配置(高频重复日志降噪)
LOG_SAMPLING_ENABLED=false
# 每秒前 N 条日志不采样
LOG_SAMPLING_INITIAL=100
# 之后每 N 条保留 1 条
LOG_SAMPLING_THEREAFTER=100
# Global max request body size in bytes (default: 256MB)
# 全局最大请求体大小(字节,默认 256MB)
# Applies to all requests, especially important for h2c first request memory protection
# 适用于所有请求,对 h2c 第一请求的内存保护尤为重要
SERVER_MAX_REQUEST_BODY_SIZE=268435456
# Gateway max request body size in bytes (default: 256MB)
# 网关请求体最大字节数(默认 256MB)
GATEWAY_MAX_BODY_SIZE=268435456
# Enable HTTP/2 Cleartext (h2c) for client connections
# 启用 HTTP/2 Cleartext (h2c) 客户端连接
SERVER_H2C_ENABLED=true
# H2C max concurrent streams (default: 50)
# H2C 最大并发流数量(默认 50)
SERVER_H2C_MAX_CONCURRENT_STREAMS=50
# H2C idle timeout in seconds (default: 75)
# H2C 空闲超时时间(秒,默认 75)
SERVER_H2C_IDLE_TIMEOUT=75
# H2C max read frame size in bytes (default: 1048576 = 1MB)
# H2C 最大帧大小(字节,默认 1048576 = 1MB)
SERVER_H2C_MAX_READ_FRAME_SIZE=1048576
# H2C max upload buffer per connection in bytes (default: 2097152 = 2MB)
# H2C 每个连接的最大上传缓冲区(字节,默认 2097152 = 2MB)
SERVER_H2C_MAX_UPLOAD_BUFFER_PER_CONNECTION=2097152
# H2C max upload buffer per stream in bytes (default: 524288 = 512KB)
# H2C 每个流的最大上传缓冲区(字节,默认 524288 = 512KB)
SERVER_H2C_MAX_UPLOAD_BUFFER_PER_STREAM=524288
# 运行模式: standard (默认) 或 simple (内部自用)
# standard: 完整 SaaS 功能,包含计费/余额校验;simple: 隐藏 SaaS 功能并跳过计费/余额校验
RUN_MODE=standard
# Timezone
TZ=Asia/Shanghai
# -----------------------------------------------------------------------------
# PostgreSQL Configuration (REQUIRED)
# -----------------------------------------------------------------------------
POSTGRES_USER=sub2api
POSTGRES_PASSWORD=change_this_secure_password
POSTGRES_DB=sub2api
# PostgreSQL 监听端口(同时用于 PG 服务端和应用连接,默认 5432)
DATABASE_PORT=5432
# -----------------------------------------------------------------------------
# PostgreSQL 服务端参数(可选)
# -----------------------------------------------------------------------------
# POSTGRES_MAX_CONNECTIONS:PostgreSQL 服务端允许的最大连接数。
# 必须 >=(所有 Sub2API 实例的 DATABASE_MAX_OPEN_CONNS 之和)+ 预留余量(例如 20%)。
POSTGRES_MAX_CONNECTIONS=1024
# POSTGRES_SHARED_BUFFERS:PostgreSQL 用于缓存数据页的共享内存。
# 常见建议:物理内存的 10%~25%(容器内存受限时请按实际限制调整)。
# 8GB 内存容器参考:1GB。
POSTGRES_SHARED_BUFFERS=1GB
# POSTGRES_EFFECTIVE_CACHE_SIZE:查询规划器“假设可用的 OS 缓存大小”(不等于实际分配)。
# 常见建议:物理内存的 50%~75%。
# 8GB 内存容器参考:4GB(此处取 50% 下限;内存富余时可调至 6GB)。
POSTGRES_EFFECTIVE_CACHE_SIZE=4GB
# POSTGRES_MAINTENANCE_WORK_MEM:维护操作内存(VACUUM/CREATE INDEX 等)。
# 值越大维护越快,但会占用更多内存。
# 8GB 内存容器参考:128MB。
POSTGRES_MAINTENANCE_WORK_MEM=128MB
# -----------------------------------------------------------------------------
# PostgreSQL 连接池参数(可选,默认与程序内置一致)
# -----------------------------------------------------------------------------
# 说明:
# - 这些参数控制 Sub2API 进程到 PostgreSQL 的连接池大小(不是 PostgreSQL 自身的 max_connections)
# - 多实例/多副本部署时,总连接上限约等于:实例数 * DATABASE_MAX_OPEN_CONNS。
# - 连接池过大可能导致:数据库连接耗尽、内存占用上升、上下文切换增多,反而变慢。
# - 建议结合 PostgreSQL 的 max_connections 与机器规格逐步调优:
# 通常把应用总连接上限控制在 max_connections 的 50%~80% 更稳妥。
#
# DATABASE_MAX_OPEN_CONNS:最大打开连接数(活跃+空闲),达到后新请求会等待可用连接。
# 典型范围:50~500(取决于 DB 规格、实例数、SQL 复杂度)。
DATABASE_MAX_OPEN_CONNS=256
# DATABASE_MAX_IDLE_CONNS:最大空闲连接数(热连接),建议 <= MAX_OPEN。
# 太小会频繁建连增加延迟;太大会长期占用数据库资源。
DATABASE_MAX_IDLE_CONNS=128
# DATABASE_CONN_MAX_LIFETIME_MINUTES:单个连接最大存活时间(单位:分钟)。
# 用于避免连接长期不重建导致的中间件/LB/NAT 异常或服务端重启后的“僵尸连接”。
# 设置为 0 表示不限制(一般不建议生产环境)。
DATABASE_CONN_MAX_LIFETIME_MINUTES=30
# DATABASE_CONN_MAX_IDLE_TIME_MINUTES:空闲连接最大存活时间(单位:分钟)。
# 超过该时间的空闲连接会被回收,防止长时间闲置占用连接数。
# 设置为 0 表示不限制(一般不建议生产环境)。
DATABASE_CONN_MAX_IDLE_TIME_MINUTES=5
# -----------------------------------------------------------------------------
# Redis Configuration
# -----------------------------------------------------------------------------
# Redis 监听端口(同时用于应用连接和 Redis 服务端,默认 6379)
REDIS_PORT=6379
# Leave empty for no password (default for local development)
REDIS_PASSWORD=
REDIS_DB=0
# Redis 服务端最大客户端连接数(可选)
REDIS_MAXCLIENTS=50000
# Redis 连接池大小(默认 1024)
REDIS_POOL_SIZE=4096
# Redis 最小空闲连接数(默认 10)
REDIS_MIN_IDLE_CONNS=256
REDIS_ENABLE_TLS=false
# -----------------------------------------------------------------------------
# Admin Account
# -----------------------------------------------------------------------------
# Email for the admin account
ADMIN_EMAIL=admin@sub2api.local
# Password for admin account
# Leave empty to auto-generate (will be shown in logs on first run)
ADMIN_PASSWORD=
# -----------------------------------------------------------------------------
# JWT Configuration
# -----------------------------------------------------------------------------
# IMPORTANT: Set a fixed JWT_SECRET to prevent login sessions from being
# invalidated after container restarts. If left empty, a random secret will
# be generated on each startup, causing all users to be logged out.
# Generate a secure secret: openssl rand -hex 32
JWT_SECRET=
JWT_EXPIRE_HOUR=24
# Access Token 有效期(分钟)
# 优先级说明:
# - >0: 按分钟生效(优先于 JWT_EXPIRE_HOUR)
# - =0: 回退使用 JWT_EXPIRE_HOUR
JWT_ACCESS_TOKEN_EXPIRE_MINUTES=0
# -----------------------------------------------------------------------------
# TOTP (2FA) Configuration
# TOTP(双因素认证)配置
# -----------------------------------------------------------------------------
# IMPORTANT: Set a fixed encryption key for TOTP secrets. If left empty, a
# random key will be generated on each startup, causing all existing TOTP
# configurations to become invalid (users won't be able to login with 2FA).
# Generate a secure key: openssl rand -hex 32
# 重要:设置固定的 TOTP 加密密钥。如果留空,每次启动将生成随机密钥,
# 导致现有的 TOTP 配置失效(用户无法使用双因素认证登录)。
TOTP_ENCRYPTION_KEY=
# -----------------------------------------------------------------------------
# Configuration File (Optional)
# -----------------------------------------------------------------------------
# Path to custom config file (relative to docker-compose.yml directory)
# Copy config.example.yaml to config.yaml and modify as needed
# Leave unset to use default ./config.yaml
#CONFIG_FILE=./config.yaml
# -----------------------------------------------------------------------------
# Built-in OAuth Client Secrets (Optional)
# -----------------------------------------------------------------------------
# SECURITY NOTE:
# - 本项目不会在代码仓库中内置第三方 OAuth client_secret。
# - 如需使用“内置客户端”(而不是自建 OAuth Client),请在运行环境通过 env 注入。
#
# Gemini CLI built-in OAuth client_secret用于 Gemini code_assist/google_one 内置登录流)
# GEMINI_CLI_OAUTH_CLIENT_SECRET=
#
# Antigravity OAuth client_secret用于 Antigravity OAuth 登录流)
# ANTIGRAVITY_OAUTH_CLIENT_SECRET=
# -----------------------------------------------------------------------------
# Rate Limiting (Optional)
# 速率限制(可选)
# -----------------------------------------------------------------------------
# Cooldown time (in minutes) when upstream returns 529 (overloaded)
# 上游返回 529(过载)时的冷却时间(分钟)
RATE_LIMIT_OVERLOAD_COOLDOWN_MINUTES=10
# -----------------------------------------------------------------------------
# Gateway Scheduling (Optional)
# 调度缓存与受控回源配置(缓存就绪且命中时不读 DB)
# -----------------------------------------------------------------------------
# Force Codex CLI mode: treat all /openai/v1/responses requests as Codex CLI.
# 强制按 Codex CLI 处理 /openai/v1/responses 请求(用于网关未透传/改写 User-Agent 的兜底)。
#
# 注意:开启后会影响所有客户端的行为(不仅限于 VS Code / Codex CLI),请谨慎开启。
#
# 默认:false
GATEWAY_FORCE_CODEX_CLI=false
# 上游连接池:每主机最大连接数(默认 1024;流式/HTTP1.1 场景可调大,如 2400/4096)
GATEWAY_MAX_CONNS_PER_HOST=2048
# 上游连接池:最大空闲连接总数(默认 2560;账号/代理隔离 + 高并发场景可调大)
GATEWAY_MAX_IDLE_CONNS=8192
# 上游连接池:每主机最大空闲连接(默认 120)
GATEWAY_MAX_IDLE_CONNS_PER_HOST=4096
# 粘性会话最大排队长度
GATEWAY_SCHEDULING_STICKY_SESSION_MAX_WAITING=3
# 粘性会话等待超时(时间段,例如 45s)
GATEWAY_SCHEDULING_STICKY_SESSION_WAIT_TIMEOUT=120s
# 兜底排队等待超时(时间段,例如 30s)
GATEWAY_SCHEDULING_FALLBACK_WAIT_TIMEOUT=30s
# 兜底最大排队长度
GATEWAY_SCHEDULING_FALLBACK_MAX_WAITING=100
# 启用调度批量负载计算
GATEWAY_SCHEDULING_LOAD_BATCH_ENABLED=true
# 并发槽位清理周期(时间段,例如 30s)
GATEWAY_SCHEDULING_SLOT_CLEANUP_INTERVAL=30s
# 是否允许受控回源到 DB(默认 true,保持现有行为)
GATEWAY_SCHEDULING_DB_FALLBACK_ENABLED=true
# 受控回源超时(0 表示不额外收紧超时)
GATEWAY_SCHEDULING_DB_FALLBACK_TIMEOUT_SECONDS=0
# 受控回源限流(实例级 QPS,0 表示不限制)
GATEWAY_SCHEDULING_DB_FALLBACK_MAX_QPS=0
# outbox 轮询周期(秒)
GATEWAY_SCHEDULING_OUTBOX_POLL_INTERVAL_SECONDS=1
# outbox 滞后告警阈值(秒)
GATEWAY_SCHEDULING_OUTBOX_LAG_WARN_SECONDS=5
# outbox 触发强制重建阈值(秒)
GATEWAY_SCHEDULING_OUTBOX_LAG_REBUILD_SECONDS=10
# outbox 连续滞后触发次数
GATEWAY_SCHEDULING_OUTBOX_LAG_REBUILD_FAILURES=3
# outbox 积压触发重建阈值(行数)
GATEWAY_SCHEDULING_OUTBOX_BACKLOG_REBUILD_ROWS=10000
# 全量重建周期(秒)
GATEWAY_SCHEDULING_FULL_REBUILD_INTERVAL_SECONDS=300
# -----------------------------------------------------------------------------
# Dashboard Aggregation (Optional)
# -----------------------------------------------------------------------------
# Enable aggregation job
# 启用仪表盘预聚合
DASHBOARD_AGGREGATION_ENABLED=true
# Refresh interval (seconds)
# 刷新间隔(秒)
DASHBOARD_AGGREGATION_INTERVAL_SECONDS=60
# Lookback window (seconds)
# 回看窗口(秒)
DASHBOARD_AGGREGATION_LOOKBACK_SECONDS=120
# Allow manual backfill
# 允许手动回填
DASHBOARD_AGGREGATION_BACKFILL_ENABLED=false
# Backfill max range (days)
# 回填最大跨度(天)
DASHBOARD_AGGREGATION_BACKFILL_MAX_DAYS=31
# Recompute recent N days on startup
# 启动时重算最近 N 天
DASHBOARD_AGGREGATION_RECOMPUTE_DAYS=2
# Retention windows (days)
# 保留窗口(天)
DASHBOARD_AGGREGATION_RETENTION_USAGE_LOGS_DAYS=90
DASHBOARD_AGGREGATION_RETENTION_HOURLY_DAYS=180
DASHBOARD_AGGREGATION_RETENTION_DAILY_DAYS=730
# -----------------------------------------------------------------------------
# Security Configuration
# -----------------------------------------------------------------------------
# URL Allowlist Configuration
# 启用 URL 白名单验证(false 则跳过白名单检查,仅做基本格式校验)
SECURITY_URL_ALLOWLIST_ENABLED=false
# 关闭白名单时,是否允许 http:// URL(默认 false,只允许 https://)
# ⚠️ 警告:允许 HTTP 存在安全风险(明文传输),仅建议在开发/测试环境或可信内网中使用
# Allow insecure HTTP URLs when allowlist is disabled (default: false, requires https)
# ⚠️ WARNING: Allowing HTTP has security risks (plaintext transmission)
# Only recommended for dev/test environments or trusted networks
# NOTE(review): 本示例将其设为 true 以便本地/内网部署,与上方“默认 false”的说明不一致;生产环境请改为 false
SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=true
# 是否允许本地/私有 IP 地址用于上游/定价/CRS(仅在可信网络中使用)
# Allow localhost/private IPs for upstream/pricing/CRS (use only in trusted networks)
SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=true
# -----------------------------------------------------------------------------
# Gemini OAuth (OPTIONAL, required only for Gemini OAuth accounts)
# -----------------------------------------------------------------------------
# Sub2API supports TWO Gemini OAuth modes:
#
# 1. Code Assist OAuth (需要 GCP project_id)
# - Uses: cloudcode-pa.googleapis.com (Code Assist API)
# - Auto scopes: cloud-platform + userinfo.email + userinfo.profile
# - OAuth Client: Can use built-in Gemini CLI client (留空即可)
# - Requires: Google Cloud Platform project with Code Assist enabled
#
# 2. AI Studio OAuth (不需要 project_id)
# - Uses: generativelanguage.googleapis.com (AI Studio API)
# - Default scopes: generative-language
# - OAuth Client: Requires your own OAuth 2.0 Client (内置 Gemini CLI client 不能申请 generative-language scope)
# - Requires: Create OAuth 2.0 Client in GCP Console + OAuth consent screen
# - Setup Guide: https://ai.google.dev/gemini-api/docs/oauth
# - ⚠️ IMPORTANT: OAuth Client 必须发布为正式版本 (Production)
# Testing 模式限制: 只能添加 100 个测试用户, refresh token 7 天后过期
# 发布步骤: GCP Console → OAuth consent screen → PUBLISH APP
#
# Configuration:
# Leave empty to use the built-in Gemini CLI OAuth client (Code Assist OAuth only).
# To enable AI Studio OAuth, set your own OAuth client ID/secret here.
GEMINI_OAUTH_CLIENT_ID=
GEMINI_OAUTH_CLIENT_SECRET=
# Optional; leave empty to auto-select scopes based on oauth_type
GEMINI_OAUTH_SCOPES=
# -----------------------------------------------------------------------------
# Gemini Quota Policy (OPTIONAL, local simulation)
# -----------------------------------------------------------------------------
# JSON overrides for local quota simulation (Code Assist only).
# Example:
# GEMINI_QUOTA_POLICY={"tiers":{"LEGACY":{"pro_rpd":50,"flash_rpd":1500,"cooldown_minutes":30},"PRO":{"pro_rpd":1500,"flash_rpd":4000,"cooldown_minutes":5},"ULTRA":{"pro_rpd":2000,"flash_rpd":0,"cooldown_minutes":5}}}
GEMINI_QUOTA_POLICY=
# -----------------------------------------------------------------------------
# Ops Monitoring Configuration (运维监控配置)
# -----------------------------------------------------------------------------
# Enable ops monitoring features (background jobs and APIs)
# 是否启用运维监控功能(后台任务和接口)
# Set to false to hide ops menu in sidebar and disable all ops features
# 设置为 false 可在左侧栏隐藏运维监控菜单并禁用所有运维监控功能
OPS_ENABLED=true
# -----------------------------------------------------------------------------
# Update Configuration (在线更新配置)
# -----------------------------------------------------------------------------
# Proxy URL for accessing GitHub (used for online updates and pricing data)
# 用于访问 GitHub 的代理地址(用于在线更新和定价数据获取)
# Supports: http, https, socks5, socks5h
# Examples:
# HTTP proxy: http://127.0.0.1:7890
# SOCKS5 proxy: socks5://127.0.0.1:1080
# With authentication: http://user:pass@proxy.example.com:8080
# Leave empty for direct connection (recommended for overseas servers)
# 留空表示直连(适用于海外服务器)
UPDATE_PROXY_URL=

View File

@@ -0,0 +1,19 @@
# =============================================================================
# Sub2API Deploy Directory - Git Ignore
# =============================================================================
# Data directories (generated at runtime when using docker-compose.local.yml)
data/
postgres_data/
redis_data/
# Environment configuration (contains sensitive information)
.env
# Backup files
*.backup
*.bak
# Temporary files
*.tmp
*.log

View File

@@ -0,0 +1,113 @@
# 修改为你的域名
api.sub2api.com {
# =========================================================================
# 静态资源长期缓存(高优先级,放在最前面)
# 带 hash 的文件可以永久缓存,浏览器和 CDN 都会缓存
# =========================================================================
@static {
path /assets/*
path /logo.png
path /favicon.ico
}
header @static {
Cache-Control "public, max-age=31536000, immutable"
# 移除可能干扰缓存的头
-Pragma
-Expires
}
# =========================================================================
# TLS 安全配置
# =========================================================================
tls {
# 仅使用 TLS 1.2 和 1.3
protocols tls1.2 tls1.3
# 优先使用的加密套件
ciphers TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
}
# =========================================================================
# 反向代理配置
# =========================================================================
reverse_proxy localhost:8080 {
# 健康检查
health_uri /health
health_interval 30s
health_timeout 10s
health_status 200
# 负载均衡策略(单节点可忽略,多节点时有用)
lb_policy round_robin
lb_try_duration 5s
lb_try_interval 250ms
# 传递真实客户端信息
# 兼容 Cloudflare 和直连:后端应优先读取 CF-Connecting-IP其次 X-Real-IP
header_up X-Real-IP {remote_host}
header_up X-Forwarded-For {remote_host}
header_up X-Forwarded-Proto {scheme}
header_up X-Forwarded-Host {host}
# 保留 Cloudflare 原始头(如果存在)
# 后端获取 IP 的优先级建议: CF-Connecting-IP → X-Real-IP → X-Forwarded-For
header_up CF-Connecting-IP {http.request.header.CF-Connecting-IP}
# 连接池优化
transport http {
keepalive 120s
keepalive_idle_conns 256
read_buffer 16KB
write_buffer 16KB
compression off
}
# 故障转移
fail_duration 30s
max_fails 3
unhealthy_status 500 502 503 504
}
# =========================================================================
# 压缩配置
# =========================================================================
encode {
zstd
gzip 6
minimum_length 256
match {
header Content-Type text/*
header Content-Type application/json*
header Content-Type application/javascript*
header Content-Type application/xml*
header Content-Type application/rss+xml*
header Content-Type image/svg+xml*
}
}
# =========================================================================
# 请求大小限制 (防止大文件攻击)
# NOTE(review): 后端 SERVER_MAX_REQUEST_BODY_SIZE 默认 256MB,此处 100MB 会先在 Caddy 层拦截更大的请求体;如需一致请对齐两处配置
# =========================================================================
request_body {
max_size 100MB
}
# =========================================================================
# 日志配置
# =========================================================================
log {
output file /var/log/caddy/sub2api.log {
roll_size 50mb
roll_keep 10
roll_keep_for 720h
}
format json
level INFO
}
# =========================================================================
# 错误处理
# =========================================================================
handle_errors {
# respond 不带状态码参数时会返回 200;补上 {err.status_code} 以透传原始错误状态
respond "{err.status_code} {err.status_text}" {err.status_code}
}
}

View File

@@ -0,0 +1,78 @@
# datamanagementd 部署说明(数据管理)
本文说明如何在宿主机部署 `datamanagementd`,并与主进程联动开启“数据管理”功能。
## 1. 关键约束
- 主进程固定探测路径:`/tmp/sub2api-datamanagement.sock`
- 仅当该 Unix Socket 可连通且 `Health` 成功时,后台“数据管理”才会启用
- `datamanagementd` 使用 SQLite 持久化元数据,不依赖主库
## 2. 宿主机构建与运行
```bash
cd /opt/sub2api-src/datamanagement
go build -o /opt/sub2api/datamanagementd ./cmd/datamanagementd
mkdir -p /var/lib/sub2api/datamanagement
chown -R sub2api:sub2api /var/lib/sub2api/datamanagement
```
手动启动示例:
```bash
/opt/sub2api/datamanagementd \
-socket-path /tmp/sub2api-datamanagement.sock \
-sqlite-path /var/lib/sub2api/datamanagement/datamanagementd.db \
-version 1.0.0
```
## 3. systemd 托管(推荐)
仓库已提供示例服务文件:`deploy/sub2api-datamanagementd.service`
```bash
sudo cp deploy/sub2api-datamanagementd.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable --now sub2api-datamanagementd
sudo systemctl status sub2api-datamanagementd
```
查看日志:
```bash
sudo journalctl -u sub2api-datamanagementd -f
```
也可以使用一键安装脚本(自动安装二进制 + 注册 systemd
```bash
# 方式一:使用现成二进制
sudo ./deploy/install-datamanagementd.sh --binary /path/to/datamanagementd
# 方式二:从源码构建后安装
sudo ./deploy/install-datamanagementd.sh --source /path/to/sub2api
```
## 4. Docker 部署联动
`sub2api` 运行在 Docker 容器中,需要将宿主机 Socket 挂载到容器同路径:
```yaml
services:
sub2api:
volumes:
- /tmp/sub2api-datamanagement.sock:/tmp/sub2api-datamanagement.sock
```
建议在 `docker-compose.override.yml` 中维护该挂载,避免覆盖主 compose 文件。
## 5. 依赖检查
`datamanagementd` 执行备份时依赖以下工具:
- `pg_dump`
- `redis-cli`
- `docker`(仅 `source_mode=docker_exec` 时)
缺失依赖会导致对应任务失败,并在任务详情中体现错误信息。

View File

@@ -0,0 +1,76 @@
# Sub2API Docker Image
Sub2API is an AI API Gateway Platform for distributing and managing AI product subscription API quotas.
## Quick Start
```bash
docker run -d \
--name sub2api \
-p 8080:8080 \
-e DATABASE_URL="postgres://user:pass@host:5432/sub2api" \
-e REDIS_URL="redis://host:6379" \
weishaw/sub2api:latest
```
## Docker Compose
```yaml
version: '3.8'
services:
sub2api:
image: weishaw/sub2api:latest
ports:
- "8080:8080"
environment:
- DATABASE_URL=postgres://postgres:postgres@db:5432/sub2api?sslmode=disable
- REDIS_URL=redis://redis:6379
depends_on:
- db
- redis
db:
image: postgres:15-alpine
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
- POSTGRES_DB=sub2api
volumes:
- postgres_data:/var/lib/postgresql/data
redis:
image: redis:7-alpine
volumes:
- redis_data:/data
volumes:
postgres_data:
redis_data:
```
## Environment Variables
| Variable | Description | Required | Default |
|----------|-------------|----------|---------|
| `DATABASE_URL` | PostgreSQL connection string | Yes | - |
| `REDIS_URL` | Redis connection string | Yes | - |
| `PORT` | Server port | No | `8080` |
| `GIN_MODE` | Gin framework mode (`debug`/`release`) | No | `release` |
## Supported Architectures
- `linux/amd64`
- `linux/arm64`
## Tags
- `latest` - Latest stable release
- `x.y.z` - Specific version
- `x.y` - Latest patch of minor version
- `x` - Latest minor of major version
## Links
- [GitHub Repository](https://github.com/weishaw/sub2api)
- [Documentation](https://github.com/weishaw/sub2api#readme)

View File

@@ -0,0 +1,111 @@
# =============================================================================
# Sub2API Multi-Stage Dockerfile
# =============================================================================
# Stage 1: Build frontend
# Stage 2: Build Go backend with embedded frontend
# Stage 3: Final minimal image
# =============================================================================
ARG NODE_IMAGE=node:24-alpine
ARG GOLANG_IMAGE=golang:1.26.1-alpine
ARG ALPINE_IMAGE=alpine:3.20
ARG GOPROXY=https://goproxy.cn,direct
ARG GOSUMDB=sum.golang.google.cn
# -----------------------------------------------------------------------------
# Stage 1: Frontend Builder
# -----------------------------------------------------------------------------
FROM ${NODE_IMAGE} AS frontend-builder
WORKDIR /app/frontend
# Install pnpm via corepack
RUN corepack enable && corepack prepare pnpm@latest --activate
# Install dependencies first (better layer caching)
COPY frontend/package.json frontend/pnpm-lock.yaml ./
RUN pnpm install --frozen-lockfile
# Copy frontend source and build
COPY frontend/ ./
RUN pnpm run build
# -----------------------------------------------------------------------------
# Stage 2: Backend Builder
# -----------------------------------------------------------------------------
FROM ${GOLANG_IMAGE} AS backend-builder
# Build arguments for version info (set by CI)
ARG VERSION=docker
ARG COMMIT=docker
ARG DATE
ARG GOPROXY
ARG GOSUMDB
ENV GOPROXY=${GOPROXY}
ENV GOSUMDB=${GOSUMDB}
# Install build dependencies
RUN apk add --no-cache git ca-certificates tzdata
WORKDIR /app/backend
# Copy go mod files first (better caching)
COPY backend/go.mod backend/go.sum ./
RUN go mod download
# Copy backend source first
COPY backend/ ./
# Copy frontend dist from previous stage (must be after backend copy to avoid
# being overwritten).
# NOTE(review): assumes the frontend build writes its output to
# /app/backend/internal/web/dist — confirm the bundler's outDir configuration.
COPY --from=frontend-builder /app/backend/internal/web/dist ./internal/web/dist
# Build the binary (BuildType=release for CI builds, embed frontend).
# Fix: ARG VERSION was declared but never injected into the binary; inject it
# via -X main.Version. Unknown -X symbols are ignored by the Go linker, so
# this is safe even if cmd/server does not define Version.
RUN CGO_ENABLED=0 GOOS=linux go build \
    -tags embed \
    -ldflags="-s -w -X main.Version=${VERSION} -X main.Commit=${COMMIT} -X main.Date=${DATE:-$(date -u +%Y-%m-%dT%H:%M:%SZ)} -X main.BuildType=release" \
    -o /app/sub2api \
    ./cmd/server
# -----------------------------------------------------------------------------
# Stage 3: Final Runtime Image
# -----------------------------------------------------------------------------
FROM ${ALPINE_IMAGE}
# Labels
LABEL maintainer="Wei-Shaw <github.com/Wei-Shaw>"
LABEL description="Sub2API - AI API Gateway Platform"
LABEL org.opencontainers.image.source="https://github.com/Wei-Shaw/sub2api"
# Install runtime dependencies (curl is used by the HEALTHCHECK below)
RUN apk add --no-cache \
    ca-certificates \
    tzdata \
    curl \
    && rm -rf /var/cache/apk/*
# Create non-root user
RUN addgroup -g 1000 sub2api && \
    adduser -u 1000 -G sub2api -s /bin/sh -D sub2api
# Set working directory
WORKDIR /app
# Copy binary from builder
COPY --from=backend-builder /app/sub2api /app/sub2api
# Create data directory
RUN mkdir -p /app/data && chown -R sub2api:sub2api /app
# Switch to non-root user
USER sub2api
# Expose port (can be overridden by SERVER_PORT env var)
EXPOSE 8080
# Health check.
# Fix: use curl (explicitly installed above) instead of wget, so the check is
# consistent with the image's declared dependencies rather than relying on the
# base image's busybox applet set.
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
    CMD curl -fsS -m 5 -o /dev/null http://localhost:${SERVER_PORT:-8080}/health || exit 1
# Run the application
ENTRYPOINT ["/app/sub2api"]

View File

@@ -0,0 +1,41 @@
# Phony targets.
# Fix: clean-coverage and clean were missing from .PHONY, so a file or
# directory with the same name would silently shadow those targets.
.PHONY: wire build build-embed test-unit test-integration test-e2e test-cover-integration clean-coverage clean

# Generate Wire dependency-injection code in cmd/server.
wire:
	@echo "生成 Wire 代码..."
	@cd cmd/server && go generate
	@echo "Wire 代码生成完成"

# Build the backend binary without embedding frontend assets.
build:
	@echo "构建后端(不嵌入前端)..."
	@go build -o bin/server ./cmd/server
	@echo "构建完成: bin/server"

# Build the backend binary with frontend assets embedded (build tag: embed).
build-embed:
	@echo "构建后端(嵌入前端)..."
	@go build -tags embed -o bin/server ./cmd/server
	@echo "构建完成: bin/server (with embedded frontend)"

# Unit tests.
test-unit:
	@go test -tags unit ./... -count=1

# Integration tests with the race detector enabled.
test-integration:
	@go test -tags integration ./... -count=1 -race -parallel=8

# End-to-end tests; requires a locally running server.
test-e2e:
	@echo "运行 E2E 测试(需要本地服务器运行)..."
	@go test -tags e2e ./internal/integration/... -count=1 -v

# Integration tests with a coverage report (coverage.html).
test-cover-integration:
	@echo "运行集成测试并生成覆盖率报告..."
	@go test -tags=integration -cover -coverprofile=coverage.out -count=1 -race -parallel=8 ./...
	@go tool cover -func=coverage.out | tail -1
	@go tool cover -html=coverage.out -o coverage.html
	@echo "覆盖率报告已生成: coverage.html"

# Remove coverage artifacts.
clean-coverage:
	@rm -f coverage.out coverage.html
	@echo "覆盖率文件已清理"

# Remove all build artifacts (also cleans coverage files).
clean: clean-coverage
	@rm -rf bin/
	@echo "构建产物已清理"

View File

@@ -0,0 +1,613 @@
# Sub2API Deployment Files
This directory contains files for deploying Sub2API on Linux servers.
## Deployment Methods
| Method | Best For | Setup Wizard |
|--------|----------|--------------|
| **Docker Compose** | Quick setup, all-in-one | Not needed (auto-setup) |
| **Binary Install** | Production servers, systemd | Web-based wizard |
## Files
| File | Description |
|------|-------------|
| `docker-compose.yml` | Docker Compose configuration (named volumes) |
| `docker-compose.local.yml` | Docker Compose configuration (local directories, easy migration) |
| `docker-deploy.sh` | **One-click Docker deployment script (recommended)** |
| `.env.example` | Docker environment variables template |
| `DOCKER.md` | Docker Hub documentation |
| `install.sh` | One-click binary installation script |
| `install-datamanagementd.sh` | datamanagementd 一键安装脚本 |
| `sub2api.service` | Systemd service unit file |
| `sub2api-datamanagementd.service` | datamanagementd systemd service unit file |
| `DATAMANAGEMENTD_CN.md` | datamanagementd 部署与联动说明(中文) |
| `config.example.yaml` | Example configuration file |
---
## Docker Deployment (Recommended)
### Method 1: One-Click Deployment (Recommended)
Use the automated preparation script for the easiest setup:
```bash
# Download and run the preparation script
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash
# Or download first, then run
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh -o docker-deploy.sh
chmod +x docker-deploy.sh
./docker-deploy.sh
```
**What the script does:**
- Downloads `docker-compose.local.yml` and `.env.example`
- Automatically generates secure secrets (JWT_SECRET, TOTP_ENCRYPTION_KEY, POSTGRES_PASSWORD)
- Creates `.env` file with generated secrets
- Creates necessary data directories (data/, postgres_data/, redis_data/)
- **Displays generated credentials** (POSTGRES_PASSWORD, JWT_SECRET, etc.)
**After running the script:**
```bash
# Start services
docker-compose -f docker-compose.local.yml up -d
# View logs
docker-compose -f docker-compose.local.yml logs -f sub2api
# If admin password was auto-generated, find it in logs:
docker-compose -f docker-compose.local.yml logs sub2api | grep "admin password"
# Access Web UI
# http://localhost:8080
```
### Method 2: Manual Deployment
If you prefer manual control:
```bash
# Clone repository
git clone https://github.com/Wei-Shaw/sub2api.git
cd sub2api/deploy
# Configure environment
cp .env.example .env
nano .env # Set POSTGRES_PASSWORD and other required variables
# Generate secure secrets (recommended)
JWT_SECRET=$(openssl rand -hex 32)
TOTP_ENCRYPTION_KEY=$(openssl rand -hex 32)
echo "JWT_SECRET=${JWT_SECRET}" >> .env
echo "TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY}" >> .env
# Create data directories
mkdir -p data postgres_data redis_data
# Start all services using local directory version
docker-compose -f docker-compose.local.yml up -d
# View logs (check for auto-generated admin password)
docker-compose -f docker-compose.local.yml logs -f sub2api
# Access Web UI
# http://localhost:8080
```
### Deployment Version Comparison
| Version | Data Storage | Migration | Best For |
|---------|-------------|-----------|----------|
| **docker-compose.local.yml** | Local directories (./data, ./postgres_data, ./redis_data) | ✅ Easy (tar entire directory) | Production, need frequent backups/migration |
| **docker-compose.yml** | Named volumes (/var/lib/docker/volumes/) | ⚠️ Requires docker commands | Simple setup, don't need migration |
**Recommendation:** Use `docker-compose.local.yml` (deployed by `docker-deploy.sh`) for easier data management and migration.
### How Auto-Setup Works
When using Docker Compose with `AUTO_SETUP=true`:
1. On first run, the system automatically:
- Connects to PostgreSQL and Redis
- Applies database migrations (SQL files in `backend/migrations/*.sql`) and records them in `schema_migrations`
- Generates JWT secret (if not provided)
- Creates admin account (password auto-generated if not provided)
- Writes config.yaml
2. No manual Setup Wizard needed - just configure `.env` and start
3. If `ADMIN_PASSWORD` is not set, check logs for the generated password:
```bash
docker-compose logs sub2api | grep "admin password"
```
### Database Migration Notes (PostgreSQL)
- Migrations are applied in lexicographic order (e.g. `001_...sql`, `002_...sql`).
- `schema_migrations` tracks applied migrations (filename + checksum).
- Migrations are forward-only; rollback requires a DB backup restore or a manual compensating SQL script.
**Verify `users.allowed_groups` → `user_allowed_groups` backfill**
During the incremental GORM→Ent migration, `users.allowed_groups` (legacy `BIGINT[]`) is being replaced by a normalized join table `user_allowed_groups(user_id, group_id)`.
Run this query to compare the legacy data vs the join table:
```sql
WITH old_pairs AS (
SELECT DISTINCT u.id AS user_id, x.group_id
FROM users u
CROSS JOIN LATERAL unnest(u.allowed_groups) AS x(group_id)
WHERE u.allowed_groups IS NOT NULL
)
SELECT
(SELECT COUNT(*) FROM old_pairs) AS old_pair_count,
(SELECT COUNT(*) FROM user_allowed_groups) AS new_pair_count;
```
### datamanagementd数据管理联动
如需启用管理后台“数据管理”功能,请额外部署宿主机 `datamanagementd`
- 主进程固定探测 `/tmp/sub2api-datamanagement.sock`
- Docker 场景下需把宿主机 Socket 挂载到容器内同路径
- 详细步骤见:`deploy/DATAMANAGEMENTD_CN.md`
### Commands
For **local directory version** (docker-compose.local.yml):
```bash
# Start services
docker-compose -f docker-compose.local.yml up -d
# Stop services
docker-compose -f docker-compose.local.yml down
# View logs
docker-compose -f docker-compose.local.yml logs -f sub2api
# Restart Sub2API only
docker-compose -f docker-compose.local.yml restart sub2api
# Update to latest version
docker-compose -f docker-compose.local.yml pull
docker-compose -f docker-compose.local.yml up -d
# Remove all data (caution!)
docker-compose -f docker-compose.local.yml down
rm -rf data/ postgres_data/ redis_data/
```
For **named volumes version** (docker-compose.yml):
```bash
# Start services
docker-compose up -d
# Stop services
docker-compose down
# View logs
docker-compose logs -f sub2api
# Restart Sub2API only
docker-compose restart sub2api
# Update to latest version
docker-compose pull
docker-compose up -d
# Remove all data (caution!)
docker-compose down -v
```
### Environment Variables
| Variable | Required | Default | Description |
|----------|----------|---------|-------------|
| `POSTGRES_PASSWORD` | **Yes** | - | PostgreSQL password |
| `JWT_SECRET` | **Recommended** | *(auto-generated)* | JWT secret (fixed for persistent sessions) |
| `TOTP_ENCRYPTION_KEY` | **Recommended** | *(auto-generated)* | TOTP encryption key (fixed for persistent 2FA) |
| `SERVER_PORT` | No | `8080` | Server port |
| `ADMIN_EMAIL` | No | `admin@sub2api.local` | Admin email |
| `ADMIN_PASSWORD` | No | *(auto-generated)* | Admin password |
| `TZ` | No | `Asia/Shanghai` | Timezone |
| `GEMINI_OAUTH_CLIENT_ID` | No | *(builtin)* | Google OAuth client ID (Gemini OAuth). Leave empty to use the built-in Gemini CLI client. |
| `GEMINI_OAUTH_CLIENT_SECRET` | No | *(builtin)* | Google OAuth client secret (Gemini OAuth). Leave empty to use the built-in Gemini CLI client. |
| `GEMINI_OAUTH_SCOPES` | No | *(default)* | OAuth scopes (Gemini OAuth) |
| `GEMINI_QUOTA_POLICY` | No | *(empty)* | JSON overrides for Gemini local quota simulation (Code Assist only). |
See `.env.example` for all available options.
> **Note:** The `docker-deploy.sh` script automatically generates `JWT_SECRET`, `TOTP_ENCRYPTION_KEY`, and `POSTGRES_PASSWORD` for you.
### Easy Migration (Local Directory Version)
When using `docker-compose.local.yml`, all data is stored in local directories, making migration simple:
```bash
# On source server: Stop services and create archive
cd /path/to/deployment
docker-compose -f docker-compose.local.yml down
cd ..
tar czf sub2api-complete.tar.gz deployment/
# Transfer to new server
scp sub2api-complete.tar.gz user@new-server:/path/to/destination/
# On new server: Extract and start
tar xzf sub2api-complete.tar.gz
cd deployment/
docker-compose -f docker-compose.local.yml up -d
```
Your entire deployment (configuration + data) is migrated!
---
## Gemini OAuth Configuration
Sub2API supports three methods to connect to Gemini:
### Method 1: Code Assist OAuth (Recommended for GCP Users)
**No configuration needed** - always uses the built-in Gemini CLI OAuth client (public).
1. Leave `GEMINI_OAUTH_CLIENT_ID` and `GEMINI_OAUTH_CLIENT_SECRET` empty
2. In the Admin UI, create a Gemini OAuth account and select **"Code Assist"** type
3. Complete the OAuth flow in your browser
> Note: Even if you configure `GEMINI_OAUTH_CLIENT_ID` / `GEMINI_OAUTH_CLIENT_SECRET` for AI Studio OAuth,
> Code Assist OAuth will still use the built-in Gemini CLI client.
**Requirements:**
- Google account with access to Google Cloud Platform
- A GCP project (auto-detected or manually specified)
**How to get Project ID (if auto-detection fails):**
1. Go to [Google Cloud Console](https://console.cloud.google.com/)
2. Click the project dropdown at the top of the page
3. Copy the Project ID (not the project name) from the list
4. Common formats: `my-project-123456` or `cloud-ai-companion-xxxxx`
### Method 2: AI Studio OAuth (For Regular Google Accounts)
Requires your own OAuth client credentials.
**Step 1: Create OAuth Client in Google Cloud Console**
1. Go to [Google Cloud Console - Credentials](https://console.cloud.google.com/apis/credentials)
2. Create a new project or select an existing one
3. **Enable the Generative Language API:**
- Go to "APIs & Services" → "Library"
- Search for "Generative Language API"
- Click "Enable"
4. **Configure OAuth Consent Screen** (if not done):
- Go to "APIs & Services" → "OAuth consent screen"
- Choose "External" user type
- Fill in app name, user support email, developer contact
- Add scopes: `https://www.googleapis.com/auth/generative-language.retriever` (and optionally `https://www.googleapis.com/auth/cloud-platform`)
- Add test users (your Google account email)
5. **Create OAuth 2.0 credentials:**
- Go to "APIs & Services" → "Credentials"
- Click "Create Credentials" → "OAuth client ID"
- Application type: **Web application** (or **Desktop app**)
- Name: e.g., "Sub2API Gemini"
- Authorized redirect URIs: Add `http://localhost:1455/auth/callback`
6. Copy the **Client ID** and **Client Secret**
7. **⚠️ Publish to Production (IMPORTANT):**
- Go to "APIs & Services" → "OAuth consent screen"
- Click "PUBLISH APP" to move from Testing to Production
- **Testing mode limitations:**
- Only manually added test users can authenticate (max 100 users)
- Refresh tokens expire after 7 days
- Users must be re-added periodically
- **Production mode:** Any Google user can authenticate, tokens don't expire
- Note: For sensitive scopes, Google may require verification (demo video, privacy policy)
**Step 2: Configure Environment Variables**
```bash
GEMINI_OAUTH_CLIENT_ID=your-client-id.apps.googleusercontent.com
GEMINI_OAUTH_CLIENT_SECRET=GOCSPX-your-client-secret
# 可选:如需使用 Gemini CLI 内置 OAuth ClientCode Assist / Google One
# 安全说明:本仓库不会内置该 client_secret请在运行环境通过环境变量注入。
# GEMINI_CLI_OAUTH_CLIENT_SECRET=GOCSPX-your-built-in-secret
```
**Step 3: Create Account in Admin UI**
1. Create a Gemini OAuth account and select **"AI Studio"** type
2. Complete the OAuth flow
- After consent, your browser will be redirected to `http://localhost:1455/auth/callback?code=...&state=...`
- Copy the full callback URL (recommended) or just the `code` and paste it back into the Admin UI
### Method 3: API Key (Simplest)
1. Go to [Google AI Studio](https://aistudio.google.com/app/apikey)
2. Click "Create API key"
3. In Admin UI, create a Gemini **API Key** account
4. Paste your API key (starts with `AIza...`)
### Comparison Table
| Feature | Code Assist OAuth | AI Studio OAuth | API Key |
|---------|-------------------|-----------------|---------|
| Setup Complexity | Easy (no config) | Medium (OAuth client) | Easy |
| GCP Project Required | Yes | No | No |
| Custom OAuth Client | No (built-in) | Yes (required) | N/A |
| Rate Limits | GCP quota | Standard | Standard |
| Best For | GCP developers | Regular users needing OAuth | Quick testing |
---
## Binary Installation
For production servers using systemd.
### One-Line Installation
```bash
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | sudo bash
```
### Manual Installation
1. Download the latest release from [GitHub Releases](https://github.com/Wei-Shaw/sub2api/releases)
2. Extract and copy the binary to `/opt/sub2api/`
3. Copy `sub2api.service` to `/etc/systemd/system/`
4. Run:
```bash
sudo systemctl daemon-reload
sudo systemctl enable sub2api
sudo systemctl start sub2api
```
5. Open the Setup Wizard in your browser to complete configuration
### Commands
```bash
# Install
sudo ./install.sh
# Upgrade
sudo ./install.sh upgrade
# Uninstall
sudo ./install.sh uninstall
```
### Service Management
```bash
# Start the service
sudo systemctl start sub2api
# Stop the service
sudo systemctl stop sub2api
# Restart the service
sudo systemctl restart sub2api
# Check status
sudo systemctl status sub2api
# View logs
sudo journalctl -u sub2api -f
# Enable auto-start on boot
sudo systemctl enable sub2api
```
### Configuration
#### Server Address and Port
During installation, you will be prompted to configure the server listen address and port. These settings are stored in the systemd service file as environment variables.
To change after installation:
1. Edit the systemd service:
```bash
sudo systemctl edit sub2api
```
2. Add or modify:
```ini
[Service]
Environment=SERVER_HOST=0.0.0.0
Environment=SERVER_PORT=3000
```
3. Reload and restart:
```bash
sudo systemctl daemon-reload
sudo systemctl restart sub2api
```
#### Gemini OAuth Configuration
If you need to use AI Studio OAuth for Gemini accounts, add the OAuth client credentials to the systemd service file:
1. Edit the service file:
```bash
sudo nano /etc/systemd/system/sub2api.service
```
2. Add your OAuth credentials in the `[Service]` section (after the existing `Environment=` lines):
```ini
Environment=GEMINI_OAUTH_CLIENT_ID=your-client-id.apps.googleusercontent.com
Environment=GEMINI_OAUTH_CLIENT_SECRET=GOCSPX-your-client-secret
```
如需使用“内置 Gemini CLI OAuth Client”Code Assist / Google One还需要注入
```ini
Environment=GEMINI_CLI_OAUTH_CLIENT_SECRET=GOCSPX-your-built-in-secret
```
3. Reload and restart:
```bash
sudo systemctl daemon-reload
sudo systemctl restart sub2api
```
> **Note:** Code Assist OAuth does not require any configuration - it uses the built-in Gemini CLI client.
> See the [Gemini OAuth Configuration](#gemini-oauth-configuration) section above for detailed setup instructions.
#### Application Configuration
The main config file is at `/etc/sub2api/config.yaml` (created by Setup Wizard).
### Prerequisites
- Linux server (Ubuntu 20.04+, Debian 11+, CentOS 8+, etc.)
- PostgreSQL 14+
- Redis 6+
- systemd
### Directory Structure
```
/opt/sub2api/
├── sub2api # Main binary
├── sub2api.backup # Backup (after upgrade)
└── data/ # Runtime data
/etc/sub2api/
└── config.yaml # Configuration file
```
---
## Troubleshooting
### Docker
For **local directory version**:
```bash
# Check container status
docker-compose -f docker-compose.local.yml ps
# View detailed logs
docker-compose -f docker-compose.local.yml logs --tail=100 sub2api
# Check database connection
docker-compose -f docker-compose.local.yml exec postgres pg_isready
# Check Redis connection
docker-compose -f docker-compose.local.yml exec redis redis-cli ping
# Restart all services
docker-compose -f docker-compose.local.yml restart
# Check data directories
ls -la data/ postgres_data/ redis_data/
```
For **named volumes version**:
```bash
# Check container status
docker-compose ps
# View detailed logs
docker-compose logs --tail=100 sub2api
# Check database connection
docker-compose exec postgres pg_isready
# Check Redis connection
docker-compose exec redis redis-cli ping
# Restart all services
docker-compose restart
```
### Binary Install
```bash
# Check service status
sudo systemctl status sub2api
# View recent logs
sudo journalctl -u sub2api -n 50
# Check config file
sudo cat /etc/sub2api/config.yaml
# Check PostgreSQL
sudo systemctl status postgresql
# Check Redis
sudo systemctl status redis
```
### Common Issues
1. **Port already in use**: Change `SERVER_PORT` in `.env` or systemd config
2. **Database connection failed**: Check PostgreSQL is running and credentials are correct
3. **Redis connection failed**: Check Redis is running and password is correct
4. **Permission denied**: Ensure proper file ownership for binary install
---
## TLS Fingerprint Configuration
Sub2API supports TLS fingerprint simulation to make requests appear as if they come from the official Claude CLI (Node.js client).
> **💡 Tip:** Visit **[tls.sub2api.org](https://tls.sub2api.org/)** to get TLS fingerprint information for different devices and browsers.
### Default Behavior
- Built-in `claude_cli_v2` profile simulates Node.js 20.x + OpenSSL 3.x
- JA3 Hash: `1a28e69016765d92e3b381168d68922c`
- JA4: `t13d5911h1_a33745022dd6_1f22a2ca17c4`
- Profile selection: `accountID % profileCount`
### Configuration
```yaml
gateway:
tls_fingerprint:
enabled: true # Global switch
profiles:
# Simple profile (uses default cipher suites)
profile_1:
name: "Profile 1"
# Profile with custom cipher suites (use compact array format)
profile_2:
name: "Profile 2"
cipher_suites: [4866, 4867, 4865, 49199, 49195, 49200, 49196]
curves: [29, 23, 24]
point_formats: [0]
# Another custom profile
profile_3:
name: "Profile 3"
cipher_suites: [4865, 4866, 4867, 49199, 49200]
curves: [29, 23, 24, 25]
```
### Profile Fields
| Field | Type | Description |
|-------|------|-------------|
| `name` | string | Display name (required) |
| `cipher_suites` | []uint16 | Cipher suites in decimal. Empty = default |
| `curves` | []uint16 | Elliptic curves in decimal. Empty = default |
| `point_formats` | []uint8 | EC point formats. Empty = default |
### Common Values Reference
**Cipher Suites (TLS 1.3):** `4865` (AES_128_GCM), `4866` (AES_256_GCM), `4867` (CHACHA20)
**Cipher Suites (TLS 1.2):** `49195`, `49196`, `49199`, `49200` (ECDHE variants)
**Curves:** `29` (X25519), `23` (P-256), `24` (P-384), `25` (P-521)

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Quick local image-build script so the build flags don't have to be retyped
# on the command line every time.
set -euo pipefail

# Resolve the repository root relative to this script's own location.
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
repo_root="$(cd "${script_dir}/.." && pwd)"

# Collect docker build arguments in an array for readability.
build_args=(
  -t sub2api:latest
  --build-arg GOPROXY=https://goproxy.cn,direct
  --build-arg GOSUMDB=sum.golang.google.cn
  -f "${repo_root}/Dockerfile"
)

docker build "${build_args[@]}" "${repo_root}"

View File

@@ -0,0 +1,995 @@
# Sub2API Configuration File
# Sub2API 配置文件
#
# Copy this file to /etc/sub2api/config.yaml and modify as needed
# 复制此文件到 /etc/sub2api/config.yaml 并根据需要修改
#
# Documentation / 文档: https://github.com/Wei-Shaw/sub2api
# =============================================================================
# Server Configuration
# 服务器配置
# =============================================================================
server:
# Bind address (0.0.0.0 for all interfaces)
# 绑定地址0.0.0.0 表示监听所有网络接口)
host: "0.0.0.0"
# Port to listen on
# 监听端口
port: 8080
# Mode: "debug" for development, "release" for production
# 运行模式:"debug" 用于开发,"release" 用于生产环境
mode: "release"
# Frontend base URL used to generate external links in emails (e.g. password reset)
# 用于生成邮件中的外部链接(例如:重置密码链接)的前端基础地址
# Example: "https://example.com"
frontend_url: ""
# Trusted proxies for X-Forwarded-For parsing (CIDR/IP). Empty disables trusted proxies.
# 信任的代理地址CIDR/IP 格式),用于解析 X-Forwarded-For 头。留空则禁用代理信任。
trusted_proxies: []
# Global max request body size in bytes (default: 256MB)
# 全局最大请求体大小(字节,默认 256MB
# Applies to all requests, especially important for h2c first request memory protection
# 适用于所有请求,对 h2c 第一请求的内存保护尤为重要
max_request_body_size: 268435456
# HTTP/2 Cleartext (h2c) configuration
# HTTP/2 Cleartext (h2c) 配置
h2c:
# Enable HTTP/2 Cleartext for client connections
# 启用 HTTP/2 Cleartext 客户端连接
enabled: true
# Max concurrent streams per connection
# 每个连接的最大并发流数量
max_concurrent_streams: 50
# Idle timeout for connections (seconds)
# 连接空闲超时时间(秒)
idle_timeout: 75
# Max frame size in bytes (default: 1MB)
# 最大帧大小(字节,默认 1MB
max_read_frame_size: 1048576
# Max upload buffer per connection in bytes (default: 2MB)
# 每个连接的最大上传缓冲区(字节,默认 2MB
max_upload_buffer_per_connection: 2097152
# Max upload buffer per stream in bytes (default: 512KB)
# 每个流的最大上传缓冲区(字节,默认 512KB
max_upload_buffer_per_stream: 524288
# =============================================================================
# Run Mode Configuration
# 运行模式配置
# =============================================================================
# Run mode: "standard" (default) or "simple" (for internal use)
# 运行模式:"standard"(默认)或 "simple"(内部使用)
# - standard: Full SaaS features with billing/balance checks
# - standard: 完整 SaaS 功能,包含计费和余额校验
# - simple: Hides SaaS features and skips billing/balance checks
# - simple: 隐藏 SaaS 功能,跳过计费和余额校验
run_mode: "standard"
# =============================================================================
# CORS Configuration
# 跨域资源共享 (CORS) 配置
# =============================================================================
cors:
# Allowed origins list. Leave empty to disable cross-origin requests.
# 允许的来源列表。留空则禁用跨域请求。
allowed_origins: []
# Allow credentials (cookies/authorization headers). Cannot be used with "*".
# 允许携带凭证cookies/授权头)。不能与 "*" 通配符同时使用。
allow_credentials: true
# =============================================================================
# Security Configuration
# 安全配置
# =============================================================================
security:
url_allowlist:
# Enable URL allowlist validation (disable to skip all URL checks)
# 启用 URL 白名单验证(禁用则跳过所有 URL 检查)
enabled: false
# Allowed upstream hosts for API proxying
# 允许代理的上游 API 主机列表
upstream_hosts:
- "api.openai.com"
- "api.anthropic.com"
- "api.kimi.com"
- "open.bigmodel.cn"
- "api.minimaxi.com"
- "generativelanguage.googleapis.com"
- "cloudcode-pa.googleapis.com"
- "*.openai.azure.com"
# Allowed hosts for pricing data download
# 允许下载定价数据的主机列表
pricing_hosts:
- "raw.githubusercontent.com"
# Allowed hosts for CRS sync (required when using CRS sync)
# 允许 CRS 同步的主机列表(使用 CRS 同步功能时必须配置)
crs_hosts: []
# Allow localhost/private IPs for upstream/pricing/CRS (use only in trusted networks)
# 允许本地/私有 IP 地址用于上游/定价/CRS仅在可信网络中使用
allow_private_hosts: true
# Allow http:// URLs when allowlist is disabled (default: false, require https)
# 白名单禁用时是否允许 http:// URL默认: false要求 https
allow_insecure_http: true
response_headers:
# Enable configurable response header filtering (default: true)
# 启用可配置的响应头过滤(默认启用,过滤上游敏感响应头)
enabled: true
# Extra allowed response headers from upstream
# 额外允许的上游响应头
additional_allowed: []
# Force-remove response headers from upstream
# 强制移除的上游响应头
force_remove: []
csp:
# Enable Content-Security-Policy header
# 启用内容安全策略 (CSP) 响应头
enabled: true
# Default CSP policy (override if you host assets on other domains)
# 默认 CSP 策略(如果静态资源托管在其他域名,请自行覆盖)
# Note: __CSP_NONCE__ will be replaced with 'nonce-xxx' at request time for inline script security
# 注意__CSP_NONCE__ 会在请求时被替换为 'nonce-xxx',用于内联脚本安全
policy: "default-src 'self'; script-src 'self' __CSP_NONCE__ https://challenges.cloudflare.com https://static.cloudflareinsights.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: https:; font-src 'self' data: https://fonts.gstatic.com; connect-src 'self' https:; frame-src https://challenges.cloudflare.com; frame-ancestors 'none'; base-uri 'self'; form-action 'self'"
proxy_probe:
# Allow skipping TLS verification for proxy probe (debug only)
# 允许代理探测时跳过 TLS 证书验证(仅用于调试)
insecure_skip_verify: false
proxy_fallback:
# Allow auxiliary services (update check, pricing data) to fallback to direct
# connection when proxy initialization fails. Does NOT affect AI gateway connections.
# 辅助服务(更新检查、定价数据拉取)代理初始化失败时是否允许回退直连。
# 不影响 AI 账号网关连接。默认 falsefail-fast 防止 IP 泄露。
allow_direct_on_error: false
# =============================================================================
# Gateway Configuration
# 网关配置
# =============================================================================
gateway:
# Timeout for waiting upstream response headers (seconds)
# 等待上游响应头超时时间(秒)
response_header_timeout: 600
# Max request body size in bytes (default: 256MB)
# 请求体最大字节数(默认 256MB
max_body_size: 268435456
# Max bytes to read for non-stream upstream responses (default: 8MB)
# 非流式上游响应体读取上限(默认 8MB
upstream_response_read_max_bytes: 8388608
# Max bytes to read for proxy probe responses (default: 1MB)
# 代理探测响应体读取上限(默认 1MB
proxy_probe_response_read_max_bytes: 1048576
# Enable Gemini upstream response header debug logs (default: false)
# 是否开启 Gemini 上游响应头调试日志(默认 false
gemini_debug_response_headers: false
# Sora max request body size in bytes (0=use max_body_size)
# Sora 请求体最大字节数0=使用 max_body_size
sora_max_body_size: 268435456
# Sora stream timeout (seconds, 0=disable)
# Sora 流式请求总超时0=禁用)
sora_stream_timeout_seconds: 900
# Sora non-stream timeout (seconds, 0=disable)
# Sora 非流式请求超时0=禁用)
sora_request_timeout_seconds: 180
# Sora stream enforcement mode: force/error
# Sora stream 强制策略force/error
sora_stream_mode: "force"
# Sora model filters
# Sora 模型过滤配置
sora_model_filters:
# Hide prompt-enhance models by default
# 默认隐藏 prompt-enhance 模型
hide_prompt_enhance: true
  # Require API key for /sora/media proxy (default: true)
# /sora/media 是否强制要求 API Key默认 true
sora_media_require_api_key: true
# Sora media temporary signing key (empty disables signed URL)
# Sora 媒体临时签名密钥(为空则禁用签名)
sora_media_signing_key: ""
# Signed URL TTL seconds (<=0 disables)
# 临时签名 URL 有效期(秒,<=0 表示禁用)
sora_media_signed_url_ttl_seconds: 900
# Connection pool isolation strategy:
# 连接池隔离策略:
# - proxy: Isolate by proxy, same proxy shares connection pool (suitable for few proxies, many accounts)
# - proxy: 按代理隔离,同一代理共享连接池(适合代理少、账户多)
# - account: Isolate by account, same account shares connection pool (suitable for few accounts, strict isolation)
# - account: 按账户隔离,同一账户共享连接池(适合账户少、需严格隔离)
# - account_proxy: Isolate by account+proxy combination (default, finest granularity)
# - account_proxy: 按账户+代理组合隔离(默认,最细粒度)
connection_pool_isolation: "account_proxy"
# Force Codex CLI mode: treat all /openai/v1/responses requests as Codex CLI.
# 强制按 Codex CLI 处理 /openai/v1/responses 请求(用于网关未透传/改写 User-Agent 的兜底)。
#
# 注意:开启后会影响所有客户端的行为(不仅限于 VS Code / Codex CLI请谨慎开启。
force_codex_cli: false
# OpenAI 透传模式是否放行客户端超时头(如 x-stainless-timeout
# 默认 false过滤超时头降低上游提前断流风险。
openai_passthrough_allow_timeout_headers: false
# OpenAI Responses WebSocket 配置(默认开启,可按需回滚到 HTTP
openai_ws:
# 新版 WS mode 路由(默认关闭)。关闭时保持当前 legacy 实现行为。
mode_router_v2_enabled: false
# ingress 默认模式off|ctx_pool|passthrough仅 mode_router_v2_enabled=true 生效)
# 兼容旧值shared/dedicated 会按 ctx_pool 处理。
ingress_mode_default: ctx_pool
# 全局总开关,默认 true关闭时所有请求保持原有 HTTP/SSE 路由
enabled: true
# 按账号类型细分开关
oauth_enabled: true
apikey_enabled: true
# 全局强制 HTTP紧急回滚开关
force_http: false
# 允许在 WSv2 下按策略恢复 store=true默认 false
allow_store_recovery: false
# ingress 模式收到 previous_response_not_found 时,自动去掉 previous_response_id 重试一次(默认 true
ingress_previous_response_recovery_enabled: true
# store=false 且无可复用会话连接时的策略:
# strict=强制新建连接隔离优先adaptive=仅在高风险失败后强制新建off=尽量复用(性能优先)
store_disabled_conn_mode: strict
# store=false 且无可复用会话连接时,是否强制新建连接(默认 true优先会话隔离
# 兼容旧配置:仅在 store_disabled_conn_mode 未配置时生效
store_disabled_force_new_conn: true
# 是否启用 WSv2 generate=false 预热(默认 false
prewarm_generate_enabled: false
# 协议 feature 开关v2 优先于 v1
responses_websockets: false
responses_websockets_v2: true
# 连接池参数(按账号池化复用)
max_conns_per_account: 128
min_idle_per_account: 4
max_idle_per_account: 12
# 是否按账号并发动态计算连接池上限:
# effective_max_conns = min(max_conns_per_account, ceil(account.concurrency * factor))
dynamic_max_conns_by_account_concurrency_enabled: true
# 按账号类型分别设置系数OAuth / API Key
oauth_max_conns_factor: 1.0
apikey_max_conns_factor: 1.0
dial_timeout_seconds: 10
read_timeout_seconds: 900
write_timeout_seconds: 120
pool_target_utilization: 0.7
queue_limit_per_conn: 64
# 流式写出批量 flush 参数
event_flush_batch_size: 1
event_flush_interval_ms: 10
# 预热触发冷却(毫秒)
prewarm_cooldown_ms: 300
# WS 回退到 HTTP 后的冷却时间(秒),用于避免 WS/HTTP 来回抖动0 表示关闭冷却
fallback_cooldown_seconds: 30
# WS 重试退避参数(毫秒)
retry_backoff_initial_ms: 120
retry_backoff_max_ms: 2000
# 抖动比例0-1
retry_jitter_ratio: 0.2
# 单次请求 WS 重试总预算(毫秒);建议设置为有限值,避免重试拉高 TTFT 长尾
retry_total_budget_ms: 5000
# payload_schema 日志采样率0-1降低热路径日志放大
payload_log_sample_rate: 0.2
# 调度与粘连参数
lb_top_k: 7
sticky_session_ttl_seconds: 3600
# 会话哈希迁移兼容开关:新 key 未命中时回退读取旧 SHA-256 key
session_hash_read_old_fallback: true
# 会话哈希迁移兼容开关:写入时双写旧 SHA-256 key短 TTL
session_hash_dual_write_old: true
# context 元数据迁移兼容开关:保留旧 ctxkey.* 读取/注入桥接
metadata_bridge_enabled: true
sticky_response_id_ttl_seconds: 3600
# 兼容旧键:当 sticky_response_id_ttl_seconds 缺失时回退该值
sticky_previous_response_ttl_seconds: 3600
scheduler_score_weights:
priority: 1.0
load: 1.0
queue: 0.7
error_rate: 0.8
ttft: 0.5
# HTTP upstream connection pool settings (HTTP/2 + multi-proxy scenario defaults)
# HTTP 上游连接池配置HTTP/2 + 多代理场景默认值)
# Max idle connections across all hosts
# 所有主机的最大空闲连接数
max_idle_conns: 2560
# Max idle connections per host
# 每个主机的最大空闲连接数
max_idle_conns_per_host: 120
# Max connections per host
# 每个主机的最大连接数
max_conns_per_host: 1024
# Idle connection timeout (seconds)
# 空闲连接超时时间(秒)
idle_conn_timeout_seconds: 90
# Upstream client cache settings
# 上游连接池客户端缓存配置
# max_upstream_clients: Max cached clients, evicts least recently used when exceeded
# max_upstream_clients: 最大缓存客户端数量,超出后淘汰最久未使用的
max_upstream_clients: 5000
# client_idle_ttl_seconds: Client idle reclaim threshold (seconds), reclaimed when idle and no active requests
# client_idle_ttl_seconds: 客户端空闲回收阈值(秒),超时且无活跃请求时回收
client_idle_ttl_seconds: 900
# Concurrency slot expiration time (minutes)
# 并发槽位过期时间(分钟)
concurrency_slot_ttl_minutes: 30
# Stream data interval timeout (seconds), 0=disable
# 流数据间隔超时0=禁用
stream_data_interval_timeout: 180
# Stream keepalive interval (seconds), 0=disable
# 流式 keepalive 间隔0=禁用
stream_keepalive_interval: 10
# SSE max line size in bytes (default: 40MB)
# SSE 单行最大字节数(默认 40MB
max_line_size: 41943040
# Log upstream error response body summary (safe/truncated; does not log request content)
# 记录上游错误响应体摘要(安全/截断;不记录请求内容)
log_upstream_error_body: true
# Max bytes to log from upstream error body
# 记录上游错误响应体的最大字节数
log_upstream_error_body_max_bytes: 2048
# Auto inject anthropic-beta header for API-key accounts when needed (default: off)
# 需要时自动为 API-key 账户注入 anthropic-beta 头(默认:关闭)
inject_beta_for_apikey: false
# Allow failover on selected 400 errors (default: off)
# 允许在特定 400 错误时进行故障转移(默认:关闭)
failover_on_400: false
# Scheduling configuration
# 调度配置
scheduling:
# Sticky session max waiting queue size
# 粘性会话最大排队长度
sticky_session_max_waiting: 3
# Sticky session wait timeout (duration)
# 粘性会话等待超时(时间段)
sticky_session_wait_timeout: 120s
# Fallback wait timeout (duration)
# 兜底排队等待超时(时间段)
fallback_wait_timeout: 30s
# Fallback max waiting queue size
# 兜底最大排队长度
fallback_max_waiting: 100
# Enable batch load calculation for scheduling
# 启用调度批量负载计算
load_batch_enabled: true
# Slot cleanup interval (duration)
# 并发槽位清理周期(时间段)
slot_cleanup_interval: 30s
# 是否允许受控回源到 DB默认 true保持现有行为
db_fallback_enabled: true
# 受控回源超时0 表示不额外收紧超时
db_fallback_timeout_seconds: 0
# 受控回源限流(实例级 QPS0 表示不限制
db_fallback_max_qps: 0
# outbox 轮询周期(秒)
outbox_poll_interval_seconds: 1
# outbox 滞后告警阈值(秒)
outbox_lag_warn_seconds: 5
# outbox 触发强制重建阈值(秒)
outbox_lag_rebuild_seconds: 10
# outbox 连续滞后触发次数
outbox_lag_rebuild_failures: 3
# outbox 积压触发重建阈值(行数)
outbox_backlog_rebuild_rows: 10000
# 全量重建周期0 表示禁用
full_rebuild_interval_seconds: 300
# TLS fingerprint simulation / TLS 指纹伪装
# Default profile "claude_cli_v2" simulates Node.js 20.x
# 默认模板 "claude_cli_v2" 模拟 Node.js 20.x 指纹
tls_fingerprint:
enabled: true
# profiles:
# profile_1:
# name: "Custom Profile 1"
# profile_2:
# name: "Custom Profile 2"
# =============================================================================
# Logging Configuration
# 日志配置
# =============================================================================
log:
# Log level: debug/info/warn/error
# 日志级别debug/info/warn/error
level: "info"
# Log format: json/console
# 日志格式json/console
format: "console"
# Service name field written into each log line
# 每条日志都会附带 service 字段
service_name: "sub2api"
# Environment field written into each log line
# 每条日志都会附带 env 字段
env: "production"
# Include caller information
# 是否输出调用方位置信息
caller: true
# Stacktrace threshold: none/error/fatal
# 堆栈输出阈值none/error/fatal
stacktrace_level: "error"
output:
# Keep stdout/stderr output for container log collection
# 保持标准输出用于容器日志采集
to_stdout: true
# Enable file output (default path auto-derived)
# 启用文件输出(默认路径自动推导)
to_file: true
      # Empty means:
      #   - DATA_DIR set: ${DATA_DIR}/logs/sub2api.log
      #   - otherwise:    /app/data/logs/sub2api.log
      # 留空时:
      #   - 设置 DATA_DIR:${DATA_DIR}/logs/sub2api.log
      #   - 否则:/app/data/logs/sub2api.log
file_path: ""
rotation:
# Max file size before rotation (MB)
# 单文件滚动阈值MB
max_size_mb: 100
# Number of rotated files to keep (0 means unlimited)
# 保留历史文件数量0 表示不限制)
max_backups: 10
# Number of days to keep old log files (0 means unlimited)
# 历史日志保留天数0 表示不限制)
max_age_days: 7
# Compress rotated files
# 是否压缩历史日志
compress: true
# Use local time for timestamp in rotated filename
# 滚动文件名时间戳使用本地时区
local_time: true
sampling:
# Enable zap sampler (reduce high-frequency repetitive logs)
# 启用 zap 采样(减少高频重复日志)
enabled: false
# Number of first entries per second to always log
# 每秒无采样保留的前 N 条日志
initial: 100
# Thereafter keep 1 out of N entries per second
# 之后每 N 条保留 1 条
thereafter: 100
# =============================================================================
# Sora Direct Client Configuration
# Sora 直连配置
# =============================================================================
sora:
client:
# Sora backend base URL
# Sora 上游 Base URL
base_url: "https://sora.chatgpt.com/backend"
# Request timeout (seconds)
# 请求超时(秒)
timeout_seconds: 120
# Max retries for upstream requests
# 上游请求最大重试次数
max_retries: 3
# Account+proxy cooldown window after Cloudflare challenge (seconds, 0 to disable)
# Cloudflare challenge 后按账号+代理冷却窗口0 表示关闭)
cloudflare_challenge_cooldown_seconds: 900
# Poll interval (seconds)
# 轮询间隔(秒)
poll_interval_seconds: 2
# Max poll attempts
# 最大轮询次数
max_poll_attempts: 600
# Recent task query limit (image)
# 最近任务查询数量(图片轮询)
recent_task_limit: 50
# Recent task query max limit (fallback)
# 最近任务查询最大数量(回退)
recent_task_limit_max: 200
# Enable debug logs for Sora upstream requests
# 启用 Sora 直连调试日志
# 调试日志会输出上游请求尝试、重试、响应摘要Authorization/openai-sentinel-token 等敏感头会自动脱敏
debug: false
# Allow Sora client to fetch token via OpenAI token provider
# 是否允许 Sora 客户端通过 OpenAI token provider 取 token默认 false避免误走 OpenAI 刷新链路)
use_openai_token_provider: false
# Optional custom headers (key-value)
# 额外请求头(键值对)
headers: {}
# Default User-Agent for Sora requests
# Sora 默认 User-Agent
user_agent: "Sora/1.2026.007 (Android 15; 24122RKC7C; build 2600700)"
# Disable TLS fingerprint for Sora upstream
# 关闭 Sora 上游 TLS 指纹伪装
disable_tls_fingerprint: false
# curl_cffi sidecar for Sora only (required)
# 仅 Sora 链路使用的 curl_cffi sidecar必需
curl_cffi_sidecar:
      # Sora is forced to use sidecar only; keep enabled=true
      # Sora 强制通过 sidecar 请求,必须启用
enabled: true
# Sidecar base URL (default endpoint: /request)
# sidecar 基础地址(默认请求端点:/request
base_url: "http://sora-curl-cffi-sidecar:8080"
# curl_cffi impersonate profile, e.g. chrome131/chrome124/safari18_0
# curl_cffi 指纹伪装 profile例如 chrome131/chrome124/safari18_0
impersonate: "chrome131"
# Sidecar request timeout (seconds)
# sidecar 请求超时(秒)
timeout_seconds: 60
# Reuse session key per account+proxy to let sidecar persist cookies/session
# 按账号+代理复用 session key让 sidecar 持久化 cookies/session
session_reuse_enabled: true
# Session TTL in sidecar (seconds)
# sidecar 会话 TTL
session_ttl_seconds: 3600
storage:
# Storage type (local only for now)
# 存储类型(首发仅支持 local
type: "local"
# Local base path; empty uses /app/data/sora
# 本地存储基础路径;为空使用 /app/data/sora
local_path: ""
# Fallback to upstream URL when download fails
# 下载失败时回退到上游 URL
fallback_to_upstream: true
# Max concurrent downloads
# 并发下载上限
max_concurrent_downloads: 4
# Download timeout (seconds)
# 下载超时(秒)
download_timeout_seconds: 120
# Max download bytes
# 最大下载字节数
max_download_bytes: 209715200
# Enable debug logs for media storage
# 启用媒体存储调试日志
debug: false
cleanup:
# Enable cleanup task
# 启用清理任务
enabled: true
# Retention days
# 保留天数
retention_days: 7
# Cron schedule
# Cron 调度表达式
schedule: "0 3 * * *"
# Token refresh behavior
# token 刷新行为控制
token_refresh:
# Whether OpenAI refresh flow is allowed to sync linked Sora accounts
# 是否允许 OpenAI 刷新流程同步覆盖 linked_openai_account_id 关联的 Sora 账号 token
sync_linked_sora_accounts: false
# =============================================================================
# API Key Auth Cache Configuration
# API Key 认证缓存配置
# =============================================================================
api_key_auth_cache:
# L1 cache size (entries), in-process LRU/TTL cache
# L1 缓存容量(条目数),进程内 LRU/TTL 缓存
l1_size: 65535
# L1 cache TTL (seconds)
# L1 缓存 TTL
l1_ttl_seconds: 15
# L2 cache TTL (seconds), stored in Redis
# L2 缓存 TTLRedis 中存储
l2_ttl_seconds: 300
# Negative cache TTL (seconds)
# 负缓存 TTL
negative_ttl_seconds: 30
# TTL jitter percent (0-100)
# TTL 抖动百分比0-100
jitter_percent: 10
# Enable singleflight for cache misses
# 缓存未命中时启用 singleflight 合并回源
singleflight: true
# =============================================================================
# Dashboard Cache Configuration
# 仪表盘缓存配置
# =============================================================================
dashboard_cache:
# Enable dashboard cache
# 启用仪表盘缓存
enabled: true
# Redis key prefix for multi-environment isolation
# Redis key 前缀,用于多环境隔离
key_prefix: "sub2api:"
# Fresh TTL (seconds); within this window cached stats are considered fresh
# 新鲜阈值(秒);命中后处于该窗口视为新鲜数据
stats_fresh_ttl_seconds: 15
# Cache TTL (seconds) stored in Redis
# Redis 缓存 TTL
stats_ttl_seconds: 30
# Async refresh timeout (seconds)
# 异步刷新超时(秒)
stats_refresh_timeout_seconds: 30
# =============================================================================
# Dashboard Aggregation Configuration
# 仪表盘预聚合配置(重启生效)
# =============================================================================
dashboard_aggregation:
# Enable aggregation job
# 启用聚合作业
enabled: true
# Refresh interval (seconds)
# 刷新间隔(秒)
interval_seconds: 60
# Lookback window (seconds) for late-arriving data
# 回看窗口(秒),处理迟到数据
lookback_seconds: 120
# Allow manual backfill
# 允许手动回填
backfill_enabled: false
# Backfill max range (days)
# 回填最大跨度(天)
backfill_max_days: 31
# Recompute recent N days on startup
# 启动时重算最近 N 天
recompute_days: 2
# Retention windows (days)
# 保留窗口(天)
retention:
# Raw usage_logs retention
# 原始 usage_logs 保留天数
usage_logs_days: 90
# Hourly aggregation retention
# 小时聚合保留天数
hourly_days: 180
# Daily aggregation retention
# 日聚合保留天数
daily_days: 730
# =============================================================================
# Usage Cleanup Task Configuration
# 使用记录清理任务配置(重启生效)
# =============================================================================
usage_cleanup:
# Enable cleanup task worker
# 启用清理任务执行器
enabled: true
# Max date range (days) per task
# 单次任务最大时间跨度(天)
max_range_days: 31
# Batch delete size
# 单批删除数量
batch_size: 5000
# Worker interval (seconds)
# 执行器轮询间隔(秒)
worker_interval_seconds: 10
# Task execution timeout (seconds)
# 单次任务最大执行时长(秒)
task_timeout_seconds: 1800
# =============================================================================
# Idempotency Configuration
# HTTP 写接口幂等配置
# =============================================================================
idempotency:
# Observe-only 模式:
# true: 观察期,不带 Idempotency-Key 仍放行(但会记录)
# false: 强制期,不带 Idempotency-Key 直接拒绝(仅对接入幂等保护的接口生效)
observe_only: true
# 关键写接口幂等记录 TTL
default_ttl_seconds: 86400
# 系统操作接口update/rollback/restart幂等记录 TTL
system_operation_ttl_seconds: 3600
# processing 锁超时(秒)
processing_timeout_seconds: 30
# 可重试失败退避窗口(秒)
failed_retry_backoff_seconds: 5
# 持久化响应体最大长度(字节)
max_stored_response_len: 65536
# 过期幂等记录清理周期(秒)
cleanup_interval_seconds: 60
# 每轮清理最大删除条数
cleanup_batch_size: 500
# =============================================================================
# Concurrency Wait Configuration
# 并发等待配置
# =============================================================================
concurrency:
# SSE ping interval during concurrency wait (seconds)
# 并发等待期间的 SSE ping 间隔(秒)
ping_interval: 10
# =============================================================================
# Database Configuration (PostgreSQL)
# 数据库配置 (PostgreSQL)
# =============================================================================
database:
# Database host address
# 数据库主机地址
host: "localhost"
# Database port
# 数据库端口
port: 5432
# Database username
# 数据库用户名
user: "postgres"
# Database password
# 数据库密码
password: "your_secure_password_here"
# Database name
# 数据库名称
dbname: "sub2api"
# SSL mode: disable, prefer, require, verify-ca, verify-full
# SSL 模式disable禁用, prefer优先加密默认, require要求, verify-ca验证CA, verify-full完全验证
# 默认值为 "prefer",数据库支持 SSL 时自动使用加密连接,不支持时回退明文
sslmode: "prefer"
# Max open connections (高并发场景建议 256+,需配合 PostgreSQL max_connections 调整)
# 最大打开连接数
max_open_conns: 256
# Max idle connections (建议为 max_open_conns 的 50%,减少频繁建连开销)
# 最大空闲连接数
max_idle_conns: 128
# Connection max lifetime (minutes)
# 连接最大存活时间(分钟)
conn_max_lifetime_minutes: 30
# Connection max idle time (minutes)
# 空闲连接最大存活时间(分钟)
conn_max_idle_time_minutes: 5
# =============================================================================
# Redis Configuration
# Redis 配置
# =============================================================================
redis:
# Redis host address
# Redis 主机地址
host: "localhost"
# Redis port
# Redis 端口
port: 6379
# Redis password (leave empty if no password is set)
# Redis 密码(如果未设置密码则留空)
password: ""
# Database number (0-15)
# 数据库编号0-15
db: 0
# Connection pool size (max concurrent connections)
# 连接池大小(最大并发连接数)
pool_size: 1024
# Minimum number of idle connections (高并发场景建议 128+,保持足够热连接)
# 最小空闲连接数
min_idle_conns: 128
# Enable TLS/SSL connection
# 是否启用 TLS/SSL 连接
enable_tls: false
# =============================================================================
# Ops Monitoring (Optional)
# 运维监控 (可选)
# =============================================================================
ops:
# Enable ops monitoring features (background jobs and APIs)
# 是否启用运维监控功能(后台任务和接口)
# Set to false to hide ops menu in sidebar and disable all ops features
# 设置为 false 可在左侧栏隐藏运维监控菜单并禁用所有运维监控功能
# Other detailed settings (cleanup, aggregation, etc.) are configured in ops settings dialog
# 其他详细设置(数据清理、预聚合等)在运维监控设置对话框中配置
enabled: true
# =============================================================================
# JWT Configuration
# JWT 配置
# =============================================================================
jwt:
# IMPORTANT: Change this to a random string in production!
# 重要:生产环境中请更改为随机字符串!
# Generate with / 生成命令: openssl rand -hex 32
secret: "change-this-to-a-secure-random-string"
# Token expiration time in hours (max 168)
# 令牌过期时间(小时,最大 168
expire_hour: 24
# Access Token 过期时间(分钟)
# 优先级说明:
# - >0: 按分钟生效(优先于 expire_hour
# - =0: 回退使用 expire_hour
access_token_expire_minutes: 0
# =============================================================================
# TOTP (2FA) Configuration
# TOTP 双因素认证配置
# =============================================================================
totp:
# IMPORTANT: Set a fixed encryption key for TOTP secrets.
# 重要:设置固定的 TOTP 加密密钥。
# If left empty, a random key will be generated on each startup, causing all
# existing TOTP configurations to become invalid (users won't be able to
# login with 2FA).
# 如果留空,每次启动将生成随机密钥,导致现有的 TOTP 配置失效(用户无法使用
# 双因素认证登录)。
# Generate with / 生成命令: openssl rand -hex 32
encryption_key: ""
# =============================================================================
# LinuxDo Connect OAuth Login (SSO)
# LinuxDo Connect OAuth 登录(用于 Sub2API 用户登录)
# =============================================================================
linuxdo_connect:
enabled: false
client_id: ""
client_secret: ""
authorize_url: "https://connect.linux.do/oauth2/authorize"
token_url: "https://connect.linux.do/oauth2/token"
userinfo_url: "https://connect.linux.do/api/user"
scopes: "user"
# 示例: "https://your-domain.com/api/v1/auth/oauth/linuxdo/callback"
redirect_url: ""
# 安全提示:
# - 建议使用同源相对路径(以 / 开头),避免把 token 重定向到意外的第三方域名
# - 该地址不应包含 #fragment本实现使用 URL fragment 传递 access_token
frontend_redirect_url: "/auth/linuxdo/callback"
token_auth_method: "client_secret_post" # client_secret_post | client_secret_basic | none
# 注意:当 token_auth_method=nonepublic client必须启用 PKCE
use_pkce: false
userinfo_email_path: ""
userinfo_id_path: ""
userinfo_username_path: ""
# =============================================================================
# Default Settings
# 默认设置
# =============================================================================
default:
# Initial admin account (created on first run)
# 初始管理员账户(首次运行时创建)
admin_email: "admin@example.com"
admin_password: "admin123"
# Default settings for new users
# 新用户默认设置
# Max concurrent requests per user
# 每用户最大并发请求数
user_concurrency: 5
# Initial balance for new users
# 新用户初始余额
user_balance: 0
# API key settings
# API 密钥设置
# Prefix for generated API keys
# 生成的 API 密钥前缀
api_key_prefix: "sk-"
# Rate multiplier (affects billing calculation)
# 费率倍数(影响计费计算)
rate_multiplier: 1.0
# =============================================================================
# Rate Limiting
# 速率限制
# =============================================================================
rate_limit:
# Cooldown time (in minutes) when upstream returns 529 (overloaded)
# 上游返回 529过载时的冷却时间分钟
overload_cooldown_minutes: 10
# =============================================================================
# Pricing Data Source (Optional)
# 定价数据源(可选)
# =============================================================================
pricing:
# URL to fetch model pricing data (default: pinned model-price-repo commit)
# 获取模型定价数据的 URL默认固定 commit 的 model-price-repo
remote_url: "https://raw.githubusercontent.com/Wei-Shaw/model-price-repo/c7947e9871687e664180bc971d4837f1fc2784a9/model_prices_and_context_window.json"
# Hash verification URL (optional)
# 哈希校验 URL可选
hash_url: "https://raw.githubusercontent.com/Wei-Shaw/model-price-repo/c7947e9871687e664180bc971d4837f1fc2784a9/model_prices_and_context_window.sha256"
# Local data directory for caching
# 本地数据缓存目录
data_dir: "./data"
# Fallback pricing file
# 备用定价文件
fallback_file: "./resources/model-pricing/model_prices_and_context_window.json"
# Update interval in hours
# 更新间隔(小时)
update_interval_hours: 24
# Hash check interval in minutes
# 哈希检查间隔(分钟)
hash_check_interval_minutes: 10
# =============================================================================
# Billing Configuration
# 计费配置
# =============================================================================
billing:
circuit_breaker:
# Enable circuit breaker for billing service
# 启用计费服务熔断器
enabled: true
# Number of failures before opening circuit
# 触发熔断的失败次数阈值
failure_threshold: 5
# Time to wait before attempting reset (seconds)
# 熔断后重试等待时间(秒)
reset_timeout_seconds: 30
# Number of requests to allow in half-open state
# 半开状态允许通过的请求数
half_open_requests: 3
# =============================================================================
# Turnstile Configuration
# Turnstile 人机验证配置
# =============================================================================
turnstile:
# Require Turnstile in release mode (when enabled, login/register will fail if not configured)
# 在 release 模式下要求 Turnstile 验证(启用后,若未配置则登录/注册会失败)
required: false
# =============================================================================
# Gemini OAuth (Required for Gemini accounts)
# Gemini OAuth 配置Gemini 账户必需)
# =============================================================================
# Sub2API supports TWO Gemini OAuth modes:
# Sub2API 支持两种 Gemini OAuth 模式:
#
# 1. Code Assist OAuth (requires GCP project_id)
# 1. Code Assist OAuth需要 GCP project_id
# - Uses: cloudcode-pa.googleapis.com (Code Assist API)
# - 使用cloudcode-pa.googleapis.comCode Assist API
#
# 2. AI Studio OAuth (no project_id needed)
# 2. AI Studio OAuth不需要 project_id
# - Uses: generativelanguage.googleapis.com (AI Studio API)
# - 使用generativelanguage.googleapis.comAI Studio API
#
# Default: Uses Gemini CLI's public OAuth credentials (same as Google's official CLI tool)
# 默认:使用 Gemini CLI 的公开 OAuth 凭证(与 Google 官方 CLI 工具相同)
gemini:
oauth:
# OAuth 客户端配置说明:
# 1) 留空 client_id/client_secret使用 Gemini CLI 内置 OAuth Client其 client_secret 需通过环境变量注入)
# - GEMINI_CLI_OAUTH_CLIENT_SECRET
# 2) 同时设置 client_id/client_secret使用你自建的 OAuth Client推荐权限更完整
#
# 注意client_id 与 client_secret 必须同时为空或同时非空。
client_id: ""
client_secret: ""
# Optional scopes (space-separated). Leave empty to auto-select based on oauth_type.
# 可选的权限范围(空格分隔)。留空则根据 oauth_type 自动选择。
scopes: ""
quota:
# Optional: local quota simulation for Gemini Code Assist (local billing).
# 可选Gemini Code Assist 本地配额模拟(本地计费)。
# These values are used for UI progress + precheck scheduling, not official Google quotas.
# 这些值用于 UI 进度显示和预检调度,并非 Google 官方配额。
tiers:
LEGACY:
# Pro model requests per day
# Pro 模型每日请求数
pro_rpd: 50
# Flash model requests per day
# Flash 模型每日请求数
flash_rpd: 1500
# Cooldown time (minutes) after hitting quota
# 达到配额后的冷却时间(分钟)
cooldown_minutes: 30
PRO:
# Pro model requests per day
# Pro 模型每日请求数
pro_rpd: 1500
# Flash model requests per day
# Flash 模型每日请求数
flash_rpd: 4000
# Cooldown time (minutes) after hitting quota
# 达到配额后的冷却时间(分钟)
cooldown_minutes: 5
ULTRA:
# Pro model requests per day
# Pro 模型每日请求数
pro_rpd: 2000
# Flash model requests per day (0 = unlimited)
# Flash 模型每日请求数0 = 无限制)
flash_rpd: 0
# Cooldown time (minutes) after hitting quota
# 达到配额后的冷却时间(分钟)
cooldown_minutes: 5
# =============================================================================
# Update Configuration (在线更新配置)
# =============================================================================
update:
# Proxy URL for accessing GitHub (used for online updates and pricing data)
# 用于访问 GitHub 的代理地址(用于在线更新和定价数据获取)
# Supports: http, https, socks5, socks5h
# Examples:
# - HTTP proxy: "http://127.0.0.1:7890"
# - SOCKS5 proxy: "socks5://127.0.0.1:1080"
# - With authentication: "http://user:pass@proxy.example.com:8080"
# Leave empty for direct connection (recommended for overseas servers)
# 留空表示直连(适用于海外服务器)
proxy_url: ""

View File

@@ -0,0 +1,105 @@
# =============================================================================
# Sub2API Docker Compose - Local Development Build
# =============================================================================
# Build from local source code for testing changes.
#
# Usage:
# cd deploy
# docker compose -f docker-compose.dev.yml up --build
# =============================================================================
services:
sub2api:
build:
context: ..
dockerfile: Dockerfile
container_name: sub2api-dev
restart: unless-stopped
ports:
- "${BIND_HOST:-127.0.0.1}:${SERVER_PORT:-8080}:8080"
volumes:
- ./data:/app/data
environment:
- AUTO_SETUP=true
- SERVER_HOST=0.0.0.0
- SERVER_PORT=8080
- SERVER_MODE=debug
- RUN_MODE=${RUN_MODE:-standard}
- DATABASE_HOST=postgres
- DATABASE_PORT=5432
- DATABASE_USER=${POSTGRES_USER:-sub2api}
- DATABASE_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required}
- DATABASE_DBNAME=${POSTGRES_DB:-sub2api}
- DATABASE_SSLMODE=disable
- REDIS_HOST=redis
- REDIS_PORT=6379
- REDIS_PASSWORD=${REDIS_PASSWORD:-}
- REDIS_DB=${REDIS_DB:-0}
- ADMIN_EMAIL=${ADMIN_EMAIL:-admin@sub2api.local}
- ADMIN_PASSWORD=${ADMIN_PASSWORD:-}
- JWT_SECRET=${JWT_SECRET:-}
- TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY:-}
- TZ=${TZ:-Asia/Shanghai}
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
networks:
- sub2api-network
healthcheck:
test: ["CMD", "wget", "-q", "-T", "5", "-O", "/dev/null", "http://localhost:8080/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
postgres:
image: postgres:18-alpine
container_name: sub2api-postgres-dev
restart: unless-stopped
volumes:
- ./postgres_data:/var/lib/postgresql/data
environment:
- POSTGRES_USER=${POSTGRES_USER:-sub2api}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required}
- POSTGRES_DB=${POSTGRES_DB:-sub2api}
- PGDATA=/var/lib/postgresql/data
- TZ=${TZ:-Asia/Shanghai}
networks:
- sub2api-network
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-sub2api} -d ${POSTGRES_DB:-sub2api}"]
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
redis:
image: redis:8-alpine
container_name: sub2api-redis-dev
restart: unless-stopped
volumes:
- ./redis_data:/data
command: >
sh -c '
redis-server
--save 60 1
--appendonly yes
--appendfsync everysec
${REDIS_PASSWORD:+--requirepass "$REDIS_PASSWORD"}'
environment:
- TZ=${TZ:-Asia/Shanghai}
- REDISCLI_AUTH=${REDIS_PASSWORD:-}
networks:
- sub2api-network
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
start_period: 5s
networks:
sub2api-network:
driver: bridge

View File

@@ -0,0 +1,233 @@
# =============================================================================
# Sub2API Docker Compose - Local Directory Version
# =============================================================================
# This configuration uses local directories for data storage instead of named
# volumes, making it easy to migrate the entire deployment by simply copying
# the deploy directory.
#
# Quick Start:
# 1. Copy .env.example to .env and configure
# 2. mkdir -p data postgres_data redis_data
# 3. docker-compose -f docker-compose.local.yml up -d
# 4. Check logs: docker-compose -f docker-compose.local.yml logs -f sub2api
# 5. Access: http://localhost:8080
#
# Migration to New Server:
# 1. docker-compose -f docker-compose.local.yml down
# 2. tar czf sub2api-deploy.tar.gz deploy/
# 3. Transfer to new server and extract
# 4. docker-compose -f docker-compose.local.yml up -d
# =============================================================================
services:
# ===========================================================================
# Sub2API Application
# ===========================================================================
sub2api:
image: weishaw/sub2api:latest
container_name: sub2api
restart: unless-stopped
ulimits:
nofile:
soft: 100000
hard: 100000
ports:
- "${BIND_HOST:-0.0.0.0}:${SERVER_PORT:-8080}:8080"
volumes:
# Local directory mapping for easy migration
- ./data:/app/data
# Optional: Mount custom config.yaml (uncomment and create the file first)
# Copy config.example.yaml to config.yaml, modify it, then uncomment:
# - ./config.yaml:/app/data/config.yaml:ro
environment:
# =======================================================================
# Auto Setup (REQUIRED for Docker deployment)
# =======================================================================
- AUTO_SETUP=true
# =======================================================================
# Server Configuration
# =======================================================================
- SERVER_HOST=0.0.0.0
- SERVER_PORT=8080
- SERVER_MODE=${SERVER_MODE:-release}
- RUN_MODE=${RUN_MODE:-standard}
# =======================================================================
# Database Configuration (PostgreSQL)
# =======================================================================
- DATABASE_HOST=postgres
- DATABASE_PORT=5432
- DATABASE_USER=${POSTGRES_USER:-sub2api}
- DATABASE_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required}
- DATABASE_DBNAME=${POSTGRES_DB:-sub2api}
- DATABASE_SSLMODE=disable
- DATABASE_MAX_OPEN_CONNS=${DATABASE_MAX_OPEN_CONNS:-50}
- DATABASE_MAX_IDLE_CONNS=${DATABASE_MAX_IDLE_CONNS:-10}
- DATABASE_CONN_MAX_LIFETIME_MINUTES=${DATABASE_CONN_MAX_LIFETIME_MINUTES:-30}
- DATABASE_CONN_MAX_IDLE_TIME_MINUTES=${DATABASE_CONN_MAX_IDLE_TIME_MINUTES:-5}
# =======================================================================
# Redis Configuration
# =======================================================================
- REDIS_HOST=redis
- REDIS_PORT=6379
- REDIS_PASSWORD=${REDIS_PASSWORD:-}
- REDIS_DB=${REDIS_DB:-0}
- REDIS_POOL_SIZE=${REDIS_POOL_SIZE:-1024}
- REDIS_MIN_IDLE_CONNS=${REDIS_MIN_IDLE_CONNS:-10}
- REDIS_ENABLE_TLS=${REDIS_ENABLE_TLS:-false}
# =======================================================================
# Admin Account (auto-created on first run)
# =======================================================================
- ADMIN_EMAIL=${ADMIN_EMAIL:-admin@sub2api.local}
- ADMIN_PASSWORD=${ADMIN_PASSWORD:-}
# =======================================================================
# JWT Configuration
# =======================================================================
# IMPORTANT: Set a fixed JWT_SECRET to prevent login sessions from being
# invalidated after container restarts. If left empty, a random secret
# will be generated on each startup.
# Generate a secure secret: openssl rand -hex 32
- JWT_SECRET=${JWT_SECRET:-}
- JWT_EXPIRE_HOUR=${JWT_EXPIRE_HOUR:-24}
# =======================================================================
# TOTP (2FA) Configuration
# =======================================================================
# IMPORTANT: Set a fixed encryption key for TOTP secrets. If left empty,
# a random key will be generated on each startup, causing all existing
# TOTP configurations to become invalid (users won't be able to login
# with 2FA).
# Generate a secure key: openssl rand -hex 32
- TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY:-}
# =======================================================================
# Timezone Configuration
# This affects ALL time operations in the application:
# - Database timestamps
# - Usage statistics "today" boundary
# - Subscription expiry times
# - Log timestamps
# Common values: Asia/Shanghai, America/New_York, Europe/London, UTC
# =======================================================================
- TZ=${TZ:-Asia/Shanghai}
# =======================================================================
# Gemini OAuth Configuration (for Gemini accounts)
# =======================================================================
- GEMINI_OAUTH_CLIENT_ID=${GEMINI_OAUTH_CLIENT_ID:-}
- GEMINI_OAUTH_CLIENT_SECRET=${GEMINI_OAUTH_CLIENT_SECRET:-}
- GEMINI_OAUTH_SCOPES=${GEMINI_OAUTH_SCOPES:-}
- GEMINI_QUOTA_POLICY=${GEMINI_QUOTA_POLICY:-}
# Built-in OAuth client secrets (optional)
# SECURITY: This repo does not embed third-party client_secret.
- GEMINI_CLI_OAUTH_CLIENT_SECRET=${GEMINI_CLI_OAUTH_CLIENT_SECRET:-}
- ANTIGRAVITY_OAUTH_CLIENT_SECRET=${ANTIGRAVITY_OAUTH_CLIENT_SECRET:-}
# =======================================================================
# Security Configuration (URL Allowlist)
# =======================================================================
# Enable URL allowlist validation (false to skip allowlist checks)
- SECURITY_URL_ALLOWLIST_ENABLED=${SECURITY_URL_ALLOWLIST_ENABLED:-false}
# Allow insecure HTTP URLs when allowlist is disabled (default: false, requires https)
- SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=${SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP:-false}
# Allow private IP addresses for upstream/pricing/CRS (for internal deployments)
- SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=${SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS:-false}
# Upstream hosts whitelist (comma-separated, only used when enabled=true)
- SECURITY_URL_ALLOWLIST_UPSTREAM_HOSTS=${SECURITY_URL_ALLOWLIST_UPSTREAM_HOSTS:-}
# =======================================================================
# Update Configuration (在线更新配置)
# =======================================================================
# Proxy for accessing GitHub (online updates + pricing data)
# Examples: http://host:port, socks5://host:port
- UPDATE_PROXY_URL=${UPDATE_PROXY_URL:-}
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
networks:
- sub2api-network
healthcheck:
test: ["CMD", "wget", "-q", "-T", "5", "-O", "/dev/null", "http://localhost:8080/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
# ===========================================================================
# PostgreSQL Database
# ===========================================================================
postgres:
image: postgres:18-alpine
container_name: sub2api-postgres
restart: unless-stopped
ulimits:
nofile:
soft: 100000
hard: 100000
volumes:
# Local directory mapping for easy migration
- ./postgres_data:/var/lib/postgresql/data
environment:
- POSTGRES_USER=${POSTGRES_USER:-sub2api}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required}
- POSTGRES_DB=${POSTGRES_DB:-sub2api}
- PGDATA=/var/lib/postgresql/data
- TZ=${TZ:-Asia/Shanghai}
networks:
- sub2api-network
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-sub2api} -d ${POSTGRES_DB:-sub2api}"]
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
# 注意:不暴露端口到宿主机,应用通过内部网络连接
# 如需调试可临时添加ports: ["127.0.0.1:5433:5432"]
# ===========================================================================
# Redis Cache
# ===========================================================================
redis:
image: redis:8-alpine
container_name: sub2api-redis
restart: unless-stopped
ulimits:
nofile:
soft: 100000
hard: 100000
volumes:
# Local directory mapping for easy migration
- ./redis_data:/data
command: >
sh -c '
redis-server
--save 60 1
--appendonly yes
--appendfsync everysec
${REDIS_PASSWORD:+--requirepass "$REDIS_PASSWORD"}'
environment:
- TZ=${TZ:-Asia/Shanghai}
# REDISCLI_AUTH is used by redis-cli for authentication (safer than -a flag)
- REDISCLI_AUTH=${REDIS_PASSWORD:-}
networks:
- sub2api-network
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
start_period: 5s
# =============================================================================
# Networks
# =============================================================================
networks:
sub2api-network:
driver: bridge

View File

@@ -0,0 +1,105 @@
# =============================================================================
# Sub2API Docker Compose - Standalone Configuration
# =============================================================================
# This configuration runs only the Sub2API application.
# PostgreSQL and Redis must be provided externally.
#
# Usage:
# 1. Copy .env.example to .env and configure database/redis connection
# 2. docker-compose -f docker-compose.standalone.yml up -d
# 3. Access: http://localhost:8080
# =============================================================================
services:
  sub2api:
    image: weishaw/sub2api:latest
    container_name: sub2api
    restart: unless-stopped
    ulimits:
      nofile:
        soft: 100000
        hard: 100000
    ports:
      - "${BIND_HOST:-0.0.0.0}:${SERVER_PORT:-8080}:8080"
    volumes:
      - sub2api_data:/app/data
    # Allow reaching services on the Docker host (external PostgreSQL/Redis)
    # via the hostname host.docker.internal.
    extra_hosts:
      - "host.docker.internal:host-gateway"
    environment:
      # =======================================================================
      # Auto Setup
      # =======================================================================
      - AUTO_SETUP=true
      # =======================================================================
      # Server Configuration
      # =======================================================================
      - SERVER_HOST=0.0.0.0
      - SERVER_PORT=8080
      - SERVER_MODE=${SERVER_MODE:-release}
      - RUN_MODE=${RUN_MODE:-standard}
      # =======================================================================
      # Database Configuration (PostgreSQL) - Required
      # =======================================================================
      - DATABASE_HOST=${DATABASE_HOST:?DATABASE_HOST is required}
      - DATABASE_PORT=${DATABASE_PORT:-5432}
      - DATABASE_USER=${DATABASE_USER:-sub2api}
      - DATABASE_PASSWORD=${DATABASE_PASSWORD:?DATABASE_PASSWORD is required}
      - DATABASE_DBNAME=${DATABASE_DBNAME:-sub2api}
      - DATABASE_SSLMODE=${DATABASE_SSLMODE:-disable}
      - DATABASE_MAX_OPEN_CONNS=${DATABASE_MAX_OPEN_CONNS:-50}
      - DATABASE_MAX_IDLE_CONNS=${DATABASE_MAX_IDLE_CONNS:-10}
      - DATABASE_CONN_MAX_LIFETIME_MINUTES=${DATABASE_CONN_MAX_LIFETIME_MINUTES:-30}
      - DATABASE_CONN_MAX_IDLE_TIME_MINUTES=${DATABASE_CONN_MAX_IDLE_TIME_MINUTES:-5}
      # =======================================================================
      # Redis Configuration - Required
      # =======================================================================
      - REDIS_HOST=${REDIS_HOST:?REDIS_HOST is required}
      - REDIS_PORT=${REDIS_PORT:-6379}
      - REDIS_PASSWORD=${REDIS_PASSWORD:-}
      - REDIS_DB=${REDIS_DB:-0}
      - REDIS_POOL_SIZE=${REDIS_POOL_SIZE:-1024}
      - REDIS_MIN_IDLE_CONNS=${REDIS_MIN_IDLE_CONNS:-10}
      - REDIS_ENABLE_TLS=${REDIS_ENABLE_TLS:-false}
      # =======================================================================
      # Admin Account (auto-created on first run)
      # =======================================================================
      - ADMIN_EMAIL=${ADMIN_EMAIL:-admin@sub2api.local}
      - ADMIN_PASSWORD=${ADMIN_PASSWORD:-}
      # =======================================================================
      # JWT Configuration
      # =======================================================================
      # IMPORTANT: set a fixed JWT_SECRET, otherwise a random secret is
      # generated on every startup and login sessions are invalidated after
      # each restart. Generate one with: openssl rand -hex 32
      - JWT_SECRET=${JWT_SECRET:-}
      - JWT_EXPIRE_HOUR=${JWT_EXPIRE_HOUR:-24}
      # =======================================================================
      # TOTP (2FA) Configuration
      # =======================================================================
      # IMPORTANT: set a fixed encryption key for TOTP secrets. If left empty,
      # a random key is generated on each startup, causing all existing TOTP
      # configurations to become invalid (users won't be able to login with
      # 2FA). Generate a secure key: openssl rand -hex 32
      - TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY:-}
      # =======================================================================
      # Timezone Configuration
      # =======================================================================
      - TZ=${TZ:-Asia/Shanghai}
      # =======================================================================
      # Gemini OAuth Configuration (optional)
      # =======================================================================
      - GEMINI_OAUTH_CLIENT_ID=${GEMINI_OAUTH_CLIENT_ID:-}
      - GEMINI_OAUTH_CLIENT_SECRET=${GEMINI_OAUTH_CLIENT_SECRET:-}
      - GEMINI_OAUTH_SCOPES=${GEMINI_OAUTH_SCOPES:-}
      - GEMINI_QUOTA_POLICY=${GEMINI_QUOTA_POLICY:-}
      # Built-in OAuth client secrets (optional)
      # SECURITY: This repo does not embed third-party client_secret.
      - GEMINI_CLI_OAUTH_CLIENT_SECRET=${GEMINI_CLI_OAUTH_CLIENT_SECRET:-}
      - ANTIGRAVITY_OAUTH_CLIENT_SECRET=${ANTIGRAVITY_OAUTH_CLIENT_SECRET:-}
    healthcheck:
      test: ["CMD", "wget", "-q", "-T", "5", "-O", "/dev/null", "http://localhost:8080/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
volumes:
  sub2api_data:
    driver: local

View File

@@ -0,0 +1,237 @@
# =============================================================================
# Sub2API Docker Compose Configuration
# =============================================================================
# Quick Start:
# 1. Copy .env.example to .env and configure
# 2. docker-compose up -d
# 3. Check logs: docker-compose logs -f sub2api
# 4. Access: http://localhost:8080
#
# All configuration is done via environment variables.
# No Setup Wizard needed - the system auto-initializes on first run.
# =============================================================================
services:
  # ===========================================================================
  # Sub2API Application
  # ===========================================================================
  sub2api:
    image: weishaw/sub2api:latest
    container_name: sub2api
    restart: unless-stopped
    # Raise the open-file limit to support high connection counts.
    ulimits:
      nofile:
        soft: 100000
        hard: 100000
    ports:
      - "${BIND_HOST:-0.0.0.0}:${SERVER_PORT:-8080}:8080"
    volumes:
      # Data persistence (config.yaml will be auto-generated here)
      - sub2api_data:/app/data
      # Optional: Mount custom config.yaml (uncomment and create the file first)
      # Copy config.example.yaml to config.yaml, modify it, then uncomment:
      # - ./config.yaml:/app/data/config.yaml:ro
    environment:
      # =======================================================================
      # Auto Setup (REQUIRED for Docker deployment)
      # =======================================================================
      - AUTO_SETUP=true
      # =======================================================================
      # Server Configuration
      # =======================================================================
      - SERVER_HOST=0.0.0.0
      - SERVER_PORT=8080
      - SERVER_MODE=${SERVER_MODE:-release}
      - RUN_MODE=${RUN_MODE:-standard}
      # =======================================================================
      # Database Configuration (PostgreSQL)
      # =======================================================================
      - DATABASE_HOST=postgres
      - DATABASE_PORT=5432
      - DATABASE_USER=${POSTGRES_USER:-sub2api}
      - DATABASE_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required}
      - DATABASE_DBNAME=${POSTGRES_DB:-sub2api}
      - DATABASE_SSLMODE=disable
      - DATABASE_MAX_OPEN_CONNS=${DATABASE_MAX_OPEN_CONNS:-50}
      - DATABASE_MAX_IDLE_CONNS=${DATABASE_MAX_IDLE_CONNS:-10}
      - DATABASE_CONN_MAX_LIFETIME_MINUTES=${DATABASE_CONN_MAX_LIFETIME_MINUTES:-30}
      - DATABASE_CONN_MAX_IDLE_TIME_MINUTES=${DATABASE_CONN_MAX_IDLE_TIME_MINUTES:-5}
      # =======================================================================
      # Redis Configuration
      # =======================================================================
      - REDIS_HOST=redis
      - REDIS_PORT=6379
      - REDIS_PASSWORD=${REDIS_PASSWORD:-}
      - REDIS_DB=${REDIS_DB:-0}
      - REDIS_POOL_SIZE=${REDIS_POOL_SIZE:-1024}
      - REDIS_MIN_IDLE_CONNS=${REDIS_MIN_IDLE_CONNS:-10}
      - REDIS_ENABLE_TLS=${REDIS_ENABLE_TLS:-false}
      # =======================================================================
      # Admin Account (auto-created on first run)
      # =======================================================================
      - ADMIN_EMAIL=${ADMIN_EMAIL:-admin@sub2api.local}
      - ADMIN_PASSWORD=${ADMIN_PASSWORD:-}
      # =======================================================================
      # JWT Configuration
      # =======================================================================
      # IMPORTANT: Set a fixed JWT_SECRET to prevent login sessions from being
      # invalidated after container restarts. If left empty, a random secret
      # will be generated on each startup.
      # Generate a secure secret: openssl rand -hex 32
      - JWT_SECRET=${JWT_SECRET:-}
      - JWT_EXPIRE_HOUR=${JWT_EXPIRE_HOUR:-24}
      # =======================================================================
      # TOTP (2FA) Configuration
      # =======================================================================
      # IMPORTANT: Set a fixed encryption key for TOTP secrets. If left empty,
      # a random key will be generated on each startup, causing all existing
      # TOTP configurations to become invalid (users won't be able to login
      # with 2FA).
      # Generate a secure key: openssl rand -hex 32
      - TOTP_ENCRYPTION_KEY=${TOTP_ENCRYPTION_KEY:-}
      # =======================================================================
      # Timezone Configuration
      # This affects ALL time operations in the application:
      #   - Database timestamps
      #   - Usage statistics "today" boundary
      #   - Subscription expiry times
      #   - Log timestamps
      # Common values: Asia/Shanghai, America/New_York, Europe/London, UTC
      # =======================================================================
      - TZ=${TZ:-Asia/Shanghai}
      # =======================================================================
      # Gemini OAuth Configuration (for Gemini accounts)
      # =======================================================================
      - GEMINI_OAUTH_CLIENT_ID=${GEMINI_OAUTH_CLIENT_ID:-}
      - GEMINI_OAUTH_CLIENT_SECRET=${GEMINI_OAUTH_CLIENT_SECRET:-}
      - GEMINI_OAUTH_SCOPES=${GEMINI_OAUTH_SCOPES:-}
      - GEMINI_QUOTA_POLICY=${GEMINI_QUOTA_POLICY:-}
      # Built-in OAuth client secrets (optional)
      # SECURITY: This repo does not embed third-party client_secret.
      - GEMINI_CLI_OAUTH_CLIENT_SECRET=${GEMINI_CLI_OAUTH_CLIENT_SECRET:-}
      - ANTIGRAVITY_OAUTH_CLIENT_SECRET=${ANTIGRAVITY_OAUTH_CLIENT_SECRET:-}
      # =======================================================================
      # Security Configuration (URL Allowlist)
      # =======================================================================
      # Enable URL allowlist validation (false to skip allowlist checks)
      - SECURITY_URL_ALLOWLIST_ENABLED=${SECURITY_URL_ALLOWLIST_ENABLED:-false}
      # Allow insecure HTTP URLs when allowlist is disabled (default: false, requires https)
      - SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=${SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP:-false}
      # Allow private IP addresses for upstream/pricing/CRS (for internal deployments)
      - SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=${SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS:-false}
      # Upstream hosts whitelist (comma-separated, only used when enabled=true)
      - SECURITY_URL_ALLOWLIST_UPSTREAM_HOSTS=${SECURITY_URL_ALLOWLIST_UPSTREAM_HOSTS:-}
      # =======================================================================
      # Update Configuration (online update settings)
      # =======================================================================
      # Proxy for accessing GitHub (online updates + pricing data)
      # Examples: http://host:port, socks5://host:port
      - UPDATE_PROXY_URL=${UPDATE_PROXY_URL:-}
    # Wait until both backing services report healthy before starting the app.
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - sub2api-network
    healthcheck:
      test: ["CMD", "wget", "-q", "-T", "5", "-O", "/dev/null", "http://localhost:8080/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
  # ===========================================================================
  # PostgreSQL Database
  # ===========================================================================
  postgres:
    image: postgres:18-alpine
    container_name: sub2api-postgres
    restart: unless-stopped
    ulimits:
      nofile:
        soft: 100000
        hard: 100000
    volumes:
      - postgres_data:/var/lib/postgresql/data
    environment:
      # postgres:18-alpine defaults to PGDATA=/var/lib/postgresql/18/docker
      # (inside the image-declared anonymous volume /var/lib/postgresql).
      # Without an explicit PGDATA, data would NOT be persisted into the named
      # volume mounted at /var/lib/postgresql/data even though it is mounted,
      # so "docker compose down/up" would re-run initdb and lose users,
      # passwords and all other data.
      - PGDATA=/var/lib/postgresql/data
      - POSTGRES_USER=${POSTGRES_USER:-sub2api}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required}
      - POSTGRES_DB=${POSTGRES_DB:-sub2api}
      - TZ=${TZ:-Asia/Shanghai}
    networks:
      - sub2api-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-sub2api} -d ${POSTGRES_DB:-sub2api}"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    # NOTE: no host port is published; the app connects over the internal network.
    # For debugging you may temporarily add: ports: ["127.0.0.1:5433:5432"]
  # ===========================================================================
  # Redis Cache
  # ===========================================================================
  redis:
    image: redis:8-alpine
    container_name: sub2api-redis
    restart: unless-stopped
    ulimits:
      nofile:
        soft: 100000
        hard: 100000
    volumes:
      - redis_data:/data
    # RDB snapshot every 60s (if at least one write) plus AOF with per-second
    # fsync; --requirepass is appended only when REDIS_PASSWORD is non-empty
    # (Compose-level ${VAR:+...} interpolation).
    command: >
      sh -c '
      redis-server
      --save 60 1
      --appendonly yes
      --appendfsync everysec
      ${REDIS_PASSWORD:+--requirepass "$REDIS_PASSWORD"}'
    environment:
      - TZ=${TZ:-Asia/Shanghai}
      # REDISCLI_AUTH is used by redis-cli for authentication (safer than -a flag)
      - REDISCLI_AUTH=${REDIS_PASSWORD:-}
    networks:
      - sub2api-network
    healthcheck:
      # redis-cli picks up REDISCLI_AUTH from the environment when auth is set.
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 5s
# =============================================================================
# Volumes
# =============================================================================
volumes:
  sub2api_data:
    driver: local
  postgres_data:
    driver: local
  redis_data:
    driver: local
# =============================================================================
# Networks
# =============================================================================
networks:
  sub2api-network:
    driver: bridge

View File

@@ -0,0 +1,171 @@
#!/bin/bash
# =============================================================================
# Sub2API Docker Deployment Preparation Script
# =============================================================================
# This script prepares deployment files for Sub2API:
#   - Downloads docker-compose.local.yml and .env.example
#   - Generates secure secrets (JWT_SECRET, TOTP_ENCRYPTION_KEY, POSTGRES_PASSWORD)
#   - Creates necessary data directories
#
# After running this script, you can start services with:
#   docker-compose up -d
# =============================================================================
# Fail fast: abort on errors (-e), unset variables (-u) and failures anywhere
# in a pipeline (pipefail). Previously only -e was enabled.
set -euo pipefail
# ANSI colors for terminal output (constants, hence readonly)
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color
# GitHub raw content base URL for fetching deployment templates
readonly GITHUB_RAW_URL="https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy"
# Colored log helpers: each writes its message to stdout behind a colored,
# bracketed severity tag. printf '%b' interprets the escape sequences in the
# color variables exactly like `echo -e` did.
print_info()    { printf '%b\n' "${BLUE}[INFO]${NC} $1"; }
print_success() { printf '%b\n' "${GREEN}[SUCCESS]${NC} $1"; }
print_warning() { printf '%b\n' "${YELLOW}[WARNING]${NC} $1"; }
print_error()   { printf '%b\n' "${RED}[ERROR]${NC} $1"; }
# Emit a 64-character hex string (32 cryptographically random bytes) on stdout.
generate_secret() {
  local -r num_bytes=32
  openssl rand -hex "$num_bytes"
}
# Return 0 when the given name resolves to a runnable command (binary,
# builtin or function), non-zero otherwise. All output is suppressed.
command_exists() {
  local probe=$1
  command -v "$probe" >/dev/null 2>&1
}
# -----------------------------------------------------------------------------
# download_file <url> <dest>
# Fetch a URL with curl (preferred) or wget. curl -f makes HTTP errors
# (404/500) fail the command instead of silently saving an error page as the
# destination file, which the previous inline logic did.
# -----------------------------------------------------------------------------
download_file() {
  local url=$1
  local dest=$2
  if command_exists curl; then
    curl -fsSL "$url" -o "$dest"
  elif command_exists wget; then
    wget -q "$url" -O "$dest"
  else
    print_error "Neither curl nor wget is installed. Please install one of them."
    exit 1
  fi
}

# -----------------------------------------------------------------------------
# set_env_value <key> <value> <file>
# Set KEY=VALUE in an env file: replace the existing line when present,
# append otherwise (previously a key missing from the template was silently
# skipped, leaving the secret unset). Values are hex strings, so they are
# safe to embed in a sed replacement.
# -----------------------------------------------------------------------------
set_env_value() {
  local key=$1
  local value=$2
  local file=$3
  if grep -q "^${key}=" "$file"; then
    if sed --version >/dev/null 2>&1; then
      # GNU sed (Linux)
      sed -i "s/^${key}=.*/${key}=${value}/" "$file"
    else
      # BSD sed (macOS)
      sed -i '' "s/^${key}=.*/${key}=${value}/" "$file"
    fi
  else
    printf '%s=%s\n' "$key" "$value" >> "$file"
  fi
}

# -----------------------------------------------------------------------------
# Main installation function: downloads deployment templates, generates
# secrets into a permission-restricted .env, and creates data directories.
# -----------------------------------------------------------------------------
main() {
  echo ""
  echo "=========================================="
  echo "  Sub2API Deployment Preparation"
  echo "=========================================="
  echo ""
  # openssl is required for secret generation.
  if ! command_exists openssl; then
    print_error "openssl is not installed. Please install openssl first."
    exit 1
  fi
  # Ask for confirmation before clobbering an existing deployment.
  if [ -f "docker-compose.yml" ] && [ -f ".env" ]; then
    print_warning "Deployment files already exist in current directory."
    read -p "Overwrite existing files? (y/N): " -r
    echo
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
      print_info "Cancelled."
      exit 0
    fi
  fi
  # Download docker-compose.local.yml and save as docker-compose.yml
  print_info "Downloading docker-compose.yml..."
  download_file "${GITHUB_RAW_URL}/docker-compose.local.yml" docker-compose.yml
  print_success "Downloaded docker-compose.yml"
  # Download .env.example
  print_info "Downloading .env.example..."
  download_file "${GITHUB_RAW_URL}/.env.example" .env.example
  print_success "Downloaded .env.example"
  # Generate .env file with auto-generated secrets
  print_info "Generating secure secrets..."
  echo ""
  JWT_SECRET=$(generate_secret)
  TOTP_ENCRYPTION_KEY=$(generate_secret)
  POSTGRES_PASSWORD=$(generate_secret)
  # Create .env from the template and restrict permissions BEFORE writing
  # secrets into it, so they are never world-readable, even briefly.
  cp .env.example .env
  chmod 600 .env
  set_env_value "JWT_SECRET" "$JWT_SECRET" .env
  set_env_value "TOTP_ENCRYPTION_KEY" "$TOTP_ENCRYPTION_KEY" .env
  set_env_value "POSTGRES_PASSWORD" "$POSTGRES_PASSWORD" .env
  # Create data directories
  print_info "Creating data directories..."
  mkdir -p data postgres_data redis_data
  print_success "Created data directories"
  echo ""
  # Display completion message
  echo "=========================================="
  echo "  Preparation Complete!"
  echo "=========================================="
  echo ""
  echo "Generated secure credentials:"
  echo "  POSTGRES_PASSWORD:    ${POSTGRES_PASSWORD}"
  echo "  JWT_SECRET:           ${JWT_SECRET}"
  echo "  TOTP_ENCRYPTION_KEY:  ${TOTP_ENCRYPTION_KEY}"
  echo ""
  print_warning "These credentials have been saved to .env file."
  print_warning "Please keep them secure and do not share publicly!"
  echo ""
  echo "Directory structure:"
  echo "  docker-compose.yml  - Docker Compose configuration"
  echo "  .env                - Environment variables (generated secrets)"
  echo "  .env.example        - Example template (for reference)"
  echo "  data/               - Application data (will be created on first run)"
  echo "  postgres_data/      - PostgreSQL data"
  echo "  redis_data/         - Redis data"
  echo ""
  echo "Next steps:"
  echo "  1. (Optional) Edit .env to customize configuration"
  echo "  2. Start services:"
  echo "     docker-compose up -d"
  echo ""
  echo "  3. View logs:"
  echo "     docker-compose logs -f sub2api"
  echo ""
  echo "  4. Access Web UI:"
  echo "     http://localhost:8080"
  echo ""
  print_info "If admin password is not set in .env, it will be auto-generated."
  print_info "Check logs for the generated admin password on first startup."
  echo ""
}
# Script entry point: run main, forwarding any command-line arguments.
main "$@"

View File

@@ -0,0 +1,123 @@
#!/usr/bin/env bash
set -euo pipefail
# Usage:
#   sudo ./install-datamanagementd.sh --binary /path/to/datamanagementd
# or:
#   sudo ./install-datamanagementd.sh --source /path/to/sub2api/repo
# Filled in from the --binary / --source CLI options below.
BIN_PATH=""
SOURCE_PATH=""
# Installation target for the daemon binary.
INSTALL_DIR="/opt/sub2api"
# Data directory for the daemon's SQLite database (see the systemd unit's
# -sqlite-path flag).
DATA_DIR="/var/lib/sub2api/datamanagement"
SERVICE_FILE_NAME="sub2api-datamanagementd.service"
# Print CLI usage (Chinese) to stdout. The heredoc delimiter is quoted
# ('EOF'), so nothing inside is expanded. Uses the portable name() form
# instead of the non-idiomatic "function name()" hybrid, matching the other
# scripts in this repo.
print_help() {
  cat <<'EOF'
用法:
  install-datamanagementd.sh [--binary <datamanagementd二进制路径>] [--source <仓库路径>]
参数:
  --binary   指定已构建的 datamanagementd 二进制路径
  --source   指定 sub2api 仓库路径(脚本会执行 go build
  -h, --help 显示帮助
示例:
  sudo ./install-datamanagementd.sh --binary ./datamanagement/datamanagementd
  sudo ./install-datamanagementd.sh --source /opt/sub2api-src
EOF
}
# Parse CLI options. Each value-taking flag now verifies its argument is
# present; previously "--binary" as the last argument left the value empty and
# made "shift 2" fail under set -e with no diagnostic at all.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --binary)
      if [[ $# -lt 2 ]]; then
        echo "错误: $1 需要一个参数"
        print_help
        exit 1
      fi
      BIN_PATH="$2"
      shift 2
      ;;
    --source)
      if [[ $# -lt 2 ]]; then
        echo "错误: $1 需要一个参数"
        print_help
        exit 1
      fi
      SOURCE_PATH="$2"
      shift 2
      ;;
    -h|--help)
      print_help
      exit 0
      ;;
    *)
      echo "未知参数: $1"
      print_help
      exit 1
      ;;
  esac
done
# --binary and --source are mutually exclusive; exactly one is required.
if [[ -n "$BIN_PATH" && -n "$SOURCE_PATH" ]]; then
  echo "错误: --binary 与 --source 只能二选一"
  exit 1
fi
if [[ -z "$BIN_PATH" && -z "$SOURCE_PATH" ]]; then
  echo "错误: 必须提供 --binary 或 --source"
  exit 1
fi
# Installing binaries, creating users and writing systemd units requires root.
if [[ "$(id -u)" -ne 0 ]]; then
  echo "错误: 请使用 root 权限执行(例如 sudo"
  exit 1
fi
# Build from source when --source was given; the build output becomes BIN_PATH.
if [[ -n "$SOURCE_PATH" ]]; then
  if [[ ! -d "$SOURCE_PATH/datamanagement" ]]; then
    echo "错误: 无效仓库路径,未找到 $SOURCE_PATH/datamanagement"
    exit 1
  fi
  echo "[1/6] 从源码构建 datamanagementd..."
  (cd "$SOURCE_PATH/datamanagement" && go build -o datamanagementd ./cmd/datamanagementd)
  BIN_PATH="$SOURCE_PATH/datamanagement/datamanagementd"
fi
if [[ ! -f "$BIN_PATH" ]]; then
  echo "错误: 二进制文件不存在: $BIN_PATH"
  exit 1
fi
# Create a dedicated non-login system user to run the daemon (idempotent).
if ! id sub2api >/dev/null 2>&1; then
  echo "[2/6] 创建系统用户 sub2api..."
  useradd --system --no-create-home --shell /usr/sbin/nologin sub2api
else
  echo "[2/6] 系统用户 sub2api 已存在,跳过创建"
fi
echo "[3/6] 安装 datamanagementd 二进制..."
mkdir -p "$INSTALL_DIR"
# install(1) copies the file and sets the executable mode in one step.
install -m 0755 "$BIN_PATH" "$INSTALL_DIR/datamanagementd"
echo "[4/6] 准备数据目录..."
mkdir -p "$DATA_DIR"
chown -R sub2api:sub2api /var/lib/sub2api
chmod 0750 "$DATA_DIR"
# The systemd unit template is expected to sit next to this script.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SERVICE_TEMPLATE="$SCRIPT_DIR/$SERVICE_FILE_NAME"
if [[ ! -f "$SERVICE_TEMPLATE" ]]; then
  echo "错误: 未找到服务模板 $SERVICE_TEMPLATE"
  exit 1
fi
echo "[5/6] 安装 systemd 服务..."
cp "$SERVICE_TEMPLATE" "/etc/systemd/system/$SERVICE_FILE_NAME"
systemctl daemon-reload
systemctl enable --now sub2api-datamanagementd
echo "[6/6] 完成,当前状态:"
# "status" exits non-zero for inactive/failed units; don't abort the script.
systemctl --no-pager --full status sub2api-datamanagementd || true
cat <<'EOF'
下一步建议:
  1. 查看日志sudo journalctl -u sub2api-datamanagementd -f
  2. 在 sub2api容器部署时挂载 socket:
     /tmp/sub2api-datamanagement.sock:/tmp/sub2api-datamanagement.sock
  3. 进入管理后台“数据管理”页面确认 agent=enabled
EOF

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,22 @@
[Unit]
Description=Sub2API Data Management Daemon
After=network.target
Wants=network.target
[Service]
Type=simple
# Run as the dedicated non-login user created by the install script.
User=sub2api
Group=sub2api
WorkingDirectory=/opt/sub2api
# Serve over a Unix socket in /tmp and persist state in a local SQLite file.
ExecStart=/opt/sub2api/datamanagementd \
  -socket-path /tmp/sub2api-datamanagement.sock \
  -sqlite-path /var/lib/sub2api/datamanagement/datamanagementd.db \
  -version 1.0.0
Restart=always
RestartSec=5s
LimitNOFILE=100000
NoNewPrivileges=true
# PrivateTmp must stay disabled: the socket lives in the shared /tmp so that
# sub2api containers can bind-mount /tmp/sub2api-datamanagement.sock.
PrivateTmp=false
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,33 @@
[Unit]
Description=Sub2API - AI API Gateway Platform
Documentation=https://github.com/Wei-Shaw/sub2api
# Start after (and prefer co-starting with) local PostgreSQL and Redis.
After=network.target postgresql.service redis.service
Wants=postgresql.service redis.service
[Service]
Type=simple
User=sub2api
Group=sub2api
WorkingDirectory=/opt/sub2api
ExecStart=/opt/sub2api/sub2api
Restart=always
RestartSec=5
# Send stdout/stderr to the journal, tagged "sub2api" for easy filtering.
StandardOutput=journal
StandardError=journal
SyslogIdentifier=sub2api
# Security hardening
NoNewPrivileges=true
# Filesystem is read-only for the service except paths in ReadWritePaths.
ProtectSystem=strict
ProtectHome=true
PrivateTmp=true
ReadWritePaths=/opt/sub2api
# Environment - Server configuration
# Modify these values to change listen address and port
Environment=GIN_MODE=release
Environment=SERVER_HOST=0.0.0.0
Environment=SERVER_PORT=8080
[Install]
WantedBy=multi-user.target