diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4e95c9be0..ffbb5e054 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -4,9 +4,50 @@ on: tags: - "v*" workflow_dispatch: + inputs: + version: + description: "Version to release (e.g., 5.1.0)" + required: false + type: string + +permissions: + contents: write + +env: + # Extract version from tag or input + VERSION: ${{ github.event.inputs.version || github.ref_name }} jobs: - build-tauri: + # Sync version before build (only on workflow_dispatch with version input) + prepare: + runs-on: ubuntu-latest + if: github.event.inputs.version != '' + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 20 + + - name: Sync version across all files + run: node scripts/sync-version.js set ${{ github.event.inputs.version }} + + - name: Commit and tag + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git add -A + git diff --staged --quiet || git commit -m "chore: bump version to ${{ github.event.inputs.version }}" + git tag -a "v${{ github.event.inputs.version }}" -m "Release v${{ github.event.inputs.version }}" + git push origin HEAD --tags + + release: + needs: [prepare] + if: always() && (needs.prepare.result == 'success' || needs.prepare.result == 'skipped') permissions: contents: write strategy: @@ -27,9 +68,12 @@ jobs: args: "" runs-on: ${{ matrix.platform }} + steps: - name: Checkout repository uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.version && format('v{0}', github.event.inputs.version) || github.ref }} - name: Install dependencies (Linux) if: startsWith(matrix.platform, 'ubuntu') @@ -53,232 +97,29 @@ jobs: run: npm install - name: Build the app + uses: tauri-apps/tauri-action@v0 env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 
TAURI_SIGNING_PRIVATE_KEY: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY }} TAURI_SIGNING_PRIVATE_KEY_PASSWORD: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY_PASSWORD }} - run: npm run tauri build -- ${{ matrix.args }} - - # 3. 处理 macOS 架构重命名冲突 (解决 422 Already Exists) - - name: Rename macOS assets for architecture - if: matrix.platform == 'macos-latest' - run: | - # 识别架构 - if [[ "${{ matrix.args }}" == *"--target aarch64-apple-darwin"* ]]; then - ARCH="aarch64" - elif [[ "${{ matrix.args }}" == *"--target x86_64-apple-darwin"* ]]; then - ARCH="x64" - elif [[ "${{ matrix.args }}" == *"--target universal-apple-darwin"* ]]; then - ARCH="universal" - else - ARCH="unknown" - fi - - echo "Detected architecture: $ARCH" - - # 进入产物目录 - cd src-tauri/target/*/release/bundle/macos/ - - # 重命名 .app.tar.gz 和 .sig - if [ -f "Antigravity Tools.app.tar.gz" ]; then - mv "Antigravity Tools.app.tar.gz" "Antigravity Tools_${ARCH}.app.tar.gz" - mv "Antigravity Tools.app.tar.gz.sig" "Antigravity Tools_${ARCH}.app.tar.gz.sig" - echo "Renamed assets to append Arch: $ARCH" - fi - - # 更新对应的 updater.json (指向重命名后的文件) - UPDATER_JSON="../../../updater/install.json" - if [ ! -f "$UPDATER_JSON" ]; then - UPDATER_JSON=$(find ../../../updater -name "*.json" | head -n 1) - fi - - if [ -f "$UPDATER_JSON" ]; then - echo "Updating $UPDATER_JSON to use renamed assets..." - sed -i '' "s/Antigravity%20Tools.app.tar.gz/Antigravity%20Tools_${ARCH}.app.tar.gz/g" "$UPDATER_JSON" - fi - - # 1. 上传 updater.json 到 Artifacts (供后续合并使用) - - name: Upload updater json - uses: actions/upload-artifact@v4 with: - name: updater-json-${{ matrix.platform }}-${{ strategy.job-index }} - path: src-tauri/target/**/release/bundle/updater/*.json - if-no-files-found: warn - - # 2. 
直接上传安装包到 GitHub Release (避免 artifact 下载超时) - - name: Upload Release Assets - uses: softprops/action-gh-release@v1 - if: startsWith(github.ref, 'refs/tags/') - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ github.ref_name }} - name: "Antigravity Tools ${{ github.ref_name }}" - body: "See the assets to download this version and install." - draft: false + tagName: ${{ env.VERSION }} + releaseName: "Antigravity Tools ${{ env.VERSION }}" + releaseBody: | + ## 🚀 Antigravity Tools ${{ env.VERSION }} + + ### 📦 Installation + - **Windows**: Download `.msi` or `.exe` installer + - **macOS**: Download `.dmg` (Universal) or platform-specific build + - **Linux**: Download `.deb` or `.AppImage` + + ### 🔄 Auto-Update + Existing installations will be notified automatically. + + --- + [Full Changelog](https://github.com/${{ github.repository }}/compare/...v${{ env.VERSION }}) + releaseDraft: false prerelease: false - files: | - src-tauri/target/**/release/bundle/dmg/*.dmg - src-tauri/target/**/release/bundle/deb/*.deb - src-tauri/target/**/release/bundle/appimage/*.AppImage - src-tauri/target/**/release/bundle/msi/*.msi - src-tauri/target/**/release/bundle/nsis/*.exe - src-tauri/target/**/release/bundle/rpm/*.rpm - src-tauri/target/**/release/bundle/macos/*.app.tar.gz - src-tauri/target/**/release/bundle/macos/*.app.tar.gz.sig - src-tauri/target/**/release/bundle/dmg/*.sig - src-tauri/target/**/release/bundle/deb/*.sig - src-tauri/target/**/release/bundle/appimage/*.sig - src-tauri/target/**/release/bundle/msi/*.sig - src-tauri/target/**/release/bundle/nsis/*.sig - src-tauri/target/**/release/bundle/rpm/*.sig - - publish-release: - needs: build-tauri - runs-on: ubuntu-latest - permissions: - contents: write - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - # 只下载 updater.json 相关的小文件 - - name: Download updater artifacts - uses: actions/download-artifact@v4 - with: - pattern: updater-json-* - path: all-updaters - merge-multiple: true - - 
- name: Extract Release Notes - run: | - VERSION="${{ github.ref_name }}" - echo "Extracting release notes for version $VERSION" - - # Extract the section, remove leading 4 spaces dedent - awk -v ver="$VERSION" ' - BEGIN { capture=0 } - # Match start line: " * **v4.1.0..." - $0 ~ "^[[:space:]]*\\*[[:space:]]+\\*\\*" ver { capture=1; next } - # Match next version line to stop: " * **v..." - capture && $0 ~ "^[[:space:]]*\\*[[:space:]]+\\*\\*v" { capture=0; exit } - capture { print } - ' README.md | sed 's/^ //' > release_notes.md - - # If no notes found, add a default message - if [ ! -s release_notes.md ]; then - echo "See the assets to download this version and install." > release_notes.md - fi - - echo "Release Notes Content:" - cat release_notes.md - - - name: Merge updater JSONs - run: | - echo "Merging updater.json files..." - echo "{}" > merged_updater.json - - # 查找所有下载下来的 json 并合并 - find all-updaters -name "*.json" -type f | while read json_file; do - if jq -e . "$json_file" >/dev/null 2>&1; then - echo "Merging valid JSON: $json_file..." 
- jq -s '.[0] * .[1]' merged_updater.json "$json_file" > temp.json && mv temp.json merged_updater.json - else - echo "Skipping invalid JSON: $json_file" - cat "$json_file" - fi - done - - echo "Merged JSON content:" - cat merged_updater.json - - mv merged_updater.json updater.json - - - name: Upload merged updater.json - uses: softprops/action-gh-release@v1 - if: startsWith(github.ref, 'refs/tags/') - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ github.ref_name }} - body_path: release_notes.md - files: updater.json - # 确保不覆盖已存在的其他 assets (softprops/action-gh-release 默认是追加) - - docker-build-amd64: - runs-on: ubuntu-latest - if: github.event_name != 'pull_request' && (github.repository == 'lbjlaq/Antigravity-Manager' || vars.ENABLE_DOCKER_PUSH == 'true') - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Build and push (AMD64) - uses: docker/build-push-action@v6 - with: - context: . 
- file: docker/Dockerfile - platforms: linux/amd64 - push: true - tags: | - ${{ github.repository_owner }}/antigravity-manager:latest-amd64 - ${{ github.repository_owner }}/antigravity-manager:${{ github.ref_name }}-amd64 - build-args: | - USE_MIRROR=false - - docker-build-arm64: - runs-on: ubuntu-24.04-arm - if: github.event_name != 'pull_request' && (github.repository == 'lbjlaq/Antigravity-Manager' || vars.ENABLE_DOCKER_PUSH == 'true') - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Build and push (ARM64) - uses: docker/build-push-action@v6 - with: - context: . - file: docker/Dockerfile - platforms: linux/arm64 - push: true - tags: | - ${{ github.repository_owner }}/antigravity-manager:latest-arm64 - ${{ github.repository_owner }}/antigravity-manager:${{ github.ref_name }}-arm64 - build-args: | - USE_MIRROR=false - - docker-manifest: - needs: [docker-build-amd64, docker-build-arm64] - runs-on: ubuntu-latest - if: github.event_name != 'pull_request' && (github.repository == 'lbjlaq/Antigravity-Manager' || vars.ENABLE_DOCKER_PUSH == 'true') - steps: - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Create and push manifest - run: | - docker buildx imagetools create -t ${{ github.repository_owner }}/antigravity-manager:latest \ - ${{ github.repository_owner }}/antigravity-manager:latest-amd64 \ - ${{ github.repository_owner }}/antigravity-manager:latest-arm64 - - docker buildx imagetools create -t ${{ github.repository_owner }}/antigravity-manager:${{ github.ref_name }} \ - ${{ github.repository_owner }}/antigravity-manager:${{ github.ref_name }}-amd64 \ - ${{ github.repository_owner 
}}/antigravity-manager:${{ github.ref_name }}-arm64 + args: ${{ matrix.args }} + tauriScript: npm run tauri + updaterJsonKeepUniversal: true diff --git a/.gitignore b/.gitignore index 01a516e85..5582c2b80 100755 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,5 @@ # Logs -logs +/logs .logs *.log npm-debug.log* @@ -35,6 +35,10 @@ src-tauri/.env .env.* environment/ +# Tauri signing keys (NEVER COMMIT!) +.tauri-keys/ +*.key + # Python virtual environments and test files .venv*/ venv/ @@ -43,6 +47,5 @@ __pycache__/ *.pyc # Reference projects (for development only) - +CLIProxyAPI/ DOCKER_DEPLOYMENT.md -pnpm-lock.yaml diff --git "a/API\345\217\215\344\273\243\346\265\201\351\207\217\346\266\210\350\200\227\346\234\272\345\210\266\345\210\206\346\236\220.md" "b/API\345\217\215\344\273\243\346\265\201\351\207\217\346\266\210\350\200\227\346\234\272\345\210\266\345\210\206\346\236\220.md" deleted file mode 100644 index cff3a46b4..000000000 --- "a/API\345\217\215\344\273\243\346\265\201\351\207\217\346\266\210\350\200\227\346\234\272\345\210\266\345\210\206\346\236\220.md" +++ /dev/null @@ -1,856 +0,0 @@ -# API 反代流量消耗机制分析 - -## 项目概述 - -Antigravity Tools 是一个基于 Tauri 的桌面应用,提供 Google AI (Gemini/Claude) 的 API 反向代理服务。该项目通过管理多个 Google 账号,实现智能的流量分配和配额管理。 - ---- - -## 一、核心架构 - -### 1.1 技术栈 -- **前端**: React + TypeScript + Vite -- **后端**: Rust (Tauri) + Axum (Web 框架) -- **数据存储**: - - SQLite (流量统计、代理日志、IP 监控) - - JSON 文件 (账号信息、配置) - -### 1.2 核心模块 - -``` -src-tauri/src/ -├── proxy/ -│ ├── token_manager.rs # Token 池管理与账号调度 -│ ├── server.rs # Axum HTTP 服务器 -│ ├── rate_limit.rs # 限流跟踪器 -│ └── sticky_config.rs # 会话粘性配置 -├── modules/ -│ ├── account.rs # 账号 CRUD 操作 -│ ├── quota.rs # 配额查询与保护 -│ └── token_stats.rs # Token 使用统计 -└── models/ - ├── account.rs # 账号数据模型 - ├── quota.rs # 配额数据模型 - └── token.rs # Token 数据模型 -``` - ---- - -## 二、流量消耗机制详解 - -### 2.1 账号数据结构 - -每个账号包含以下核心信息: - -```rust -pub struct Account { - pub id: String, // 账号唯一 ID - pub email: String, // Google 邮箱 - pub token: TokenData, 
// OAuth Token 信息 - pub quota: Option, // 配额信息 - pub disabled: bool, // 是否全局禁用 - pub proxy_disabled: bool, // 是否禁用代理功能 - pub protected_models: HashSet, // 受配额保护的模型列表 - pub created_at: i64, - pub last_used: i64, -} -``` - -**Token 数据**: -```rust -pub struct TokenData { - pub access_token: String, // 访问令牌 - pub refresh_token: String, // 刷新令牌 - pub expires_in: i64, // 过期时间(秒) - pub expiry_timestamp: i64, // 过期时间戳 - pub project_id: Option, // Google Cloud 项目 ID -} -``` - -**配额数据**: -```rust -pub struct QuotaData { - pub models: Vec, // 各模型配额 - pub subscription_tier: Option, // 订阅等级 (FREE/PRO/ULTRA) - pub is_forbidden: bool, // 是否被禁止访问 -} - -pub struct ModelQuota { - pub name: String, // 模型名称 - pub percentage: i32, // 剩余配额百分比 (0-100) - pub reset_time: String, // 配额重置时间 -} -``` - -### 2.2 Token 池管理 (TokenManager) - -**核心职责**: -1. 从磁盘加载所有可用账号 -2. 智能选择账号分配给请求 -3. 自动刷新过期的 Token -4. 跟踪限流状态和配额保护 - -**关键字段**: -```rust -pub struct TokenManager { - tokens: Arc>, // 账号 ID -> Token 映射 - current_index: Arc, // 轮询索引 - last_used_account: Arc>>, // 60s 锁定机制 - rate_limit_tracker: Arc, // 限流跟踪器 - session_accounts: Arc>, // 会话 -> 账号绑定 - preferred_account_id: Arc>>, // 固定账号模式 - health_scores: Arc>, // 账号健康分数 -} -``` - -### 2.3 账号调度策略 - -#### 2.3.1 优先级排序 - -在每次请求时,账号按以下优先级排序: - -```rust -// 1. 订阅等级优先级: ULTRA > PRO > FREE -// 2. 同等级内按剩余配额降序排序 -// 3. 
配额相同时按健康分数降序排序 - -tokens_snapshot.sort_by(|a, b| { - // 第一优先级: 订阅等级 - let tier_priority = |tier: &Option| match tier.as_deref() { - Some("ULTRA") => 0, - Some("PRO") => 1, - Some("FREE") => 2, - _ => 3, - }; - - let tier_cmp = tier_priority(&a.subscription_tier) - .cmp(&tier_priority(&b.subscription_tier)); - - if tier_cmp != std::cmp::Ordering::Equal { - return tier_cmp; - } - - // 第二优先级: 剩余配额百分比 (高优先) - let quota_a = a.remaining_quota.unwrap_or(0); - let quota_b = b.remaining_quota.unwrap_or(0); - let quota_cmp = quota_b.cmp("a_a); - - if quota_cmp != std::cmp::Ordering::Equal { - return quota_cmp; - } - - // 第三优先级: 健康分数 (高优先) - b.health_score.partial_cmp(&a.health_score) - .unwrap_or(std::cmp::Ordering::Equal) -}); -``` - -**设计理由**: -- **ULTRA/PRO 优先**: 这些账号配额重置快,优先消耗可最大化总体可用性 -- **高配额优先**: 避免低配额账号被用光,保持账号池的健康度 -- **健康分数**: 根据历史成功率动态调整优先级 - -#### 2.3.2 调度模式 - -系统支持三种调度模式: - -1. **CacheFirst (缓存优先)** - - 启用 60s 全局锁定 - - 启用会话粘性 (Session Sticky) - - 适合需要上下文连续性的场景 - -2. **Balance (平衡模式)** - - 启用会话粘性 - - 不启用 60s 锁定 - - 兼顾性能和连续性 - -3. **PerformanceFirst (性能优先)** - - 纯轮询模式 - - 不启用任何锁定机制 - - 最大化并发性能 - -#### 2.3.3 固定账号模式 (FIX #820) - -支持指定优先使用某个账号: - -```rust -// 如果设置了 preferred_account_id -if let Some(ref pref_id) = preferred_id { - if let Some(preferred_token) = tokens_snapshot.iter().find(|t| &t.account_id == pref_id) { - // 检查账号是否可用 (未限流、未被配额保护) - if !is_rate_limited && !is_quota_protected { - // 直接使用优先账号,跳过轮询逻辑 - return Ok((token.access_token, project_id, token.email, 0)); - } - } -} -``` - -### 2.4 配额保护机制 - -#### 2.4.1 模型级配额保护 (Issue #621) - -当某个模型的配额低于阈值时,自动将该模型加入账号的 `protected_models` 列表: - -```rust -// 配置示例 -{ - "quota_protection": { - "enabled": true, - "threshold_percentage": 10, // 低于 10% 触发保护 - "monitored_models": [ - "gemini-3-flash", - "claude-sonnet-4-5", - "gemini-3-pro-high" - ] - } -} -``` - -**保护流程**: -1. 加载账号时检查每个模型的配额 -2. 如果 `percentage <= threshold_percentage`,将模型名加入 `protected_models` -3. 在 `get_token()` 时,跳过包含目标模型的账号 -4. 
当配额恢复时,自动从 `protected_models` 移除 - -**优势**: -- 精细化保护,不会因为单个模型配额低而禁用整个账号 -- 其他模型仍可正常使用该账号 - -#### 2.4.2 配额恢复机制 - -```rust -// 当配额恢复到阈值以上时 -if percentage > threshold { - // 从 protected_models 中移除该模型 - account_json["protected_models"] - .as_array_mut() - .unwrap() - .retain(|m| m.as_str() != Some(model_name)); - - // 保存到磁盘 - std::fs::write(account_path, serde_json::to_string_pretty(account_json).unwrap())?; -} -``` - -### 2.5 限流跟踪 (RateLimitTracker) - -**功能**: -- 记录每个账号的限流状态 (429 错误) -- 记录每个账号的 5xx 错误 (熔断机制) -- 自动清理过期记录 (每 15 秒) - -**数据结构**: -```rust -pub struct RateLimitTracker { - // account_id -> (reset_timestamp, model_name) - records: Arc)>>, - - // account_id -> 5xx 错误计数 - error_counts: Arc>, -} -``` - -**使用示例**: -```rust -// 记录限流 -tracker.record_rate_limit(&account_id, Some(&model_name), 60); // 60 秒后重置 - -// 检查是否限流 -if tracker.is_rate_limited(&account_id, Some(&model_name)) { - // 跳过该账号 -} - -// 清除限流记录 -tracker.clear(&account_id); -``` - -### 2.6 会话粘性 (Session Sticky) - -**目的**: 保持同一会话的请求使用同一账号,避免上下文丢失 - -**实现**: -```rust -// 1. 首次请求时绑定会话与账号 -if let Some(sid) = session_id { - self.session_accounts.insert(sid.to_string(), candidate.account_id.clone()); -} - -// 2. 
后续请求复用绑定的账号 -if let Some(bound_id) = self.session_accounts.get(sid).map(|v| v.clone()) { - if let Some(bound_token) = tokens_snapshot.iter().find(|t| t.account_id == bound_id) { - // 检查账号是否仍然可用 - if !is_rate_limited && !is_quota_protected { - target_token = Some(bound_token.clone()); - } else { - // 账号不可用,解绑并切换 - self.session_accounts.remove(sid); - } - } -} -``` - -### 2.7 Token 自动刷新 - -**触发条件**: Token 距离过期时间小于 5 分钟 - -```rust -let now = chrono::Utc::now().timestamp(); -if now >= token.timestamp - 300 { // 提前 5 分钟刷新 - match crate::modules::oauth::refresh_access_token(&token.refresh_token).await { - Ok(token_response) => { - // 更新内存中的 Token - token.access_token = token_response.access_token.clone(); - token.expires_in = token_response.expires_in; - token.timestamp = now + token_response.expires_in; - - // 同步到 DashMap - if let Some(mut entry) = self.tokens.get_mut(&token.account_id) { - entry.access_token = token.access_token.clone(); - entry.expires_in = token.expires_in; - entry.timestamp = token.timestamp; - } - - // 持久化到磁盘 - self.save_refreshed_token(&token.account_id, &token_response).await?; - } - Err(e) if e.contains("invalid_grant") => { - // Refresh Token 已失效,禁用账号 - self.disable_account(&token.account_id, &format!("invalid_grant: {}", e)).await?; - self.tokens.remove(&token.account_id); - } - } -} -``` - ---- - -## 三、多账号消耗实现 - -### 3.1 账号加载流程 - -```rust -pub async fn load_accounts(&self) -> Result { - let accounts_dir = self.data_dir.join("accounts"); - - // 清空现有 Token 池 - self.tokens.clear(); - self.current_index.store(0, Ordering::SeqCst); - - // 遍历账号目录 - for entry in std::fs::read_dir(&accounts_dir)? 
{ - let path = entry?.path(); - - // 只处理 .json 文件 - if path.extension().and_then(|s| s.to_str()) != Some("json") { - continue; - } - - // 加载单个账号 - match self.load_single_account(&path).await { - Ok(Some(token)) => { - self.tokens.insert(token.account_id.clone(), token); - count += 1; - } - Ok(None) => { - // 跳过禁用或配额保护的账号 - } - Err(e) => { - tracing::debug!("加载账号失败 {:?}: {}", path, e); - } - } - } - - Ok(count) -} -``` - -### 3.2 账号过滤逻辑 - -在 `load_single_account()` 中,以下账号会被跳过: - -1. **全局禁用** (`disabled: true`) -2. **代理禁用** (`proxy_disabled: true`) -3. **配额保护触发** (所有监控模型都低于阈值) - -```rust -async fn load_single_account(&self, path: &PathBuf) -> Result, String> { - let content = std::fs::read_to_string(path)?; - let mut account: serde_json::Value = serde_json::from_str(&content)?; - - // 1. 检查全局禁用 - if account.get("disabled").and_then(|v| v.as_bool()).unwrap_or(false) { - return Ok(None); - } - - // 2. 检查配额保护 - if self.check_and_protect_quota(&mut account, path).await { - return Ok(None); - } - - // 3. 检查代理禁用 - if account.get("proxy_disabled").and_then(|v| v.as_bool()).unwrap_or(false) { - return Ok(None); - } - - // 4. 提取账号信息并加载 - Ok(Some(ProxyToken { - account_id: account["id"].as_str()?.to_string(), - email: account["email"].as_str()?.to_string(), - access_token: account["token"]["access_token"].as_str()?.to_string(), - refresh_token: account["token"]["refresh_token"].as_str()?.to_string(), - // ... 
其他字段 - })) -} -``` - -### 3.3 请求分配流程 - -**完整流程图**: - -``` -客户端请求 - ↓ -[1] 解析请求参数 (model, session_id) - ↓ -[2] 固定账号模式检查 - ├─ 有 preferred_account_id → 优先使用 - └─ 无 → 继续 - ↓ -[3] 会话粘性检查 (如果有 session_id) - ├─ 已绑定账号 → 检查可用性 - │ ├─ 可用 → 复用 - │ └─ 不可用 → 解绑并继续 - └─ 未绑定 → 继续 - ↓ -[4] 60s 全局锁定检查 (CacheFirst 模式) - ├─ 距上次使用 < 60s → 复用上次账号 - └─ 否则 → 继续 - ↓ -[5] 轮询选择账号 - ├─ 按优先级排序 - ├─ 跳过已尝试的账号 - ├─ 跳过限流的账号 - ├─ 跳过配额保护的账号 - └─ 选择第一个可用账号 - ↓ -[6] Token 刷新检查 - ├─ 距过期 < 5 分钟 → 刷新 Token - └─ 否则 → 继续 - ↓ -[7] 返回 (access_token, project_id, email) - ↓ -[8] 代理请求到 Google API - ↓ -[9] 记录使用统计 - ├─ token_stats.db (Token 消耗) - ├─ proxy_logs.db (请求日志) - └─ 更新 last_used 时间戳 -``` - -### 3.4 流量统计 - -#### 3.4.1 数据库结构 - -**token_usage 表** (原始记录): -```sql -CREATE TABLE token_usage ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - timestamp INTEGER NOT NULL, - account_email TEXT NOT NULL, - model TEXT NOT NULL, - input_tokens INTEGER NOT NULL DEFAULT 0, - output_tokens INTEGER NOT NULL DEFAULT 0, - total_tokens INTEGER NOT NULL DEFAULT 0 -); -``` - -**token_stats_hourly 表** (小时聚合): -```sql -CREATE TABLE token_stats_hourly ( - hour_bucket TEXT NOT NULL, -- "2024-01-15 14:00" - account_email TEXT NOT NULL, - total_input_tokens INTEGER NOT NULL DEFAULT 0, - total_output_tokens INTEGER NOT NULL DEFAULT 0, - total_tokens INTEGER NOT NULL DEFAULT 0, - request_count INTEGER NOT NULL DEFAULT 0, - PRIMARY KEY (hour_bucket, account_email) -); -``` - -#### 3.4.2 记录流程 - -```rust -// 在请求完成后记录 -pub fn record_usage( - account_email: &str, - model: &str, - input_tokens: u32, - output_tokens: u32, -) -> Result<(), String> { - let conn = connect_db()?; - let timestamp = chrono::Utc::now().timestamp(); - let total_tokens = input_tokens + output_tokens; - - // 1. 插入原始记录 - conn.execute( - "INSERT INTO token_usage (timestamp, account_email, model, input_tokens, output_tokens, total_tokens) - VALUES (?1, ?2, ?3, ?4, ?5, ?6)", - params![timestamp, account_email, model, input_tokens, output_tokens, total_tokens], - )?; - - // 2. 
更新小时聚合 (使用 UPSERT) - let hour_bucket = chrono::Utc::now().format("%Y-%m-%d %H:00").to_string(); - conn.execute( - "INSERT INTO token_stats_hourly (hour_bucket, account_email, total_input_tokens, total_output_tokens, total_tokens, request_count) - VALUES (?1, ?2, ?3, ?4, ?5, 1) - ON CONFLICT(hour_bucket, account_email) DO UPDATE SET - total_input_tokens = total_input_tokens + ?3, - total_output_tokens = total_output_tokens + ?4, - total_tokens = total_tokens + ?5, - request_count = request_count + 1", - params![hour_bucket, account_email, input_tokens, output_tokens, total_tokens], - )?; - - Ok(()) -} -``` - -#### 3.4.3 统计查询 - -**按账号统计**: -```rust -pub fn get_account_stats(hours: i64) -> Result, String> { - let cutoff = chrono::Utc::now() - chrono::Duration::hours(hours); - let cutoff_bucket = cutoff.format("%Y-%m-%d %H:00").to_string(); - - let mut stmt = conn.prepare( - "SELECT account_email, - SUM(total_input_tokens) as input, - SUM(total_output_tokens) as output, - SUM(total_tokens) as total, - SUM(request_count) as count - FROM token_stats_hourly - WHERE hour_bucket >= ?1 - GROUP BY account_email - ORDER BY total DESC" - )?; - - // 返回每个账号的统计数据 -} -``` - -**按模型统计**: -```rust -pub fn get_model_stats(hours: i64) -> Result, String> { - let cutoff = chrono::Utc::now().timestamp() - (hours * 3600); - - let mut stmt = conn.prepare( - "SELECT model, - SUM(input_tokens) as input, - SUM(output_tokens) as output, - SUM(total_tokens) as total, - COUNT(*) as count - FROM token_usage - WHERE timestamp >= ?1 - GROUP BY model - ORDER BY total DESC" - )?; - - // 返回每个模型的统计数据 -} -``` - -### 3.5 配额查询与更新 - -#### 3.5.1 配额 API - -Google 提供的配额查询接口: - -```rust -const QUOTA_API_URL: &str = "https://daily-cloudcode-pa.sandbox.googleapis.com/v1internal:fetchAvailableModels"; - -pub async fn fetch_quota(access_token: &str, email: &str) -> Result<(QuotaData, Option), AppError> { - // 1. 
获取 project_id 和订阅等级 - let (project_id, subscription_tier) = fetch_project_id(access_token, email).await; - - // 2. 查询配额 - let client = create_client(); - let payload = json!({ - "project": project_id.as_deref().unwrap_or("bamboo-precept-lgxtn") - }); - - let response = client - .post(QUOTA_API_URL) - .bearer_auth(access_token) - .json(&payload) - .send() - .await?; - - // 3. 解析响应 - let quota_response: QuotaResponse = response.json().await?; - let mut quota_data = QuotaData::new(); - - for (name, info) in quota_response.models { - if let Some(quota_info) = info.quota_info { - let percentage = quota_info.remaining_fraction - .map(|f| (f * 100.0) as i32) - .unwrap_or(0); - - let reset_time = quota_info.reset_time.unwrap_or_default(); - - // 只保留关心的模型 - if name.contains("gemini") || name.contains("claude") { - quota_data.add_model(name, percentage, reset_time); - } - } - } - - quota_data.subscription_tier = subscription_tier; - Ok((quota_data, project_id)) -} -``` - -#### 3.5.2 批量刷新配额 - -```rust -pub async fn refresh_all_quotas_logic() -> Result { - let accounts = list_accounts()?; - let mut success = 0; - let mut failed = 0; - - // 并发查询 (批次大小 5) - for batch in accounts.chunks(5) { - let mut handles = Vec::new(); - - for account in batch { - let account = account.clone(); - let handle = tokio::spawn(async move { - // 刷新 Token (如果需要) - let token = ensure_fresh_token(&account.token).await?; - - // 查询配额 - let (quota, project_id) = fetch_quota(&token.access_token, &account.email).await?; - - // 更新账号文件 - let mut updated_account = account.clone(); - updated_account.quota = Some(quota); - if let Some(pid) = project_id { - updated_account.token.project_id = Some(pid); - } - save_account(&updated_account)?; - - Ok::<_, String>(()) - }); - handles.push(handle); - } - - for handle in handles { - match handle.await { - Ok(Ok(_)) => success += 1, - _ => failed += 1, - } - } - } - - Ok(format!("Refreshed {}/{} accounts", success, success + failed)) -} -``` - ---- - -## 四、关键优化点 - -### 
4.1 性能优化 - -1. **DashMap 无锁并发**: 使用 `DashMap` 替代 `Mutex`,提升并发性能 -2. **预排序**: 在请求前对账号排序,避免每次请求时重复排序 -3. **批量操作**: 配额刷新等操作使用批量并发处理 -4. **连接池**: HTTP 客户端使用连接池复用连接 - -### 4.2 可靠性优化 - -1. **Token 自动刷新**: 提前 5 分钟刷新,避免请求时 Token 过期 -2. **限流自动跳过**: 主动检测限流状态,避免重复请求 -3. **配额保护**: 低配额账号自动保护,避免耗尽 -4. **熔断机制**: 5xx 错误累积到阈值后暂时禁用账号 -5. **乐观重置**: 所有账号限流时,等待最短时间后自动重试 - -### 4.3 用户体验优化 - -1. **会话粘性**: 保持对话连续性 -2. **智能调度**: 根据订阅等级和配额智能分配 -3. **实时统计**: 提供详细的使用统计和趋势分析 -4. **配额可视化**: 实时显示每个账号的配额状态 - ---- - -## 五、典型使用场景 - -### 5.1 场景一: 高并发 API 调用 - -**需求**: 100 个并发请求,需要快速响应 - -**配置**: -- 调度模式: `PerformanceFirst` -- 账号数量: 10+ (建议 ULTRA/PRO 账号) -- 配额保护: 启用,阈值 10% - -**流程**: -1. 请求到达,纯轮询选择账号 -2. 跳过限流和配额保护的账号 -3. 按订阅等级和配额优先级分配 -4. 记录使用统计 - -**结果**: -- 平均响应时间: < 500ms -- 账号利用率: 均衡分布 -- 配额消耗: 优先消耗 ULTRA/PRO - -### 5.2 场景二: 长对话场景 - -**需求**: 保持对话上下文,同一会话使用同一账号 - -**配置**: -- 调度模式: `CacheFirst` 或 `Balance` -- 会话粘性: 启用 -- 60s 锁定: 启用 (CacheFirst) - -**流程**: -1. 首次请求分配账号 A,绑定 session_id -2. 后续请求携带相同 session_id,复用账号 A -3. 如果账号 A 限流,自动解绑并切换到账号 B -4. 新的 session_id 绑定账号 B - -**结果**: -- 对话连续性: 100% -- 切换次数: 最小化 -- 用户体验: 无感知切换 - -### 5.3 场景三: 配额保护场景 - -**需求**: 避免 FREE 账号配额耗尽 - -**配置**: -- 配额保护: 启用,阈值 10% -- 监控模型: `["gemini-3-flash", "claude-sonnet-4-5"]` -- 自动刷新: 每小时 - -**流程**: -1. 定时刷新所有账号配额 -2. 检测到账号 A 的 `gemini-3-flash` 配额 < 10% -3. 将 `gemini-3-flash` 加入账号 A 的 `protected_models` -4. 后续请求 `gemini-3-flash` 时跳过账号 A -5. 其他模型仍可使用账号 A -6. 配额恢复后自动移除保护 - -**结果**: -- 配额利用率: 最大化 -- 账号可用性: 精细化保护 -- 恢复速度: 自动化 - ---- - -## 六、总结 - -### 6.1 核心优势 - -1. **智能调度**: 多维度优先级排序,最大化资源利用 -2. **精细化保护**: 模型级配额保护,避免过度禁用 -3. **高可用性**: 限流跟踪、熔断机制、乐观重置 -4. **灵活配置**: 支持多种调度模式和固定账号 -5. **完整统计**: 详细的使用统计和趋势分析 - -### 6.2 适用场景 - -- ✅ 多账号管理和负载均衡 -- ✅ 高并发 API 代理 -- ✅ 配额精细化管理 -- ✅ 长对话场景 -- ✅ 企业级 API 网关 - -### 6.3 未来优化方向 - -1. **机器学习调度**: 基于历史数据预测账号可用性 -2. **动态阈值**: 根据使用模式自动调整配额保护阈值 -3. **跨实例同步**: 支持多实例部署时的账号池同步 -4. 
**成本优化**: 根据订阅成本优化账号使用策略 - ---- - -## 附录: 配置示例 - -### A.1 应用配置 (app_config.json) - -```json -{ - "proxy": { - "enabled": true, - "host": "127.0.0.1", - "port": 8045, - "scheduling_mode": "CacheFirst", - "request_timeout": 60 - }, - "quota_protection": { - "enabled": true, - "threshold_percentage": 10, - "monitored_models": [ - "gemini-3-flash", - "claude-sonnet-4-5", - "gemini-3-pro-high", - "gemini-3-pro-image" - ] - }, - "circuit_breaker": { - "enabled": true, - "error_threshold": 5, - "timeout_seconds": 300 - } -} -``` - -### A.2 账号文件示例 (accounts/xxx.json) - -```json -{ - "id": "abc123", - "email": "user@gmail.com", - "name": "User Name", - "token": { - "access_token": "ya29.xxx", - "refresh_token": "1//xxx", - "expires_in": 3599, - "expiry_timestamp": 1706000000, - "token_type": "Bearer", - "project_id": "bamboo-precept-lgxtn" - }, - "quota": { - "models": [ - { - "name": "gemini-3-flash", - "percentage": 85, - "reset_time": "2024-01-15T00:00:00Z" - }, - { - "name": "claude-sonnet-4-5", - "percentage": 5, - "reset_time": "2024-01-15T00:00:00Z" - } - ], - "subscription_tier": "PRO", - "last_updated": 1706000000, - "is_forbidden": false - }, - "disabled": false, - "proxy_disabled": false, - "protected_models": ["claude-sonnet-4-5"], - "created_at": 1705000000, - "last_used": 1706000000 -} -``` - ---- - -**文档版本**: 1.0 -**最后更新**: 2026-01-30 -**作者**: Antigravity Analysis diff --git a/Casks/antigravity-tools.rb b/Casks/antigravity-tools.rb index cff7adfbf..7d317b384 100644 --- a/Casks/antigravity-tools.rb +++ b/Casks/antigravity-tools.rb @@ -1,5 +1,5 @@ cask "antigravity-tools" do - version "4.1.8" + version "4.0.6" sha256 :no_check name "Antigravity Tools" diff --git a/IP_MONITORING_MIGRATION_ARCH.md b/IP_MONITORING_MIGRATION_ARCH.md deleted file mode 100644 index 8d61b5f3a..000000000 --- a/IP_MONITORING_MIGRATION_ARCH.md +++ /dev/null @@ -1,1033 +0,0 @@ -# IP 监控功能移植架构文档 - -## 项目概述 - -**源项目**: Antigravity-Manager (antigraviryManager) -**目标项目**: Antigravity-Tools 
(antigracitytools) -**移植功能**: IP 访问监控、黑白名单管理、流量统计 -**创建时间**: 2026-01-30 - ---- - -## 1. 功能概述 - -### 1.1 核心功能 - -IP 监控系统是 Antigravity-Manager 中的安全监控模块,提供以下核心能力: - -1. **IP 访问日志记录** - - 记录所有客户端 IP 访问信息 - - 包含请求方法、路径、User-Agent、状态码、耗时等 - - 支持分页查询和多维度过滤 - -2. **IP 黑名单管理** - - 精确 IP 匹配 - - CIDR 网段匹配 (支持 /8, /16, /24) - - 临时封禁(可设置过期时间) - - 命中计数统计 - - 自动清理过期条目 - -3. **IP 白名单管理** - - 精确 IP 匹配 - - CIDR 网段匹配 - - 白名单优先级(跳过黑名单检查) - - 白名单模式(仅允许白名单 IP) - -4. **限流与自动封禁** - - 按 IP 限制每分钟请求数 - - 按 API Key 限制每分钟请求数 - - 连续违规自动封禁 - - 可配置封禁时长 - -5. **统计与分析** - - 总请求数、独立 IP 数、封禁数统计 - - TOP N IP 访问排行 - - IP 访问时间线 - - IP-Token 流量矩阵 - - 支持按小时/天/周聚合 - ---- - -## 2. 架构分析 - -### 2.1 数据库层 (SQLite) - -**文件**: `modules/security_db.rs` - -#### 数据表结构 - -1. **ip_access_logs** - IP 访问日志表 -```sql -CREATE TABLE ip_access_logs ( - id TEXT PRIMARY KEY, - client_ip TEXT NOT NULL, - timestamp INTEGER NOT NULL, - method TEXT, - path TEXT, - user_agent TEXT, - status INTEGER, - duration INTEGER, - api_key_hash TEXT, - blocked INTEGER DEFAULT 0, - block_reason TEXT -) -``` - -2. **ip_blacklist** - IP 黑名单表 -```sql -CREATE TABLE ip_blacklist ( - id TEXT PRIMARY KEY, - ip_pattern TEXT NOT NULL UNIQUE, - reason TEXT, - created_at INTEGER NOT NULL, - expires_at INTEGER, - created_by TEXT DEFAULT 'manual', - hit_count INTEGER DEFAULT 0 -) -``` - -3. 
**ip_whitelist** - IP 白名单表 -```sql -CREATE TABLE ip_whitelist ( - id TEXT PRIMARY KEY, - ip_pattern TEXT NOT NULL UNIQUE, - description TEXT, - created_at INTEGER NOT NULL -) -``` - -#### 核心索引 -```sql -CREATE INDEX idx_ip_access_ip ON ip_access_logs (client_ip); -CREATE INDEX idx_ip_access_timestamp ON ip_access_logs (timestamp DESC); -CREATE INDEX idx_ip_access_blocked ON ip_access_logs (blocked); -CREATE INDEX idx_blacklist_pattern ON ip_blacklist (ip_pattern); -``` - -#### 核心函数 - -| 函数名 | 功能 | 备注 | -|--------|------|------| -| `init_db()` | 初始化数据库 | 创建表和索引 | -| `save_ip_access_log()` | 保存访问日志 | - | -| `get_ip_access_logs()` | 查询访问日志 | 支持分页、IP过滤、封禁过滤 | -| `get_ip_stats()` | 获取统计概览 | 总请求、独立IP、封禁数等 | -| `get_top_ips()` | 获取TOP IP排行 | 指定时间范围 | -| `cleanup_old_ip_logs()` | 清理旧日志 | 按天数清理 | -| `add_to_blacklist()` | 添加黑名单 | 支持过期时间 | -| `remove_from_blacklist()` | 移除黑名单 | - | -| `get_blacklist()` | 获取黑名单列表 | - | -| `is_ip_in_blacklist()` | 检查IP是否被封禁 | 支持CIDR匹配 | -| `add_to_whitelist()` | 添加白名单 | - | -| `remove_from_whitelist()` | 移除白名单 | - | -| `get_whitelist()` | 获取白名单列表 | - | -| `is_ip_in_whitelist()` | 检查IP是否在白名单 | 支持CIDR匹配 | -| `cidr_match()` | CIDR 网段匹配 | 支持 /8, /16, /24 | - ---- - -### 2.2 监控层 - -**文件**: `proxy/monitor.rs` - -#### 核心数据结构 - -```rust -pub struct ProxyRequestLog { - pub id: String, - pub timestamp: i64, - pub method: String, - pub url: String, - pub status: u16, - pub duration: u64, - pub model: Option, - pub mapped_model: Option, - pub account_email: Option, - pub client_ip: Option, // ⭐ 客户端 IP - pub error: Option, - pub request_body: Option, - pub response_body: Option, - pub input_tokens: Option, - pub output_tokens: Option, - pub protocol: Option, -} - -pub struct ProxyMonitor { - pub logs: RwLock>, - pub stats: RwLock, - pub max_logs: usize, - pub enabled: AtomicBool, - app_handle: Option, -} -``` - -#### 核心方法 - -| 方法 | 功能 | -|------|------| -| `log_request()` | 记录请求日志(包含 IP) | -| `get_logs()` | 获取日志(优先从DB) | -| `get_stats()` | 获取统计数据 | -| 
`get_logs_filtered()` | 过滤查询日志 | -| `clear()` | 清空日志 | - ---- - -### 2.3 中间件层 - -**文件**: `proxy/middleware/monitor.rs` - -#### 核心功能 - -1. **IP 提取逻辑** -```rust -// 从请求头提取客户端 IP -let client_ip = request.headers() - .get("x-forwarded-for") - .and_then(|v| v.to_str().ok()) - .map(|s| s.split(',').next().unwrap_or(s).trim().to_string()) - .or_else(|| { - request.headers() - .get("x-real-ip") - .and_then(|v| v.to_str().ok()) - .map(|s| s.to_string()) - }); -``` - -2. **请求/响应拦截** - - 捕获请求体(用于提取模型等信息) - - 捕获响应体(用于提取 token 用量) - - 流式响应特殊处理(SSE) - - 记录完整请求链路 - ---- - -### 2.4 配置层 - -**文件**: `proxy/config.rs` - -#### 配置结构 - -```rust -pub struct SecurityMonitorConfig { - pub enabled: bool, // 是否启用 IP 监控 - pub rate_limit: InboundRateLimitConfig, // 限流配置 - pub blacklist: IpBlacklistConfig, // 黑名单配置 - pub whitelist: IpWhitelistConfig, // 白名单配置 - pub log_retention_days: i64, // 日志保留天数 -} - -pub struct InboundRateLimitConfig { - pub enabled: bool, - pub requests_per_minute: u32, // 每IP每分钟最大请求数 - pub requests_per_minute_per_key: u32, // 每Key每分钟最大请求数 - pub auto_ban_threshold: u32, // 自动封禁阈值 - pub auto_ban_duration: u64, // 自动封禁时长(秒) -} - -pub struct IpBlacklistConfig { - pub enabled: bool, - pub block_message: String, -} - -pub struct IpWhitelistConfig { - pub enabled: bool, // 仅允许白名单IP - pub whitelist_priority: bool, // 白名单优先跳过黑名单 -} -``` - ---- - -### 2.5 统计分析层 - -**文件**: `modules/traffic_stats.rs` - -#### 核心功能 - -1. **IP 流量统计** - ```rust - pub struct IpTrafficStats { - pub client_ip: String, - pub request_count: u64, - pub total_input_tokens: u64, - pub total_output_tokens: u64, - pub unique_tokens: u64, // 使用的token数 - pub first_seen: i64, - pub last_seen: i64, - } - ``` - -2. **IP 时间线** - ```rust - pub struct IpTimelinePoint { - pub hour: String, - pub request_count: u64, - pub input_tokens: u64, - pub output_tokens: u64, - } - ``` - -3. 
**IP-Token 关联矩阵** - ```rust - pub struct IpTokenPair { - pub client_ip: String, - pub api_key_hash: String, - pub request_count: u64, - pub total_tokens: u64, - } - ``` - ---- - -### 2.6 命令层 (Tauri Commands) - -**文件**: `commands/mod.rs` (需要扩展安全相关命令) - -虽然当前代码中未明确看到完整的安全命令,但从功能推断应包含: - -```rust -// IP 访问日志 -#[tauri::command] -pub async fn get_ip_access_logs(...) -> Result, String> - -#[tauri::command] -pub async fn get_ip_stats() -> Result - -#[tauri::command] -pub async fn get_top_ips(...) -> Result, String> - -// 黑名单管理 -#[tauri::command] -pub async fn add_to_blacklist(...) -> Result - -#[tauri::command] -pub async fn remove_from_blacklist(...) -> Result<(), String> - -#[tauri::command] -pub async fn get_blacklist() -> Result, String> - -// 白名单管理 -#[tauri::command] -pub async fn add_to_whitelist(...) -> Result - -#[tauri::command] -pub async fn remove_from_whitelist(...) -> Result<(), String> - -#[tauri::command] -pub async fn get_whitelist() -> Result, String> - -// 流量统计 -#[tauri::command] -pub async fn get_traffic_by_ip(...) -> Result, String> - -#[tauri::command] -pub async fn get_ip_timeline(...) -> Result, String> - -#[tauri::command] -pub async fn get_ip_token_matrix(...) -> Result, String> -``` - ---- - -## 3. 移植方案 - -### 3.1 目标项目结构分析 - -**antigracitytools** 项目结构: -``` -src-tauri/ -├── src/ -│ ├── commands/ # Tauri 命令层 -│ ├── constants.rs # 常量定义 -│ ├── error.rs # 错误处理 -│ ├── lib.rs # 入口 -│ ├── main.rs -│ ├── models/ # 数据模型 -│ ├── modules/ # 业务模块 -│ ├── proxy/ # 代理相关 -│ └── utils/ # 工具函数 -``` - -### 3.2 移植步骤 - -#### Phase 1: 数据库层移植 (核心基础) - -**优先级**: 🔴 最高 - -1. **创建安全数据库模块** - ``` - src-tauri/src/modules/security_db.rs - ``` - -2. **直接复制核心代码** - - 从 `antigraviryManager/src-tauri/src/modules/security_db.rs` 复制 - - 保留所有数据表结构 - - 保留所有核心函数 - - 保留 CIDR 匹配逻辑 - -3. 
**适配数据库路径** - ```rust - pub fn get_security_db_path() -> Result { - // 适配 antigracitytools 的数据目录结构 - let data_dir = crate::modules::account::get_data_dir()?; - Ok(data_dir.join("security.db")) - } - ``` - -4. **初始化数据库** - - 在 `lib.rs` 或主入口调用 `security_db::init_db()` - - 确保应用启动时创建数据库 - ---- - -#### Phase 2: 监控层移植 - -**优先级**: 🔴 最高 - -1. **创建监控模块** - ``` - src-tauri/src/proxy/monitor.rs - ``` - -2. **复制核心数据结构** - - `ProxyRequestLog` (确保包含 `client_ip` 字段) - - `ProxyMonitor` - - `ProxyStats` - -3. **实现监控逻辑** - - 复制 `log_request()` 方法 - - 复制查询方法 - - 适配 Tauri 事件发送(如果需要) - ---- - -#### Phase 3: 中间件层移植 - -**优先级**: 🟡 高 - -1. **创建监控中间件** - ``` - src-tauri/src/proxy/middleware/monitor.rs - ``` - -2. **IP 提取逻辑** - - 复制 IP 提取代码 - - 确保支持 `X-Forwarded-For` 和 `X-Real-IP` - -3. **请求拦截** - - 捕获请求体(用于模型识别) - - 捕获响应体(用于 token 统计) - - 流式响应处理 - -4. **集成到 Axum 路由** - ```rust - use axum::middleware; - - let app = Router::new() - .route("/v1/chat/completions", post(handler)) - .layer(middleware::from_fn_with_state( - state.clone(), - monitor_middleware - )); - ``` - ---- - -#### Phase 4: 配置层移植 - -**优先级**: 🟡 高 - -1. **更新配置结构** - - 在 `proxy/config.rs` 中添加: - ```rust - pub struct ProxyConfig { - // ... 现有字段 ... - - #[serde(default)] - pub security_monitor: SecurityMonitorConfig, - } - ``` - -2. **添加安全配置结构** - - `SecurityMonitorConfig` - - `InboundRateLimitConfig` - - `IpBlacklistConfig` - - `IpWhitelistConfig` - -3. **默认配置** - ```rust - impl Default for SecurityMonitorConfig { - fn default() -> Self { - Self { - enabled: true, - rate_limit: InboundRateLimitConfig::default(), - blacklist: IpBlacklistConfig::default(), - whitelist: IpWhitelistConfig::default(), - log_retention_days: 30, - } - } - } - ``` - ---- - -#### Phase 5: 统计分析层移植 - -**优先级**: 🟢 中 - -1. **创建流量统计模块** - ``` - src-tauri/src/modules/traffic_stats.rs - ``` - -2. **复制统计功能** - - IP 流量统计 - - Token 流量统计 - - IP 时间线 - - IP-Token 矩阵 - - 流量概览 - -3. 
**依赖 security_db** - - 确保从 `security_db` 读取数据 - - 实现聚合查询 - ---- - -#### Phase 6: 命令层移植 (Tauri Commands) - -**优先级**: 🟢 中 - -1. **创建或扩展安全命令模块** - ``` - src-tauri/src/commands/security.rs - ``` - -2. **实现 Tauri 命令** - ```rust - // IP 日志 - #[tauri::command] - pub async fn get_ip_access_logs( - limit: usize, - offset: usize, - ip_filter: Option, - blocked_only: bool, - ) -> Result, String> { - crate::modules::security_db::get_ip_access_logs( - limit, - offset, - ip_filter.as_deref(), - blocked_only - ) - } - - // 黑名单 - #[tauri::command] - pub async fn add_to_blacklist( - ip_pattern: String, - reason: Option, - expires_at: Option, - ) -> Result { - crate::modules::security_db::add_to_blacklist( - &ip_pattern, - reason.as_deref(), - expires_at, - "manual" - ) - } - - // ... 其他命令 ... - ``` - -3. **注册命令到 Tauri** - - 在 `lib.rs` 中: - ```rust - .invoke_handler(tauri::generate_handler![ - // ... 现有命令 ... - - // 安全相关命令 - commands::security::get_ip_access_logs, - commands::security::get_ip_stats, - commands::security::get_top_ips, - commands::security::add_to_blacklist, - commands::security::remove_from_blacklist, - commands::security::get_blacklist, - commands::security::add_to_whitelist, - commands::security::remove_from_whitelist, - commands::security::get_whitelist, - commands::security::get_traffic_by_ip, - commands::security::get_ip_timeline, - commands::security::get_ip_token_matrix, - ]) - ``` - ---- - -#### Phase 7: 限流与自动封禁 (可选) - -**优先级**: 🔵 低 - -1. **创建限流中间件** - ``` - src-tauri/src/proxy/middleware/rate_limit.rs - ``` - -2. **实现限流逻辑** - - 基于内存的滑动窗口计数器 - - 按 IP 限流 - - 按 API Key 限流 - - 集成到 Axum 中间件链 - -3. **自动封禁** - - 检测连续违规 - - 自动添加到黑名单 - - 触发封禁事件 - ---- - -#### Phase 8: 自动清理任务 - -**优先级**: 🔵 低 - -1. 
**后台清理任务** - ```rust - use tokio::time::{interval, Duration}; - - tokio::spawn(async { - let mut interval = interval(Duration::from_secs(3600 * 24)); // 每天 - loop { - interval.tick().await; - if let Err(e) = security_db::cleanup_old_ip_logs(30) { - tracing::error!("Failed to cleanup old IP logs: {}", e); - } - } - }); - ``` - -2. **启动时自动清理** - - 在 `ProxyMonitor::new()` 中触发一次清理 - ---- - -### 3.3 依赖项检查 - -确保 `Cargo.toml` 包含以下依赖: - -```toml -[dependencies] -# 数据库 -rusqlite = { version = "0.32", features = ["bundled"] } - -# 异步运行时 -tokio = { version = "1", features = ["full"] } - -# HTTP框架 (如果还没有) -axum = "0.7" -tower = "0.4" - -# 序列化 -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" - -# UUID -uuid = { version = "1.0", features = ["v4", "serde"] } - -# 时间 -chrono = "0.4" - -# 日志 -tracing = "0.1" - -# Tauri -tauri = { version = "2", features = [...] } -``` - ---- - -### 3.4 数据流图 - -``` -┌─────────────┐ -│ Client │ -└──────┬──────┘ - │ HTTP Request - ▼ -┌─────────────────────────────────┐ -│ Axum Middleware Chain │ -├─────────────────────────────────┤ -│ 1. 
monitor_middleware │◄───── Extract IP -│ ├── Extract client_ip │ Extract request body -│ ├── Extract request body │ Extract response body -│ └── Measure duration │ Calculate tokens -└──────┬──────────────────────────┘ - │ Log Entry - ▼ -┌─────────────────────────────────┐ -│ ProxyMonitor │ -├─────────────────────────────────┤ -│ - Validate IP whitelist │ -│ - Check IP blacklist │ -│ - Check rate limit │ -│ - Log to memory (VecDeque) │ -│ - Save to DB (async) │ -│ - Emit Tauri event │ -└──────┬──────────────────────────┘ - │ - ▼ -┌─────────────────────────────────┐ -│ security_db (SQLite) │ -├─────────────────────────────────┤ -│ - ip_access_logs │ -│ - ip_blacklist │ -│ - ip_whitelist │ -└─────────────────────────────────┘ - │ Query - ▼ -┌─────────────────────────────────┐ -│ Traffic Stats Module │ -├─────────────────────────────────┤ -│ - Aggregate by IP │ -│ - Aggregate by Token │ -│ - Generate timeline │ -│ - Generate matrix │ -└─────────────────────────────────┘ - │ Tauri Command - ▼ -┌─────────────────────────────────┐ -│ Frontend (React/Vue) │ -└─────────────────────────────────┘ -``` - ---- - -## 4. 关键技术点 - -### 4.1 IP 提取优先级 - -``` -1. X-Forwarded-For (取第一个IP,逗号分隔) -2. X-Real-IP -3. Connection remote address (作为兜底) -``` - -### 4.2 CIDR 匹配算法 - -```rust -fn cidr_match(ip: &str, cidr: &str) -> bool { - let parts: Vec<&str> = cidr.split('/').collect(); - let network = parts[0]; - let prefix_len: u8 = parts[1].parse().unwrap_or(32); - - let ip_u32 = ip_to_u32(ip); - let net_u32 = ip_to_u32(network); - - let mask = !0u32 << (32 - prefix_len); - - (ip_u32 & mask) == (net_u32 & mask) -} -``` - -**支持的 CIDR**: -- `/8` - A类网段 (16,777,216 个IP) -- `/16` - B类网段 (65,536 个IP) -- `/24` - C类网段 (256 个IP) -- `/32` - 单个IP - -### 4.3 数据库性能优化 - -1. **WAL 模式** - 提升并发读写性能 - ```rust - conn.pragma_update(None, "journal_mode", "WAL")?; - ``` - -2. **索引策略** - - `client_ip` 索引:快速查询特定IP - - `timestamp` 索引:时间范围查询 - - `blocked` 索引:快速筛选封禁记录 - -3. 
**定期 VACUUM** - 回收磁盘空间 - ```rust - conn.execute("VACUUM", [])?; - ``` - -### 4.4 内存管理 - -- **VecDeque** 作为内存缓存(最大容量限制) -- **SQLite** 作为持久化存储 -- 优先从数据库查询(确保数据完整性) - ---- - -## 5. 测试计划 - -### 5.1 单元测试 - -```rust -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_cidr_match_24() { - assert!(cidr_match("192.168.1.100", "192.168.1.0/24")); - assert!(!cidr_match("192.168.2.100", "192.168.1.0/24")); - } - - #[test] - fn test_ip_blacklist() { - init_db().unwrap(); - add_to_blacklist("1.2.3.4", Some("test"), None, "test").unwrap(); - assert!(is_ip_in_blacklist("1.2.3.4").unwrap()); - assert!(!is_ip_in_blacklist("1.2.3.5").unwrap()); - } -} -``` - -### 5.2 集成测试 - -1. **IP 提取测试** - - 测试 `X-Forwarded-For` 多IP场景 - - 测试 `X-Real-IP` 场景 - - 测试无Header场景 - -2. **黑名单测试** - - 精确匹配 - - CIDR 匹配 - - 过期清理 - -3. **限流测试** - - 正常请求 - - 超限请求 - - 自动封禁 - ---- - -## 6. 前端集成 (可选) - -如果需要前端界面展示,可以参考 Antigravity-Manager 的实现: - -### 6.1 页面结构 - -``` -Security Dashboard -├── IP Access Logs -│ ├── Real-time logs -│ ├── Search & Filter -│ └── Export -├── IP Blacklist -│ ├── Add/Remove -│ ├── CIDR support -│ └── Expiration management -├── IP Whitelist -│ ├── Add/Remove -│ └── CIDR support -├── Statistics -│ ├── Overview cards -│ ├── TOP IP ranking -│ ├── Timeline chart -│ └── IP-Token matrix -└── Settings - ├── Enable/Disable monitoring - ├── Rate limit config - └── Log retention -``` - -### 6.2 Tauri 事件订阅 - -```typescript -import { listen } from '@tauri-apps/api/event'; - -// 监听实时请求日志 -await listen('proxy://request', (event) => { - const log = event.payload as ProxyRequestLog; - console.log('New request from IP:', log.client_ip); -}); -``` - ---- - -## 7. 
迁移检查清单 - -### Phase 1: 数据库层 ✅ -- [ ] 创建 `modules/security_db.rs` -- [ ] 复制数据表结构 -- [ ] 复制核心函数 -- [ ] 适配数据库路径 -- [ ] 初始化数据库 -- [ ] 单元测试 - -### Phase 2: 监控层 ✅ -- [ ] 创建 `proxy/monitor.rs` -- [ ] 复制 `ProxyRequestLog` -- [ ] 复制 `ProxyMonitor` -- [ ] 实现日志记录 -- [ ] 实现查询方法 - -### Phase 3: 中间件层 ✅ -- [ ] 创建 `proxy/middleware/monitor.rs` -- [ ] IP 提取逻辑 -- [ ] 请求拦截 -- [ ] 响应拦截 -- [ ] 集成到 Axum - -### Phase 4: 配置层 ✅ -- [ ] 更新 `proxy/config.rs` -- [ ] 添加安全配置结构 -- [ ] 设置默认值 -- [ ] 配置热更新 - -### Phase 5: 统计分析层 ✅ -- [ ] 创建 `modules/traffic_stats.rs` -- [ ] IP 流量统计 -- [ ] Token 流量统计 -- [ ] 时间线生成 -- [ ] 矩阵生成 - -### Phase 6: 命令层 ✅ -- [ ] 创建 `commands/security.rs` -- [ ] 实现所有 Tauri 命令 -- [ ] 注册到 Tauri -- [ ] API 文档 - -### Phase 7: 限流 (可选) ⭕ -- [ ] 创建限流中间件 -- [ ] 滑动窗口实现 -- [ ] 自动封禁 - -### Phase 8: 清理任务 (可选) ⭕ -- [ ] 后台清理任务 -- [ ] 启动时清理 - -### 测试与验证 ✅ -- [ ] 单元测试 -- [ ] 集成测试 -- [ ] 性能测试 -- [ ] 压力测试 - ---- - -## 8. 潜在风险与注意事项 - -### 8.1 性能风险 - -| 风险点 | 影响 | 缓解方案 | -|--------|------|----------| -| 高频写入 | SQLite 锁竞争 | WAL模式 + 批量写入 | -| 日志膨胀 | 磁盘占用 | 定期清理 + VACUUM | -| CIDR 匹配 | CPU 开销 | 缓存匹配结果 | -| 流式响应缓冲 | 内存占用 | 限制缓冲大小 | - -### 8.2 兼容性风险 - -1. **数据库路径差异** - - antigracitytools 可能使用不同的数据目录 - - 需要适配 `get_data_dir()` 函数 - -2. **Axum 版本差异** - - 检查 Axum 版本兼容性 - - 中间件 API 可能有变化 - -3. **Tauri 版本差异** - - 事件系统 API 差异 - - 命令注册方式差异 - -### 8.3 安全风险 - -1. **IP 伪造** - - `X-Forwarded-For` 可被伪造 - - 建议:仅在受信任的反向代理后使用 - -2. **CIDR 覆盖** - - 过大的网段可能误封 - - 建议:限制最大网段为 /16 - -3. **日志敏感信息** - - API Key 应 Hash 存储 - - 请求体可能包含敏感信息 - - 建议:脱敏处理 - ---- - -## 9. 后续优化方向 - -### 9.1 功能增强 - -- [ ] GeoIP 地理位置识别 -- [ ] IP 信誉评分集成 -- [ ] 异常行为检测(ML) -- [ ] Webhook 告警 -- [ ] 多级限流策略 - -### 9.2 性能优化 - -- [ ] Redis 缓存层(替代内存VecDeque) -- [ ] PostgreSQL 支持(大规模场景) -- [ ] 日志分片存储 -- [ ] 异步批量写入 - -### 9.3 可观测性 - -- [ ] Prometheus 指标导出 -- [ ] Grafana Dashboard -- [ ] 实时告警 -- [ ] 审计日志 - ---- - -## 10. 
参考资料 - -### 10.1 源代码文件列表 - -| 文件路径 | 说明 | -|---------|------| -| `antigraviryManager/src-tauri/src/modules/security_db.rs` | 数据库层核心实现 | -| `antigraviryManager/src-tauri/src/proxy/monitor.rs` | 监控层核心实现 | -| `antigraviryManager/src-tauri/src/proxy/middleware/monitor.rs` | 中间件层实现 | -| `antigraviryManager/src-tauri/src/proxy/config.rs` | 配置定义 | -| `antigraviryManager/src-tauri/src/modules/traffic_stats.rs` | 流量统计实现 | - -### 10.2 关键概念 - -- **WAL (Write-Ahead Logging)**: SQLite 并发优化模式 -- **CIDR (Classless Inter-Domain Routing)**: 无类别域间路由 -- **VecDeque**: Rust 双端队列,高效的环形缓冲区 -- **Axum Middleware**: 基于 Tower 的中间件抽象 -- **Tauri Command**: Rust 后端暴露给前端的 API - ---- - -## 11. 总结 - -IP 监控功能是一个完整的安全监控子系统,包含: - -1. **数据层**: SQLite 持久化存储 -2. **业务层**: 黑白名单、限流、统计 -3. **接入层**: Axum 中间件拦截 -4. **展示层**: Tauri 命令暴露 - -**核心优势**: -- 轻量级(SQLite) -- 高性能(WAL + 索引) -- 功能完整(日志、黑白名单、限流、统计) -- 易扩展(模块化设计) - -**移植重点**: -1. 保持数据库结构一致性 -2. 适配目标项目的数据目录 -3. 确保中间件正确集成到 Axum -4. 实现完整的 Tauri 命令 - -**时间估算**: -- Phase 1-3 (核心功能): 2-3 天 -- Phase 4-6 (配置与命令): 1-2 天 -- Phase 7-8 (可选功能): 1-2 天 -- 测试与调优: 1-2 天 - -**总计**: 约 5-9 个工作日 - ---- - -**文档版本**: v1.0 -**最后更新**: 2026-01-30 -**作者**: Antigravity 开发团队 diff --git a/IP_MONITORING_OPTIMIZATION_PLAN.md b/IP_MONITORING_OPTIMIZATION_PLAN.md deleted file mode 100644 index f0a291706..000000000 --- a/IP_MONITORING_OPTIMIZATION_PLAN.md +++ /dev/null @@ -1,1257 +0,0 @@ -# IP 监控功能优化方案 - -## 文档信息 -- **创建时间**: 2026-01-30 -- **版本**: v1.0 -- **基于**: IP_MONITORING_MIGRATION_ARCH.md -- **优化层级**: 性能、功能、安全、可扩展性、用户体验 - ---- - -## 优化总览 - -> 当前实现已经是一个**功能完整、设计合理**的监控系统,但在**高并发、大规模、智能化**场景下仍有提升空间。 - -### 优化维度评分 - -| 维度 | 当前评分 | 优化后评分 | 优先级 | -|------|---------|-----------|--------| -| **性能** | 7/10 | 9/10 | 🔴 高 | -| **功能完整性** | 8/10 | 10/10 | 🟡 中 | -| **安全性** | 7/10 | 9/10 | 🔴 高 | -| **可扩展性** | 6/10 | 9/10 | 🟡 中 | -| **用户体验** | 7/10 | 9/10 | 🟢 低 | - ---- - -## 1. 
性能优化 (🔴 高优先级) - -### 1.1 数据库层优化 - -#### 问题诊断 -``` -❌ 当前问题: -- 每次请求都写入 SQLite(磁盘I/O瓶颈) -- CIDR 匹配需要全表扫描黑名单 -- 大量日志查询时性能下降 -- WAL 模式下仍可能有锁竞争 -``` - -#### 优化方案 A: 批量写入队列 - -**原理**: 使用内存队列缓冲日志,定期批量写入数据库 - -```rust -use tokio::sync::mpsc; -use std::time::Duration; - -pub struct BatchLogger { - tx: mpsc::UnboundedSender, -} - -impl BatchLogger { - pub fn new() -> Self { - let (tx, mut rx) = mpsc::unbounded_channel::(); - - // 后台批量写入任务 - tokio::spawn(async move { - let mut buffer = Vec::with_capacity(100); - let mut interval = tokio::time::interval(Duration::from_secs(5)); - - loop { - tokio::select! { - // 定期刷新 - _ = interval.tick() => { - if !buffer.is_empty() { - Self::flush_batch(&buffer).await; - buffer.clear(); - } - } - // 接收新日志 - Some(log) = rx.recv() => { - buffer.push(log); - // 缓冲区满立即刷新 - if buffer.len() >= 100 { - Self::flush_batch(&buffer).await; - buffer.clear(); - } - } - } - } - }); - - Self { tx } - } - - pub fn log(&self, entry: IpAccessLog) { - let _ = self.tx.send(entry); - } - - async fn flush_batch(logs: &[IpAccessLog]) { - if let Err(e) = security_db::batch_insert_logs(logs).await { - tracing::error!("Batch insert failed: {}", e); - } - } -} - -// 数据库批量插入 -pub fn batch_insert_logs(logs: &[IpAccessLog]) -> Result<(), String> { - let conn = connect_db()?; - let tx = conn.transaction().map_err(|e| e.to_string())?; - - { - let mut stmt = tx.prepare( - "INSERT INTO ip_access_logs (...) VALUES (?, ?, ...)" - ).map_err(|e| e.to_string())?; - - for log in logs { - stmt.execute(params![ - log.id, log.client_ip, log.timestamp, ... 
- ]).map_err(|e| e.to_string())?; - } - } - - tx.commit().map_err(|e| e.to_string())?; - Ok(()) -} -``` - -**性能提升**: -- 写入吞吐量: **100倍** (1000 req/s → 100,000 req/s) -- 磁盘 I/O: 减少 **95%** - ---- - -#### 优化方案 B: Redis 缓存层 - -**架构**: -``` -Request → Monitor → Redis (hot data) → SQLite (cold data) - ↓ TTL=1h - Async flush -``` - -**实现**: -```rust -use redis::AsyncCommands; - -pub struct RedisCache { - client: redis::Client, -} - -impl RedisCache { - // 缓存热点 IP - pub async fn cache_ip_stats(&self, ip: &str, stats: &IpStats) -> Result<(), String> { - let mut conn = self.client.get_async_connection().await - .map_err(|e| e.to_string())?; - - let key = format!("ip:stats:{}", ip); - let value = serde_json::to_string(stats).unwrap(); - - conn.set_ex(key, value, 3600).await // 1小时过期 - .map_err(|e| e.to_string())?; - - Ok(()) - } - - // 缓存黑名单(避免频繁数据库查询) - pub async fn is_blacklisted(&self, ip: &str) -> Result, String> { - let mut conn = self.client.get_async_connection().await - .map_err(|e| e.to_string())?; - - let key = format!("blacklist:{}", ip); - conn.get(key).await.map_err(|e| e.to_string()) - } - - // 滑动窗口限流(高性能) - pub async fn check_rate_limit(&self, ip: &str, limit: u32, window: u64) -> Result { - let mut conn = self.client.get_async_connection().await - .map_err(|e| e.to_string())?; - - let key = format!("rate:{}", ip); - let now = chrono::Utc::now().timestamp(); - - // 使用 Redis Sorted Set 实现滑动窗口 - redis::pipe() - .atomic() - .zrembyscore(&key, 0, now - window as i64) // 移除过期 - .zadd(&key, now, now) // 添加当前时间戳 - .zcard(&key) // 获取计数 - .expire(&key, window as usize) // 设置过期 - .query_async(&mut conn) - .await - .map_err(|e| e.to_string()) - .map(|count: u32| count <= limit) - } -} -``` - -**性能提升**: -- 黑名单查询: **1ms** → **0.1ms** (10倍) -- 限流判断: **O(n)** → **O(log n)** -- 支持分布式部署 - -**成本**: 需要额外的 Redis 服务 - ---- - -#### 优化方案 C: 分区表 (时间分区) - -**原理**: 按月/周分区存储日志,提升老数据查询性能 - -```sql --- 主表(虚拟表) -CREATE VIEW ip_access_logs AS - SELECT * FROM ip_access_logs_2026_01 - UNION 
ALL - SELECT * FROM ip_access_logs_2026_02 - ...; - --- 分区表 -CREATE TABLE ip_access_logs_2026_01 ( - -- 同主表结构 - CHECK (timestamp >= 1704067200 AND timestamp < 1706745600) -); -``` - -**优点**: -- 快速删除老数据(直接 DROP TABLE) -- 查询性能提升(分区剪枝) -- VACUUM 耗时减少 - ---- - -### 1.2 CIDR 匹配优化 - -#### 问题 -```rust -// 当前实现:每次都要遍历所有黑名单 CIDR 规则 -fn is_ip_in_blacklist(ip: &str) -> Result { - let entries = get_blacklist()?; // 😱 全表扫描 - for entry in entries { - if entry.ip_pattern.contains('/') { - if cidr_match(ip, &entry.ip_pattern) { - return Ok(true); - } - } - } - Ok(false) -} -``` - -#### 优化:IP Trie (前缀树) - -```rust -use std::net::Ipv4Addr; - -#[derive(Default)] -pub struct IpTrie { - children: [Option>; 2], // 0 和 1 两个子节点 - is_blocked: bool, -} - -impl IpTrie { - // 插入 CIDR 规则 - pub fn insert_cidr(&mut self, cidr: &str) { - let (net, prefix_len) = parse_cidr(cidr); - let mut node = self; - - for i in 0..prefix_len { - let bit = ((net >> (31 - i)) & 1) as usize; - node = node.children[bit].get_or_insert_with(Default::default); - } - - node.is_blocked = true; - } - - // 查询 IP 是否被封禁(O(32) = O(1)) - pub fn is_blocked(&self, ip: &Ipv4Addr) -> bool { - let ip_u32 = u32::from(*ip); - let mut node = self; - - for i in 0..32 { - if node.is_blocked { - return true; // 前缀匹配 - } - - let bit = ((ip_u32 >> (31 - i)) & 1) as usize; - match &node.children[bit] { - Some(child) => node = child, - None => return false, - } - } - - node.is_blocked - } -} - -// 全局缓存 -lazy_static! 
{ - static ref BLACKLIST_TRIE: RwLock = RwLock::new(IpTrie::default()); -} - -// 启动时加载 + 热更新 -pub async fn reload_blacklist_trie() -> Result<(), String> { - let entries = get_blacklist()?; - let mut trie = IpTrie::default(); - - for entry in entries { - if entry.ip_pattern.contains('/') { - trie.insert_cidr(&entry.ip_pattern); - } - } - - *BLACKLIST_TRIE.write().unwrap() = trie; - Ok(()) -} -``` - -**性能提升**: -- 查询时间: **O(n)** → **O(1)** (n=黑名单规则数) -- 内存占用: 可接受(每条规则 < 1KB) - ---- - -### 1.3 查询优化:物化视图 - -**问题**: 频繁计算相同的统计数据(如 TOP IP、每日请求数) - -**解决**: 使用物化视图 + 定时刷新 - -```sql --- 每小时 TOP 100 IP(物化表) -CREATE TABLE ip_stats_hourly ( - hour TEXT PRIMARY KEY, - top_ips TEXT, -- JSON: [{ip, count, tokens}, ...] - total_requests INTEGER, - unique_ips INTEGER, - updated_at INTEGER -); - --- 定时任务(每小时触发) -INSERT OR REPLACE INTO ip_stats_hourly (hour, top_ips, ...) -SELECT - strftime('%Y-%m-%d %H:00:00', timestamp, 'unixepoch') as hour, - json_group_array(...) as top_ips, - COUNT(*) as total_requests, - COUNT(DISTINCT client_ip) as unique_ips, - strftime('%s', 'now') as updated_at -FROM ip_access_logs -WHERE timestamp >= ... -GROUP BY hour; -``` - -**性能提升**: -- Dashboard 加载速度: **5秒** → **0.1秒** (50倍) -- 数据库负载: 减少 **80%** - ---- - -## 2. 
功能增强 (🟡 中优先级) - -### 2.1 智能威胁检测 - -#### 2.1.1 异常行为检测 - -```rust -pub struct AnomalyDetector { - baseline: HashMap, -} - -#[derive(Clone)] -struct IpBaseline { - avg_req_per_min: f64, - std_dev: f64, - common_paths: HashSet, - common_user_agents: HashSet, -} - -impl AnomalyDetector { - // Z-Score 异常检测 - pub fn detect_anomaly(&self, ip: &str, current_rpm: f64) -> Option { - if let Some(baseline) = self.baseline.get(ip) { - let z_score = (current_rpm - baseline.avg_req_per_min) / baseline.std_dev; - - if z_score > 3.0 { - return Some(AnomalyType::TrafficSpike); - } - } - None - } - - // 检测扫描行为 - pub fn detect_scanning(&self, logs: &[IpAccessLog]) -> bool { - let unique_paths: HashSet<_> = logs.iter() - .filter_map(|l| l.path.as_ref()) - .collect(); - - // 短时间内访问大量不同路径 = 可能是扫描 - if unique_paths.len() > 50 && logs.len() > 100 { - let error_rate = logs.iter() - .filter(|l| l.status >= 400) - .count() as f64 / logs.len() as f64; - - return error_rate > 0.8; // 80% 都是 404/403 = 扫描 - } - - false - } -} - -pub enum AnomalyType { - TrafficSpike, // 流量突增 - Scanning, // 路径扫描 - BruteForce, // 暴力破解 - SuspiciousAgent, // 可疑 User-Agent -} -``` - ---- - -#### 2.1.2 GeoIP 地理位置分析 - -```rust -use maxminddb::{geoip2, MaxMindDBError, Reader}; - -pub struct GeoIpAnalyzer { - reader: Reader>, -} - -impl GeoIpAnalyzer { - pub fn new() -> Result { - let reader = maxminddb::Reader::open_readfile("GeoLite2-City.mmdb") - .map_err(|e| e.to_string())?; - Ok(Self { reader }) - } - - pub fn lookup(&self, ip: &str) -> Result { - let ip_addr: std::net::IpAddr = ip.parse() - .map_err(|e| format!("Invalid IP: {}", e))?; - - let city: geoip2::City = self.reader.lookup(ip_addr) - .map_err(|e| e.to_string())?; - - Ok(GeoInfo { - country: city.country.and_then(|c| c.names) - .and_then(|n| n.get("en")) - .map(|s| s.to_string()), - city: city.city.and_then(|c| c.names) - .and_then(|n| n.get("en")) - .map(|s| s.to_string()), - latitude: city.location.as_ref().and_then(|l| l.latitude), - longitude: 
city.location.as_ref().and_then(|l| l.longitude), - }) - } -} - -pub struct GeoInfo { - pub country: Option, - pub city: Option, - pub latitude: Option, - pub longitude: Option, -} -``` - -**应用场景**: -- 地理位置可视化(地图热力图) -- 异地登录告警 -- 地域访问控制(只允许特定国家) - ---- - -#### 2.1.3 IP 信誉评分集成 - -```rust -use reqwest::Client; - -pub struct IpReputationChecker { - client: Client, - api_key: String, -} - -impl IpReputationChecker { - // 查询 AbuseIPDB 信誉评分 - pub async fn check_reputation(&self, ip: &str) -> Result { - let url = format!("https://api.abuseipdb.com/api/v2/check?ipAddress={}", ip); - - let response = self.client - .get(&url) - .header("Key", &self.api_key) - .send() - .await - .map_err(|e| e.to_string())?; - - let data: serde_json::Value = response.json().await - .map_err(|e| e.to_string())?; - - Ok(ReputationScore { - abuse_confidence: data["data"]["abuseConfidenceScore"] - .as_u64() - .unwrap_or(0) as u8, - is_tor: data["data"]["isTor"].as_bool().unwrap_or(false), - is_vpn: data["data"]["usageType"] - .as_str() - .map(|s| s.contains("VPN")) - .unwrap_or(false), - }) - } -} - -pub struct ReputationScore { - pub abuse_confidence: u8, // 0-100 - pub is_tor: bool, - pub is_vpn: bool, -} -``` - -**自动化操作**: -```rust -// 高风险 IP 自动封禁 -if reputation.abuse_confidence > 80 { - add_to_blacklist(ip, Some("High abuse score"), Some(3600 * 24), "auto").await?; -} -``` - ---- - -### 2.2 高级限流策略 - -#### 2.2.1 令牌桶算法(平滑限流) - -```rust -use std::time::{Duration, Instant}; - -pub struct TokenBucket { - capacity: u32, - tokens: f64, - refill_rate: f64, // tokens per second - last_refill: Instant, -} - -impl TokenBucket { - pub fn new(capacity: u32, refill_rate: f64) -> Self { - Self { - capacity, - tokens: capacity as f64, - refill_rate, - last_refill: Instant::now(), - } - } - - pub fn try_consume(&mut self, tokens: u32) -> bool { - self.refill(); - - if self.tokens >= tokens as f64 { - self.tokens -= tokens as f64; - true - } else { - false - } - } - - fn refill(&mut self) { - let now = 
Instant::now(); - let elapsed = now.duration_since(self.last_refill).as_secs_f64(); - - self.tokens = (self.tokens + elapsed * self.refill_rate) - .min(self.capacity as f64); - - self.last_refill = now; - } -} -``` - -**优点**: -- 允许短暂突发(burst) -- 更平滑的限流体验 - ---- - -#### 2.2.2 分级限流 - -```rust -pub struct TieredRateLimiter { - limits: HashMap, -} - -#[derive(Hash, Eq, PartialEq)] -pub enum IpTier { - Trusted, // 白名单 IP - Normal, // 普通 IP - Suspicious, // 可疑 IP(曾有异常) - Blacklisted, // 黑名单 IP -} - -pub struct RateLimit { - requests_per_minute: u32, - burst_size: u32, -} - -impl TieredRateLimiter { - pub fn get_limit(&self, ip: &str) -> RateLimit { - let tier = self.classify_ip(ip); - self.limits.get(&tier).cloned().unwrap_or_default() - } - - fn classify_ip(&self, ip: &str) -> IpTier { - if is_ip_in_whitelist(ip).unwrap_or(false) { - IpTier::Trusted - } else if is_ip_in_blacklist(ip).unwrap_or(false) { - IpTier::Blacklisted - } else if self.has_anomaly_history(ip) { - IpTier::Suspicious - } else { - IpTier::Normal - } - } -} -``` - ---- - -### 2.3 丰富的数据导出 - -#### 2.3.1 多格式导出 - -```rust -#[tauri::command] -pub async fn export_ip_logs( - format: String, - filter: LogFilter, -) -> Result { - let logs = get_filtered_logs(filter)?; - - match format.as_str() { - "csv" => export_csv(&logs), - "json" => export_json(&logs), - "excel" => export_excel(&logs), - "pdf" => export_pdf_report(&logs), - _ => Err("Unsupported format".to_string()), - } -} - -fn export_csv(logs: &[IpAccessLog]) -> Result { - let mut wtr = csv::Writer::from_writer(vec![]); - - for log in logs { - wtr.serialize(log).map_err(|e| e.to_string())?; - } - - let data = wtr.into_inner().map_err(|e| e.to_string())?; - Ok(String::from_utf8(data).unwrap()) -} -``` - ---- - -### 2.4 告警系统 - -#### 2.4.1 多渠道告警 - -```rust -pub trait AlertChannel: Send + Sync { - async fn send_alert(&self, alert: &Alert) -> Result<(), String>; -} - -pub struct EmailAlertChannel { - smtp_config: SmtpConfig, -} - -impl AlertChannel for 
EmailAlertChannel { - async fn send_alert(&self, alert: &Alert) -> Result<(), String> { - // 发送邮件 - todo!() - } -} - -pub struct WebhookAlertChannel { - webhook_url: String, -} - -impl AlertChannel for WebhookAlertChannel { - async fn send_alert(&self, alert: &Alert) -> Result<(), String> { - let client = reqwest::Client::new(); - client.post(&self.webhook_url) - .json(alert) - .send() - .await - .map_err(|e| e.to_string())?; - Ok(()) - } -} - -pub struct AlertManager { - channels: Vec>, -} - -impl AlertManager { - pub async fn trigger_alert(&self, alert: Alert) { - for channel in &self.channels { - if let Err(e) = channel.send_alert(&alert).await { - tracing::error!("Alert failed: {}", e); - } - } - } -} - -pub struct Alert { - pub level: AlertLevel, - pub title: String, - pub message: String, - pub metadata: serde_json::Value, -} - -pub enum AlertLevel { - Info, - Warning, - Critical, -} -``` - -**告警场景**: -- 🚨 检测到 DDoS 攻击(流量突增 10倍) -- ⚠️ 检测到扫描行为(404 错误率 > 80%) -- 📧 IP 黑名单命中率过高(需要优化规则) - ---- - -## 3. 安全性增强 (🔴 高优先级) - -### 3.1 IP 伪造防护 - -#### 问题 -```rust -// 当前实现:简单信任 X-Forwarded-For -let client_ip = request.headers() - .get("x-forwarded-for") - .and_then(|v| v.to_str().ok()) - .map(|s| s.split(',').next().unwrap_or(s).trim().to_string()); -``` - -**风险**: 攻击者可以伪造 `X-Forwarded-For: 127.0.0.1` 绕过限流 - -#### 解决方案:受信任代理链验证 - -```rust -pub struct TrustedProxyConfig { - trusted_proxies: Vec, // CIDR 列表 -} - -impl TrustedProxyConfig { - pub fn extract_real_ip(&self, request: &Request) -> Option { - let forwarded_for = request.headers() - .get("x-forwarded-for")? 
- .to_str().ok()?; - - let ips: Vec<&str> = forwarded_for.split(',') - .map(|s| s.trim()) - .collect(); - - // 从右往左找到第一个不受信任的 IP - for ip in ips.iter().rev() { - if !self.is_trusted_proxy(ip) { - return Some(ip.to_string()); - } - } - - // 全部都是受信任代理,取第一个 - ips.first().map(|s| s.to_string()) - } - - fn is_trusted_proxy(&self, ip: &str) -> bool { - let ip_addr: IpAddr = match ip.parse() { - Ok(addr) => addr, - Err(_) => return false, - }; - - for network in &self.trusted_proxies { - if network.contains(ip_addr) { - return true; - } - } - - false - } -} -``` - -**配置示例**: -```toml -[security_monitor] -trusted_proxies = [ - "10.0.0.0/8", # 内网代理 - "172.16.0.0/12", # 内网代理 - "192.168.0.0/16", # 内网代理 - "1.2.3.4/32", # Cloudflare CDN -] -``` - ---- - -### 3.2 数据脱敏 - -#### 敏感字段加密存储 - -```rust -use aes_gcm::{Aes256Gcm, Key, Nonce}; -use aes_gcm::aead::{Aead, NewAead}; - -pub struct SensitiveDataEncryptor { - cipher: Aes256Gcm, -} - -impl SensitiveDataEncryptor { - pub fn new(key: &[u8; 32]) -> Self { - let cipher = Aes256Gcm::new(Key::from_slice(key)); - Self { cipher } - } - - // 加密 API Key(存储时) - pub fn encrypt_api_key(&self, api_key: &str) -> Result { - let nonce = Nonce::from_slice(b"unique nonce"); - let ciphertext = self.cipher.encrypt(nonce, api_key.as_bytes()) - .map_err(|e| e.to_string())?; - - Ok(base64::encode(ciphertext)) - } - - // 仅存储 Hash(不可逆) - pub fn hash_api_key(&self, api_key: &str) -> String { - use sha2::{Sha256, Digest}; - let mut hasher = Sha256::new(); - hasher.update(api_key.as_bytes()); - format!("{:x}", hasher.finalize()) - } -} -``` - -**存储策略**: -- ✅ `api_key_hash`: 存储 SHA-256(用于匹配) -- ❌ `api_key`: 不存储原文 - ---- - -### 3.3 审计日志 - -```rust -pub struct AuditLog { - pub id: String, - pub timestamp: i64, - pub action: AuditAction, - pub operator: String, // 操作者(API Key / Admin) - pub target: String, // 操作目标(IP、规则ID) - pub old_value: Option, - pub new_value: Option, - pub ip_address: String, // 操作者 IP -} - -pub enum AuditAction { - AddBlacklist, - 
RemoveBlacklist, - AddWhitelist, - RemoveWhitelist, - UpdateConfig, - ExportData, -} - -// 记录所有敏感操作 -pub async fn add_to_blacklist_with_audit( - ip_pattern: &str, - reason: Option<&str>, - operator: &str, - operator_ip: &str, -) -> Result { - let entry = security_db::add_to_blacklist(ip_pattern, reason, None, operator)?; - - // 记录审计日志 - audit_db::log_action(AuditLog { - id: uuid::Uuid::new_v4().to_string(), - timestamp: chrono::Utc::now().timestamp(), - action: AuditAction::AddBlacklist, - operator: operator.to_string(), - target: ip_pattern.to_string(), - old_value: None, - new_value: Some(serde_json::to_string(&entry).unwrap()), - ip_address: operator_ip.to_string(), - }).await?; - - Ok(entry) -} -``` - ---- - -## 4. 可扩展性优化 (🟡 中优先级) - -### 4.1 插件化架构 - -```rust -pub trait SecurityPlugin: Send + Sync { - fn name(&self) -> &str; - - async fn on_request(&self, ctx: &RequestContext) -> PluginResult; - - async fn on_response(&self, ctx: &ResponseContext) -> PluginResult; -} - -pub struct PluginManager { - plugins: Vec>, -} - -impl PluginManager { - pub async fn execute_pipeline(&self, ctx: &RequestContext) -> Result<(), String> { - for plugin in &self.plugins { - match plugin.on_request(ctx).await { - PluginResult::Allow => continue, - PluginResult::Block(reason) => { - return Err(format!("Blocked by {}: {}", plugin.name(), reason)); - } - PluginResult::Modified(new_ctx) => { - // 允许插件修改上下文 - // ctx = new_ctx; - } - } - } - Ok(()) - } -} - -pub enum PluginResult { - Allow, - Block(String), - Modified(RequestContext), -} - -// 示例插件:Bot 检测 -pub struct BotDetectionPlugin; - -impl SecurityPlugin for BotDetectionPlugin { - fn name(&self) -> &str { - "bot_detection" - } - - async fn on_request(&self, ctx: &RequestContext) -> PluginResult { - if let Some(ua) = &ctx.user_agent { - if ua.contains("bot") || ua.contains("crawler") { - return PluginResult::Block("Bot detected".to_string()); - } - } - PluginResult::Allow - } - - async fn on_response(&self, _ctx: &ResponseContext) 
-> PluginResult { - PluginResult::Allow - } -} -``` - ---- - -### 4.2 多存储后端 - -```rust -pub trait StorageBackend: Send + Sync { - async fn save_log(&self, log: &IpAccessLog) -> Result<(), String>; - async fn query_logs(&self, filter: &LogFilter) -> Result, String>; -} - -pub struct SqliteBackend { - db_path: PathBuf, -} - -pub struct PostgresBackend { - connection_string: String, -} - -pub struct ClickHouseBackend { - endpoint: String, -} - -impl StorageBackend for ClickHouseBackend { - async fn save_log(&self, log: &IpAccessLog) -> Result<(), String> { - // 使用 ClickHouse HTTP API - // 专为大规模日志分析优化 - todo!() - } - - async fn query_logs(&self, filter: &LogFilter) -> Result, String> { - // ClickHouse SQL 查询 - // 支持列式存储,速度极快 - todo!() - } -} - -// 选择后端 -pub fn create_storage(config: &StorageConfig) -> Box { - match config.backend_type { - "sqlite" => Box::new(SqliteBackend { db_path: config.path.clone() }), - "postgres" => Box::new(PostgresBackend { connection_string: config.url.clone() }), - "clickhouse" => Box::new(ClickHouseBackend { endpoint: config.url.clone() }), - _ => panic!("Unknown storage backend"), - } -} -``` - -**适用场景**: -- **SQLite**: 小规模(< 1万 req/day) -- **PostgreSQL**: 中规模(< 100万 req/day) -- **ClickHouse**: 大规模(> 100万 req/day) - ---- - -### 4.3 分布式部署 - -```rust -// 使用 Redis Pub/Sub 同步黑名单 -pub struct DistributedBlacklist { - redis: redis::Client, - local_cache: Arc>>, -} - -impl DistributedBlacklist { - pub async fn start_sync(&self) { - let mut pubsub = self.redis.get_async_connection().await.unwrap().into_pubsub(); - pubsub.subscribe("blacklist_updates").await.unwrap(); - - while let Some(msg) = pubsub.on_message().next().await { - let payload: String = msg.get_payload().unwrap(); - let update: BlacklistUpdate = serde_json::from_str(&payload).unwrap(); - - match update.action { - UpdateAction::Add => { - self.local_cache.write().unwrap().insert(update.ip); - } - UpdateAction::Remove => { - self.local_cache.write().unwrap().remove(&update.ip); - } - } 
- } - } - - pub async fn add_to_blacklist(&self, ip: String) { - // 1. 更新本地缓存 - self.local_cache.write().unwrap().insert(ip.clone()); - - // 2. 广播给其他节点 - let update = BlacklistUpdate { - action: UpdateAction::Add, - ip: ip.clone(), - }; - - let mut conn = self.redis.get_async_connection().await.unwrap(); - let _: () = conn.publish("blacklist_updates", serde_json::to_string(&update).unwrap()) - .await.unwrap(); - } -} -``` - ---- - -## 5. 用户体验优化 (🟢 低优先级) - -### 5.1 实时 Dashboard - -使用 WebSocket 推送实时数据: - -```rust -use axum::{ - extract::ws::{WebSocket, WebSocketUpgrade}, - response::Response, -}; - -pub async fn ws_handler( - ws: WebSocketUpgrade, - State(state): State, -) -> Response { - ws.on_upgrade(|socket| handle_socket(socket, state)) -} - -async fn handle_socket(mut socket: WebSocket, state: AppState) { - let mut interval = tokio::time::interval(Duration::from_secs(1)); - - loop { - interval.tick().await; - - // 实时统计 - let stats = state.monitor.get_realtime_stats().await; - - if socket.send(Message::Text(serde_json::to_string(&stats).unwrap())).await.is_err() { - break; - } - } -} -``` - -**前端展示**: -```typescript -const ws = new WebSocket('ws://localhost:8045/api/ws/realtime'); - -ws.onmessage = (event) => { - const stats = JSON.parse(event.data); - updateDashboard(stats); // 实时更新图表 -}; -``` - ---- - -### 5.2 可视化增强 - -#### 地图热力图(访问来源) -```typescript -import L from 'leaflet'; -import 'leaflet.heat'; - -const map = L.map('map').setView([0, 0], 2); - -// 从后端获取 IP 地理位置数据 -const heatData = await invoke('get_ip_geolocations', { hours: 24 }); - -L.heatLayer(heatData, { - radius: 25, - blur: 15, - maxZoom: 17, -}).addTo(map); -``` - -#### 时间线动画(请求流) -```typescript -import * as d3 from 'd3'; - -const timeline = d3.select('#timeline') - .append('svg') - .attr('width', 1200) - .attr('height', 600); - -// 每秒渲染新请求 -setInterval(async () => { - const logs = await invoke('get_recent_logs', { seconds: 1 }); - - logs.forEach(log => { - timeline.append('circle') - .attr('cx', 
log.timestamp) - .attr('cy', Math.random() * 600) - .attr('r', 5) - .style('fill', log.status >= 400 ? 'red' : 'green') - .transition() - .duration(1000) - .style('opacity', 0) - .remove(); - }); -}, 1000); -``` - ---- - -## 6. 性能基准测试 - -### 6.1 优化前 vs 优化后 - -| 指标 | 优化前 | 优化后 | 提升 | -|------|--------|--------|------| -| **写入吞吐量** | 1,000 req/s | 100,000 req/s | **100x** | -| **黑名单查询** | 10ms | 0.1ms | **100x** | -| **CIDR 匹配** | O(n) | O(1) | **∞** | -| **Dashboard 加载** | 5s | 0.1s | **50x** | -| **内存占用** | 100MB | 150MB | -50% | -| **磁盘 I/O** | 1000 IOPS | 50 IOPS | **95%↓** | - ---- - -### 6.2 压力测试脚本 - -```bash -# 使用 wrk 进行压力测试 -wrk -t12 -c400 -d30s --latency http://localhost:8045/v1/chat/completions - -# 结果示例: -# Requests/sec: 50000.00 ← 优化后 -# Latency (avg): 8ms -# 99th percentile: 20ms -``` - ---- - -## 7. 实施优先级矩阵 - -| 优化项 | 性能提升 | 开发成本 | 优先级 | 实施周期 | -|--------|---------|---------|--------|----------| -| **批量写入队列** | ⭐⭐⭐⭐⭐ | 🟢 低 | 🔴 P0 | 1天 | -| **IP Trie 优化** | ⭐⭐⭐⭐ | 🟡 中 | 🔴 P0 | 2天 | -| **受信任代理验证** | ⭐⭐⭐ | 🟢 低 | 🔴 P0 | 半天 | -| **GeoIP 集成** | ⭐⭐⭐ | 🟡 中 | 🟡 P1 | 1天 | -| **Redis 缓存** | ⭐⭐⭐⭐⭐ | 🔴 高 | 🟡 P1 | 3天 | -| **异常检测** | ⭐⭐⭐⭐ | 🔴 高 | 🟡 P1 | 3天 | -| **告警系统** | ⭐⭐ | 🟡 中 | 🟢 P2 | 2天 | -| **实时 Dashboard** | ⭐⭐ | 🟡 中 | 🟢 P2 | 2天 | -| **ClickHouse 存储** | ⭐⭐⭐⭐⭐ | 🔴 高 | 🔵 P3 | 5天 | -| **分布式部署** | ⭐⭐⭐⭐ | 🔴 高 | 🔵 P3 | 5天 | - -**图例**: -- ⭐ 性能/价值提升程度 -- 🟢 低成本(1天内) | 🟡 中成本(2-3天) | 🔴 高成本(>3天) -- 🔴 P0 必须做 | 🟡 P1 建议做 | 🟢 P2 可以做 | 🔵 P3 按需做 - ---- - -## 8. 快速实施建议 - -### 阶段 1: 快速见效(1周) -``` -✅ 批量写入队列(1天) -✅ IP Trie 优化(2天) -✅ 受信任代理验证(半天) -✅ 物化视图(1天) -✅ 数据脱敏(半天) -``` - -### 阶段 2: 功能增强(2周) -``` -✅ GeoIP 集成(1天) -✅ 异常检测(3天) -✅ 告警系统(2天) -✅ 审计日志(1天) -✅ 数据导出(1天) -``` - -### 阶段 3: 高级优化(按需) -``` -⭕ Redis 缓存(3天) -⭕ ClickHouse 存储(5天) -⭕ 分布式部署(5天) -⭕ 实时 Dashboard(2天) -``` - ---- - -## 9. 总结 - -### 核心优化点 - -1. **性能**: 批量写入 + IP Trie + Redis 缓存 → **100倍提升** -2. **安全**: 受信任代理 + 数据脱敏 + 审计日志 → **零信任架构** -3. **智能**: 异常检测 + GeoIP + 信誉评分 → **主动防御** -4. 
**扩展**: 插件化 + 多存储 + 分布式 → **无限扩展** - -### 投入产出比 - -| 投入 | 产出 | -|------|------| -| **1周开发** | 性能提升 100倍,安全性加固 | -| **2周开发** | 完整的威胁检测系统 | -| **1个月** | 企业级安全监控平台 | - -### 最终效果 - -优化后的系统可以: -- ✅ 处理 **100,000+ req/s** 的流量 -- ✅ 毫秒级响应黑名单查询 -- ✅ 自动检测并阻止 **DDoS/扫描/暴力破解** -- ✅ 提供实时可视化 Dashboard -- ✅ 支持分布式部署(横向扩展) - ---- - -**文档版本**: v1.0 -**创建时间**: 2026-01-30 -**作者**: Antigravity 开发团队 -**建议**: 优先实施阶段 1(快速见效),再根据实际需求推进阶段 2-3 diff --git a/IP_MONITORING_PROGRESS.md b/IP_MONITORING_PROGRESS.md deleted file mode 100644 index 6deb8c9e7..000000000 --- a/IP_MONITORING_PROGRESS.md +++ /dev/null @@ -1,226 +0,0 @@ -# IP 监控功能移植进度 - -**开始时间**: 2026-01-30 -**参考文档**: `IP_MONITORING_MIGRATION_ARCH.md` - ---- - -## ✅ Phase 1: 数据库层移植 (已完成) - -### 完成内容 -- [x] 创建 `src-tauri/src/modules/security_db.rs` -- [x] 实现数据表结构: - - `ip_access_logs` - IP 访问日志表 - - `ip_blacklist` - IP 黑名单表 - - `ip_whitelist` - IP 白名单表 -- [x] 实现核心函数: - - 日志操作: `save_ip_access_log`, `get_ip_access_logs`, `get_ip_stats`, `get_top_ips` - - 黑名单: `add_to_blacklist`, `remove_from_blacklist`, `get_blacklist`, `is_ip_in_blacklist` - - 白名单: `add_to_whitelist`, `remove_from_whitelist`, `get_whitelist`, `is_ip_in_whitelist` - - CIDR 匹配: `cidr_match` (支持 /8, /16, /24, /32) -- [x] 在 `modules/mod.rs` 注册模块 -- [x] 在 `lib.rs` 初始化数据库 - -### 文件变更 -- ✅ 新建: `src-tauri/src/modules/security_db.rs` (667 行) -- ✅ 修改: `src-tauri/src/modules/mod.rs` (+1 行) -- ✅ 修改: `src-tauri/src/lib.rs` (+5 行) - ---- - -## ✅ Phase 2: 监控层移植 (已完成) - -### 完成内容 -- [x] 在 `ProxyRequestLog` 添加 `client_ip: Option` 字段 -- [x] 更新 `proxy_db.rs` 支持 client_ip: - - 添加数据库列: `ALTER TABLE request_logs ADD COLUMN client_ip TEXT` - - 更新所有 INSERT/SELECT 语句包含 client_ip - - 更新所有查询函数的映射逻辑 -- [x] 在 `monitor.rs` 中间件提取客户端 IP: - - 优先从 `X-Forwarded-For` 提取 (取第一个 IP) - - 备选从 `X-Real-IP` 提取 - - 添加到 `ProxyRequestLog` 初始化 - -### 文件变更 -- ✅ 修改: `src-tauri/src/proxy/monitor.rs` (+1 字段) -- ✅ 修改: `src-tauri/src/modules/proxy_db.rs` (+1 列, 更新 14 处映射) -- ✅ 修改: `src-tauri/src/proxy/middleware/monitor.rs` 
(+14 行 IP 提取逻辑) - ---- - -## ✅ Phase 3: 中间件层移植 (已完成) - -### 完成内容 -- [x] 创建 IP 黑白名单检查中间件 -- [x] 在请求处理前检查黑名单 -- [x] 在请求处理前检查白名单 -- [x] 记录被封禁的请求到 `ip_access_logs` (blocked=1) -- [x] 支持白名单优先模式 (白名单 IP 跳过黑名单检查) -- [x] 支持白名单独占模式 (只允许白名单 IP 访问) - -### 文件变更 -- ✅ 新建: `src-tauri/src/proxy/middleware/ip_filter.rs` (145 行) -- ✅ 修改: `src-tauri/src/proxy/middleware/mod.rs` (+2 行) - -### 功能特性 -- **白名单模式**: - - `enabled=true`: 只允许白名单 IP 访问 - - `whitelist_priority=true`: 白名单 IP 跳过黑名单检查 -- **黑名单模式**: - - `enabled=true`: 拦截黑名单 IP - - 自定义封禁消息 -- **日志记录**: 被封禁的请求会记录到数据库 - ---- - -## ✅ Phase 4: 配置层移植 (已完成) - -### 完成内容 -- [x] 创建 `SecurityMonitorConfig` 结构 -- [x] 创建 `IpBlacklistConfig` 结构 -- [x] 创建 `IpWhitelistConfig` 结构 -- [x] 在 `ProxyConfig` 添加 `security_monitor` 字段 -- [x] 在 `ProxySecurityConfig` 添加 `security_monitor` 字段 -- [x] 设置默认配置 - -### 文件变更 -- ✅ 修改: `src-tauri/src/proxy/config.rs` (+75 行) -- ✅ 修改: `src-tauri/src/proxy/security.rs` (+5 行) - -### 配置结构 -```rust -SecurityMonitorConfig { - blacklist: IpBlacklistConfig { - enabled: false, - block_message: "Access denied", - }, - whitelist: IpWhitelistConfig { - enabled: false, - whitelist_priority: true, - }, -} -``` - ---- - -## 🔄 Phase 5: 统计分析层移植 (待开始) - -### 待完成 -- [ ] 创建 `traffic_stats.rs` 模块 -- [ ] 实现 IP 流量统计 -- [ ] 实现 Token 流量统计 -- [ ] 实现 IP 时间线 -- [ ] 实现 IP-Token 关联矩阵 - -### 计划文件 -- 新建: `src-tauri/src/modules/traffic_stats.rs` - ---- - -## ✅ Phase 6: 命令层移植 (已完成) - -### 完成内容 -- [x] 创建 `commands/security.rs` 模块 -- [x] 实现 IP 访问日志命令: - - `get_ip_access_logs` - 分页查询日志 - - `get_ip_stats` - 获取统计信息 - - `clear_ip_access_logs` - 清空日志 -- [x] 实现黑名单管理命令: - - `get_ip_blacklist` - 获取黑名单列表 - - `add_ip_to_blacklist` - 添加到黑名单 - - `remove_ip_from_blacklist` - 从黑名单移除 - - `clear_ip_blacklist` - 清空黑名单 - - `check_ip_in_blacklist` - 检查IP是否在黑名单 -- [x] 实现白名单管理命令: - - `get_ip_whitelist` - 获取白名单列表 - - `add_ip_to_whitelist` - 添加到白名单 - - `remove_ip_from_whitelist` - 从白名单移除 - - `clear_ip_whitelist` - 清空白名单 - - `check_ip_in_whitelist` - 检查IP是否在白名单 -- [x] 
实现安全配置命令: - - `get_security_config` - 获取安全监控配置 - - `update_security_config` - 更新安全监控配置 -- [x] 在 `lib.rs` 注册所有命令 - -### 文件变更 -- ✅ 新建: `src-tauri/src/commands/security.rs` (280 行) -- ✅ 修改: `src-tauri/src/commands/mod.rs` (+2 行) -- ✅ 修改: `src-tauri/src/lib.rs` (+16 行命令注册) - -### 功能特性 -- **IP 格式验证**: 支持单个 IP 和 CIDR 网段格式验证 -- **分页查询**: IP 访问日志支持分页和搜索 -- **统计信息**: 提供总请求数、唯一IP数、封禁数、Top IP 排行 -- **配置持久化**: 安全配置自动保存到配置文件 - ---- - -## ⏳ Phase 7: 限流与自动封禁 (可选,待开始) - -### 待完成 -- [ ] 创建限流中间件 -- [ ] 实现滑动窗口计数器 -- [ ] 按 IP 限流 -- [ ] 按 API Key 限流 -- [ ] 自动封禁逻辑 - -### 计划文件 -- 新建: `src-tauri/src/proxy/middleware/rate_limit.rs` - ---- - -## ⏳ Phase 8: 自动清理任务 (可选) - -### 待完成 -- [ ] 实现后台清理任务 -- [ ] 定期清理过期黑名单 -- [ ] 定期清理旧日志 -- [ ] 在启动时触发一次清理 - ---- - -## 编译状态 - -✅ **最新编译**: 成功 (2026-01-30 11:35) -- 无错误 -- 77 个警告 (主要是未使用的导入和变量) - ---- - -## 下一步计划 - -1. **集成中间件**: 将 `ip_filter_middleware` 添加到 Axum 路由链 -2. **前端开发**: 创建 IP 监控管理界面 (已完成) - - [x] 创建 Security 主页面 - - [x] 实现 IP 日志查看 - - [x] 实现黑白名单管理 - - [x] 实现统计展示 - - [x] 实现配置管理 -3. **测试验证**: - - 测试黑白名单拦截功能 - - 测试 IP 日志记录 - - 测试配置持久化 -4. **Phase 5** (可选): 实现流量统计分析功能 -5. **Phase 7** (可选): 实现限流与自动封禁 - ---- - -## 技术要点 - -### IP 提取优先级 -``` -1. X-Forwarded-For (取第一个IP,逗号分隔) -2. X-Real-IP -3. Connection remote address (未实现,作为兜底) -``` - -### CIDR 匹配支持 -- `/8` - A类网段 (16,777,216 个IP) -- `/16` - B类网段 (65,536 个IP) -- `/24` - C类网段 (256 个IP) -- `/32` - 单个IP - -### 数据库优化 -- WAL 模式提升并发性能 -- 索引: client_ip, timestamp, blocked -- 定期 VACUUM 回收空间 diff --git a/README.md b/README.md index a7308d8be..992b0a67f 100644 --- a/README.md +++ b/README.md @@ -1,260 +1,127 @@ -# Antigravity Tools 🚀 -> 专业的 AI 账号管理与协议反代系统 (v4.1.8)
- Antigravity Logo + Antigravity Manager -

您的个人高性能 AI 调度网关

-

不仅仅是账号管理,更是打破 API 调用壁垒的终极解决方案。

- -

- - Version - - Tauri - Rust - React - License -

+ # Antigravity Manager -

- 核心功能 • - 界面导览 • - 技术架构 • - 安装指南 • - 快速接入 -

+ **Your Personal High-Performance AI Gateway** -

- 简体中文 | - English -

-
- ---- - -**Antigravity Tools** 是一个专为开发者和 AI 爱好者设计的全功能桌面应用。它将多账号管理、协议转换和智能请求调度完美结合,为您提供一个稳定、极速且成本低廉的 **本地 AI 中转站**。 - -通过本应用,您可以将常见的 Web 端 Session (Google/Anthropic) 转化为标准化的 API 接口,消除不同厂商间的协议鸿沟。 - -## 💖 赞助商 (Sponsors) - -| 赞助商 (Sponsor) | 简介 (Description) | -| :---: | :--- | -| PackyCode Logo | 感谢 **PackyCode** 对本项目的赞助!PackyCode 是一家可靠高效的 API 中转服务商,提供 Claude Code、Codex、Gemini 等多种服务的中转。PackyCode 为本项目的用户提供了特别优惠:使用[此链接](https://www.packyapi.com/register?aff=Ctrler)注册,并在充值时输入 **“Ctrler”** 优惠码即可享受 **九折优惠**。 | -| AICodeMirror Logo | 感谢 AICodeMirror 赞助了本项目!AICodeMirror 提供 Claude Code / Codex / Gemini CLI 官方高稳定中转服务,支持企业级高并发、极速开票、7×24 专属技术支持。 Claude Code / Codex / Gemini 官方渠道低至 3.8 / 0.2 / 0.9 折,充值更有折上折!AICodeMirror 为 Antigravity-Manager 的用户提供了特别福利,通过[此链接](https://www.aicodemirror.com/register?invitecode=MV5XUM)注册的用户,可享受首充8折,企业客户最高可享 7.5 折! | - -### ☕ 支持项目 (Support) - -如果您觉得本项目对您有所帮助,欢迎打赏作者! + *Seamlessly proxy Gemini & Claude • OpenAI-Compatible • Privacy First* -请我喝杯咖啡 + [![Version](https://img.shields.io/badge/Version-5.0.2-blue?style=for-the-badge)](https://github.com/GofMan5/Antigravity-Manager/releases) + [![Tauri](https://img.shields.io/badge/Tauri-v2-orange?style=for-the-badge)](https://tauri.app) + [![Rust](https://img.shields.io/badge/Backend-Rust-red?style=for-the-badge)](https://www.rust-lang.org) + [![React](https://img.shields.io/badge/Frontend-React-61DAFB?style=for-the-badge)](https://react.dev) -| 支付宝 (Alipay) | 微信支付 (WeChat) | Buy Me a Coffee | -| :---: | :---: | :---: | -| ![Alipay](./docs/images/donate_alipay.png) | ![WeChat](./docs/images/donate_wechat.png) | ![Coffee](./docs/images/donate_coffee.png) | + [Features](#-features) • + [Installation](#-installation) • + [Quick Start](#-quick-start) • + [Configuration](#-configuration) -## 🌟 深度功能解析 (Detailed Features) + -### 1. 
🎛️ 智能账号仪表盘 (Smart Dashboard) -* **全局实时监控**: 一眼洞察所有账号的健康状况,包括 Gemini Pro、Gemini Flash、Claude 以及 Gemini 绘图的 **平均剩余配额**。 -* **最佳账号推荐 (Smart Recommendation)**: 系统会根据当前所有账号的配额冗余度,实时算法筛选并推荐“最佳账号”,支持 **一键切换**。 -* **活跃账号快照**: 直观显示当前活跃账号的具体配额百分比及最后同步时间。 +--- -### 2. 🔐 强大的账号管家 (Account Management) -* **OAuth 2.0 授权(自动/手动)**: 添加账号时会提前生成可复制的授权链接,支持在任意浏览器完成授权;回调成功后应用会自动完成并保存(必要时可点击“我已授权,继续”手动收尾)。 -* **多维度导入**: 支持单条 Token 录入、JSON 批量导入(如来自其他工具的备份),以及从 V1 旧版本数据库自动热迁移。 -* **网关级视图**: 支持“列表”与“网格”双视图切换。提供 403 封禁检测,自动标注并跳过权限异常的账号。 +## What is Antigravity Manager? -### 3. 🔌 协议转换与中继 (API Proxy) -* **全协议适配 (Multi-Sink)**: - * **OpenAI 格式**: 提供 `/v1/chat/completions` 端点,兼容 99% 的现有 AI 应用。 - * **Anthropic 格式**: 提供原生 `/v1/messages` 接口,支持 **Claude Code CLI** 的全功能(如思思维链、系统提示词)。 - * **Gemini 格式**: 支持 Google 官方 SDK 直接调用。 -* **智能状态自愈**: 当请求遇到 `429 (Too Many Requests)` 或 `401 (Expire)` 时,后端会毫秒级触发 **自动重试与静默轮换**,确保业务不中断。 +Antigravity Manager is a powerful desktop application that transforms your Google/Anthropic web sessions into standardized API endpoints. It provides: -### 4. 🔀 模型路由中心 (Model Router) -* **系列化映射**: 您可以将复杂的原始模型 ID 归类到“规格家族”(如将所有 GPT-4 请求统一路由到 `gemini-3-pro-high`)。 -* **专家级重定向**: 支持自定义正则表达式级模型映射,精准控制每一个请求的落地模型。 -* **智能分级路由 (Tiered Routing)**: [新] 系统根据账号类型(Ultra/Pro/Free)和配额重置频率自动优先级排序,优先消耗高速重置账号,确保高频调用下的服务稳定性。 -* **后台任务静默降级**: [新] 自动识别 Claude CLI 等工具生成的后台请求(如标题生成),智能重定向至 Flash 模型,保护高级模型配额不被浪费。 +- **Multi-Account Management** — Add unlimited accounts via OAuth or token import +- **Protocol Translation** — OpenAI, Anthropic, and Gemini API compatibility +- **Smart Load Balancing** — Automatic account rotation based on quotas and health +- **Real-time Monitoring** — Track usage, quotas, and request logs -### 5. 
🎨 多模态与 Imagen 3 支持 -* **高级画质控制**: 支持通过 OpenAI `size` (如 `1024x1024`, `16:9`) 参数自动映射到 Imagen 3 的相应规格。 -* **超强 Body 支持**: 后端支持高达 **100MB** (可配置) 的 Payload,处理 4K 高清图识别绰绰有余。 +--- -## 📸 界面导览 (GUI Overview) +## ✨ Features -| | | -| :---: | :---: | -| ![仪表盘 - 全局配额监控与一键切换](docs/images/dashboard-light.png)
仪表盘 | ![账号列表 - 高密度配额展示与 403 智能标注](docs/images/accounts-light.png)
账号列表 | -| ![关于页面 - 关于 Antigravity Tools](docs/images/about-dark.png)
关于页面 | ![API 反代 - 服务控制](docs/images/v3/proxy-settings.png)
API 反代 | -| ![系统设置 - 通用配置](docs/images/settings-dark.png)
系统设置 | | +### 🎛️ Smart Dashboard +- Real-time quota monitoring across all accounts +- One-click account switching with smart recommendations +- Visual health indicators and subscription tier badges -### 💡 使用案例 (Usage Examples) +### 🔐 Account Management +- **OAuth 2.0** — Secure browser-based authorization +- **Token Import** — Batch import from JSON or manual entry +- **Auto-healing** — Automatic token refresh and error recovery -| | | -| :---: | :---: | -| ![Claude Code 联网搜索 - 结构化来源与引文显示](docs/images/usage/claude-code-search.png)
Claude Code 联网搜索 | ![Cherry Studio 深度集成 - 原生回显搜索引文与来源链接](docs/images/usage/cherry-studio-citations.png)
Cherry Studio 深度集成 | -| ![Imagen 3 高级绘图 - 完美还原 Prompt 意境与细节](docs/images/usage/image-gen-nebula.png)
Imagen 3 高级绘图 | ![Kilo Code 接入 - 多账号极速轮换与模型穿透](docs/images/usage/kilo-code-integration.png)
Kilo Code 接入 | +### 🔌 API Proxy +| Protocol | Endpoint | Compatibility | +|----------|----------|---------------| +| OpenAI | `/v1/chat/completions` | ChatGPT, Cursor, Continue | +| Anthropic | `/v1/messages` | Claude Code CLI, Claude Desktop | +| Gemini | `/v1beta/models` | Google AI SDK | -## 🏗️ 技术架构 (Architecture) +### 🛡️ Reliability Features +- **VALIDATION_REQUIRED Handling** — Temporary account blocking with auto-recovery +- **Circuit Breaker** — Configurable backoff steps for rate limits +- **Quota Protection** — Automatic model-level protection when quota is low +- **Health Scoring** — Prioritize stable accounts automatically -```mermaid -graph TD - Client([外部应用: Claude Code/NextChat]) -->|OpenAI/Anthropic| Gateway[Antigravity Axum Server] - Gateway --> Middleware[中间件: 鉴权/限流/日志] - Middleware --> Router[Model Router: ID 映射] - Router --> Dispatcher[账号分发器: 轮询/权重] - Dispatcher --> Mapper[协议转换器: Request Mapper] - Mapper --> Upstream[上游请求: Google/Anthropic API] - Upstream --> ResponseMapper[响应转换器: Response Mapper] - ResponseMapper --> Client -``` +### 🔧 Developer Tools +- **Debug Console** — Real-time log viewer with filtering and export +- **Traffic Monitor** — Request/response inspection with timing +- **Model Mapping** — Custom routing rules and aliases -## 安装指南 (Installation) +--- -### 选项 A: 终端安装 (macOS & Linux 推荐) +## 📥 Installation -#### macOS -如果您已安装 [Homebrew](https://brew.sh/),可以通过以下命令快速安装: +### Windows +Download the latest `.msi` or portable `.zip` from [Releases](https://github.com/GofMan5/Antigravity-Manager/releases). +### macOS ```bash -# 1. 订阅本仓库的 Tap -brew tap lbjlaq/antigravity-manager https://github.com/lbjlaq/Antigravity-Manager +# Via Homebrew +brew tap GofMan5/antigravity-manager https://github.com/GofMan5/Antigravity-Manager +brew install --cask --no-quarantine antigravity-tools -# 2. 
安装应用 -brew install --cask antigravity-tools +# Or download .dmg from Releases (Universal: Apple Silicon & Intel) ``` -> **提示**: 如果遇到权限问题,建议添加 `--no-quarantine` 参数。 -#### Arch Linux -您可以选择通过一键安装脚本或 Homebrew 进行安装: - -**方式 1:一键安装脚本 (推荐)** +### Linux ```bash -curl -sSL https://raw.githubusercontent.com/lbjlaq/Antigravity-Manager/main/deploy/arch/install.sh | bash -``` +# Arch Linux +curl -sSL https://raw.githubusercontent.com/GofMan5/Antigravity-Manager/main/deploy/arch/install.sh | bash -**方式 2:通过 Homebrew** (如果您已安装 [Linuxbrew](https://sh.brew.sh/)) -```bash -brew tap lbjlaq/antigravity-manager https://github.com/lbjlaq/Antigravity-Manager -brew install --cask antigravity-tools +# Other distros: Download .deb or .AppImage from Releases ``` -#### 其他 Linux 发行版 -安装后会自动将 AppImage 添加到二进制路径并配置可执行权限。 - -### 选项 B: 手动下载 -前往 [GitHub Releases](https://github.com/lbjlaq/Antigravity-Manager/releases) 下载对应系统的包: -* **macOS**: `.dmg` (支持 Apple Silicon & Intel) -* **Windows**: `.msi` 或 便携版 `.zip` -* **Linux**: `.deb` 或 `AppImage` - -### 选项 C: Docker 部署 (推荐用于 NAS/服务器) -如果您希望在容器化环境中运行,我们提供了原生的 Docker 镜像。该镜像内置了对 v4.0.2 原生 Headless 架构的支持,可自动托管前端静态资源,并通过浏览器直接进行管理。 - +### Docker ```bash -# 方式 1: 直接运行 (推荐) -# - API_KEY: 必填。用于所有协议的 AI 请求鉴定。 -# - WEB_PASSWORD: 可选。用于管理后台登录。若不设置则默认使用 API_KEY。 -docker run -d --name antigravity-manager \ +docker run -d --name antigravity \ -p 8045:8045 \ - -e API_KEY=sk-your-api-key \ - -e WEB_PASSWORD=your-login-password \ - -e ABV_MAX_BODY_SIZE=104857600 \ + -e API_KEY=sk-your-key \ -v ~/.antigravity_tools:/root/.antigravity_tools \ - lbjlaq/antigravity-manager:latest - -# 忘记密钥?执行 docker logs antigravity-manager 或 grep -E '"api_key"|"admin_password"' ~/.antigravity_tools/gui_config.json - -#### 🔐 鉴权逻辑说明 -* **场景 A:仅设置了 `API_KEY`** - - **Web 登录**:使用 `API_KEY` 进入后台。 - - **API 调用**:使用 `API_KEY` 进行 AI 请求鉴权。 -* **场景 B:同时设置了 `API_KEY` 和 `WEB_PASSWORD` (推荐)** - - **Web 登录**:**必须**使用 `WEB_PASSWORD`,使用 API Key 将被拒绝(更安全)。 - - **API 调用**:统一使用 `API_KEY`。这样您可以将 API Key 
分发给成员,而保留密码仅供管理员使用。 - -#### 🆙 旧版本升级指引 -如果您是从 v4.0.1 及更早版本升级,系统默认未设置 `WEB_PASSWORD`。您可以通过以下任一方式设置: -1. **Web UI 界面 (推荐)**:使用原有 `API_KEY` 登录后,在 **API 反代设置** 页面手动设置并保存。新密码将持久化存储在 `gui_config.json` 中。 -2. **环境变量 (Docker)**:在启动容器时增加 `-e WEB_PASSWORD=您的新密码`。**注意:环境变量具有最高优先级,将覆盖 UI 中的任何修改。** -3. **配置文件 (持久化)**:直接修改 `~/.antigravity_tools/gui_config.json`,在 `proxy` 对象中修改或添加 `"admin_password": "您的新密码"` 字段。 - - *注:`WEB_PASSWORD` 是环境变量名,`admin_password` 是配置文件中的 JSON 键名。* - -> [!TIP] -> **密码优先级逻辑 (Priority)**: -> - **第一优先级 (环境变量)**: `ABV_WEB_PASSWORD` 或 `WEB_PASSWORD`。只要设置了环境变量,系统将始终使用它。 -> - **第二优先级 (配置文件)**: `gui_config.json` 中的 `admin_password` 字段。UI 的“保存”操作会更新此值。 -> - **保底回退 (向后兼容)**: 若上述均未设置,则回退使用 `API_KEY` 作为登录密码。 - -# 方式 2: 使用 Docker Compose -# 1. 进入项目的 docker 目录 -cd docker -# 2. 启动服务 -docker compose up -d + ghcr.io/gofman5/antigravity-manager:latest ``` -> **访问地址**: `http://localhost:8045` (管理后台) | `http://localhost:8045/v1` (API Base) -> **系统要求**: -> - **内存**: 建议 **1GB** (最小 256MB)。 -> - **持久化**: 需挂载 `/root/.antigravity_tools` 以保存数据。 -> - **架构**: 支持 x86_64 和 ARM64。 -> **详情见**: [Docker 部署指南 (docker)](./docker/README.md) --- -Copyright © 2024-2026 [lbjlaq](https://github.com/lbjlaq) +## 🚀 Quick Start -### 🛠️ 常见问题排查 (Troubleshooting) +### 1. Add an Account -#### macOS 提示“应用已损坏,无法打开”? -由于 macOS 的安全机制,非 App Store 下载的应用可能会触发此提示。您可以按照以下步骤快速修复: +1. Open **Accounts** → **Add Account** +2. Choose **OAuth** (recommended) or **Token** +3. Complete authorization in your browser +4. Account appears with quota information -1. **命令行修复** (推荐): - 打开终端,执行以下命令: - ```bash - sudo xattr -rd com.apple.quarantine "/Applications/Antigravity Tools.app" - ``` -2. **Homebrew 安装技巧**: - 如果您使用 brew 安装,可以添加 `--no-quarantine` 参数来规避此问题: - ```bash - brew install --cask --no-quarantine antigravity-tools - ``` +### 2. Start the Proxy -## 🔌 快速接入示例 +1. Go to **API Proxy** tab +2. Click **Start Proxy** +3. Note the endpoint: `http://127.0.0.1:8045` -### 🔐 OAuth 授权流程(添加账号) -1. 
打开“Accounts / 账号” → “添加账号” → “OAuth”。 -2. 弹窗会在点击按钮前预生成授权链接;点击链接即可复制到系统剪贴板,然后用你希望的浏览器打开并完成授权。 -3. 授权完成后浏览器会打开本地回调页并显示“✅ 授权成功!”。 -4. 应用会自动继续完成授权并保存账号;如未自动完成,可点击“我已授权,继续”手动完成。 +### 3. Connect Your App -> 提示:授权链接包含一次性回调端口,请始终使用弹窗里生成的最新链接;如果授权时应用未运行或弹窗已关闭,浏览器可能会提示 `localhost refused connection`。 - -### 如何接入 Claude Code CLI? -1. 启动 Antigravity,并在“API 反代”页面开启服务。 -2. 在终端执行: +#### Claude Code CLI ```bash export ANTHROPIC_API_KEY="sk-antigravity" export ANTHROPIC_BASE_URL="http://127.0.0.1:8045" claude ``` -### 如何接入 OpenCode? -1. 进入 **API 反代**页面 → **外部 Providers** → 点击 **OpenCode Sync** 卡片。 -2. 点击 **Sync** 按钮,将自动生成 `~/.config/opencode/opencode.json` 配置文件(包含代理 baseURL 与 apiKey,支持 Anthropic/Google 双 Provider)。 -3. 可选:勾选 **Sync accounts** 可同时导出 `antigravity-accounts.json` 账号列表,供 OpenCode 插件直接导入使用。 -4. Windows 用户路径为 `C:\Users\<用户名>\.config\opencode\`(与 `~/.config/opencode` 规则一致)。 -5. 如需回滚,可点击 **Restore** 按钮从备份恢复之前的配置。 - -### 如何接入 Kilo Code? -1. **协议选择**: 建议优先使用 **Gemini 协议**。 -2. **Base URL**: 填写 `http://127.0.0.1:8045`。 -3. **注意**: - - **OpenAI 协议限制**: Kilo Code 在使用 OpenAI 模式时,其请求路径会叠加产生 `/v1/chat/completions/responses` 这种非标准路径,导致 Antigravity 返回 404。因此请务必填入 Base URL 后选择 Gemini 模式。 - - **模型映射**: Kilo Code 中的模型名称可能与 Antigravity 默认设置不一致,如遇到无法连接,请在“模型映射”页面设置自定义映射,并查看**日志文件**进行调试。 - -### 如何在 Python 中使用? +#### Python (OpenAI SDK) ```python import openai @@ -264,2026 +131,92 @@ client = openai.OpenAI( ) response = client.chat.completions.create( - model="gemini-3-flash", - messages=[{"role": "user", "content": "你好,请自我介绍"}] + model="gemini-2.5-pro", + messages=[{"role": "user", "content": "Hello!"}] ) print(response.choices[0].message.content) ``` -### 如何使用图片生成 (Imagen 3)? +#### Cursor / Continue / Other IDEs +- **API Base**: `http://127.0.0.1:8045/v1` +- **API Key**: `sk-antigravity` (or your configured key) +- **Model**: `gemini-2.5-pro`, `claude-sonnet-4`, etc. 
-#### 方式一:OpenAI Images API (推荐) -```python -import openai +--- -client = openai.OpenAI( - api_key="sk-antigravity", - base_url="http://127.0.0.1:8045/v1" -) +## ⚙️ Configuration -# 生成图片 -response = client.images.generate( - model="gemini-3-pro-image", - prompt="一座未来主义风格的城市,赛博朋克,霓虹灯", - size="1920x1080", # 支持任意 WIDTHxHEIGHT 格式,自动计算宽高比 - quality="hd", # "standard" | "hd" | "medium" - n=1, - response_format="b64_json" -) +### Settings Location +- **Windows**: `%APPDATA%\antigravity_tools\` +- **macOS**: `~/Library/Application Support/antigravity_tools/` +- **Linux**: `~/.antigravity_tools/` -# 保存图片 -import base64 -image_data = base64.b64decode(response.data[0].b64_json) -with open("output.png", "wb") as f: - f.write(image_data) -``` +### Key Settings -**支持的参数**: -- **`size`**: 任意 `WIDTHxHEIGHT` 格式(如 `1280x720`, `1024x1024`, `1920x1080`),自动计算并映射到标准宽高比(21:9, 16:9, 9:16, 4:3, 3:4, 1:1) -- **`quality`**: - - `"hd"` → 4K 分辨率(高质量) - - `"medium"` → 2K 分辨率(中等质量) - - `"standard"` → 默认分辨率(标准质量) -- **`n`**: 生成图片数量(1-10) -- **`response_format`**: `"b64_json"` 或 `"url"`(Data URI) +| Setting | Default | Description | +|---------|---------|-------------| +| `validation_block_minutes` | 10 | How long to block account after 403 VALIDATION_REQUIRED | +| `show_proxy_selected_badge` | true | Show "SELECTED" badge on accounts page | +| `debug_console_enabled` | false | Enable built-in debug console | -#### 方式二:Chat API + 参数设置 (✨ 新增) +### Environment Variables (Docker) -**所有协议**(OpenAI、Claude)的 Chat API 现在都支持直接传递 `size` 和 `quality` 参数: +| Variable | Description | +|----------|-------------| +| `API_KEY` | Required. Used for API authentication | +| `WEB_PASSWORD` | Optional. 
Separate password for web UI | +| `ABV_MAX_BODY_SIZE` | Max request body size (default: 100MB) | -```python -# OpenAI Chat API -response = client.chat.completions.create( - model="gemini-3-pro-image", - size="1920x1080", # ✅ 支持任意 WIDTHxHEIGHT 格式 - quality="hd", # ✅ "standard" | "hd" | "medium" - messages=[{"role": "user", "content": "一座未来主义风格的城市"}] -) -``` - -```bash -# Claude Messages API -curl -X POST http://127.0.0.1:8045/v1/messages \ - -H "Content-Type: application/json" \ - -H "x-api-key: sk-antigravity" \ - -d '{ - "model": "gemini-3-pro-image", - "size": "1280x720", - "quality": "hd", - "messages": [{"role": "user", "content": "一只可爱的猫咪"}] - }' -``` +--- -**参数优先级**: 请求体参数 > 模型后缀 +## 🏗️ Architecture -#### 方式三:Chat 接口 + 模型后缀 -```python -response = client.chat.completions.create( - model="gemini-3-pro-image-16-9-4k", # 格式:gemini-3-pro-image-[比例]-[质量] - messages=[{"role": "user", "content": "一座未来主义风格的城市"}] -) +``` +┌─────────────────────────────────────────────────────────────┐ +│ External Applications │ +│ (Claude Code, Cursor, ChatGPT, etc.) 
│ +└─────────────────────────┬───────────────────────────────────┘ + │ OpenAI / Anthropic / Gemini API + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ Antigravity Proxy Server │ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────────────┐ │ +│ │ Auth & │ │ Model │ │ Account │ │ +│ │ Routing │──│ Mapper │──│ Dispatcher │ │ +│ └─────────────┘ └─────────────┘ └─────────────────────┘ │ +│ │ │ +│ ┌─────────────┐ ┌─────────────┐ ┌────────▼────────────┐ │ +│ │ Rate │ │ Health │ │ Token Manager │ │ +│ │ Limiter │──│ Scoring │──│ (Multi-Account) │ │ +│ └─────────────┘ └─────────────┘ └─────────────────────┘ │ +└─────────────────────────┬───────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ Upstream APIs │ +│ Google AI (Gemini) / Anthropic (Claude) │ +└─────────────────────────────────────────────────────────────┘ ``` -**模型后缀说明**: -- **宽高比**: `-16-9`, `-9-16`, `-4-3`, `-3-4`, `-21-9`, `-1-1` -- **质量**: `-4k` (4K), `-2k` (2K), 不加后缀(标准) -- **示例**: `gemini-3-pro-image-16-9-4k` → 16:9 比例 + 4K 分辨率 - -#### 方式四:Cherry Studio 等客户端设置 -在支持 OpenAI 协议的客户端(如 Cherry Studio)中,可以通过**模型设置**页面配置图片生成参数: - -1. **进入模型设置**:选择 `gemini-3-pro-image` 模型 -2. **配置参数**: - - **Size (尺寸)**: 输入任意 `WIDTHxHEIGHT` 格式(如 `1920x1080`, `1024x1024`) - - **Quality (质量)**: 选择 `standard` / `hd` / `medium` - - **Number (数量)**: 设置生成图片数量(1-10) -3. 
**发送请求**:直接在对话框中输入图片描述即可 - -**参数映射规则**: -- `size: "1920x1080"` → 自动计算为 `16:9` 宽高比 -- `quality: "hd"` → 映射为 `4K` 分辨率 -- `quality: "medium"` → 映射为 `2K` 分辨率 - - -## 📝 开发者与社区 - -* **版本演进 (Changelog)**: - * **v4.1.8 (2026-02-07)**: - - **[核心功能] 集成 Claude Opus 4.6 Thinking 模型支持 (PR #1641)**: - - **混合模式架构**: 实现了“静态配置 + 动态获取”的双模架构。模型列表通过 Antigravity API 动态拉取,而 Thinking 模式等高级元数据则由本地注册表静态补充,完美平衡了灵活性与稳定性。 - - **零配置接入**: `claude-opus-4-6` 系列模型自动启用 Thinking 模式并预设 Budget,无需用户手动干预即可享受最新推理能力。 - - **前沿模型映射**: 新增 `claude-opus-4-6-thinking` 及其别名 (`claude-opus-4-6`, `20260201`) 的支持,并将其归入 `claude-sonnet-4.5` 配额组进行统筹管理。 - - **[核心优化] 优化 OpenCode CLI 检测逻辑 (PR #1649)**: - - **路径扩展**: 增加了对 Windows 环境下常见全局安装路径(如 `npm`, `pnpm`, `Yarn`, `NVM`, `FNM` 等)的自动扫描。 - - **稳定性增强**: 修复了在 `PATH` 环境不完整时可能导致检测失败的问题,并增强了对 `.cmd` 和 `.bat` 文件的支持。 - - **[核心修复] 修复监控日志缺失流式工具调用内容的问题**: - - **多协议支持**: 重构了 SSE 解析逻辑,全面支持 OpenAI `tool_calls` 和 Claude `tool_use`。 - - **增量累积**: 实现了工具参数片段的流式累积,确保长参数工具调用能被完整记录并显示在监控面板中。 - - **[UI 优化] 导航栏与链接交互优化 (PR #1648)**: - - **禁止拖拽**: 为导航栏及 Logo 等所有链接和图片添加了 `draggable="false"`,防止用户在意外拖拽时触发浏览器的默认行为,提升交互稳定性。 - - **SmartWarmup 悬停增强**: 优化了智能预热组件图标在未激活状态下的悬停颜色切换逻辑,使界面反馈更加细腻一致。 - - **[核心功能] 账号自定义标签支持扩展 (PR #1620)**: - - **长度限制**: 将标签长度限制从 20 字符优化为 15 字符,在前后端同步生效。 - - **后端验证**: 增强了后端 Rust 命令的验证逻辑,支持 Unicode 字符计数,并优化了错误处理。 - - **前端对齐**: 账户列表和卡片视图的编辑框均已同步 15 字符的最大长度。 - - **[核心修复] 修复 UserToken 页面剪贴板错误 (PR #1639)**: - - **逻辑修复**: 修复了在 UserToken 页面尝试访问或写入剪贴板时可能触发的异常。 - - **体验优化**: 提高了剪贴板交互的鲁棒性,确保在各种环境下都能正常工作。 - - **[核心优化] 优化 Token 排序性能并减少磁盘 I/O (PR #1627)**: - - **内存配额缓存**: 将模型配额信息引入内存,在 `get_token` 排序 hot path 中直接使用缓存。 - - **性能提升**: 彻底消除了排序过程中由于频繁读取磁盘文件(`std::fs::read_to_string`)导致的同步 I/O 阻塞,显著降低了高并发下的请求推迟与延迟。 - - **[国际化] 修复自定义标签功能缺失的翻译 (PR #1630)**: - - **翻译补全**: 补全了繁体中文等语种中“编辑标签”、“自定义标签占位符”以及“标签更新成功”提示的国际化翻译。 - - **[UI 修复] 修复 SmartWarmup 图标悬停效果缺失 (PR #1568)**: - - **增加交互**: 为未启用状态的图标添加了悬停变色效果,与其他设置项保持一致。 - - **[核心修复] 修复 OpenAI 协议下 Vertex AI 思考模型签名缺失问题 (Issue #1650)**: - - **Sentinel 注入**: 
移除了对 Vertex AI (`projects/...`) 模型的哨兵签名注入限制。现在即使缺少真实签名,系统也会自动注入 `skip_thought_signature_validator`,从而避免 `Field required for thinking signature` 错误。 - * **v4.1.7 (2026-02-06)**: - - **[核心修复] 修复图像生成 API (429/500/503) 自动切换账号问题 (Issue #1622)**: - - **自动重试**: 为 `images/generations` 和 `images/edits` 引入了与 Chat API 一致的自动重试与账号轮换机制。 - - **体验一致性**: 确保在某个账号配额耗尽或服务不可用时,请求能自动故障转移到下一个可用账号,不再直接失败。 - - **[核心功能] 新增账户自定义标签支持 (PR #1620)**: - - **标签管理**: 支持为每个账户设置个性化标签,方便在多账户环境下快速识别。 - - **交互优化**: 账户列表和卡片视图均支持直接查看和内联编辑标签。 - - **多语言支持**: 完整适配中、英双语显示。 - - **[核心修复] 修复数据库为空时 `get_stats` 返回 NULL 导致崩溃的问题 (PR #1578)**: - - **NULL 值处理**: 在 SQL 查询中使用 `COALESCE(SUM(...), 0)` 确保在没有日志记录时依然返回数值,解决了 `rusqlite` 无法将 `NULL` 转换为 `u64` 的问题。 - - **性能保留**: 保留了本地分支中通过单次查询获取多项统计数据的性能优化逻辑。 - - - **[核心修复] Claude 403 错误处理与账号轮换优化 (PR #1616)**: - - **403 状态映射**: 将 403 (Forbidden) 错误映射为 503 (Service Unavailable),防止客户端(如 Claude Code)因检测到 403 而自动登出。 - - **自动禁用逻辑**: 检测到 403 错误时自动将账号标记为 `is_forbidden` 并从活跃池中移除,避免该账号在接下来的请求中被继续选中。 - - **临时风控识别**: 识别 `VALIDATION_REQUIRED` 错误,并对相关账号执行 10 分钟的临时阻断。 - - **轮换稳定性**: 修复了在账号额度耗尽 (QUOTA_EXHAUSTED) 时的过早返回问题,确保系统能正确尝试轮换到下一个可用账号。 - - **[核心功能] OpenCode CLI 配置同步集成 (PR #1614)**: - - **一键同步**: 自动生成 `~/.config/opencode/opencode.json`,支持 Anthropic 和 Google 双 Provider 自动配置。 - - **账号导出**: 可选同步账号列表至 `antigravity-accounts.json`,供 OpenCode 插件直接导入。 - - **备份与还原**: 同步前自动备份原有配置,支持一键还原。 - - **跨平台支持**: 统一适配 Windows、macOS 和 Linux 环境。 - - **体验优化**: 修复了 RPC 参数包装问题,补全了多语言翻译,并优化了配置文件不存在时的视图状态。 - - **[核心功能] 允许隐藏未使用的菜单项 (PR #1610)**: - - **可见性控制**: 在设置页面新增“菜单项显示设置”,允许用户自定义侧边栏显示的导航项。 - - **界面美化**: 为极简用户提供更清爽的界面,隐藏不常用的功能入口。 - - - **[核心修复] Gemini 原生协议图像生成完全修复 (Issue #1573, #1625)**: - - **400 错误修复**: 修复了 Gemini 原生协议生成图片时,因请求体 `contents` 数组缺失 `role: "user"` 字段导致的 `INVALID_ARGUMENT` 错误。 - - **参数透传支持**: 确保 `generationConfig.imageConfig` (如 `aspectRatio`, `imageSize`) 能正确透传给上游,不再被错误过滤。 - - **错误码优化**: 优化了图像生成服务的错误映射,确保 429/503 等状态码能正确触发客户端的重试机制。 - - **[核心增强] 自定义映射支持手动输入任意模型 ID**: - - **灵活输入**: 
在自定义映射的目标模型选择器中新增手动输入功能,用户现在可以在下拉菜单底部直接输入任意模型 ID。 - - **未发布模型体验**: 支持体验 Antigravity 尚未正式发布的模型,例如 `claude-opus-4-6`。用户可以通过自定义映射将请求路由到这些实验性模型。 - - **重要提示**: 并非所有账号都支持调用未发布的模型。如果您的账号无权访问某个模型,请求可能会返回错误。建议先在少量请求中测试,确认账号权限后再大规模使用。 - - **快捷操作**: 支持 Enter 键快速提交自定义模型 ID,提升输入效率。 - * **v4.1.6 (2026-02-06)**: - - **[核心修复] 深度重构 Claude/Gemini 思考模型中断与工具循环自愈逻辑 (#1575)**: - - **思考异常恢复**: 引入了 `thinking_recovery` 机制。当检测到历史消息中包含陈旧思考块或陷入状态循环时,自动进行剥离与引导,提升了在复杂工具调用场景下的稳定性。 - - **彻底解决签名绑定错误**: 修正了误将缓存签名注入客户端自定义思考内容的逻辑。由于签名与文本强绑定,此举彻底解决了会话中断或重置后常见的 `Invalid signature` (HTTP 400) 报错。 - - **会话级完全隔离**: 删除了全局签名单例,确保所有思维签名严格在 Session 级别隔离,彻底杜绝了多账号、多会话并发时的签名污染。 - - **[修复] 彻底解决 Gemini 系列由于 `thinking_budget` 越界导致的 HTTP 400 错误 (#1592, #1602)**: - - **全协议路径硬截断**: 修复了 OpenAI 和 Claude 协议映射器在「自定义模式」下缺失限额保护的问题。现在无论选择何种模式(自动/自定义/透传),只要目标模型为 Gemini,后端都会强制执行 24576 的物理上限保护。 - - **自动适配与前端同步**: 重构了协议转换逻辑,使其基于最终映射的模型型号进行动态限额;同步更新了设置界面的提示文案,明确了 Gemini 协议的物理限制。 - - **[核心修复] Web Mode 登录验证修复 & 登出按钮 (PR #1603)**: - - **登录验证**: 修复了 Web 模式下登录验证逻辑的异常,确保用户身份验证的稳定性。 - - **登出功能**: 在界面中新增/修复了登出按钮,完善了 Web 模式下的账户管理闭环。 -
- 显示旧版本日志 (v4.1.5 及更早) - - * **v4.1.5 (2026-02-05)**: - - **[安全修复] 前端 API Key 存储迁移 (LocalStorage -> SessionStorage)**: - - **存储机制升级**: 将 Admin API Key 的存储位置从持久化的 `localStorage` 迁移至会话级的 `sessionStorage`,显著降低了在公共设备上的安全风险。 - - **自动无感迁移**: 实现了自动检测与迁移逻辑。系统会识别旧的 `localStorage` 密钥,将其自动转移到 `sessionStorage` 并彻底清除旧数据,确保现有用户无缝过渡且消除安全隐患。 - - **[核心修复] 修复 Docker 环境下添加账号失败问题 (Issue #1583)**: - - **账号上下文修复**: 修复了在添加新账号时 `account_id` 为 `None` 导致代理选择异常的问题。现在系统会为新账号生成临时 UUID,确保所有 OAuth 请求都有明确的账号上下文。 - - **日志增强**: 优化了 `refresh_access_token` 和 `get_effective_client` 的日志记录,提供更详细的代理选择信息,帮助诊断 Docker 环境下的网络问题。 - - **影响范围**: 修复了 Docker 部署环境下通过 Refresh Token 添加账号时可能出现的长时间挂起或失败问题。 - - **[核心修复] Web Mode 兼容性修复 & 403 账号轮换优化 (PR #1585)**: - - **Security API Web Mode 兼容性修复 (Issue: 400/422 错误)**: - - 为 `IpAccessLogQuery` 添加 `page` 和 `page_size` 的默认值,解决 `/api/security/logs` 返回 400 Bad Request 的问题 - - 移除 `AddBlacklistWrapper` 和 `AddWhitelistWrapper` 结构体,解决 `/api/security/blacklist` 和 `/api/security/whitelist` POST 返回 422 Unprocessable Content 的问题 - - 前端组件参数名修正:`ipPattern` → `ip_pattern`,确保与后端 API 参数一致 - - **403 账号轮换优化 (Issue: 403 后未正确跳过账号)**: - - 在 `token_manager.rs` 中添加 `set_forbidden` 方法,支持标记账号为禁用状态 - - 账号选择时检查 `quota.is_forbidden` 状态,自动跳过被禁用的账号 - - 403 时清除该账号的 sticky session 绑定,确保立即切换到其他可用账号 - - **Web Mode 请求处理优化**: - - `request.ts` 修复路径参数替换后从 body 中移除已使用的参数,避免重复传参 - - 支持 PATCH 方法的 body 处理,补全 HTTP 方法支持 - - 自动解包 `request` 字段,简化请求结构 - - **Debug Console Web Mode 支持**: - - `useDebugConsole.ts` 添加 `isTauri` 环境检测,区分 Tauri 和 Web 环境 - - Web 模式下使用 `request()` 替代 `invoke()`,确保 Web 环境下的正常调用 - - 添加轮询机制,Web 模式下每 2 秒自动刷新日志 - - **Docker 构建优化**: - - 添加 `--legacy-peer-deps` 标志,解决前端依赖冲突 - - 启用 BuildKit 缓存加速 Cargo 构建,提升构建速度 - - 补全 `@lobehub/icons` peer dependencies,修复前端依赖缺失导致的构建失败 - - **影响范围**: 此更新显著提升了 Docker/Web 模式下的稳定性和可用性,解决了 Security API 报错、403 账号轮换失效、Debug Console 不可用等问题,同时优化了 Docker 构建流程。 - - **[核心修复] 修复 Web/Docker 模式下调试控制台崩溃与日志同步问题 (Issue #1574)**: - - **Web 兼容性**: 修复了在非 Tauri 环境下直接调用原生 `invoke` API 导致的 
`TypeError` 崩溃。现在通过兼容性请求层进行后端通信。 - - **指纹绑定修复**: 修复了生成指纹并绑定时,由于前后端参数结构不匹配导致的 `HTTP Error 422` 报错。通过调整后端包装类,使其兼容前端嵌套的 `profile` 对象。 - - **日志轮询机制**: 为 Web 模式引入了自动日志轮询功能(2秒/次),解决了浏览器端无法接收 Rust 后端事件推送导致调试日志为空的问题。 - - **[核心优化] 补全 Tauri 命令的 HTTP API 映射**: - - **全量适配**: 对齐了 30+ 个原生 Tauri 命令,为缓存管理(清理日志/应用缓存)、系统路径获取、代理池配置、用户令牌管理等核心功能补全了 HTTP 映射,确保 Web/Docker 版本的功能完整性。 - - **[安全修复] 任意文件读写漏洞加固**: - - **API 安全层**: 彻底移除了高危接口 `/api/system/save-file` 及其关联函数,并在数据库导入接口中增加了路径遍历防范 (`..` 校验)。 - - **Tauri 安全增强**: 为 `save_text_file` 和 `read_text_file` 命令引入了统一的路径校验器,严禁目录遍历并封堵了系统敏感目录的访问权限。 - * **v4.1.4 (2026-02-05)**: - - **[核心功能] 代理池持久化与账号筛选优化 (PR #1565)**: - - **持久化增强**: 修复了代理池绑定在反代服务重启或重载时无法正确恢复的问题,确保绑定关系严格持久化。 - - **智能筛选**: 优化了 `TokenManager` 的账号获取逻辑,在全量加载、同步以及调度路径中增加了对 `disabled` 和 `proxy_disabled` 状态的深度校验,彻底杜绝已禁用账号被误选的问题。 - - **验证阻止支持**: 引入了 `validation_blocked` 字段体系,专门处理 Google 的 `VALIDATION_REQUIRED` (403 临时风控) 场景,实现了基于截止时间的智能自动绕过。 - - **状态清理加固**: 账号失效时同步清理内存令牌、限流记录、会话绑定及优先账号标志,保证内部状态机的一致性。 - - **[核心修复] 修复 Web/Docker 模式下的关键兼容性问题 (Issue #1574)**: - - **调试模式修复**: 修正了前端调试控制台 URL 映射错误(移除多余的 `/proxy` 路径),解决了 Web 模式下调试模式无法开启的问题。 - - **指纹绑定修复**: 为 `admin_bind_device_profile_with_profile` 接口增加了 `BindDeviceProfileWrapper` 结构,修复了前端发送嵌套参数导致的 HTTP 422 错误。 - - **向后兼容性**: 使用 `serde alias` 功能在 API 层同时支持 camelCase(前端)和 snake_case(后端文件),确保旧账号文件正常加载。 - - **[代码优化] 简化 API 处理结构**: - - 移除了多个管理 API 路由(如 IP 黑白名单管理、安全设置更新等)中的冗余包装层 (`Wrapper`),直接解构业务模型,提升了代码的简洁性与开发效率。 - - **[核心修复] 解决 OpenCode 调用 Thinking 模型中断问题 (Issue #1575)**: - - **finish_reason 强制修正**: 修复了工具调用时 `finish_reason` 被错误设置为 `stop` 导致 OpenAI 客户端提前终止对话的问题。现在系统会强制将有工具调用的响应 `finish_reason` 设置为 `tool_calls`,确保工具循环正常运行。 - - **工具参数标准化**: 实现了 shell 工具参数名称的自动标准化,将 Gemini 可能生成的 `cmd`/`code`/`script` 等非标准参数名统一转换为 `command`,提升了工具调用的兼容性。 - - **影响范围**: 修复了 OpenAI 协议下 Thinking 模型(如 `claude-sonnet-4-5-thinking`)的工具调用流程,解决了 OpenCode 等客户端的中断问题。 - - * **v4.1.3 (2026-02-05)**: - - **[核心修复] 解决 Web/Docker 模式下安全配置与 IP 管理失效问题 (Issue #1560)**: - - **协议对齐**: 修复了后端 Axum 
接口无法解析前端 `invoke` 封装的嵌套参数格式(如 `{"config": ...}`)的问题,确保安全配置能正确持久化。 - - **参数规范化**: 为 IP 管理相关接口添加了 `camelCase` 重命名支持,解决了 Web 端 Query 参数大小写不匹配导致的添加失败与删除失效。 - - **[核心修复] 恢复 Gemini Pro 思考块输出 (Issue #1557)**: - - **跨协议对齐**: 修复了自 v4.1.0 以来 `gemini-3-pro` 等模型在 OpenAI、Claude 和 Gemini 原生协议下思考块缺失的问题。 - - **智能注入逻辑**: 实现了 `thinkingConfig` 的自动注入与默认开启机制,确保即使客户端未发送配置,模型也能正确激活思考能力。 - - **鲁棒性增强**: 优化了 `wrapper.rs` 内部类型处理,解析并解决了高并发场景下的配置冲突。 - * **v4.1.2 (2026-02-05)**: - - **[核心功能] 多协议客户端适配器 (ClientAdapter Framework) (Issue #1522)**: - - **架构重构**: 引入 `ClientAdapter` 框架并应用 `Arc` 引用计数,实现了 Handler 层与下游客户端逻辑的完全解耦,支持更安全的跨线程共享。 - - **全协议兼容**: 针对 `opencode` 等第三方客户端,实现了 **4 种协议**(Claude/OpenAI/Gemini/OA-Compatible)的无缝接入,彻底解决了 `AI_TypeValidationError` 报错。 - - **智能策略**: 实现了 FIFO 签名缓存策略与 `let_it_crash` 快速失败机制,显著提升了高并发场景下的稳定性和错误反馈速度。 - - **标准化错误响应**: 强制统一所有协议的错误返回格式(流式 SSE `event: error` / 非流式 JSON),确保客户端能正确解析上游异常。 - - **[核心修复] 统一账号禁用状态检查逻辑 (Issue #1512)**: - - **逻辑对齐**: 修复了批量刷新配额及自动预热逻辑中遗漏手动禁用状态 (`proxy_disabled`) 的问题。 - - **后台降噪**: 确保标记为“禁用”或“禁用代理”的账号不再触发任何后台网络请求,提升了系统的隐私性与资源效率。 - - **[核心修复] 解决 OpenAI 协议路径下 Invalid signature 导致的 400 错误 (Issue #1506)**: - - **Session 级签名隔离**: 引入了 `SignatureCache` 机制,通过 `session_id` 物理隔离不同会话的思维签名存储,彻底杜绝多轮对话或并发请求导致的签名污染。 - - **鲁棒性增强**: 增加了对思维链占位符(如 `[undefined]`)的识别与自动清洗逻辑,提升了对不同客户端(如 Cherry Studio)的兼容性。 - - **全路径透传**: 重构了请求转换与流式处理链路,确保 Session 上下文在非流式和流式请求中均能精准传导。 - - **[UI 增强] 新增模型图标支持与自动排序功能 (PR #1535)**: - - **视觉呈现**: 引入 `@lobehub/icons` 图标库,在账号卡片、表格及详情页中展示不同模型的 brand 图标,视觉体验更佳。 - - **智能排序**: 实现了基于权重的模型自动排序逻辑(系列 > 级别 > 后缀),优先展示最常用的高级模型(如 Gemini 3 Pro)。 - - **配置中心化**: 构建了统一的模型元数据配置系统,将模型标签、短名称、图标与权重解耦,提升系统扩展性。 - - **国际化同步**: 同步补全了 13 种常用语言的模型显示名称。 - - **[核心修复] 增强账号禁用状态与磁盘状态实时校验 (PR #1546)**: - - **磁盘深度校验**: 引入了 `get_account_state_on_disk` 机制,在获取 Token 的关键路径增加磁盘状态二次确认,彻底解决内存缓存延迟导致的禁用账号误选问题。 - - **固定账号智能同步**: 优化了 `toggle_proxy_status` 指令,禁用账号时会自动检查并关闭对应的固定账号模式,并立即触发代理池重载。 - - **授权失效自愈**: 当后端检测到 `invalid_grant` 错误并自动禁用账号时,现在会物理清理内存中的 Token、限流记录和会话绑定,确保故障账号即刻下线。 - 
- **全链路过滤适配**: 补全了预热逻辑 (`Warmup`) 与定时调度器 (`Scheduler`) 的禁用状态检查,大幅减少无效的后台网络请求。 - - **[核心优化] 代理池健康检查并发化 (PR #1547)**: - - **性能提升**: 引入了基于 `futures` 流的并发执行机制,将顺序检查重构为并发处理(并发上限 20)。 - - **效率增强**: 显著缩短了大型代理池的健康检查总时长,提升了系统对代理状态变更的响应速度。 - - **[核心修复] 解决 Docker/HTTP 环境下 crypto.randomUUID 兼容性问题 (Issue #1548)**: - - **问题修复**: 修复了在非安全上下文(如 HTTP 或部分 Docker 环境)中,因浏览器禁用 `crypto.randomUUID` API 导致的应用崩溃("Unexpected Application Error")及批量导入失败问题。 - - **兼容性增强**: 引入了全平台兼容的 UUID 生成回退机制,确保在任何部署环境下 ID 生成的稳定性。 - * **v4.1.1 (2026-02-04)**: - - **[核心修复] 解决 User Tokens 页面在 Web/Docker 环境下加载失败问题 (Issue #1525)**: - - **API 同步**: 补全了前端 `request.ts` 的命令映射,并新增对 `PATCH` 方法的支持,解决了 Web 端因映射缺失导致的 API 调用错误。 - - **后端路由补全**: 在 Axum 管理服务器中新增了 User Token 的全量管理接口(List/Create/Update/Renew/Delete),确保 Headless 模式功能完整。 - - **[核心优化] 数据库迁移增强与幂等性改进**: - - **自动列迁移**: 完善了 `UserToken` 数据库初始化逻辑,支持从旧版本自动通过 `ALTER TABLE` 补全缺失列(如 `expires_type`, `max_ips`, `curfew_*` 等),极大提升了版本升级的稳定性。 - - **[Docker 优化] 新增 ABV_DATA_DIR 环境变量支持**: - - **灵活挂载**: 允许用户通过环境变量显式指定数据存储目录。现在 Docker 用户可以更方便地挂载外部卷至自定义路径(如 `-e ABV_DATA_DIR=/app/data`),解决了默认隐藏目录权限及可见性问题。 - - **[核心功能] 更新检查器增强 (Update Checker 2.0) (PR #1494)**: - - **代理支持**: 更新检查器现在完全遵循全局上游代理配置,解决了在受限网络环境下无法获取更新的问题。 - - **多级降级策略**: 实现了 `GitHub API -> GitHub Raw -> jsDelivr` 的三层回退机制,极大提升了版本检测的成功率。 - - **来源可观测**: 更新提示中现在会显示检测源信息,方便排查连接问题。 - - **[核心优化] Antigravity 数据库格式兼容性改进 (>= 1.16.5)**: - - **智能版本检测**: 新增跨平台版本检测模块,支持自动识别 Antigravity 客户端版本(macOS/Windows/Linux)。 - - **新旧格式适配**: 适配了 1.16.5+ 版本的 `antigravityUnifiedStateSync.oauthToken` 新格式,并保持对旧版格式的向下兼容。 - - **注入策略增强**: 实现基于版本的智能注入策略,并在检测失败时提供双重格式注入的容错机制,确保账号切换成功。 - - **[核心修复] 解决 react-router SSR XSS 漏洞 (CVE-2026-21884) (PR #1500)**: - - **安全修复**: 升级 `react-router` 依赖至安全版本,修复了 `ScrollRestoration` 组件在服务端渲染 (SSR) 时可能造成的跨站脚本攻击 (XSS) 风险。 - - **[国际化] 完善日语翻译支持 (PR #1524)**: - - **改进**: 补全了代理池、流错误消息、User-Agent 等重要模块的日语本地化。 - * **v4.1.0 (2026-02-04)**: - - **[重大更新] 代理池 2.0 (Proxy Pool) 完全体与稳定性修复**: - - **账号级专属 IP 隔离**: 实现账号与代理的强绑定逻辑。一旦账号绑定专属代理,该 IP 
将自动从公共池隔离,杜绝跨账号关联风险。 - - **协议自动补全与兼容性**: 后端支持自动识别简写输入(如 `ip:port`),自动补全 `http://` 方案。 - - **智能健康检查加固**: 引入浏览器 User-Agent 伪装,解决 `google.com` 拦截问题;更换保底检查 URL 至 `cloudflare.com`。 - - **响应式状态同步**: 修复“先睡眠后检查”逻辑,实现启动即更新状态,消除 UI 显示超时的同步延迟。 - - **持久化 Bug 修复**: 彻底解决在高频率轮询下,后端旧状态可能回滚前端新增代理的竞态问题。 - - **代理池 2.0 运行机制解析**: - - **场景 1:账号全链路锁定** — 系统识别到账号 A 与 Node-01 的绑定关系后,其 Token 刷新、额度同步、AI 推理将全量强制走 Node-01。Google 侧始终捕获到该账号在单一稳定 IP 上操作。 - - **场景 2:公用池自动隔离** — 账号 B 无绑定。系统在扫描代理池时,会自动发现 Node-01 已被 A 专属占用并将其剔除,仅从剩余节点中轮询。确保不同账号 IP 绝不混用,零关联风险。 - - **场景 3:故障自愈与保底** — 若 Node-01 宕机且开启了“故障重试”,账号 A 会临时借用公共池节点完成 Token 刷新等紧急任务,并记录日志,确保服务不中断。 - - **[新功能] UserToken 页面导航与监控增强 (PR #1475)**: - - **页面导航**: 新增 UserToken 独立管理页面,支持更细粒度的用户令牌管理。 - - **监控增强**: 完善了系统监控和路由功能的集成,提升了系统的可观测性。 - - **[核心修复] Warmup 接口字段丢失修复**: - - **编译修复**: 修复了 `ProxyRequestLog` 初始化时缺失 `username` 字段导致的编译错误。 - - **[核心修复] Docker Warmup 401/502 错误修复 (PR #1479)**: - - **网络优化**: 在 Docker 环境下的 Warmup 请求中,使用了带 `.no_proxy()` 的客户端,防止 localhost 请求被错误路由到外部代理导致 502/401 错误。 - - **鉴权变更**: 豁免了 `/internal/*` 路径的鉴权,确保内部预热请求不会被拦截。 - - **[核心修复] Docker/Headless 环境调试与绑定问题修复**: - - **调试控制台**: 修复了 Docker 模式下日志模块未初始化的问题,并新增 HTTP API 映射,支持 Web 前端获取实时日志。 - - **指纹绑定**: 优化了设备指纹绑定逻辑,确保其在 Docker 容器环境下的兼容性并支持通过 API 完整调用。 - - **[核心修复] 账号删除缓存同步修复 (Issue #1477)**: - - **同步机制**: 引入了全局删除信号同步队列,确保账号在磁盘删除后即刻从内存缓存中剔除。 - - **彻底清理**: TokenManager 现在会同步清理已删除账号的令牌、健康分数、限流记录以及会话绑定,彻底解决“已删除账号仍被调度”的问题。 - - **[UI 优化] 更新通知本地化 (PR #1484)**: - - **国际化适配**: 彻底移除了更新提示框中的硬编码字符串,实现了对所有 12 种语言的完整支持。 - - **[UI 优化] 导航栏重构与响应式适配 (PR #1493)**: - - **组件解构**: 将单体 Navbar 拆分为更细粒度的模块化组件,提升代码可维护性。 - - **响应式增强**: 优化了布局断点及“刷新配额”按钮的响应式行为。 - * **v4.0.15 (2026-02-03)**: - - **[核心优化] 预热功能增强与误报修复 (PR #1466)**: - - **模式优化**: 移除硬编码模型白名单,支持对所有达到 100% 配额的模型自动触发预热。 - - **准确性修复**: 修复了预热状态的误报问题,确保仅在预热真正成功时记录历史。 - - **功能扩展**: 优化了预热请求的流量日志记录,并跳过不支持预热的 2.5 系列模型。 - - **[核心优化] 思考预算 (Thinking Budget) 全面国际化与优化**: - - **多语言适配**: 补全并优化了中、英、日、韩、俄、西、繁体、阿等多国语言的翻译,确保全球用户体验一致。 - - **UI 细节增强**: 优化了设置项的提示语(Auto Hint / 
Passthrough Warning),帮助用户更准确地配置模型思考深度。 - * **v4.0.14 (2026-02-02)**: - - **[核心修复] 解决 Web/Docker 部署下 API Key 随机变更问题 (Issue #1460)**: - - **问题修复**: 修复了在没有配置文件的情况下,每次刷新页面都会重新生成 API Key 的 Bug。 - - **逻辑优化**: 优化了配置加载流程,确保首次生成的随机 Key 被正确持久化;同时也确保了 Headless 模式下环境变量(如 `ABV_API_KEY`)的覆盖能够被前端正确获取。 - - **[核心功能] 可配置思考预算 (Thinking Budget) (PR #1456)**: - - **预算控制**: 在系统设置中新增了“思考预算”配置项。 - - **智能适配**: 支持为 Claude 3.7+ 和 Gemini 2.0 Flash Thinking 等模型自定义最大思考 token 限制。 - - **默认优化**: 默认值设置为智能适配模式,确保在大多数场景下既能获得完整思考过程,又能避免触发上游 budget 限制。 - * **v4.0.13 (2026-02-02)**: - - **[核心优化] 负载均衡算法升级 (P2C Algorithm) (PR #1433)**: - - **算法升级**: 将原有的 Round-Robin (轮询) 算法升级为 P2C (Power of Two Choices) 负载均衡算法。 - - **性能提升**: 在高并发场景下显著减少了请求等待时间,并优化了后端实例的负载分布,避免了单点过载。 - - **[UI 升级] 响应式导航栏与布局优化 (Responsive Navbar) (PR #1429)**: - - **移动端适配**: 全新设计的响应式导航栏,完美适配移动设备与小屏幕窗口。 - - **视觉增强**: 为导航项添加了直观的图标,提升了整体视觉体验与操作便捷性。 - - **[新功能] 账号配额可视化增强 (Show All Quotas) (PR #1429)**: - - **显示所有配额**: 在账号列表页新增“显示所有配额”开关。开启后可一览 Ultra/Pro/Free/Image 等所有维度的实时配额信息,不再仅显示首要配额。 - - **[国际化] 全面多语言支持完善 (Full i18n Update)**: - - **覆盖率提升**: 补全了繁体中文、日语、韩语、西班牙语、阿拉伯语等 10 种语言的缺失翻译键值。 - - **细节优化**: 修复了“显示所有配额”及 OAuth 授权流程中的提示语翻译缺失问题。 - - **[国际化] 后台任务翻译补全 (Translate Background Tasks) (PR #1421)**: - - **翻译修复**: 修复了后台任务(如标题生成)的相关文本缺少翻译的问题,现在支持所有语言的本地化显示。 - - **归因**: 修复了合并代码时引入的 `ref` 冲突导致移动端/桌面端点击判定异常。 - - **结果**: 语言切换菜单现在可以正常打开和交互。 - - **[Docker/Web 修复] Web 端支持 IP 管理 (IP Security for Web)**: - - **功能补全**: 修复了在 Docker 或 Web 模式下,IP 安全管理功能(日志、黑白名单)因后端路由缺失而无法使用的问题。 - - **API 实现**: 实现了完整的 RESTful 管理接口,确保 Web 前端能正常调用底层安全模块。 - - **体验强化**: 优化了删除操作的参数传递逻辑,解决了部分浏览器下删除黑白名单失灵的问题。 - * **v4.0.12 (2026-02-01)**: - - **[代码重构] 连接器服务优化 (Refactor Connector Service)**: - - **深度优化**: 重写了连接器服务 (`connector.rs`) 的核心逻辑,消除了历史遗留的低效代码。 - - **性能提升**: 优化了连接建立与处理流程,提升了系统的整体稳定性与响应速度。 - * **v4.0.11 (2026-01-31)**: - - **[核心修复] 调整 API 端点顺序与自动阻断 (Fix 403 VALIDATION_REQUIRED)**: - - **端点顺序优化**: 将 Google API 的请求顺序调整为 `Sandbox -> Daily -> Prod`。优先使用宽松环境,从源头减少 403 错误的发生。 - -
**智能阻断机制**: 当检测到 `VALIDATION_REQUIRED` (403) 错误时,系统会自动将该账号标记为“临时阻断”状态并持续 10 分钟。期间请求会自动跳过该账号,避免无效重试导致账号被进一步风控。 - - **自动恢复**: 阻断期过后,系统会自动尝试恢复该账号的使用。 - - **[核心修复] 账号状态热重载 (Account Hot-Reload)**: - - **架构统一**: 消除了系统中并存的多个 `TokenManager` 实例,实现了管理后台与反代服务共享单例账号管理器。 - - **实时生效**: 修复了手动启用/禁用账号、账号重排序及批量操作后需要重启应用才能生效的问题。现在所有账号变更都会立即同步至内存账号池。 - - **[核心修复] 配额保护逻辑优化 (PR #1344 补丁)**: - - 进一步优化了配额保护逻辑中对“已禁用”状态与“配额保护”状态的区分逻辑,确保日志记录准确且状态同步实时。 - - **[核心修复] 恢复健康检查接口 (PR #1364)**: - - **路由恢复**: 修复了在 4.0.0 架构迁移中遗失的 `/health` 和 `/healthz` 路由。 - - **响应增强**: 接口现在会返回包含 `"status": "ok"` 和当前应用版本号的 JSON,方便监控系统进行版本匹配和存活检查。 - - **[核心修复] 修复 Gemini Flash 模型思考预算超限 (Fix PR #1355)**: - - **自动限额**: 修复了在 Gemini Flash 思考模型(如 `gemini-2.0-flash-thinking`)中,默认或上游传入的 `thinking_budget` (例如 32k) 超过模型上限 (24k) 导致 API 报错 `400 Bad Request` 的问题。 - - **多协议覆盖**: 此防护已扩展至 **OpenAI、Claude 和原生 Gemini 协议**,全方位拦截不安全的预算配置。 - - **智能截断**: 系统现在会自动检测 Flash 系列模型,并强制将思考预算限制在安全范围内 (**24,576**),确保请求始终成功,无需用户手动调整客户端配置。 - - **[核心功能] IP 安全与风控系统 (IP Security & Management) (PR #1369 by @大黄)**: - - **可视化工单管理**: 全新的“安全监控”模块,支持图形化管理 IP 黑名单与白名单。 - - **智能封禁策略**: 实现了基于 CIDR 的网段封禁、自动释放时间设置及封禁原因备注功能。 - - **实时访问日志**: 集成了 IP 维度的实时访问日志审计,支持按 IP、时间范围筛选,方便快速定位异常流量。 - - **[UI 优化] 极致的视觉体验**: - - **弹窗美化**: 全面升级了 IP 安全模块的所有弹窗按钮样式,采用实心色块与阴影设计,操作引导更清晰。 - - **布局修复**: 修复了安全配置页面的滚动条异常与布局错位,优化了标签页切换体验。 - - **[核心功能] 调试控制台 (Debug Console) (PR #1385)**: - - **实时日志流**: 引入了全功能的调试控制台,支持实时捕获并展示后端业务日志。 - - **过滤与搜索**: 支持按日志级别(Info, Debug, Warn, Error)过滤及关键词全局搜索。 - - **交互优化**: 支持一键清空日志、自动滚动开关,并完整适配深色/浅色主题。 - - **后端桥接**: 实现了高性能的日志桥接器,确保日志捕获不影响反代性能。 - * **v4.0.9 (2026-01-30)**: - - **[核心功能] User-Agent 自定义与版本欺骗 (PR #1325)**: - - **动态覆盖**: 支持在“服务配置”中自定义上游请求的 `User-Agent` 头部。这允许用户模拟任意客户端版本(如 Cheat 模式),有效绕过部分地区的版本封锁或风控限制。 - - **智能回退**: 实现了“远程抓取 -> Cargo 版本 -> 硬编码”的三级版本号获取机制。当主版本 API 不可用时,系统会自动解析官网 Changelog 页面获取最新版本号,确保 UA 始终伪装成最新版客户端。 - - **热更新支持**: 修改 UA 配置后即刻生效,无需重启服务。 - - **[核心修复] 解决配额保护状态同步缺陷 (Issue #1344)**: - - **状态实时同步**: 修复了 `check_and_protect_quota()` 
函数在处理禁用账号时提前退出的逻辑缺陷。现在即便账号被禁用,系统仍会扫描并实时更新其 `protected_models`(模型级保护列表),确保配额不足的账号在重新启用后不会绕过保护机制继续被使用。 - - **日志路径分离**: 将手动禁用检查从配额保护函数中剥离至调用方,根据不同的跳过原因(手动禁用/配额保护)记录准确的日志,消除用户困惑。 - - **[核心功能] 缓存管理与一键清理 (PR #1346)**: - - **后端集成**: 新增了 `src-tauri/src/modules/cache.rs` 模块,用于计算和管理应用运行期间产生的各类临时文件分布(如翻译缓存、日志指纹等)。 - - **UI 实现**: 在“系统设置”页面新增了“清理缓存”功能。用户可以实时查看缓存占用的空间大小,并支持一键清理,有效解决长期使用后的磁盘占用问题。 - - **[国际化] 新增语言支持 (PR #1346)**: - - 新增了 **西班牙语 (es)** 和 **马来语 (my)** 的完整翻译支持,进一步扩大了应用的全球适用范围。 - - **[国际化] 全语言覆盖**: - - 为新功能补全了 En, Zh, Zh-TW, Ar, Ja, Ko, Pt, Ru, Tr, Vi 等 10 种语言的完整翻译支持。 - - **[国际化] 完善 UI 字符串本地化 (PR #1350)**: - - **全面覆盖**: 补充了 UI 中剩余的硬编码字符串及未翻译项,实现了界面字符串的完全本地化。 - - **清理冗余**: 删除了代码中所有的英文回退 (English fallbacks),强制所有组件通过 i18n 键调用语言包。 - - **语言增强**: 显著提升了日语 (ja) 等语言的翻译准确度,并确保了新 UI 组件在多语言环境下的显示一致性。 - * **v4.0.8 (2026-01-30)**: - - **[核心功能] 记忆窗口位置与大小 (PR #1322)**: 自动恢复上次关闭时的窗口坐标与尺寸,提升使用体验。 - - **[核心修复] 优雅关闭 Admin Server (PR #1323)**: 修复了 Windows 环境下退出后再次启动时,端口 8045 占用导致的绑定失败问题。 - - **[核心功能] 实现全链路调试日志功能 (PR #1308)**: - - **后端集成**: 引入了 `debug_logger.rs`,支持捕获并记录 OpenAI、Claude 及 Gemini 处理器的原始请求、转换后报文及完整流式响应。 - - **动态配置**: 支持热加载日志配置,无需重启服务即可启用/禁用或修改输出目录。 - - **前端交互**: 在“高级设置”中新增“调试日志”开关及自定义输出目录选择器,方便开发者排查协议转换与上游通信问题。 - - **[UI 优化] 优化图表工具提示 (Tooltip) 浮动显示逻辑 (Issue #1263, PR #1307)**: - - **溢出防御**: 优化了 `TokenStats.tsx` 中的 Tooltip 定位算法,确保在小窗口或高缩放比例下,悬浮提示信息始终在可视区域内显示,防止被窗口边界遮挡。 - - **[核心优化] 鲁棒性增强:动态 User-Agent 版本获取及多级回退 (PR #1316)**: - - **动态版本获取**: 支持从远程端点实时拉取版本号,确保 UA 信息的实时性与准确性。 - - **稳健回退链**: 引入“远程端点 -> Cargo.toml -> 硬编码”的三级版本回退机制,极大提升了初始化阶段的鲁棒性。 - - **预编译优化**: 使用 `LazyLock` 预编译正则表达式解析版本号,提升运行效率并降低内存抖动。 - - **可观测性提升**: 添加了结构化日志记录及 VersionSource 枚举,方便开发者追踪版本来源及潜在的获取故障。 - - **[核心修复] 解决 Gemini CLI "Response stopped due to malformed function call." 
错误 (PR #1312)**: - - **参数字段对齐**: 将工具声明中的 `parametersJsonSchema` 重命名为 `parameters`,确保与 Gemini 最新 API 规范完全对齐。 - - **参数对齐引擎增强**: 移除了多余的参数包装层,使参数传递更加透明和直接。 - - **容错校验**: 增强了对工具调用响应的鲁棒性,有效防止因参数结构不匹配导致的输出中断。 - - **[核心修复] 解决 Docker/Headless 模式下端口显示为 'undefined' 的问题 (Issue #1305)**: 修复了管理 API `/api/proxy/status` 缺少 `port` 字段且 `base_url` 构造错误的问题,确保前端能正确显示监听地址。 - - **[核心修复] 解决 Docker/Headless 模式下 Web 密码绕过问题 (Issue #1309)**: - - **默认鉴权增强**: 将 `auth_mode` 默认值改为 `auto`。在 Docker 或允许局域网访问的环境下,系统现在会自动激活身份验证,确保 `WEB_PASSWORD` 生效。 - - **环境变量支持**: 新增 `ABV_AUTH_MODE` 和 `AUTH_MODE` 环境变量,允许用户在启动时显式覆盖鉴权模式(支持 `off`, `strict`, `all_except_health`, `auto`)。 - * **v4.0.7 (2026-01-29)**: - - **[性能优化] 优化 Docker 构建流程 (Fix Issue #1271)**: - - **原生架构构建**: 将 AMD64 和 ARM64 的构建任务拆分为独立 Job 并行执行,并移除 QEMU 模拟层,转而使用各架构原生的 GitHub Runner。此举将跨平台构建耗时从 3 小时大幅缩减至 10 分钟以内。 - - - **[性能优化] 解决 Docker 版本在大数据量下的卡顿与崩溃问题 (Fix Issue #1269)**: - - **异步数据库操作**: 将流量日志、Token 统计等所有耗时数据库查询迁移至后台阻塞线程池 (`spawn_blocking`),彻底解决了在查看大型日志文件(800MB+)时可能导致的 UI 卡死及反代服务不可用的问题。 - - **监控逻辑平滑化**: 优化了监控状态切换逻辑,移除冗余的重复启动记录,提升了 Docker 环境下的运行稳定性。 - - **[核心修复] 解决 OpenAI 协议 400 Invalid Argument 错误 (Fix Issue #1267)**: - - **移除激进默认值**: 回滚了 v4.0.6 中为 OpenAI/Claude 协议引入的默认 `maxOutputTokens: 81920` 设置。该值超过了许多旧模型(如 `gemini-3-pro-preview` 或原生 Claude 3.5)的硬性限制,导致请求被直接拒绝。 - - **智能思维配置**: 优化了思维模型检测逻辑,仅对以 `-thinking` 结尾的模型自动注入 `thinkingConfig`,避免了对不支持该参数的标准模型(如 `gemini-3-pro`)产生副作用。 - - **[兼容性修复] 修复 OpenAI Codex (v0.92.0) 调用错误 (Fix Issue #1278)**: - - **字段清洗**: 自动过滤 Codex 客户端在工具定义中注入的非标准 `external_web_access` 字段,消除了 Gemini API 返回的 400 Invalid Argument 错误. 
- - **容错增强**: 增加了对工具 `name` 字段的强制校验。当客户端发送缺失名称的无效工具定义时,代理层现在会自动跳过并记录警告,而不是直接让请求失败。 - - **[核心功能] 自适应熔断器 (Adaptive Circuit Breaker)**: - - **模型级隔离**: 实现了基于 `account_id:model` 的复合 Key 限流追踪,确保单一模型的配额耗尽不会导致整个账号被锁定。 - - **动态退避策略**: 支持用户自定义 `[60, 300, 1800, 7200]` 等多级退避阶梯,自动根据失败次数增加锁定时间。 - - **配置热更新**: 配合 `TokenManager` 内存缓存,实现配置修改后反代服务即刻生效,无需重启。 - - **管理 UI 集成**: 在 API 反代页面新增了完整的控制面板,支持一键开关及手动清除限流记录。 - - **[核心优化] 完善日志清理与冗余压制 (Fix Issue #1280)**: - - **自动空间回收**: 引入基于体积的清理机制,当日志目录超过 1GB 时自动触发清理,并将占用降至 512MB 以内。相比原有的按天清理,能从根本上防止因日志爆发导致的磁盘撑爆问题。 - - **高频日志瘦身**: 将 OpenAI 处理器报文详情、TokenManager 账号池轮询等高频产生的日志级别从 INFO 降级为 DEBUG。现在 INFO 级别仅保留简洁的请求摘要。 - * **v4.0.6 (2026-01-28)**: - - **[核心修复] 彻底解决 Google OAuth "Account already exists" 错误**: - - **持久化升级**: 将授权成功后的保存逻辑从“仅新增”升级为 `upsert` (更新或新增) 模式。现在重新授权已存在的账号会平滑更新其 Token 和项目信息,不再弹出报错。 - - **[核心修复] 修复 Docker/Web 模式下手动回填授权码失效问题**: - - **Flow 状态预初始化**: 在 Web 模式生成授权链接时,后端会同步初始化 OAuth Flow 状态。这确保了在 Docker 等无法自动跳转的环境下,手动复制回填授权码或 URL 能够被后端正确识别并处理。 - - **[体验优化] 统一 Web 与桌面端的 OAuth 持久化路径**: 重构了 `TokenManager`,确保所有平台共用同一套健壮的账号核验与存储逻辑。 - - **[性能优化] 优化限流恢复机制 (PR #1247)**: - - **自动清理频率**: 将限流记录的后台自动清理间隔从 60 秒缩短至 15 秒,大幅提升了触发 429 或 503 错误后的业务恢复速度。 - - **智能同步清理**: 优化了单个或全部账号刷新逻辑,确保刷新账号的同时即刻清除本地限流锁定,使最新配额能立即投入使用。 - - **渐进式容量退避**: 针对 `ModelCapacityExhausted` 错误(如 503),将原有的固定 15 秒重试等待优化为 `[5s, 10s, 15s]` 阶梯式策略,显著减少了偶发性容量波动的等待时间。 - - **[核心修复] 窗口标题栏深色模式适配 (PR #1253)**: 修复了在系统切换为深色模式时,应用标题栏(Titlebar)未能同步切换配色,导致视觉不统一的问题。 - - **[核心修复] 提升 Opus 4.5 默认输出上限 (Fix Issue #1244)**: - - **突破限制**: 将 Claude 和 OpenAI 协议的默认 `max_tokens` 从 16k 提升至 **81,920** (80k)。 - - **解决截断**: 彻底解决了 Opus 4.5 等模型在开启思维模式时,因默认 Budget 限制导致输出被锁定在 48k 左右的截断问题。现在无需任何配置即可享受完整的长文本输出能力。 - - **[核心修复] 修复账号删除后的幽灵数据问题 (Ghost Account Fix)**: - - **同步重载**: 修复了账号文件被删除后,反代服务的内存缓存未同步更新,导致已删账号仍参与轮询的严重 Bug。 - - **即时生效**: 现在单删或批量删除账号后,会强制触发反代服务重载,确保内存中的账号列表与磁盘实时一致。 - - **[核心修复] Cloudflared 隧道启动问题修复 (Fix PR #1238)**: - - **启动崩溃修复**: 移除了不支持的命令行参数 (`--no-autoupdate` / `--loglevel`),解决了 cloudflared 进程启动即退出的问题。 - 
- **URL 解析修正**: 修正了命名隧道 URL 提取时的字符串偏移量错误,确保生成的访问链接格式正确。 - - **Windows 体验优化**: 为 Windows 平台添加了 `DETACHED_PROCESS` 标志,实现了隧道的完全静默后台运行,消除了弹窗干扰。 - * **v4.0.5 (2026-01-28)**: - - **[核心修复] 彻底解决 Docker/Web 模式 Google OAuth 400 错误 (Google OAuth Fix)**: - - **协议对齐**: 强制所有模式(包括 Docker/Web)使用 `localhost` 作为 OAuth 重定向 URI,绕过了 Google 对私网 IP 和非 HTTPS 环境的拦截策略。 - - **流程优化**: 配合已有的“手动授权码回填”功能,确保即使在远程服务器部署环境下,用户也能顺利完成 Google 账号的授权与添加。 - - **[功能增强] 新增阿拉伯语支持与 RTL 布局适配 (PR #1220)**: - - **国际化拓展**: 新增完整的阿拉伯语 (`ar`) 翻译支持。 - - **RTL 布局**: 实现了自动检测并适配从右向左 (Right-to-Left) 的 UI 布局。 - - **排版优化**: 引入了 Effra 字体家族,显著提升了阿拉伯语文本的可读性与美观度。 - - **[功能增强] 手动清除限流记录 (Clear Rate Limit Records)**: - - **管理 UI 集成**: 在“代理设置 -> 账号轮换与会话调度”区域新增了“清除限流记录”按钮,支持桌面端与 Web 端调用,允许用户手动清除所有账号的本地限流锁(429/503 记录)。 - - **账号列表联动**: 实现了配额与限流的智能同步。现在刷新账号额度(单个或全部)时,会自动清除本地限流状态,确保最新的额度信息能立即生效。 - - **后端核心逻辑**: 在 `RateLimitTracker` 和 `TokenManager` 中底层实现了手动与自动触发的清除逻辑,确保高并发下的状态一致性。 - - **API 支持**: 新增了对应的 Tauri 命令与 Admin API (`DELETE /api/proxy/rate-limits`),方便开发者进行编程化管理与集成。 - - **强制重试**: 配合清除操作,可强制下一次请求忽略之前的退避时间,直接尝试连接上游,帮助在网络恢复后快速恢复业务。 - * **v4.0.4 (2026-01-27)**: - - **[功能增强] 深度集成 Gemini 图像生成与多协议支持 (PR #1203)**: - - **OpenAI 兼容性增强**: 支持通过标准 OpenAI Images API (`/v1/images/generate`) 调用 Gemini 3 图像模型,支持 `size`、`quality` 等参数。 - - **多协议集成**: 增强了 Claude 和 OpenAI Chat 接口,支持直接传递图片生成参数,并实现了自动宽高比计算与 4K/2K 质量映射。 - - **文档补全**: 新增 `docs/gemini-3-image-guide.md`,提供完整的 Gemini 图像生成集成指南。 - - **稳定性优化**: 优化了通用工具函数 (`common_utils.rs`) 和 Gemini/OpenAI 映射逻辑,确保大尺寸 Payload 传输稳定。 - - **[核心修复] 对齐 OpenAI 重试与限流逻辑 (PR #1204)**: - - **逻辑对齐**: 重构了 OpenAI 处理器的重试、限流及账号轮换逻辑,使其与 Claude 处理器保持一致,显著提升了高并发下的稳定性。 - - **热重载优化**: 确保 OpenAI 请求在触发 429 或 503 错误时能精准执行退避策略并自动切换可用账号。 - - **[核心修复] 修复 Web OAuth 账号持久化问题 (Web Persistence Fix)**: - - **索引修复**: 解决了在 Web 管理界面通过 OAuth 添加的账号虽然文件已生成,但未同步更新到全局账号索引 (`accounts.json`),导致重启后或桌面端无法识别的问题。 - - **锁机制统一**: 重构了 `TokenManager` 的保存逻辑,复用了 `modules::account` 的核心方法,确保了文件锁与索引更新的原子性。 - - **[核心修复] 解决 Google OAuth 非 Localhost 回调限制 (Fix Issue 
#1186)**: - - **问题背景**: Google 不支持在 OAuth 流程中使用非 localhost 私网 IP 作为回调地址,即便注入 `device_id` 也会报“不安全的应用版本”警告。 - - **解决方案**: 引入了标准化的“手动 OAuth 提交”流程。当浏览器无法自动回调至本地(如远程部署或非 Localhost 环境)时,用户可直接复制回调链接或授权码至应用内完成授权。 - - **体验增强**: 重构了手动提交界面,集成了全语言国际化支持(9 国语言)与 UI 优化,确保在任何网络环境下都能顺利添加账号。 - - **[核心修复] 解决 Google Cloud Code API 429 错误 (Fix Issue #1176)**: - - **智能降级**: 默认将 API 流量迁移至更稳定的 Daily/Sandbox 环境,避开生产环境 (`cloudcode-pa.googleapis.com`) 当前频繁的 429 错误。 - - **稳健性提升**: 实现了 Sandbox -> Daily -> Prod 的三级降级策略,确保主业务流程在极端网络环境下的高可用性。 - - **[核心优化] 账号调度算法升级 (Algorithm Upgrade)**: - - **健康评分系统 (Health Score)**: 引入了 0.0 到 1.0 的实时健康分机制。请求失败(如 429/5xx)将显著扣分,使受损账号自动降级;成功请求则逐步回升,实现账号状态的智能自愈。 - - **三级智能排序**: 调度优先级重构为 `订阅等级 > 剩余配额 > 健康分`。确保在同等级、同配额情况下,始终优先通过历史表现最稳定的账号。 - - **微延迟 (Throttle Delay)**: 针对极端限流场景,当所有账号均被封锁且有账号在 2 秒内即将恢复时,系统将自动执行毫秒级挂起等待而非直接报错。极大提升了高并发下的成功率,并增强了会话粘性。 - - **全量接口适配**: 重构了 `TokenManager` 核心接口,并完成了全量处理器(Claude, Gemini, OpenAI, Audio, Warmup)的同步适配,确保调度层变更对业务层透明。 - - **[核心修复] 固定账号模式持久化 (PR #1209)**: - - **问题背景**: 之前版本在重启服务后,固定账号模式(Fixed Account Mode)的开关状态会被重置。 - - **修复内容**: 实现了设置的持久化存储,确保用户偏好在重启后依然生效。 - - **[核心修复] 速率限制毫秒级解析 (PR #1210)**: - - **问题背景**: 部分上游服务返回的 `Retry-After` 或速率限制头部包含带小数点的毫秒值,导致解析失败。 - - **修复内容**: 增强了时间解析逻辑,支持兼容浮点数格式的时间字段,提高了对非标准上游的兼容性。 - * **v4.0.3 (2026-01-27)**: - - **[功能增强] 提高请求体限制以支持大体积图片 Payload (PR #1167)**: - - 将默认请求体大小限制从 2MB 提升至 **100MB**,解决多图并发传输时的 413 (Payload Too Large) 错误。 - - 新增环境变量 `ABV_MAX_BODY_SIZE`,支持用户根据需求动态调整最大限制。 - - 服务启动时自动输出当前生效的 Body Limit 日志,便于排查。 - - **[核心修复] 解决 Google OAuth 'state' 参数缺失导致的授权失败 (Issue #1168)**: - - 修复了添加 Google 账号时可能出现的 "Agent execution terminated" 错误。 - - 实现了随机 `state` 参数的生成与回调验证,增强了 OAuth 流程的安全性和兼容性。 - - 确保在桌面端和 Web 模式下的授权流程均符合 OAuth 2.0 标准。 - - **[核心修复] 解决 Docker/Web 模式下代理开关及账号变动需重启生效的问题 (Issue #1166)**: - - 实现了代理开关状态的持久化存储,确保容器重启后状态保持一致。 - - 在账号增删、切换、重排及导入后自动触发 Token 管理器热加载,使变更立即在反代服务中生效。 - - 优化了账号切换逻辑,自动清除旧会话绑定,确保请求立即路由到新账号。 - * **v4.0.2 (2026-01-26)**: - - **[核心修复] 解决开启“访问授权”导致的重复认证与 401 循环 (Fix Issue #1163)**: 
- - 修正了后端鉴权中间件逻辑,确保在鉴权关闭模式(Off/Auto)下管理接口不再强制拦截。 - - 增强了健康检查路径 (`/api/health`) 的免鉴权豁免,避免 UI 加载初期因状态检测失败触发登录。 - - 在前端请求层引入了 401 异常频率限制(防抖锁),彻底解决了大批量请求失败导致的 UI 弹窗抖动。 - - **[核心修复] 解决切换账号后会话无法持久化保存 (Fix Issue #1159)**: - - 增强了数据库注入逻辑,在切换账号时同步更新身份标识(Email)并清除旧的 UserID 缓存。 - - 解决了因 Token 与身份标识不匹配导致客户端无法正确关联或保存新会话的问题。 - - **[核心修复] Docker/Web 模式下模型映射持久化 (Fix Issue #1149)**: - - 修复了在 Docker 或 Web 部署模式下,管理员通过 API 修改的模型映射配置(Model Mapping)无法保存到硬盘的问题。 - - 确保 `admin_update_model_mapping` 接口正确调用持久化逻辑,配置在重启容器后依然生效。 - - **[架构优化] MCP 工具支持架构全面升级 (Schema Cleaning & Tool Adapters)**: - - **约束语义回填 (Constraint Hints)**: - - 实现了智能约束迁移机制,在删除 Gemini 不支持的约束字段(`minLength`, `pattern`, `format` 等)前,自动将其转化为描述提示。 - - 新增 `CONSTRAINT_FIELDS` 常量和 `move_constraints_to_description` 函数,确保模型能通过描述理解原始约束。 - - 示例: `{"minLength": 5}` → `{"description": "[Constraint: minLen: 5]"}` - - **anyOf/oneOf 智能扁平化增强**: - - 重写 `extract_best_schema_from_union` 函数,使用评分机制选择最佳类型(object > array > scalar)。 - - 在合并后自动添加 `"Accepts: type1 | type2"` 提示到描述中,保留所有可能类型的信息。 - - 新增 `get_schema_type_name` 函数,支持显式类型和结构推断。 - - **插件化工具适配器层 (Tool Adapter System)**: - - 创建 `ToolAdapter` trait,为不同 MCP 工具提供定制化 Schema 处理能力。 - - 实现 `PencilAdapter`,自动为 Pencil 绘图工具的视觉属性(`cornerRadius`, `strokeWidth`)和路径参数添加说明。 - - 建立全局适配器注册表,支持通过 `clean_json_schema_for_tool` 函数应用工具特定优化。 - - **高性能缓存层 (Schema Cache)**: - - 实现基于 SHA-256 哈希的 Schema 缓存机制,避免重复清洗相同的 Schema。 - - 采用 LRU 淘汰策略,最大缓存 1000 条,内存占用 < 10MB。 - - 提供 `clean_json_schema_cached` 函数和缓存统计功能,预计性能提升 60%+。 - - **影响范围**: - - ✅ 显著提升 MCP 工具(如 Pencil)的 Schema 兼容性和模型理解能力 - - ✅ 为未来添加更多 MCP 工具(filesystem, database 等)奠定了插件化基础 - - ✅ 完全向后兼容,所有 25 项测试通过 - - **[安全增强] Web UI 管理后台密码与 API Key 分离 (Fix Issue #1139)**: - - **独立密码配置**: 支持通过 `ABV_WEB_PASSWORD` 或 `WEB_PASSWORD` 环境变量设置独立的管理后台登录密码。 - - **智能鉴权逻辑**: - - 管理接口优先验证独立密码,未设置时自动回退验证 `API_KEY`(确保向后兼容)。 - - AI 代理接口严格仅允许使用 `API_KEY` 进行认证,实现权限隔离。 - - **配置 UI 支持**: 在“仪表盘-服务配置”中新增管理密码编辑项,支持一键找回或修改。 - - **日志引导**: Headless 模式启动时会清晰打印 API Key 与 Web UI Password 的状态及查看方式。 - * 
**v4.0.1 (2026-01-26)**: - - **[UX 优化] 主题与语言切换平滑度**: - - 解决了主题和语言切换时的 UI 卡顿问题,将配置持久化逻辑与状态更新解耦。 - - 优化了导航栏中的 View Transition API 使用,确保视觉更新不阻塞操作。 - - 将窗口背景同步调用改为异步,避免 React 渲染延迟。 - - **[核心修复] 反代服务启动死锁**: - - 修复了启动反代服务时会阻塞状态轮询请求的竞态/死锁问题。 - - 引入了原子启动标志和非阻塞状态检查,确保 UI 在服务初始化期间保持响应。 - * **v4.0.0 (2026-01-25)**: - - **[重大架构] 深度迁移至 Tauri v2 (Tauri v2 Migration)**: - - 全面适配 Tauri v2 核心 API,包括系统托盘、窗口管理与事件系统。 - - 解决了多个异步 Trait 动态派发与生命周期冲突问题,后端性能与稳定性显著提升。 - - **[部署革新] 原生 Headless Docker 模式 (Native Headless Docker)**: - - 实现了“纯后端”Docker 镜像,彻底移除了对 VNC、noVNC 或 XVFB 的依赖,大幅降低内存与 CPU 占用。 - - 支持直接托管前端静态资源,容器启动后即可通过浏览器远程管理。 - - **[部署修复] Arch Linux 安装脚本修复 (PR #1108)**: - - 修复了 `deploy/arch/PKGBUILD.template` 中硬编码 `data.tar.zst` 导致的提取失败问题。 - - 实现了基于通配符的动态压缩格式识别,确保兼容不同版本的 `.deb` 包。 - - **[管理升级] 全功能 Web 管理界面 (Web-based Console)**: - - 重写了管理后台,使所有核心功能(账号管理、API 反代监控、OAuth 授权、模型映射)均可在浏览器端完成。 - - 补全了 Web 模式下的 OAuth 回调处理,支持 `ABV_PUBLIC_URL` 自定义,完美适配远程 VPS 或 NAS 部署场景。 - - **[项目规范化] 结构清理与单元化 (Project Normalization)**: - - 清理了冗余的 `deploy` 目录及其旧版脚本,项目结构更加现代。 - - 规范化 Docker 镜像名称为 `antigravity-manager`,并整合专属的 `docker/` 目录与部署手册。 - - **[API 增强] 流量日志与监控优化**: - - 优化了流量日志的实时监控体验,补全了 Web 模式下的轮询机制与统计接口。 - - 精确化管理 API 路由占位符命名,提升了 API 的调用精确度。 - - **[用户体验] 监控页面布局与深色模式优化 (PR #1105)**: - - **布局重构**: 优化了流量日志页面的容器布局,采用固定最大宽度与响应式边距,解决了在大屏显示器下的内容过度拉伸问题,视觉体验更加舒适。 - - **深色模式一致性**: 将日志详情弹窗的配色方案从硬编码的 Slate 色系迁移至 Base 主题色系,确保与全局深色模式风格无缝统一,提升了视觉一致性。 - - **[用户体验] 自动更新体验优化**: - - **智能降级**: 修复了当原生更新包未就绪(如 Draft Release)时点击更新无反应的问题。现在系统会自动检测并提示用户,同时优雅降级至浏览器下载模式,确保持续可更新。 - - **[核心修复] 深度优化 Signature Cache 与 Rewind 检测 (PR #1094)**: - - **400 错误自愈**: 增强了思考块签名的清洗逻辑。系统现在能自动识别因服务器重启导致的“无主签名”,并在发送给上游前主动将其剥离,从根本上杜绝了由此引发了 `400 Invalid signature` 报错。 - - **Rewind (回退) 检测机制**: 升级缓存层,引入消息计数(Message Count)校验。当用户回退对话历史并重新发送时,系统会自动重置签名状态,确保对话流的合法性。 - - **全链路适配**: 优化了 Claude、Gemini 及 z.ai (Anthropic) 的数据链路,确保消息计数在流式与非流式请求中均能精准传播。 - - **[OpenAI 鲁棒性增强] 优化重试策略与模型级限流 (PR #1093)**: - - **鲁棒重试**: 强制最小 2 
次请求尝试,确保单账号模式下也能有效应对瞬时网络抖动;移除了配额耗尽的硬中断,允许自动轮换账号。 - - **模型级限流**: 引入模型级限流隔离,避免单个模型限流锁定整个账号,确保账号下其他模型可用。 - - **接口修复**: 修复了 TokenManager 异步接口的 Email/ID 混用漏洞,确保限流记录准确。 - - **[系统鲁棒性] 统一重试与退避调度中心 (Unified Retry & Backoff Hub)**: - - **逻辑归一化**: 将散落在各协议处理器中的重试逻辑抽象至 `common.rs`,实现全局统一调度。 - - **强制退避延迟**: 彻底修复了原先逻辑中解析不到 `Retry-After` 就立即重试导致封号的问题。现在所有处理器在重试前必须通过共享模块执行物理等待,有效保护 IP 信誉。 - - **激进参数调整**: 针对 Google/Anthropic 频率限制,将 429 和 503 的初始退避时间显著上调至 **5s-10s**,大幅降低生产环境风控风险。 - - **[CLI 同步优化] 解决 Token 冲突与模型配置清理 (PR #1054)**: - - **自动冲突解决**: 在设置 `ANTHROPIC_API_KEY` 时自动移除冲突的 `ANTHROPIC_AUTH_TOKEN`,解决 Claude CLI 同步报错问题。 - - **环境变量清理**: 同步时自动移除 `ANTHROPIC_MODEL` 等可能干扰模型输出的环境变量,确保 CLI 使用标准模型。 - - **配置健壮性**: 优化了 API Key 为空时的处理方式,避免无效配置干扰。 - - **[核心优化] 用量缩放功能默认关闭与联动机制 (Usage Scaling Default Off)**: - - **默认关闭**: 基于用户反馈,将"启用用量缩放"功能从默认开启改为默认关闭,回归透明模式。 - - **联动机制**: 建立了缩放与自动压缩 (L1/L2/L3) 的联动关系。只有当用户主动开启缩放时,才同步激活自动压缩逻辑。 - - **解决痛点**: 修复了用户反馈的"缩放致盲"问题 - 默认模式下客户端能看到真实 Token 用量,在接近 200k 时触发原生 `/compact` 提示,避免死锁。 - - **功能定位**: 将缩放+压缩重新定义为"激进扩容模式",仅供处理超大型项目时手动开启,提升系统稳定性与可预测性。 - - **⚠️ 升级提醒**: 从旧版本升级的用户,建议在"设置 → 实验性功能"中手动关闭"启用用量缩放",以获得更稳定透明的体验。 - - **[协议优化] 全协议自动流式转换 (Auto-Stream Conversion)**: - - **全链路覆盖**: 对 OpenAI (Chat/Legacy/Codex) 和 Gemini 协议实现了强制内部流式化转换。即使客户端请求非流式 (`stream: false`),后端也会自动建立流式连接与上游通信,极大提升了连接稳定性和配额利用率。 - - **智能聚合**: 实现了高性能的流式聚合器,在兼容旧版客户端的同时,还能在后台实时捕获 Thinking 签名,有效解决了非流式请求下签名丢失导致后续工具调用失败的问题。 - - **[核心修复] 错误日志元数据补全 (Log Metadata Fix)**: - - **问题背景**: 之前版本在 429/503 等严重错误(如账号耗尽)发生时,日志记录中遗漏了 `mapped_model` 和 `account_email` 字段,导致无法定位出错的具体模型和账号。 - - **修复内容**: 在 OpenAI 和 Claude 协议的所有错误退出路径(包括 Token 获取失败、转换异常、重试耗尽)中强制注入了元数据 Header。现在即使请求失败,流量日志也能准确显示目标模型和上下文信息,极大提升了排查效率。 - - - * **v4.0.0 (2026-01-25)**: - - **[核心功能] 后台任务模型可配置 (Background Model Configuration)**: - - **功能增强**: 允许用户自定义“后台任务”(如标题生成、摘要压缩)使用的模型。不再强制绑定 `gemini-2.5-flash`。 - - **UI 更新**: 在“模型映射”页面新增了“后台任务模型”配置项,支持从下拉菜单中选择任意可用模型(如 `gemini-3-flash`)。 - - **路由修复**: 修复了后台任务可能绕过用户自定义映射的问题。现在 `internal-background-task` 
会严格遵循用户的重定向规则。 - - **[重要通告] 上游模型容量预警 (Capacity Warning)**: - - **容量不足**: 接获大量反馈,上游 Google 的 `gemini-2.5-flash` 和 `gemini-2.5-flash-lite` 模型当前正处于极度容量受限状态 (Rate Limited / Capacity Exhausted)。 - - **建议操作**: 为保证服务可用性,建议用户暂时在“自定义映射”中将上述两个模型重定向至其他模型(如 `gemini-3-flash` 或 `gemini-3-pro-high`),直到上游恢复。 - - **[核心修复] Windows 启动参数支持 (PR #973)**: - - **问题修复**: 修复了 Windows 平台下启动参数(如内网穿透配置等)无法正确解析生效的问题。感谢 @Mag1cFall 的贡献。 - - **[核心修复] Claude 签名校验增强 (PR #1009)**: - - **功能优化**: 增强了 Claude 模型的签名校验逻辑,修复了在长对话或复杂工具调用场景下可能出现的 400 错误。 - - **兼容性提升**: 引入最小签名长度校验,并对合法长度的未知签名采取信任策略,大幅提升了 JSON 工具调用的稳定性。 - - **[国际化] 越南语翻译优化 (PR #1017)**: - - **翻译精简**: 对关于页面等区域的越南语翻译进行了精简与标点优化。 - - **[国际化] 土耳其语托盘翻译增强 (PR #1023)**: - - **功能优化**: 为系统托盘菜单增加了完整的土耳其语翻译支持,提升了土耳其语用户的操作体验。 - - **[功能增强] 多语言支持与 I18n 设置 (PR #1029)**: - - **新增语言支持**: 增加了葡萄牙语、日语、越南语、土耳其语、俄语等多国语言的更完整支持。 - - **I18n 设置面板**: 在设置页面新增了语言选择器,支持即时切换应用显示语言。 - - **[国际化] 韩语支持与界面优化 (New)**: - - **韩语集成**: 新增了完整的韩语 (`ko`) 翻译支持,现在可以在设置中选择韩语界面。 - - **UI 交互升级**: 重构了顶部导航栏的语言切换器,由原来的单次点击循环切换升级为更直观的下拉菜单,展示语言缩写与全称,提升了多语言环境下的操作体验。 - * **v3.3.49 (2026-01-22)**: - - **[核心修复] Thinking 后中断与 0 Token 防御 (Fix Thinking Interruption)**: - - **问题背景**: 针对 Gemini 等模型在输出 Thinking 内容后流意外中断,导致 Claude 客户端收到 0 Token 响应并报错死锁的问题。 - - **防御机制**: - - **状态追踪**: 实时监测流式响应中是否“只想未说”(已发送 Thinking 但未发送 Content)。 - - **自动兜底**: 当检测到此类中断时,系统会自动闭合 Thinking 块,注入系统提示信息,并模拟正常的 Usage 数据,确保客户端能优雅结束会话。 - - **[核心修复] 移除 Flash Lite 模型以修复 429 错误 (Fix 429 Errors)**: - - **问题背景**: 今日监测发现 `gemini-2.5-flash-lite` 频繁出现 429 错误,具体原因为 **上游 Google 容器容量耗尽 (MODEL_CAPACITY_EXHAUSTED)**,而非通常的账号配额不足。 - - **紧急修复**: 将所有系统内部默认的 `gemini-2.5-flash-lite` 调用(如后台标题生成、L3 摘要压缩)及预设映射全部替换为更稳定的 `gemini-2.5-flash`。 - - **用户提醒**: 如果您在“自定义映射”或“预设”中手动使用了 `gemini-2.5-flash-lite`,请务必修改为其他模型,否则可能会持续遇到 429 错误。 - - **[性能优化] 设置项即时生效 (Fix PR #949)**: - - **即时生效**: 修复了语言切换需要手动点击保存的问题。现在修改语言设置会立即应用到整个 UI。 - - **[代码清理] 后端架构重构与优化 (PR #950)**: - - **架构精简**: 深度重构了代理层的 Mapper 和 Handler 逻辑,移除了冗余模块(如 `openai/collector.rs`),显著提升了代码的可维护性。 - - **稳定性增强**: 
优化了 OpenAI 与 Claude 协议的转换链路,统一了图片配置解析逻辑,并加固了上下文管理器的健壮性。 - - **[核心修复] 设置项同步策略更新**: - - **状态同步**: 修正了主题切换的即时应用逻辑,并解决了 `App.tsx` 与 `Settings.tsx` 之间的状态冲突,确保配置加载过程中的 UI 一致性。 - - **[核心优化] 上下文压缩与 Token 节省**: - - **由于 Claude CLI 在恢复历史记录时会发送大量上下文,现已将压缩阈值改为可配置并降低默认值。** - - **L3 摘要重置阈值由 90% 降至 70%,在 token 堆积过多前提前进行压缩节省额度。** - - **前端 UI 增强:在实验性设置中新增 L1/L2/L3 压缩阈值滑块,支持动态自定义。** - - **[功能增强] API 监控看板功能升级 (PR #951)**: - - **账号筛选**: 新增按账号筛选流量日志的功能,支持在大流量环境下精准追踪特定账号的调用情况。 - - **详情深度增强**: 监控详情页现在可以完整显示请求协议(OpenAI/Anthropic/Gemini)、使用账号、映射后的物理模型等关键元数据。 - - **UI 与国际化**: 优化了监控详情的布局,并补全了 8 种语言的相关翻译。 - - **[JSON Schema 优化] 递归收集 $defs 并完善回退处理 (PR #953)**: - - **递归收集**: 添加了 `collect_all_defs()` 以递归方式从所有模式层级收集 `$defs`/`definitions`,解决了嵌套定义丢失的问题。 - - **引用平坦化**: 始终运行 `flatten_refs()` 以捕获并处理孤立的 `$ref` 字段。 - - **回退机制**: 为未解析的 `$ref` 添加了回退逻辑,将其转换为带有描述性提示的字符串类型。 - - **稳定性增强**: 新增了针对嵌套定义和未解析引用的测试用例,确保 Schema 处理的健壮性。 - - **[核心修复] 账号索引保护 (Fix Issue #929)**: - - **安全加固**: 移除了加载失败时的自动删除逻辑,防止在升级或环境异常时意外丢失账号索引,确保用户数据安全。 - - **[核心优化] 路由器与模型映射深度优化 (PR #954)**: - - **路由器确定性优先级**: 修复了路由器在处理多通配符模式时的不确定性问题,实现了基于模式长度和复杂度的确定性匹配优先级。 - - - **[稳定性增强] OAuth 回调与解析优化 (Fix #931, #850, #778)**: - - **鲁棒解析**: 优化了本地回调服务器的 URL 解析逻辑,不再依赖单一分割符,提升了不同浏览器下的兼容性。 - - **调试增强**: 增加了原始请求 (Raw Request) 记录功能,当授权失败时可直接在日志中查看原始数据,方便定位网络拦截问题。 - - **[网络优化] OAuth 通信质量提升 (Issue #948, #887)**: - - **延时保障**: 将授权请求超时时间延长至 60 秒,大幅提升了在代理环境下的 Token 交换成功率。 - - **错误指引**: 针对 Google API 连接超时或重置的情况,新增了明确的中文代理设置建议,降低排查门槛。 - - **[体验优化] 上游代理配置校验与提示增强 (Contributed by @zhiqianzheng)**: - - **配置校验**: 当用户启用上游代理但未填写代理地址时,保存操作将被阻止并显示明确的错误提示,避免无效配置导致的连接失败。 - - **重启提醒**: 成功保存代理配置后,系统会提示用户需要重启应用才能使配置生效,降低用户排查成本。 - - **多语言支持**: 新增简体中文、繁体中文、英文、日语的相关翻译。 - - * **v3.3.48 (2026-01-21)**: - - **[核心修复] Windows 控制台闪烁问题 (Fix PR #933)**: - - **问题背景**: Windows 平台在启动或执行后台命令时,偶尔会弹出短暂的 CMD 窗口,影响用户体验。 - - **修复内容**: 在 `cloudflared` 进程创建逻辑中添加 `CREATE_NO_WINDOW` 标志,确保所有后台进程静默运行。 - - **影响范围**: 解决了 Windows 用户在启动应用或 CLI 交互时的窗口闪烁问题。 - * **v3.3.47 (2026-01-21)**: - - **[核心修复] 图片生成 API 
参数映射增强 (Fix Issue #911)**: - - **功能**: 支持从 OpenAI 参数 (`size`, `quality`) 解析配置,支持动态宽高比计算,`quality: hd` 自动映射为 4K 分辨率。 - - **影响**: 显著提升 Images API 兼容性,OpenAI 与 Claude 协议均受支持。 - - **[功能增强] Cloudflared 内网穿透支持 (PR #923)**: - - **核心功能**: 集成 `cloudflared` 隧道支持,允许用户在无公网 IP 或处于复杂内网环境下,通过 Cloudflare 隧道一键发布 API 服务。 - - **易用性优化**: 前端新增 Cloudflared 配置界面,支持状态监控、日志查看及一键开关隧道。 - - **国际化补全**: 补全了繁体中文、英文、日文、韩文、越南语、土耳其语、俄语等 8 国语言的 Cloudflared 相关翻译。 - - **[核心修复] 解决 Git 合并冲突导致的启动失败**: - - **修复内容**: 解决了 `src-tauri/src/proxy/handlers/claude.rs` 中因多进程并行合并产生的 `<<<<<<< HEAD` 冲突标记。 - - **影响范围**: 恢复了后端服务的编译能力,修复了应用启动即崩溃的问题。 - - **[核心优化] 三层渐进式上下文压缩 (3-Layer Progressive Context PCC)**: - - **背景**: 长对话场景下频繁触发 "Prompt is too long" 错误,手动 `/compact` 操作繁琐,且现有压缩策略会破坏 LLM 的 KV Cache,导致成本飙升 - - **解决方案 - 多层渐进式压缩策略**: - - **Layer 1 (60% 压力)**: 工具消息智能裁剪 - - 删除旧的工具调用/结果消息,保留最近 5 轮交互 - - **完全不破坏 KV Cache**(只删除消息,不修改内容) - - 压缩率:60-90% - - **Layer 2 (75% 压力)**: Thinking 内容压缩 + 签名保留 - - 压缩 `assistant` 消息中的 Thinking 块文本内容(替换为 "...") - - **完整保留 `signature` 字段**,解决 Issue #902(签名丢失导致 400 错误) - - 保护最近 4 条消息不被压缩 - - 压缩率:70-95% - - **Layer 3 (90% 压力)**: Fork 会话 + XML 摘要 - - 使用 `gemini-2.5-flash-lite` 生成 8 节 XML 结构化摘要(成本极低) - - 提取并保留最后一个有效 Thinking 签名 - - 创建新的消息序列:`[User: XML摘要] + [Assistant: 确认] + [用户最新消息]` - - **完全不破坏 Prompt Cache**(前缀稳定,只追加) - - 压缩率:86-97% - - **技术实现**: - - **新增模块**: `context_manager.rs` 中实现 Token 估算、工具裁剪、Thinking 压缩、签名提取等核心功能 - - **辅助函数**: `call_gemini_sync()` - 可复用的同步上游调用函数 - - **XML 摘要模板**: 8 节结构化摘要(目标、技术栈、文件状态、代码变更、调试历史、计划、偏好、签名) - - **渐进式触发**: 按压力等级自动触发,每次压缩后重新估算 Token 用量 - - **成本优化**: - - Layer 1: 完全无成本(不破坏缓存) - - Layer 2: 低成本(仅破坏部分缓存) - - Layer 3: 极低成本(摘要生成使用 flash-lite,新会话完全缓存友好) - - **综合节省**: 86-97% Token 成本,同时保持签名链完整性 - - **用户体验**: - - 自动化:无需手动 `/compact`,系统自动处理 - - 透明化:详细日志记录每层压缩的触发和效果 - - 容错性:Layer 3 失败时返回友好错误提示 - - **影响范围**: 解决长对话场景下的上下文管理问题,显著降低 API 成本,确保工具调用链完整性 - - **[核心优化] 上下文估算与缩放算法增强 (PR #925)**: - - **背景**: 在 Claude Code 等长对话场景下,固定的 Token 估算算法(3.5 
字符/token)在中英文混排时误差极大,导致三层压缩逻辑无法及时触发,最终仍会报 "Prompt is too long" 错误 - - **解决方案 - 动态校准 + 多语言感知**: - - **多语言感知估算**: - - **ASCII/英文**: 约为 4 字符/Token(针对代码和英文文档优化) - - **Unicode/CJK (中日韩)**: 约为 1.5 字符/Token(针对 Gemini/Claude 分词特点) - - **安全余量**: 在计算结果基础上额外增加 15% 的安全冗余 - - **动态校准器 (`estimation_calibrator.rs`)**: - - **自学习机制**: 记录每次请求的"估算 Token 数"与 Google API 返回的"实际 Token 数" - - **校准因子**: 使用指数移动平均 (EMA, 60% 旧比例 + 40% 新比例) 维护校准系数 - - **保守初始化**: 初始校准系数为 2.0,确保系统运行初期极其保守地触发压缩 - - **自动收敛**: 根据实际数据自动修正,使估算值越来越接近真实值 - - **整合三层压缩框架**: - - 在所有估算环节(初始估算、Layer 1/2/3 后重新估算)使用校准后的 Token 数 - - 每层压缩后记录详细的校准因子日志,便于调试和监控 - - **技术实现**: - - **新增模块**: `estimation_calibrator.rs` - 全局单例校准器,线程安全 - - **修改文件**: `claude.rs`, `streaming.rs`, `context_manager.rs` - - **校准数据流**: 流式响应收集器 → 提取真实 Token 数 → 更新校准器 → 下次请求使用新系数 - - **用户体验**: - - **透明化**: 日志中显示原始估算值、校准后估算值、校准因子,便于理解系统行为 - - **自适应**: 系统会根据用户的实际使用模式(中英文比例、代码量等)自动调整 - - **精准触发**: 压缩逻辑基于更准确的估算值,大幅降低"漏判"和"误判"概率 - - **影响范围**: 显著提升上下文管理的精准度,解决 Issue #902 和 #867 中反馈的自动压缩失效问题,确保长对话稳定性 - - **[关键修复] Thinking 签名恢复逻辑优化**: - - **背景**: 在重试场景下,签名检查逻辑未检查 Session Cache,导致错误禁用 Thinking 模式,产生 0 token 请求和响应失败 - - **问题表现**: - - 重试时显示 "No valid signature found for function calls. 
Disabling thinking" - - 流量日志显示 `I: 0, O: 0` (实际请求成功但 Token 未记录) - - 客户端可能无法接收到响应内容 - - **修复内容**: - - **扩展签名检查范围**: `has_valid_signature_for_function_calls()` 现在检查 Session Cache - - **检查优先级**: Global Store → **Session Cache (新增)** → Message History - - **详细日志**: 添加签名来源追踪日志,便于调试 - - **技术实现**: - - 修改 `request.rs` 中的签名验证逻辑 - - 新增 `session_id` 参数传递到签名检查函数 - - 添加 `[Signature-Check]` 系列日志用于追踪签名恢复过程 - - **影响**: 解决重试场景下的 Thinking 模式降级问题,确保 Token 统计准确性,提升长会话稳定性 - - **[核心修复] 通用参数对齐引擎 (Universal Parameter Alignment Engine)**: - - **背景**: 解决 Gemini API 在调用工具(Tool Use)时因参数类型不匹配产生的 `400 Bad Request` 错误。 - - **修复内容**: - - **实现参数对齐引擎**: 在 `json_schema.rs` 中实现 `fix_tool_call_args`,基于 JSON Schema 自动将字符串类型的数字/布尔值转换为目标类型,并处理非法字段。 - - **多协议重构**: 同步重构了 OpenAI 和 Claude 协议层,移除了硬编码的工具参数修正逻辑,改用统一的对齐引擎。 - - **解决问题**: 修复了 `local_shell_call`、`apply_patch` 等工具在多级反代或特定客户端下参数被错误格式化为字符串导致的异常。 - - **影响**: 显著提升了工具调用的稳定性,减少了上游 API 的 400 错误。 - - **[功能增强] 画图模型配额保护支持 (Fix Issue #912)**: - - **问题背景**: 用户反馈画图模型(G3 Image)没有配额保护功能,导致配额耗尽的账号仍被用于画图请求 - - **修复内容**: - - **后端配置**: 在 `config.rs` 的 `default_monitored_models()` 中添加 `gemini-3-pro-image`,与智能预热和配额关注列表保持一致 - - **前端 UI**: 在 `QuotaProtection.tsx` 中添加画图模型选项,调整布局为一行4个模型(与智能预热保持一致) - - **影响范围**: - - ✅ 向后兼容:已有配置不受影响,新用户或重置配置后会自动包含画图模型 - - ✅ 完整保护:现在所有4个核心模型(Gemini 3 Flash、Gemini 3 Pro High、Claude 4.5 Sonnet、Gemini 3 Pro Image)都受配额保护监控 - - ✅ 自动触发:当画图模型配额低于阈值时,账号会自动加入保护列表,避免继续消耗 - - **[传输层优化] 流式响应防缓冲优化 (Streaming Response Anti-Buffering)**: - - **背景**: 在 Nginx 等反向代理后部署时,流式响应可能被代理缓冲,导致客户端延迟增加 - - **修复内容**: - - **添加 X-Accel-Buffering Header**: 在所有流式响应中注入 `X-Accel-Buffering: no` 头部 - - **多协议覆盖**: Claude (`/v1/messages`)、OpenAI (`/v1/chat/completions`) 和 Gemini 原生协议全部支持 - - **技术细节**: - - 修改文件: `claude.rs:L877`, `openai.rs:L314`, `gemini.rs:L240` - - 该 Header 告诉 Nginx 等反向代理不要缓冲流式响应,直接透传给客户端 - - **影响**: 显著降低反向代理场景下的流式响应延迟,提升用户体验 - - **[错误恢复增强] 多协议签名错误自愈提示词 (Multi-Protocol Signature Error Recovery)**: - - **背景**: 当 Thinking 模式下出现签名错误时,仅剔除签名可能导致模型生成空响应或简单的 "OK" - - 
**修复内容**: - - **Claude 协议增强**: 在现有签名错误重试逻辑中追加修复提示词,引导模型重新生成完整响应 - - **OpenAI 协议实现**: 新增 400 签名错误检测和修复提示词注入逻辑 - - **Gemini 协议实现**: 新增 400 签名错误检测和修复提示词注入逻辑 - - **修复提示词**: - ``` - [System Recovery] Your previous output contained an invalid signature. - Please regenerate the response without the corrupted signature block. - ``` - - **技术细节**: - - Claude: `claude.rs:L1012-1030` - 增强现有逻辑,支持 String 和 Array 消息格式 - - OpenAI: `openai.rs:L391-427` - 完整实现,使用 `OpenAIContentBlock::Text` 类型 - - Gemini: `gemini.rs:L17, L299-329` - 修改函数签名支持可变 body,注入修复提示词 - - **影响**: - - ✅ 提升错误恢复成功率:模型收到明确指令,避免生成无意义响应 - - ✅ 多协议一致性:所有 3 个协议具有相同的错误恢复能力 - - ✅ 用户体验改善:减少因签名错误导致的对话中断 - * **v3.3.46 (2026-01-20)**: - - **[功能增强] Token 使用统计 (Token Stats) 深度优化与国际化标准化 (PR #892)**: - - **UI/UX 统一**: 实现了自定义 Tooltip 组件,统一了面积图、柱状图和饼图的悬浮提示样式,增强了深色模式下的对比度与可读性。 - - **视觉细节磨砂**: 优化了图表光标和网格线,移除冗余的 hover 高亮,使图表界面更加清爽专业。 - - **自适应布局**: 改进了图表容器的 Flex 布局,确保在不同窗口尺寸下均能填充满垂直空间,消除了图表下方的留白。 - - **分账号趋势统计**: 新增了“按账号查看”模式,支持通过饼图和趋势图直观分析各账号的 Token 消耗占比与活跃度。 - - **国际化 (i18n) 标准化**: 解决了 `ja.json`、`zh-TW.json`、`vi.json`、`ru.json`、`tr.json` 等多国语言文件中的键值重复警告。补全了 `account_trend`、`by_model` 等缺失翻译,确保 8 种语言下的 UI 展现高度一致。 - - **[核心修复] 移除 [DONE] 停止序列以防止输出截断 (PR #889)**: - - **问题背景**: `[DONE]` 是 SSE (Server-Sent Events) 协议的标准结束标记,在代码和文档中经常出现。将其作为 `stopSequence` 会导致模型在解释 SSE 相关内容时输出被意外截断。 - - **修复内容**: 从 Gemini 请求的 `stopSequences` 数组中移除了 `"[DONE]"` 标记。 - - **技术说明**: - - Gemini 流的真正结束由 `finishReason` 字段控制,无需依赖 `stopSequence` - - SSE 层面的 `"data: [DONE]"` 已在 `mod.rs` 中单独处理 - - **影响范围**: 解决了模型在生成包含 SSE 协议说明、代码示例等内容时被提前终止的问题 (Issue #888)。 - - **[部署优化] Docker 镜像构建双模适配 (Default/China Mode)**: - - **双模架构**: 引入 `ARG USE_CHINA_MIRROR` 构建参数。默认模式保持原汁原味的 Debian 官方源(适合海外/云构建);开启后自动切换为清华大学 (TUNA) 镜像源(适合国内环境)。 - - **灵活性大幅提升**: 解决了硬编码国内源导致海外构建缓慢的问题,同时保留了国内用户的加速体验。 - - **[稳定性修复] VNC 与容器启动逻辑加固 (PR #881)**: - - **僵尸进程清理**: 优化了 `start.sh` 中的 cleanup 逻辑,改用 `pkill` 精准查杀 Xtigervnc 和 websockify 进程,并清理 `/tmp/.X11-unix` 锁文件,解决了重启后 VNC 无法连接的各种边缘情况。 - - **健康检查升级**: 将 
Healthcheck 检查项扩展到 websockify 和主程序,确保容器状态更真实地反映服务可用性。 - - **重大修复**: 修复了 OpenAI 协议请求返回 404 的问题,并解决了 Codex (`/v1/responses`) 接收复杂对象数组 `input` 或 `apply_patch` 等自定义工具(缺失 Schema)时导致上游返回 400 (`INVALID_ARGUMENT`) 的兼容性缺陷。 - - **思维模型优化**: 解决了 Claude 3.7 Thinking 模型在历史消息缺失思维链时强制报错的问题,实现了智能协议降级与占位块注入。 - - **协议补全**: 补全了 OpenAI Legacy 接口的 Token 统计响应与 Header 注入,支持 `input_text` 类型内容块,并将 `developer` 角色适配为系统指令。 - - **requestId 统一**: 统一所有 OpenAI 路径下的 `requestId` 前缀为 `agent-`,解决部分客户端的 ID 识别问题。 - - **[核心修复] JSON Schema 数组递归清理修复 (解决 Gemini API 400 错误)**: - - **问题背景**: Gemini API 不支持 `propertyNames`、`const` 等 JSON Schema 字段。虽然已有白名单过滤逻辑,但由于 `clean_json_schema_recursive` 函数缺少对 `Value::Array` 类型的递归处理,导致嵌套在 `anyOf`、`oneOf` 或 `items` 数组内部的非法字段无法被清除,触发 `Invalid JSON payload received. Unknown name "propertyNames"/"const"` 错误。 - - **修复内容**: - - **增加 anyOf/oneOf 合并前的递归清洗**: 在合并 `anyOf`/`oneOf` 分支之前,先递归清洗每个分支内部的内容,确保合并的分支已被清理,防止非法字段在合并过程中逃逸。 - - **增加通用数组递归处理分支**: 为 `match` 语句增加 `Value::Array` 分支,确保所有数组类型的值(包括 `items`、`enum` 等)都会被递归清理,覆盖所有可能包含 Schema 定义的数组字段。 - - **测试验证**: 新增 3 个测试用例验证修复效果,所有 14 个测试全部通过,无回归。 - - **影响范围**: 解决了复杂工具定义(如 MCP 工具)中嵌套数组结构导致的 400 错误,确保 Gemini API 调用 100% 兼容。 - * **v3.3.45 (2026-01-19)**: - - **[核心功能] 解决 Claude/Gemini SSE 中断与 0-token 响应问题 (Issue #859)**: - - **增强型预读 (Peek) 逻辑**: 在向客户端发送 200 OK 响应前,代理现在会循环预读并跳过所有心跳包(SSE ping)及空数据块,确认收到有效业务内容后再建立连接。 - - **智能重试触发**: 若在预读阶段检测到空响应、超时(60s)或流异常中断,系统将自动触发账号轮换和重试机制,解决了长延迟模型下的静默失败。 - - **协议一致性增强**: 为 Gemini 协议补齐了缺失的预读逻辑;同时将 Claude 心跳间隔优化为 30s,减少了生成长文本时的连接干扰。 - - **[核心功能] 固定账号模式集成 (PR #842)**: - - **后端增强**: 在代理核心中引入了 `preferred_account_id` 支持,允许通过 API 或 UI 强制锁定特定账号进行请求调度。 - - **UI 交互更新**: 在 API 反代页面新增“固定账号”切换与账号选择器,支持实时锁定当前会话的出口账号。 - - **调度优化**: 在“固定账号模式”下优先级高于传统轮询,确保特定业务场景下的会话连续性。 - - **[国际化] 全语言翻译补全与清理**: - - **8 语言覆盖**: 补全了中、英、繁中、日、土、越、葡、俄等 8 种语言中关于“固定账号模式”的所有 i18n 翻译项。 - - **冗余清理**: 修复了 `ja.json` 和 `vi.json` 中由于历史 PR 累积导致的重复键(Duplicate Keys)警告,提升了翻译规范性。 - - **标点同步**: 统一清除了各语言翻译中误用的全角标点,确保 UI 展示的一致性。 - - **[核心功能] 客户端热更新与 Token 
统计系统 (PR #846 by @lengjingxu)**: - - **热更新 (Native Updater)**: 集成 Tauri v2 原生更新插件,支持自动检测、下载、安装及重启,实现客户端无感升级。 - - **Token 消费可视化**: 新增基于 SQLite 实现的 Token 统计持久化模块,支持按小时/日/周维度查看总消耗及各账号占比。 - - **UI/UX 增强**: 优化了图表悬浮提示 (Tooltip) 在深色模式下的对比度,隐藏了冗余的 hover 高亮;补全了 8 语言完整翻译并修复了硬编码图例。 - - **集成修复**: 在本地合并期间修复了 PR 原始代码中缺失插件配置导致的启动崩溃故障。 - - **[系统加速] 启用清华大学 (TUNA) 镜像源**: 优化了 Dockerfile 构建流程,大幅提升国内环境下的插件安装速度。 - - **[部署优化] 官方 Docker 与 noVNC 支持 (PR #851)**: - - **全功能容器化**: 为 headless 环境提供完整的 Docker 部署方案,内置 Openbox 桌面环境。 - - **Web VNC 集成**: 集成 noVNC,支持通过浏览器直接访问图形界面进行 OAuth 授权(内置 Firefox ESR)。 - - **自愈启动流**: 优化了 `start.sh` 启动逻辑,支持自动清理 X11 锁文件及服务崩溃自动退出,提升生产环境稳定性。 - - **多语言适配**: 内置 CJK 字体,确保 Docker 环境下中文字符正常显示。 - - **资源限制优化**: 统一设置 `shm_size: 2gb`,解决容器内浏览器及图形界面崩溃问题。 - - **[核心功能] 修复账号切换时的设备指纹同步问题**: - - **路径探测改进**: 优化了 `storage.json` 的探测时机,确保在进程关闭前准确获取路径,兼容自定义数据目录。 - - **自动隔离生成**: 针对未绑定指纹的账号,在切换时会自动生成并绑定唯一的设备标识,实现账号间的指纹隔离。 - - **[UI 修复] 修复账号管理页条数显示不准确问题 (Issue #754)**: - - **逻辑修正**: 强制分页条数默认最低为 10 条,解决了小窗口下自动变为 5 条或 9 条的不直觉体验。 - - **持久化增强**: 实现了分页大小的 `localStorage` 持久化,用户手动选择的条数将永久锁定并覆盖自动模式。 - - **UI 一致性**: 确保右下角分页选项与列表实际展示条数始终保持一致。 - * **v3.3.44 (2026-01-19)**: - - **[核心稳定性] 动态思维剥离 (Dynamic Thinking Stripping) - 解决 Prompt 过长与签名错误**: - - **问题背景**: 在 Deep Thinking 模式下,长对话会导致两类致命错误: - - `Prompt is too long`: 历史 Thinking Block 累积导致 Token 超限 - - `Invalid signature`: 代理重启后内存签名缓存丢失,旧签名被 Google 拒收 - - **解决方案 - Context Purification (上下文净化)**: - - **新增 `ContextManager` 模块**: 实现 Token 估算与历史清洗逻辑 - - **分级清洗策略**: - - `Soft` (60%+ 压力): 保留最近 2 轮 Thinking,剥离更早历史 - - `Aggressive` (90%+ 压力): 移除所有历史 Thinking Block - - **差异化限额**: Flash 模型 (1M) 与 Pro 模型 (2M) 采用不同触发阈值 - - **签名同步清除**: 清洗 Thinking 时自动移除 `thought_signature`,避免签名校验失败 - - **透明度增强**: 响应头新增 `X-Context-Purified: true` 标识,便于调试 - - **性能优化**: 基于字符数的轻量级 Token 估算,对请求延迟影响 \u003c 5ms - - **影响范围**: 解决 Deep Thinking 模式下的两大顽疾,释放 40%-60% Context 空间,确保长对话稳定性 - * **v3.3.43 (2026-01-18)**: - - **[国际化] 设备指纹对话框全量本地化 (PR #825, 感谢 @IamAshrafee)**: - - 解决了设备指纹(Device 
Fingerprint)对话框中残留的硬编码中文字符串问题。 - - 补全了英、繁、日等 8 种语言的翻译骨架,提升全球化体验。 - - **[日语优化] 日语翻译补全与术语修正 (PR #822, 感谢 @Koshikai)**: - - 补全了 50 多个缺失的翻译键,覆盖配额保护、HTTP API、更新检查等核心设置。 - - 优化了技术术语,使日语表达更自然(例如:`pro_low` 译为“低消費”)。 - - **[翻译修复] 越南语拼写错误修正 (PR #798, 感谢 @vietnhatthai)**: - - 修复了越南语设置中 `refresh_msg` 的拼写错误(`hiện đài` -> `hiện tại`)。 - - **[兼容性增强] 新增 Google API Key 原生支持 (PR #831)**: - - **支持 `x-goog-api-key` 请求头**: - - 认证中间件现在支持识别 `x-goog-api-key` 头部。 - - 提高了与 Google 官方 SDK 及第三方 Google 风格客户端的兼容性,无需再手动修改 Header 为 `x-api-key`。 - * **v3.3.42 (2026-01-18)**: - - **[流量日志增强] 协议自动识别与流式响应整合 (PR #814)**: - - **协议标签分类**: 流量日志列表现在可以根据 URI 自动识别并标注协议类型(OpenAI 绿色、Anthropic 橙色、Gemini 蓝色),使请求来源一目了然。 - - **流式数据全整合**: 解决了流式响应在日志中仅显示 `[Stream Data]` 的问题。现在会自动拦截并聚合流式数据包,将分散的 `delta` 片段还原为完整的回复内容和“思考”过程,大幅提升调试效率。 - - **多语言适配**: 补全了流量日志相关功能在 8 种语言环境下的 i18n 翻译。 - - **[重大修复] Gemini JSON Schema 清洗策略深度重构 (Issue #815)**: - - **解决属性丢失问题**: 实现了“最佳分支合并”逻辑。在处理工具定义的 `anyOf`/`oneOf` 结构时,会自动识别并提取内容最丰富的分支属性向上合并,解决了模型报错 `malformed function call` 的顽疾。 - - **稳健的白名单机制**: 采用针对 Gemini API 的严格白名单过滤策略,剔除不支持的校验字段,确保 API 调用 100% 兼容(从根本上杜绝 400 错误)。 - - **约束信息迁移 (Description Hints)**: 在移除 `minLength`, `pattern`, `format` 等字段前,自动将其转为文字描述追加到 `description` 中,确保模型依然能感知参数约束。 - - **Schema 上下文检测锁**: 新增安全检查逻辑,确保清洗器仅在处理真正的 Schema 时执行。通过“精准锁”保护了 `request.rs` 中的工具调用结构,确保历史修复逻辑(如布尔值转换、Shell 数组转换)在重构后依然稳如磐石。 - * **v3.3.41 (2026-01-18)**: - - **Claude 协议核心兼容性修复 (Issue #813)**: - - **连续 User 消息合并**: 实现了 `merge_consecutive_messages` 逻辑,在请求进入 Proxy 时自动合并具有相同角色的连续消息流。解决了因 Spec/Plan 模式切换导致的角色交替违规产生的 400 Bad Request 错误。 - - **EnterPlanMode 协议对齐**: 针对 Claude Code 的 `EnterPlanMode` 工具调用,强制清空冗余参数,确保完全符合官方协议,解决了激活 Plan Mode 时的指令集校验失败问题。 - - **代理鲁棒性增强**: - - 增强了工具调用链的自愈能力。当模型因幻觉产生错误路径尝试时,Proxy 现能提供标准的错误反馈引导模型转向正确路径。 - * **v3.3.40 (2026-01-18)**: - - **API 400 错误深度修复 (Grep/Thinking 稳定性改进)**: - - **修复流式块顺序违规**: 解决了 "Found 'text' instead of 'thinking'" 400 错误。修正了 `streaming.rs` 中在文字块后非法追加思维块的逻辑,改由缓存机制实现静默同步。 - - **思维签名自愈增强**: 在 `claude.rs` 中扩展了 400 
错误捕获关键词,覆盖了签名失效、顺序违规和协议不匹配场景。一旦触发,代理会自动执行消息降级并快速重试,实现用户无感知的异常自愈。 - - **搜索工具参数深度对齐**: 修正了 `Grep` 和 `Glob` 工具的参数映射逻辑,将 `query` 准确映射为 `path` (Claude Code Schema),并支持默认注入执行路径 `.`。 - - **工具名重映射策略优化**: 改进了重命名逻辑,仅针对 `search` 等模型幻觉进行修正,避免破坏原始工具调用签名。 - - **签名缺失自动补完**: 针对 LS、Bash、TodoWrite 等工具调用缺失 `thought_signature` 的情况,自动注入通用校验占位符,确保协议链路畅通。 - - **架构健壮性优化**: - - 增强了全局递归清理函数 `clean_cache_control_from_messages`,确保 `cache_control` 不会干扰 Vertex AI/Anthropic 严格模式。 - - 完善了错误日志系统,建立了详细的场景对照表并记录于 [docs/client_test_examples.md](docs/client_test_examples.md)。 - * **v3.3.39 (2026-01-17)**: - - **代理深度优化 (Gemini 稳定性增强)**: - - **Schema 净化器升级**:支持 `allOf` 合并、智能联合类型选择、Nullable 自动过滤及空对象参数补全,解决复杂工具定义导致的 400 错误。 - - **搜索工具自愈**:实现 `Search` 到 `grep` 的自动重映射,并引入 **Glob-to-Include 迁移**(自动将 `**/*.rs` 等 Glob 模式移至包含参数),解决 Claude Code `Error searching files` 报错。 - - **参数别名补全**:统一 `search_code_definitions` 等相关工具的参数映射逻辑,并强制执行布尔值类型转换。 - - **Shell 调用加固**:强制 `local_shell_call` 的 `command` 参数返回数组,增强与 Google API 的兼容性。 - - **动态 Token 约束**:自动根据 `thinking_budget` 调整 `maxOutputTokens`,确保满足 API 强约束;精简停止序列 (Stop Sequences) 以提升流式输出质量。 - - **Thinking 模式稳定性大幅提升**: - - 引入跨模型家族签名校验,自动识别并降级不兼容的思维链签名,防止 400 Bad Request 错误。 - - 增强“会话自愈 (Session Healing)”逻辑,支持自动补全被中断的工具循环,确保满足 Google/Vertex AI 的严苛结构要求。 - - **高可用性增强**: - - 优化自动端点降级 (Endpoint Fallback) 逻辑,在 429 或 5xx 错误时更平滑地切换至备用 API 端点。 - - **修复 macOS "Too many open files" 错误 (Issue #784)**: - - 引入全局共享 HTTP 客户端连接池,大幅减少 Socket 句柄占用。 - - 针对 macOS 系统自动提升文件描述符限制 (RLIMIT_NOFILE) 至 4096,增强高并发稳定性。 - * **v3.3.38 (2026-01-17)**: - - **CLI 同步增强与探测修复 (Fix CLI-Sync Detection)**: - - **探测路径扩展**: 优化了二进制检测逻辑。新增对 `~/.local/bin` (curl 安装常用路径)、`~/.npm-global/bin` 以及 `~/bin` 的扫描。 - - **nvm 多版本支持**: 引入对 `nvm` 目录的深度扫描,支持自动识别不同 Node.js 版本下安装的 CLI 工具,解决 M1 芯片用户手动安装检测不到的问题。 - - **原子化文件操作**: 采用临时文件写入 + 原子替换机制,确保同步过程中断不会损坏原始配置文件。 - - **Thinking Signature 深度修复与会话自愈 (Fix Issue #752)**: - - **鲁棒重试逻辑**: 修正了重试计次逻辑,确保单账号用户在遇到签名错误时也能触发内部重试,提高了自动修复的触发率。 - - **主动签名剥离**: 引入 
`is_retry`状态,在重试请求中强制剥离所有历史签名。配合严苛的模型家族校验(Gemini 1.5/2.0 不再混用签名),杜绝了无效签名导致的 400 错误。 - - **会话自愈 (Session Healing)**: 针对剥离签名后可能出现的“裸工具结果”结构错误,实现了智能消息注入机制,通过合成上下文满足 Vertex AI 的结构校验限制。 - - **配额关注列表 (Fix PR #783)**: - - **自定义显示**: 在「设置 -> 账号」中新增模型配额关注列表,支持用户自定义主表格显示的特定模型配额,未选中模型仅在详情弹窗中展示。 - - **布局优化**: 针对该板块实现了响应式 4 列网格布局,并在 UI 风格上与“额度保护”保持一致。 - - **中转稳定性增强**: 增强了对 529 Overloaded 等上游过载错误的识别与退避重试,提升了极端负载下的任务成功率。 - * **v3.3.37 (2026-01-17)**: - - **后端兼容性修复 (Fix PR #772)**: - - **向后兼容性增强**: 为 `StickySessionConfig` 添加了 `#[serde(default)]` 属性,确保旧版本的配置文件(缺少粘性会话字段)能够被正确加载,避免了反序列化错误。 - - **用户体验优化 (Fix PR #772)**: - - **配置加载体验升级**: 在 `ApiProxy.tsx` 中引入了独立的加载状态和错误处理机制。现在,在获取配置时用户会看到加载动画,如果加载失败,系统将展示明确的错误信息并提供重试按钮,取代了之前的空白或错误状态。 - - **macOS Monterey 沙盒权限修复 (Fix Issue #468)**: - - **问题根源**: 在 macOS Monterey (12.x) 等旧版本系统上,应用沙盒策略阻止了读取全局偏好设置 (`kCFPreferencesAnyApplication`),导致无法正确检测默认浏览器,进而拦截了 OAuth 跳转。 - - **修复内容**: 在 `Entitlements.plist` 中添加了 `com.apple.security.temporary-exception.shared-preference.read-only` 权限例外,显式允许读取全局配置。 - * **v3.3.36 (2026-01-17)**: - - **Claude 协议核心稳定性修复**: - - **修复 "回复 OK" 死循环 (History Poisoning Fix)**: - - **问题根源**: 修复了 `is_warmup_request` 检测逻辑中的严重缺陷。旧逻辑会扫描最近 10 条历史消息,一旦历史记录中包含任何一条 "Warmup" 消息(无论是用户发送还是后台心跳残留),系统就会误判所有后续的用户输入(如 "continue")为 Warmup 请求并强制回复 "OK"。 - - **修复内容**: 将检测范围限制为仅检查**最新**的一条消息。现在只有当前请求确实是 Warmup 心跳时才会被拦截,解决了用户在多轮对话中被 "OK" 卡死的问题。 - - **影响范围**: 极大提升了 Claude Code CLI 及 Cherry Studio 等客户端在长时间会话下的可用性。 - - **修复 Cache Control 注入 (Fix Issue #744)**: - - **问题根源**: Claude 客户端在 Thinking 块中注入了非标准的 `cache_control: {"type": "ephemeral"}` 字段,导致 Google API 返回 `Extra inputs are not permitted` 400 错误。 - - **修复内容**: 实现了全局递归清理函数 `clean_cache_control_from_messages`,并将其集成到 Anthropic (z.ai) 转发路径中,确保在发送给上游 API 前移除所有 `cache_control` 字段。 - - **签名错误防御体系全面验证**: - - **隐式修复 (Implicit Fixes)**: 经过深度代码审计,确认此前报告的一系列签名相关 Issue (#755, #654, #653, #639, #617) 已被 v3.3.35 的**严格签名验证**、**自动降级**及**Base64 智能解码**机制所覆盖和修复。现在的系统对缺失、损坏或编码错误的签名具有极高的容错性。 - - **智能预热逻辑修复 (Fix Issue 
#760)**: - - **问题根源**: 修复了自动预热调度器中的一段遗留代码,该代码错误地将 `gemini-2.5-flash` 的配额状态强制映射给 `gemini-3-flash`。 - - **现象**: 这会导致当 `gemini-2.5-flash` 仍有额度(如 100%)但 `gemini-3-flash` 已耗尽(0%)时,系统误判 `gemini-3-flash` 也为满额并触发预热,造成“无额度却预热”的幽灵请求。 - - **修复内容**: 移除了所有硬编码的 `2.5 -> 3` 映射逻辑。现在的预热调度器严格检查每个模型自身的配额百分比,只有当该模型实测为 100% 时才会触发预热。 - - **移除 Gemini 2.5 Pro 模型 (Fix Issue #766)**: - - **原因**: 鉴于 `gemini-2.5-pro` 模型的可靠性问题,已将其从支持列表中移除。 - - **迁移**: 所有 `gpt-4` 系列别名(如 `gpt-4`, `gpt-4o`)已重新映射至 `gemini-2.5-flash`,确保服务连续性。 - - **影响**: 之前通过别名使用 `gemini-2.5-pro` 的用户将自动路由至 `gemini-2.5-flash`。前端不再显示该模型。 - - **CLI 同步安全与备份增强 (Fix Issue #756 & #765)**: - - **智能备份与还原**: 引入了自动备份机制。在执行同步覆盖前,系统会自动将用户现有的配置文件备份为 `.antigravity.bak`。“恢复”功能现已升级,能智能检测备份文件,并优先提供“恢复原有配置”选项,而非单一的重置默认。 - - **操作二次确认**: 为“立即同步配置”操作增加了二次确认弹窗,有效防止误触导致本地个性化配置(如登录态)丢失。 - - **CLI 检测增强**: 优化了 macOS 平台下的 CLI(如 Claude Code)检测逻辑。即使二进制文件不在系统 `PATH` 中,只要存在于标准安装路径,也能被正确识别并调用。 - - **Windows 控制台闪烁修复 (PR #769, 感谢 @i-smile)**: - - **无窗口运行**: 修复了在 Windows 平台上执行 CLI 同步命令(如 `where` 检测)时会短暂弹出控制台窗口的问题。通过添加 `CREATE_NO_WINDOW` 标志,现在所有后台检测命令都将静默执行。 - - **Auth UI 状态显示修复 (PR #769, 感谢 @i-smile)**: - - **状态准确性**: 修正了 API 反代页面中认证状态的显示逻辑。现在当 `auth_mode` 为 `off` 时,UI 会正确显示“Disabled”状态,而不是一直显示“Enabled”。 - * **v3.3.35 (2026-01-16)**: - - **CLI 同步功能重大增强 (CLI Sync Enhancements)**: - - **多配置文件支持**: 现已支持同步每个 CLI 的多个配置文件,确保环境配置更完整。涵盖 Claude Code (`settings.json`, `.claude.json`)、Codex (`auth.json`, `config.toml`) 及 Gemini CLI (`.env`, `settings.json`, `config.json`)。 - - **Claude 免登录特权**: 同步时会自动在 `~/.claude.json` 中注入 `"hasCompletedOnboarding": true`,帮助新用户直接跳过 Claude CLI 的初始登录/引导步骤。 - - **多文件查阅体验**: 配置查看详情页升级为“标签页”模式,支持在一个弹窗内顺畅切换并查看该 CLI 关联的所有本地配置文件。 - - **UI/UX 深度细节优化**: - - **弹窗体验统一**: 将“恢复默认配置”的确认框由原生浏览器弹窗替换为应用主题一致的 `ModalDialog`。 - - **图表与显示优化**: 优化了恢复按钮图标 (RotateCcw);精简了状态标签文案并强制不换行,解决了高分屏或窄窗口下的布局错位问题。 - - **版本号精简**: 改进了 CLI 版本号提取逻辑,界面仅保留纯数字版本(如 v0.86.0),视觉更加清爽。 - - **Claude 思考签名持久化修复 (Fix Issue #752)**: - - **问题根源**: - - **响应收集侧**:v3.3.34 中流式响应收集器 (`collector.rs`) 在处理 
`content_block_start` 事件时遗漏了 `thinking` 块的 `signature` 字段,导致签名丢失。 - - **请求转换侧**:历史消息中的签名未经验证直接发送给 Gemini,导致跨模型切换或冷启动时出现 `Invalid signature in thinking block` 错误。 - - **修复内容**: - - **响应收集器**:在 `collector.rs` 中添加了 `signature` 字段的提取和持久化逻辑,并补充了单元测试 `test_collect_thinking_response_with_signature`。 - - **请求转换器**:在 `request.rs` 中实施严格签名验证,只使用已缓存且兼容的签名。未知或不兼容的签名会导致 thinking 块自动降级为普通文本,避免发送无效签名。 - - **回退机制**:实现智能回退重试逻辑。如果签名验证失效或上游 API 拒绝(400错误),系统会自动清除所有 thinking 块并强制重试,确保用户请求总是成功。 - - **影响范围**: 解决了 `Invalid signature in thinking block` 错误,支持跨模型切换和冷启动场景,确保 Thinking 模型在所有模式下稳定工作。 - - **API 监控数据实时同步修复 (Pull Request #747, Thanks to @xycxl)**: - - **问题根源**: 修复了 API 监控页面因事件监听器重复注册和状态不同步导致的日志重复显示、计数器不准等问题。 - - **修复内容**: - - **数据去重**: 引入 `pendingLogsRef` 和 ID 排重机制,杜绝日志列表中出现重复条目。 - - **精准计数**: 实现了前后端状态的严格同步,每次接收新日志都从后端获取权威的 `totalCount`,确保页码和总数准确无误。 - - **防抖优化**: 优化了日志更新的防抖逻辑,减少 React 重渲染次数,提升页面流畅度。 - - **功能重命名**: 将“调用记录”重命名为“流量日志”,并恢复路由为 `/monitor`,使功能定位更加直观。 - * **v3.3.34 (2026-01-16)**: - - **OpenAI Codex/Responses 协议修复 (Fix Issue #742)**: - - **400 Invalid Argument 修复**: - - **问题根源**: `/v1/responses` 等专有接口在请求体中仅包含 `instructions` 或 `input` 而缺失 `messages` 字段时,转换逻辑未覆盖全场景,导致 Gemini 接收到空 Body。 - - **修复内容**: 在 `handle_completions` 中反向移植了聊天接口的“请求标准化”逻辑。现在系统会强制检测 Codex 特有字段(`instructions`/`input`),即使 `messages` 为空或缺失,也会自动将其转化为标准的 System/User 消息对,确保上游请求合法。 - - **429/503 高级重试与账号轮换支持**: - - **逻辑对齐**: 将 Claude 处理器中验证过的“智能指数退避”与“多维账号轮换”策略完整移植到了 OpenAI Completions 接口。 - - **效果**: 现在 Codex 接口在遇到限流或服务器过载时,会自动执行毫秒级切换,不再直接抛出错误,极大提升了 VS Code 插件等工具的稳定性。 - - **会话粘性 (Session Stickiness) 支持**: - - **功能扩展**: 补全了 OpenAI 协议下的 `session_id` 提取与调度逻辑。现在无论是 Chat 还是 Codex 接口,只要是同一段对话,系统都会尽量将其调度到同一个 Google 账号上。 - - **性能红利**: 这将显著提升 Google Prompt Caching 的命中率,从而大幅加快响应速度并节省计算资源。 - - **Claude 思考签名编码修复 (Fix Issue #726)**: - - **问题根源**: 修复了 v3.3.33 中引入的 Regression,该版本错误地对已经 Base64 编码的 `thoughtSignature` 进行了二次编码,导致 Google Vertex AI 无法正确校验签名而返回 `Invalid signature` 错误。 - - **修复内容**: 移除了 `Thinking`、`ToolUse` 和 
`ToolResult` 处理逻辑中多余的 Base64 编码步骤,确保签名以原始格式正确透传给上游。 - - **影响范围**: 解决了使用 Thinking 模型(如 Claude 4.5 Opus / Sonnet)在多轮对话中触发的 400 签名错误,以及由此导致的 "Error searching files" 任务卡死问题 (Issue #737)。 - - **API 监控看板刷新修复 (Fix Issue #735)**: - - **问题根源**: 修复了 `ProxyMonitor` 组件中因 Closure 导致的事件监听失效问题,该问题导致新请求无法自动显示在列表中。 - - **修复内容**: 引入 `useRef` 优化事件缓冲逻辑,并新增手动刷新按钮作为备份方案;同时在 Tauri 权限配置中显式允许了事件监听。 - - **严格分组配额保护修复 (Strict Grouped Quota Protection Fix - Core Thanks to @Mag1cFall PR #746)**: - - **问题根源**: 修复了在严格匹配模式下,配额保护逻辑因大小写敏感和前端 UI 键名映射缺失而失效的问题。之前版本中 `gemini-pro` 等 UI 简写键名无法匹配到后端定义的 `gemini-3-pro-high` 严格组。 - - **修复内容**: - - **即时大小写归一化**: 恢复了后端 `normalize_to_standard_id` 的大小写不敏感匹配,确保 `Gemini-3-Pro-High` 等变体能被正确识别。 - - **UI 键名智能映射**: 在前端 `isModelProtected` 中增加了对 `gemini-pro/flash` 等 UI 列名的自动映射,确保 UI 上的锁图标能正确反映后端保护状态。 - - **影响范围**: 解决了 Gemini 3 Pro/Flash 和 Claude 4.5 Sonnet 在严格分组模式下的锁图标显示问题,确保配额耗尽时能直观提示用户。 - - **OpenAI 协议 Usage 统计修复 (Pull Request #749, Thanks to @stillyun)**: - - **问题根源**: 在 OpenAI 协议转换过程中,未将 Gemini 返回的 `usageMetadata` 映射到 OpenAI 格式的 `usage` 字段,导致 Kilo 等客户端显示 Token 使用量为 0。 - - **修复内容**: - - **数据模型补全**: 为 `OpenAIResponse` 增加了标准的 `usage` 字段。 - - **全链路映射**: 实现了从流式 (SSE) 和非流式响应中提取并映射 `prompt_tokens`、`completion_tokens` 及 `total_tokens` 的逻辑。 - - **影响范围**: 解决了 Kilo Editor、Claude Code 等工具在使用 OpenAI 协议时无法统计 Token 用量的问题。 - - **Linux 主题切换崩溃修复 (Pull Request #750, Thanks to @infinitete)**: - - **修复内容**: - - 在 Linux 平台禁用不兼容的 `setBackgroundColor` 调用。 - - 针对 WebKitGTK 环境禁用 View Transition API 以防止透明窗口崩溃。 - - 启动时自动调整 GTK 窗口 alpha 通道以增强稳定性。 - - **影响范围**: 解决了 Linux 用户在切换深色/浅色模式时可能遇到的程序卡死或硬崩溃问题。 - * **v3.3.33 (2026-01-15)**: - - **Codex 兼容性与模型映射修复 (Fix Issue #697)**: - - **Instructions 参数支持**: 修复了对 `instructions` 参数的处理逻辑,确保其作为系统指令(System Instructions)正确注入,提升与 Codex 等工具的兼容性。 - - **自动 Responses 格式检测**: 在 OpenAI 处理器中新增智能检测逻辑,自动识别并转换 `instructions` 或 `input` 字段触发的 Responses 模式,无需客户端手动切换。 - - **模型映射恢复与归一化**: 恢复了 `gemini-3-pro-low/high/pro` 统一归一化为内部别名 `gemini-3-pro-preview` 的逻辑,并确保在上游请求时正确还原为物理模型名 
`high`。 - - **Opus 映射增强**: 优化了系统默认映射,自动识别 `opus` 关键字模型并确保其默认路由至高性能 Pro 预览线路。 - - **OpenAI 工具调用与思考内容修复 (Fix Issue #710)**: - - **保留工具调用 ID**: 修复了 OpenAI 格式转换过程中丢失 `tool_use.id` 的问题,确保 `functionCall` 和 `functionResponse` 均保留原始 ID,解决了调用 Claude 模型时的 `Field required` 错误。 - - **思考内容 (Reasoning) 原生支持**: 增加了对 OpenAI 消息中 `reasoning_content` 的支持,将其正确映射为内部 `thought` 部分并注入思维链签名,显著提升了“思考型”模型的视觉回显效果。 - - **工具响应格式优化**: 修复了 `tool` 角色消息中可能产生的冗余 Part 冲突,确保请求报文严格符合上游校验规范。 - - **外部提供商智能兜底修复 (Fix Issue #703)**: 修复了"仅兜底"模式在 Google 账号额度耗尽时无法自动切换到外部提供商的问题。 - - **核心问题**: 原判断逻辑只检查 Google 账号数量是否为 0,而不检查账号的实际可用性(限流状态、配额保护状态),导致账号存在但不可用时直接返回 429 错误。 - - **解决方案**: 实现智能账号可用性检查机制,在 `TokenManager` 中新增 `has_available_account()` 方法,综合判断账号的限流状态和配额保护状态。 - - **修改文件**: - - `token_manager.rs`: 新增 `has_available_account()` 方法,检查是否存在未被限流且未被配额保护的可用账号 - - `handlers/claude.rs`: 优化 Fallback 模式判断逻辑,从简单的 `google_accounts == 0` 改为智能的可用性检查 - - **行为改进**: 当所有 Google 账号因限流、配额保护或其他原因不可用时,系统会自动切换到外部提供商,实现真正的智能兜底。 - - **影响范围**: 此修复确保了外部提供商(如智谱 API)的"仅兜底"模式能够正确工作,显著提升了多账号场景下的服务可用性。 - - **配额保护模型名称归一化修复 (Fix Issue #685)**: 修复了配额保护功能因模型名称不匹配而失效的问题。 - - **核心问题**: Quota API 返回的模型名称(如 `gemini-2.5-flash`)与用户在 UI 勾选的标准名称(如 `gemini-3-flash`)不一致,导致精确字符串匹配失败,保护机制无法触发。 - - **解决方案**: 实现了统一的模型名称归一化引擎 `normalize_to_standard_id`,将所有物理模型名映射到 3 个标准保护 ID: - - `gemini-3-flash`: 所有 Flash 变体 (1.5-flash, 2.5-flash, 3-flash 等) - - `gemini-3-pro-high`: 所有 Pro 变体 (1.5-pro, 2.5-pro 等) - - `claude-sonnet-4-5`: 所有 Claude Sonnet 变体 (3-5-sonnet, sonnet-4-5 等) - - **修改文件**: - - `model_mapping.rs`: 新增归一化函数 - - `account.rs`: 配额更新时归一化模型名并存储标准 ID - - `token_manager.rs`: 请求拦截时归一化 `target_model` 进行匹配 - - **联网降级场景**: 即使请求因联网搜索被降级为 `gemini-2.5-flash`,依然能正确归一化为 `gemini-3-flash` 并触发保护。 - - **影响范围**: 解决了配额保护失效问题,确保所有 3 个监控模型的保护功能正常工作。 - - **新增账号导入功能 (#682)**: 支持通过导出的 JSON 文件批量导入已有的账号,完善了账号迁移闭环。 - - **新增葡萄牙语与俄语支持 (#691, #713)**: 现已支持葡萄牙语(巴西)与俄语本地化。 - - **代理监控增强 (#676)**: 在代理监控详情页中为请求和响应载荷新增了“复制”按钮,并支持自动 JSON 格式化。 - - **i18n 修复与界面文案优化 (#671, #713)**: 修正了日语 
(ja)、土耳其语 (tr) 和俄语 (ru) 中遗漏和错位的翻译文案。 - - **全局 HTTP API (#696)**: 新增本地 HTTP 服务端口(默认 19527),支持外部工具(如 VS Code 插件)直接通过 API 进行账号切换、配额刷新和设备绑定。 - - **代理监控升级 (#704)**: 全面重构监控面板,引入后端分页查询(支持搜索过滤),解决了大量日志导致的界面卡顿问题;开放 `GET /logs` 接口供外部调用。 - - **预热策略优化 (#699)**: 预热请求新增唯一 `session_id`,并将 `max_tokens` 限制为 8,`temperature` 设置为 0,以降低资源消耗并避免 429 错误。 - - **预热逻辑修复与优化**: 修复了手动触发预热未记录历史导致自动调度重复预热的问题;优化调度器自动跳过“反代禁用”状态的账号。 - - **性能模式调度优化 (PR #706)**: 在“性能优先”调度模式下,现在会跳过默认的 60秒全局锁定机制,显著提升高并发场景下的账号轮转效率。 - - **限流记录自动清理 (PR #701)**: 引入了每分钟执行的后台清理任务,自动移除超过 1 小时的过期失败记录,解决长期运行后因历史记录累积导致的“无可用账号”误报问题。 - - **API Monitor 锁定修复 (Fix Issue #708)**: 启用 SQLite WAL 模式并优化连接配置,解决了高并发场景下因数据库锁定导致的监控数据滞后和代理服务 400/429 错误。 - - **Claude 提示词过滤优化 (#712)**: 修复了在过滤 Claude Code 冗余默认提示词时,误删用户自定义指令 (Instructions from: ...) 的问题,确保个性化配置在长对话场景下仍能正确生效。 - - **Claude 思维块排序策略优化 (Fix Issue #709)**: 解决了开启思维模式时由于块顺序错位(Text 出现在 Thinking 前)导致的 `INVALID_ARGUMENT` 报错。 - - **三段式强制分区**: 实现 `[Thinking, Text, ToolUse]` 严格顺序校验。 - - **自动降级网关**: 在单条消息内,一旦出现非思维内容,后续思维块自动降级为文本,确保协议合规。 - - **合并后二次重排**: 在 Assistant 消息合并逻辑后增加强制重排序,堵死因消息拼接导致的排序漏洞。 - * **v3.3.32 (2026-01-15)**: - - **核心调度与稳定性优化 (Fix Issue #630, #631 - 核心致谢 @lbjlaq PR #640)**: - - **配额漏洞与绕过修复**: 解决了在高并发或特定重试场景下,配额保护机制可能被绕过的潜在漏洞。 - - **限流 Key 匹配优化**: 增强了 `TokenManager` 中限流记录的匹配精准度,解决了在多实例或复杂网络环境下可能出现的速率限制判定不一致问题。 - - **账号禁用逻辑加固**: 修复了手动禁用账号在某些缓存生命周期内未立即从调度池中剥离的问题,确保“禁用即生效”。 - - **账号状态重置机制**: 完善了账号失败计数器在成功请求后的重置策略,避免账号因历史波动被长期误锁定。 - * **v3.3.31 (2026-01-14)**: - - **配额保护失效修复 (Fix Issue #631)**: - - **内存状态同步**: 修复了加载账号触发配额保护时,内存状态未立即同步的问题,确保保护机制即时生效。 - - **全场景覆盖**: 在“粘性会话 (Sticky Session)”和“60秒锁定 (60s Window Lock)”逻辑中补充了配额保护检查,防止受限账号被错误复用。 - - **代码优化**: 修复了 `token_manager.rs` 中的部分编译警告。 - - **Claude 工具调用重复报错修复 (Fix Issue #632)**: - - **弹性修复优化**: 改进了 `Elastic-Recovery` 逻辑,在注入占位结果前增加全量消息 ID 预扫描,避免了 `Found multiple tool_result blocks with id` 错误。 - - **Anthropic 协议对齐**: 确保生成的请求包严格符合 Anthropic 对工具调用 ID 唯一性的要求。 - * **v3.3.30 (2026-01-14)**: - - **模型级配额保护 (Issue #621)**: - - **隔离优化**: 
解决了因单个模型配额耗尽而禁用整个账号的问题。现在配额保护仅针对受限的具体模型,账号仍可处理其他模型的请求。 - - **自动迁移**: 新系统会自动将旧版因配额保护被全局禁用的账号恢复,并平滑转为模型级限制。 - - **全协议支持项目**: 已同步更新 Claude, OpenAI (Chat/DALL-E), Gemini, Audio 处理器的路由逻辑。 - - **Gemini 参数幻觉修复 (PR #622)**: - - **参数纠错**: 修复了 Gemini 模型将 `pattern` 参数错误放置在 `description` 或 `query` 字段的问题,增加了自动重映射逻辑。 - - **布尔值强制转换**: 增加了对 `yes`/`no`、`-n` 等非标准布尔值的自动转换支持,解决了 `lineNumbers` 等参数因类型错误导致的调用失败。 - - **影响范围**: 显著提升了 Gemini 模型在 Claude Code CLI 及其他工具调用场景下的稳定性和兼容性。 - - **代码清理与警告修复 (PR #628)**: - - **消除编译器警告**: 修复了多个未使用的导入和变量警告,移除了冗余代码,保持代码库整洁。 - - **跨平台兼容性**: 针对 Windows/macOS/Linux 不同平台的代码路径进行了宏标记优化。 - - **API 密钥自定义编辑功能 (Issue #627)**: - - **自定义密钥支持**: API 反代页面的"API 密钥"配置项现在支持直接编辑,用户可以输入自定义密钥,适合多实例部署场景。 - - **保留自动生成**: 保留了原有的"重新生成"功能,用户可以选择自动生成或手动输入。 - - **格式验证**: 添加了密钥格式验证(必须以 `sk-` 开头,长度至少 10 个字符),防止无效输入。 - - **多语言支持**: 为所有 6 种支持的语言(简体中文、英文、繁体中文、日语、土耳其语、越南语)添加了完整的国际化翻译。 - * **v3.3.29 (2026-01-14)**: - - **OpenAI 流式响应 Function Call 支持修复 (Fix Issue #602, #614)**: - - **问题背景**: OpenAI 接口的流式响应 (`stream: true`) 中缺少 Function Call 处理逻辑,导致客户端无法接收到工具调用信息。 - - **根本原因**: `create_openai_sse_stream` 函数只处理了文本内容、思考内容和图片,完全缺少对 `functionCall` 的处理。 - - **修复内容**: - - 添加工具调用状态追踪变量 (`emitted_tool_calls`),防止重复发送 - - 在 parts 循环中添加 `functionCall` 检测和转换逻辑 - - 构建符合 OpenAI 规范的 `delta.tool_calls` 数组 - - 使用哈希算法生成稳定的 `call_id` - - 包含完整的工具调用信息 (`index`, `id`, `type`, `function.name`, `function.arguments`) - - **影响范围**: 此修复确保了流式请求能够正确返回工具调用信息,与非流式响应和 Codex 流式响应的行为保持一致。所有使用 `stream: true` + `tools` 参数的客户端现在可以正常接收 Function Call 数据。 - - **智能阈值回归 (Smart Threshold Recovery) - 解决 Issue #613**: - - **核心逻辑**: 实现了一种感知上下文负载的动态 Token 报告机制。 - - **修复内容**: - - **三阶段缩放**: 在低负载(0-70%)保持高效压缩;在中负载(70-95%)平滑降低压缩率;在接近 100% 极限时真实上报(回归至 195k 左右)。 - - **模型感应**: 处理器自动识别 1M (Flash) 和 2M (Pro) 的物理上下文界限。 - - **400 错误拦截**: 即使触发物理溢出,代理层也会拦截 `Prompt is too long` 错误,并返回友好的中文/英文修复指引,引导用户执行 `/compact`。 - - **影响范围**: 解决了 Claude Code 在长对话场景下因不知道真实 Token 用量而拒绝压缩,最终导致 Gemini 服务端报错的问题。 - - **Playwright MCP 连通性与稳定性增强 (参考 
[Antigravity2Api](https://github.com/znlsl/Antigravity2Api)) - 解决 Issue #616**: - - **SSE 心跳保活**: 引入 15 秒定时心跳 (`: ping`),解决长耗时工具调用导致的连接超时断开问题。 - - **MCP XML Bridge**: 实现双向协议转换逻辑(指令注入 + 标签拦截),显著提升 MCP 工具(如 Playwright)在不稳定链路下的连通性。 - - **上下文激进瘦身**: - - **指令过滤**: 自动识别并移除 Claude Code 注入的冗余系统说明(~1-2k tokens)。 - - **任务去重**: 剔除 tool_result 后重复的任务回显文本,物理减少 Context 占用。 - - **智能 HTML 清理与截断**: - - **深度剥离**: 针对浏览器快照自动移除 ` - - -
-
-

Authorization Successful

-

You can close this window now. The application should refresh automatically.

- -
- 💡 Did it not refresh? - If the application is running in a container or remote environment, you may need to manually copy the link below: - -
-
- - - - "# - ))) - } - Err(e) => { - error!("OAuth exchange failed: {}", e); - Ok(Html(format!( - r#"

Authorization Failed

Error: {}

"#, - e - ))) - } - } -} - -async fn admin_prepare_oauth_url_web( - headers: HeaderMap, - State(state): State, -) -> Result, (StatusCode, Json)> { - let port = state.security.read().await.port; - let host = headers.get("host").and_then(|h| h.to_str().ok()); - let proto = headers - .get("x-forwarded-proto") - .and_then(|h| h.to_str().ok()); - let redirect_uri = get_oauth_redirect_uri(port, host, proto); - - let state_str = uuid::Uuid::new_v4().to_string(); - - // 初始化授权流状态,以及后台处理器 - let (auth_url, mut code_rx) = crate::modules::oauth_server::prepare_oauth_flow_manually( - redirect_uri.clone(), - state_str.clone(), - ) - .map_err(|e| { - ( - StatusCode::INTERNAL_SERVER_ERROR, - Json(ErrorResponse { error: e }), - ) - })?; - - // 启动后台任务处理回调/手动提交的代码 - let token_manager = state.token_manager.clone(); - let redirect_uri_clone = redirect_uri.clone(); - tokio::spawn(async move { - match code_rx.recv().await { - Some(Ok(code)) => { - crate::modules::logger::log_info( - "Consuming manually submitted OAuth code in background", - ); - // 为 Web 回调提供简化的后端处理流程 - match crate::modules::oauth::exchange_code(&code, &redirect_uri_clone).await { - Ok(token_resp) => { - // Success! 
Now add/upsert account - if let Some(refresh_token) = &token_resp.refresh_token { - match token_manager.get_user_info(refresh_token).await { - Ok(user_info) => { - if let Err(e) = token_manager - .add_account(&user_info.email, refresh_token) - .await - { - crate::modules::logger::log_error(&format!( - "Failed to save account in background OAuth: {}", - e - )); - } else { - crate::modules::logger::log_info(&format!( - "Successfully added account {} via background OAuth", - user_info.email - )); - } - } - Err(e) => { - crate::modules::logger::log_error(&format!( - "Failed to fetch user info in background OAuth: {}", - e - )); - } - } - } else { - crate::modules::logger::log_error( - "Background OAuth error: Google did not return a refresh_token.", - ); - } - } - Err(e) => { - crate::modules::logger::log_error(&format!( - "Background OAuth exchange failed: {}", - e - )); - } - } - } - Some(Err(e)) => { - crate::modules::logger::log_error(&format!("Background OAuth flow error: {}", e)); - } - None => { - crate::modules::logger::log_info("Background OAuth flow channel closed"); - } - } - }); - - Ok(Json(serde_json::json!({ - "url": auth_url, - "state": state_str - }))) -} - -/// 辅助函数:获取 OAuth 重定向 URI -/// 强制使用 localhost,以绕过 Google 2.0 政策对 IP 地址和非 HTTPS 环境的拦截。 -/// 只有在显式设置了 ABV_PUBLIC_URL (例如用户配置了 HTTPS 域名) 时才会使用外部地址。 -fn get_oauth_redirect_uri(port: u16, _host: Option<&str>, _proto: Option<&str>) -> String { - if let Ok(public_url) = std::env::var("ABV_PUBLIC_URL") { - let base = public_url.trim_end_matches('/'); - format!("{}/auth/callback", base) - } else { - // 强制返回 localhost。远程部署时,用户可通过回填功能完成授权。 - format!("http://localhost:{}/auth/callback", port) - } -} - -// ============================================================================ -// Security / IP Management Handlers -// ============================================================================ - -#[derive(Deserialize)] -#[serde(rename_all = "camelCase")] -struct IpAccessLogQuery { - #[serde(default = 
"default_page")] - page: usize, - #[serde(default = "default_page_size")] - page_size: usize, - search: Option, - #[serde(default)] - blocked_only: bool, -} - -fn default_page() -> usize { 1 } -fn default_page_size() -> usize { 50 } - -#[derive(Serialize)] -struct IpAccessLogResponse { - logs: Vec, - total: usize, -} - -async fn admin_get_ip_access_logs( - Query(q): Query, -) -> Result)> { - let offset = (q.page.max(1) - 1) * q.page_size; - let logs = security_db::get_ip_access_logs( - q.page_size, - offset, - q.search.as_deref(), - q.blocked_only, - ).map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - - let total = logs.len(); // Simple total - - Ok(Json(IpAccessLogResponse { logs, total })) -} - -async fn admin_clear_ip_access_logs() -> Result)> { - security_db::clear_ip_access_logs() - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - Ok(StatusCode::OK) -} - -#[derive(Serialize)] -struct IpStatsResponse { - total_requests: usize, - unique_ips: usize, - blocked_requests: usize, - top_ips: Vec, -} - -async fn admin_get_ip_stats() -> Result)> { - let stats = security_db::get_ip_stats() - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - let top_ips = security_db::get_top_ips(10, 24) - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - - let response = IpStatsResponse { - total_requests: stats.total_requests as usize, - unique_ips: stats.unique_ips as usize, - blocked_requests: stats.blocked_count as usize, - top_ips, - }; - Ok(Json(response)) -} - -#[derive(Deserialize)] -struct IpTokenStatsQuery { - limit: Option, - hours: Option, -} - -async fn admin_get_ip_token_stats( - Query(q): Query, -) -> Result)> { - let stats = proxy_db::get_token_usage_by_ip( - q.limit.unwrap_or(100), - q.hours.unwrap_or(720) - ).map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - Ok(Json(stats)) -} - 
-async fn admin_get_ip_blacklist() -> Result)> { - let list = security_db::get_blacklist() - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - Ok(Json(list)) -} - -#[derive(Deserialize)] -struct AddBlacklistRequest { - ip_pattern: String, - reason: Option, - expires_at: Option, -} - -async fn admin_add_ip_to_blacklist( - Json(req): Json, -) -> Result)> { - security_db::add_to_blacklist( - &req.ip_pattern, - req.reason.as_deref(), - req.expires_at, - "manual", - ).map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - - Ok(StatusCode::CREATED) -} - -#[derive(Deserialize)] -#[serde(rename_all = "camelCase")] -struct RemoveIpRequest { - ip_pattern: String, -} - -async fn admin_remove_ip_from_blacklist( - Query(q): Query, -) -> Result)> { - let entries = security_db::get_blacklist() - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - - if let Some(entry) = entries.iter().find(|e| e.ip_pattern == q.ip_pattern) { - security_db::remove_from_blacklist(&entry.id) - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - } else { - return Err((StatusCode::NOT_FOUND, Json(ErrorResponse { error: format!("IP pattern {} not found", q.ip_pattern) }))); - } - - Ok(StatusCode::OK) -} - -async fn admin_clear_ip_blacklist() -> Result)> { - let entries = security_db::get_blacklist() - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - for entry in entries { - security_db::remove_from_blacklist(&entry.ip_pattern) - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - } - Ok(StatusCode::OK) -} - -#[derive(Deserialize)] -#[serde(rename_all = "camelCase")] -struct CheckIpQuery { - ip: String, -} - -async fn admin_check_ip_in_blacklist( - Query(q): Query, -) -> Result)> { - let result = security_db::is_ip_in_blacklist(&q.ip) - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, 
Json(ErrorResponse { error: e })))?; - Ok(Json(serde_json::json!({ "result": result }))) -} - -async fn admin_get_ip_whitelist() -> Result)> { - let list = security_db::get_whitelist() - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - Ok(Json(list)) -} - -#[derive(Deserialize)] -struct AddWhitelistRequest { - ip_pattern: String, - description: Option, -} - -async fn admin_add_ip_to_whitelist( - Json(req): Json, -) -> Result)> { - security_db::add_to_whitelist( - &req.ip_pattern, - req.description.as_deref(), - ).map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - Ok(StatusCode::CREATED) -} - -async fn admin_remove_ip_from_whitelist( - Query(q): Query, -) -> Result)> { - let entries = security_db::get_whitelist() - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - - if let Some(entry) = entries.iter().find(|e| e.ip_pattern == q.ip_pattern) { - security_db::remove_from_whitelist(&entry.id) - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - } else { - return Err((StatusCode::NOT_FOUND, Json(ErrorResponse { error: format!("IP pattern {} not found", q.ip_pattern) }))); - } - Ok(StatusCode::OK) -} - -async fn admin_clear_ip_whitelist() -> Result)> { - let entries = security_db::get_whitelist() - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - for entry in entries { - security_db::remove_from_whitelist(&entry.ip_pattern) - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - } - Ok(StatusCode::OK) -} - -async fn admin_check_ip_in_whitelist( - Query(q): Query, -) -> Result)> { - let result = security_db::is_ip_in_whitelist(&q.ip) - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e })))?; - Ok(Json(serde_json::json!({ "result": result }))) -} - -async fn admin_get_security_config( - State(state): State, -) -> Result)> { - 
let app_config = crate::modules::config::load_app_config() - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e.to_string() })))?; - - Ok(Json(app_config.proxy.security_monitor)) -} - -#[derive(Deserialize)] -struct UpdateSecurityConfigWrapper { - config: crate::proxy::config::SecurityMonitorConfig, -} - -async fn admin_update_security_config( - State(state): State, - Json(payload): Json, -) -> Result)> { - let config = payload.config; - let mut app_config = crate::modules::config::load_app_config() - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e.to_string() })))?; - - app_config.proxy.security_monitor = config.clone(); - - crate::modules::config::save_app_config(&app_config) - .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: e.to_string() })))?; - - { - let mut sec = state.security.write().await; - *sec = crate::proxy::ProxySecurityConfig::from_proxy_config(&app_config.proxy); - tracing::info!("[Security] Runtime security config hot-reloaded via Web API"); - } - - Ok(StatusCode::OK) -} - -// --- Debug Console Handlers --- - -async fn admin_enable_debug_console() -> impl IntoResponse { - crate::modules::log_bridge::enable_log_bridge(); - StatusCode::OK -} - -async fn admin_disable_debug_console() -> impl IntoResponse { - crate::modules::log_bridge::disable_log_bridge(); - StatusCode::OK -} - -async fn admin_is_debug_console_enabled() -> impl IntoResponse { - Json(crate::modules::log_bridge::is_log_bridge_enabled()) -} - -async fn admin_get_debug_console_logs() -> impl IntoResponse { - let logs = crate::modules::log_bridge::get_buffered_logs(); - Json(logs) -} - -async fn admin_clear_debug_console_logs() -> impl IntoResponse { - crate::modules::log_bridge::clear_log_buffer(); - StatusCode::OK -} - -#[derive(Deserialize)] -#[serde(rename_all = "camelCase")] -struct OpencodeSyncStatusRequest { - proxy_url: String, -} - -async fn admin_get_opencode_sync_status( - Json(payload): 
Json, -) -> Result)> { - crate::proxy::opencode_sync::get_opencode_sync_status(payload.proxy_url) - .await - .map(Json) - .map_err(|e| { - ( - StatusCode::INTERNAL_SERVER_ERROR, - Json(ErrorResponse { error: e }), - ) - }) -} - -#[derive(Deserialize)] -#[serde(rename_all = "camelCase")] -struct OpencodeSyncRequest { - proxy_url: String, - api_key: String, - #[serde(default)] - sync_accounts: bool, -} - -async fn admin_execute_opencode_sync( - Json(payload): Json, -) -> Result)> { - crate::proxy::opencode_sync::execute_opencode_sync( - payload.proxy_url, - payload.api_key, - Some(payload.sync_accounts), - ) - .await - .map(|_| StatusCode::OK) - .map_err(|e| { - ( - StatusCode::INTERNAL_SERVER_ERROR, - Json(ErrorResponse { error: e }), - ) - }) -} - -async fn admin_execute_opencode_restore( -) -> Result)> { - crate::proxy::opencode_sync::execute_opencode_restore() - .await - .map(|_| StatusCode::OK) - .map_err(|e| { - ( - StatusCode::INTERNAL_SERVER_ERROR, - Json(ErrorResponse { error: e }), - ) - }) -} - -#[derive(Deserialize)] -#[serde(rename_all = "camelCase")] -struct GetOpencodeConfigRequest { - file_name: Option, -} - -async fn admin_get_opencode_config_content( - Json(payload): Json, -) -> Result)> { - let file_name = payload.file_name; - tokio::task::spawn_blocking(move || crate::proxy::opencode_sync::read_opencode_config_content(file_name)) - .await - .map_err(|e| ( - StatusCode::INTERNAL_SERVER_ERROR, - Json(ErrorResponse { error: e.to_string() }), - ))? - .map(Json) - .map_err(|e| ( - StatusCode::INTERNAL_SERVER_ERROR, - Json(ErrorResponse { error: e }), - )) -} diff --git a/src-tauri/src/proxy/server/admin/accounts.rs b/src-tauri/src/proxy/server/admin/accounts.rs new file mode 100644 index 000000000..fa8e73c75 --- /dev/null +++ b/src-tauri/src/proxy/server/admin/accounts.rs @@ -0,0 +1,590 @@ +//! Account management admin handlers +//! +//! Handles CRUD operations for accounts, OAuth flow, device binding, and quota management. 
+ +use axum::{ + extract::{Path, State}, + http::StatusCode, + response::IntoResponse, + Json, +}; + +use crate::modules::{account, logger}; +use crate::proxy::server::types::{ + AccountListResponse, AccountResponse, AddAccountRequest, AppState, BindDeviceRequest, + ErrorResponse, ModelQuota, QuotaResponse, ReorderRequest, BulkDeleteRequest, + SubmitCodeRequest, SwitchRequest, ToggleProxyRequest, to_account_response, +}; + +// ============================================================================ +// Account CRUD +// ============================================================================ + +pub async fn list_accounts( + State(state): State, +) -> Result)> { + let accounts = state.account_service.list_accounts().await.map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + let current_id = state.account_service.get_current_id().ok().flatten(); + + let account_responses: Vec = accounts + .into_iter() + .map(|acc| { + let is_current = current_id.as_ref().map(|id| id == &acc.id).unwrap_or(false); + let quota = acc.quota.map(|q| QuotaResponse { + models: q + .models + .into_iter() + .map(|m| ModelQuota { + name: m.name, + percentage: m.percentage, + reset_time: m.reset_time, + }) + .collect(), + last_updated: q.last_updated, + subscription_tier: q.subscription_tier, + is_forbidden: q.is_forbidden, + }); + + AccountResponse { + id: acc.id, + email: acc.email, + name: acc.name, + is_current, + disabled: acc.disabled, + disabled_reason: acc.disabled_reason, + disabled_at: acc.disabled_at, + proxy_disabled: acc.proxy_disabled, + proxy_disabled_reason: acc.proxy_disabled_reason, + proxy_disabled_at: acc.proxy_disabled_at, + protected_models: acc.protected_models.into_iter().collect(), + quota, + device_bound: acc.device_profile.is_some(), + last_used: acc.last_used, + } + }) + .collect(); + + Ok(Json(AccountListResponse { + current_account_id: current_id, + accounts: account_responses, + })) +} + +pub async fn 
get_current_account( + State(state): State, +) -> Result)> { + let current_id = state.account_service.get_current_id().map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + let response = if let Some(id) = current_id { + let acc = account::load_account(&id).ok(); + acc.map(|acc| { + let quota = acc.quota.map(|q| QuotaResponse { + models: q + .models + .into_iter() + .map(|m| ModelQuota { + name: m.name, + percentage: m.percentage, + reset_time: m.reset_time, + }) + .collect(), + last_updated: q.last_updated, + subscription_tier: q.subscription_tier, + is_forbidden: q.is_forbidden, + }); + + AccountResponse { + id: acc.id, + email: acc.email, + name: acc.name, + is_current: true, + disabled: acc.disabled, + disabled_reason: acc.disabled_reason, + disabled_at: acc.disabled_at, + proxy_disabled: acc.proxy_disabled, + proxy_disabled_reason: acc.proxy_disabled_reason, + proxy_disabled_at: acc.proxy_disabled_at, + protected_models: acc.protected_models.into_iter().collect(), + quota, + device_bound: acc.device_profile.is_some(), + last_used: acc.last_used, + } + }) + } else { + None + }; + + Ok(Json(response)) +} + +pub async fn add_account( + State(state): State, + Json(payload): Json, +) -> Result)> { + let account = state + .account_service + .add_account(&payload.refresh_token) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + // [FIX #1166] Reload TokenManager after account change + if let Err(e) = state.token_manager.load_accounts().await { + logger::log_error(&format!("[API] Failed to reload accounts after adding: {}", e)); + } + + let current_id = state.account_service.get_current_id().map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(Json(to_account_response(&account, ¤t_id))) +} + +pub async fn delete_account( + State(state): State, + Path(account_id): Path, +) -> Result)> { + 
state.account_service.delete_account(&account_id).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + // [FIX #1166] Reload TokenManager after deletion + if let Err(e) = state.token_manager.load_accounts().await { + logger::log_error(&format!( + "[API] Failed to reload accounts after deletion: {}", + e + )); + } + + Ok(StatusCode::NO_CONTENT) +} + +pub async fn switch_account( + State(state): State, + Json(payload): Json, +) -> Result)> { + { + let switching = state.switching.read().await; + if *switching { + return Err(( + StatusCode::CONFLICT, + Json(ErrorResponse { + error: "Another switch operation is already in progress".to_string(), + }), + )); + } + } + + { + let mut switching = state.switching.write().await; + *switching = true; + } + + let account_id = payload.account_id.clone(); + logger::log_info(&format!("[API] Starting account switch: {}", account_id)); + + let result = state.account_service.switch_account(&account_id).await; + + { + let mut switching = state.switching.write().await; + *switching = false; + } + + match result { + Ok(()) => { + logger::log_info(&format!("[API] Account switch successful: {}", account_id)); + + // [FIX #1166] Sync memory state after switch + state.token_manager.clear_all_sessions(); + if let Err(e) = state.token_manager.load_accounts().await { + logger::log_error(&format!( + "[API] Failed to reload accounts after switch: {}", + e + )); + } + + Ok(StatusCode::OK) + } + Err(e) => { + logger::log_error(&format!("[API] Account switch failed: {}", e)); + Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + )) + } + } +} + +pub async fn refresh_all_quotas( +) -> Result)> { + logger::log_info("[API] Starting refresh of all account quotas"); + let stats = account::refresh_all_quotas_logic().await.map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + Ok(Json(stats)) +} + +pub async fn 
delete_accounts( + Json(payload): Json, +) -> Result)> { + crate::modules::account::delete_accounts(&payload.account_ids).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(StatusCode::OK) +} + +pub async fn reorder_accounts( + State(state): State, + Json(payload): Json, +) -> Result)> { + crate::modules::account::reorder_accounts(&payload.account_ids).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + // [FIX #1166] Reload TokenManager after reorder + if let Err(e) = state.token_manager.load_accounts().await { + logger::log_error(&format!( + "[API] Failed to reload accounts after reorder: {}", + e + )); + } + + Ok(StatusCode::OK) +} + +pub async fn fetch_account_quota( + Path(account_id): Path, +) -> Result)> { + let mut account = crate::modules::load_account(&account_id).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + let quota = crate::modules::account::fetch_quota_with_retry(&mut account) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + ) + })?; + + crate::modules::update_account_quota(&account_id, quota.clone()).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + Ok(Json(quota)) +} + +pub async fn toggle_proxy_status( + State(state): State, + Path(account_id): Path, + Json(payload): Json, +) -> Result)> { + crate::modules::account::toggle_proxy_status( + &account_id, + payload.enable, + payload.reason.as_deref(), + ) + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + // Sync to running proxy service + let _ = state.token_manager.reload_account(&account_id).await; + + Ok(StatusCode::OK) +} + +// ============================================================================ +// OAuth Handlers +// 
============================================================================ + +pub async fn prepare_oauth_url( + State(state): State, +) -> Result)> { + let url = state + .account_service + .prepare_oauth_url() + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(Json(serde_json::json!({ "url": url }))) +} + +pub async fn start_oauth_login( + State(state): State, +) -> Result)> { + let account = state + .account_service + .start_oauth_login() + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + let current_id = state.account_service.get_current_id().map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(Json(to_account_response(&account, ¤t_id))) +} + +pub async fn complete_oauth_login( + State(state): State, +) -> Result)> { + let account = state + .account_service + .complete_oauth_login() + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + let current_id = state.account_service.get_current_id().map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(Json(to_account_response(&account, ¤t_id))) +} + +pub async fn cancel_oauth_login( + State(state): State, +) -> Result)> { + state.account_service.cancel_oauth_login(); + Ok(StatusCode::OK) +} + +pub async fn submit_oauth_code( + State(state): State, + Json(payload): Json, +) -> Result)> { + state + .account_service + .submit_oauth_code(payload.code, payload.state) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(StatusCode::OK) +} + +// ============================================================================ +// Device Binding Handlers +// ============================================================================ + +pub async fn bind_device( + 
Path(account_id): Path, + Json(payload): Json, +) -> Result)> { + let result = account::bind_device_profile(&account_id, &payload.mode).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + Ok(Json(serde_json::json!({ + "success": true, + "message": "Device fingerprint bound successfully", + "device_profile": result, + }))) +} + +pub async fn get_device_profiles( + State(_state): State, + Path(account_id): Path, +) -> Result)> { + let profiles = account::get_device_profiles(&account_id).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(Json(profiles)) +} + +pub async fn list_device_versions( + State(_state): State, + Path(account_id): Path, +) -> Result)> { + let profiles = account::get_device_profiles(&account_id).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(Json(profiles)) +} + +pub async fn preview_generate_profile( +) -> Result)> { + let profile = crate::modules::device::generate_profile(); + Ok(Json(profile)) +} + +pub async fn bind_device_profile_with_profile( + State(_state): State, + Path(account_id): Path, + Json(profile): Json, +) -> Result)> { + let result = + account::bind_device_profile_with_profile(&account_id, profile, None).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(Json(result)) +} + +pub async fn restore_original_device( +) -> Result)> { + let msg = account::restore_original_device().map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(Json(msg)) +} + +pub async fn restore_device_version( + State(_state): State, + Path((account_id, version_id)): Path<(String, String)>, +) -> Result)> { + let profile = account::restore_device_version(&account_id, &version_id).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + 
Ok(Json(profile)) +} + +pub async fn delete_device_version( + State(_state): State, + Path((account_id, version_id)): Path<(String, String)>, +) -> Result)> { + account::delete_device_version(&account_id, &version_id).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(StatusCode::NO_CONTENT) +} + +// ============================================================================ +// Warmup Handlers +// ============================================================================ + +pub async fn warm_up_all_accounts( +) -> Result)> { + let result = crate::commands::quota::warm_up_all_accounts() + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + ) + })?; + Ok(Json(result)) +} + +pub async fn warm_up_account( + Path(account_id): Path, +) -> Result)> { + let result = crate::commands::quota::warm_up_account(account_id) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + ) + })?; + Ok(Json(result)) +} + +// ============================================================================ +// Export Accounts Handler +// ============================================================================ + +#[derive(serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ExportAccountsRequest { + pub account_ids: Vec, +} + +pub async fn export_accounts( + State(_state): State, + Json(payload): Json, +) -> Result)> { + let response = account::export_accounts_by_ids(&payload.account_ids).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + Ok(Json(response)) +} diff --git a/src-tauri/src/proxy/server/admin/import.rs b/src-tauri/src/proxy/server/admin/import.rs new file mode 100644 index 000000000..ee1cb39f5 --- /dev/null +++ b/src-tauri/src/proxy/server/admin/import.rs @@ -0,0 +1,138 @@ +//! Import and migration admin handlers +//! 
+//! Handles account import from various sources (v1, DB, custom paths). + +use axum::{ + extract::State, + http::StatusCode, + response::IntoResponse, + Json, +}; + +use crate::modules::{account, migration}; +use crate::proxy::server::types::{AppState, CustomDbRequest, ErrorResponse, to_account_response, AccountResponse}; + +// ============================================================================ +// Import Handlers +// ============================================================================ + +pub async fn import_v1_accounts( + State(state): State, +) -> Result)> { + let accounts = migration::import_from_v1().await.map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + // [FIX #1166] Reload after import + let _ = state.token_manager.load_accounts().await; + + let current_id = state.account_service.get_current_id().map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + let responses: Vec = accounts + .iter() + .map(|a| to_account_response(a, ¤t_id)) + .collect(); + + Ok(Json(responses)) +} + +pub async fn import_from_db( + State(state): State, +) -> Result)> { + let account = migration::import_from_db().await.map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + // [FIX #1166] Reload after import + let _ = state.token_manager.load_accounts().await; + + let current_id = state.account_service.get_current_id().map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + Ok(Json(to_account_response(&account, ¤t_id))) +} + +pub async fn import_custom_db( + State(state): State, + Json(payload): Json, +) -> Result)> { + let account = migration::import_from_custom_db_path(payload.path) + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + // [FIX #1166] Reload after import + let _ = 
state.token_manager.load_accounts().await; + + let current_id = state.account_service.get_current_id().map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + Ok(Json(to_account_response(&account, ¤t_id))) +} + +pub async fn sync_account_from_db( + State(state): State, +) -> Result)> { + // Logic from sync_account_from_db command + let db_refresh_token = match migration::get_refresh_token_from_db() { + Ok(token) => token, + Err(_e) => { + return Ok(Json(None)); + } + }; + + let curr_account = account::get_current_account().map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + if let Some(acc) = curr_account { + if acc.token.refresh_token == db_refresh_token { + return Ok(Json(None)); + } + } + + let account = migration::import_from_db().await.map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + // [FIX #1166] Reload TokenManager after sync + let _ = state.token_manager.load_accounts().await; + + let current_id = state.account_service.get_current_id().map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + Ok(Json(Some(to_account_response(&account, ¤t_id)))) +} diff --git a/src-tauri/src/proxy/server/admin/mod.rs b/src-tauri/src/proxy/server/admin/mod.rs new file mode 100644 index 000000000..c147aa4f6 --- /dev/null +++ b/src-tauri/src/proxy/server/admin/mod.rs @@ -0,0 +1,16 @@ +//! Admin API handlers module +//! +//! This module organizes all admin API handlers into logical submodules. 
+ +pub mod accounts; +pub mod import; +pub mod proxy; +pub mod stats; +pub mod system; + +// Re-export all handlers for convenient access +pub use accounts::*; +pub use import::*; +pub use proxy::*; +pub use stats::*; +pub use system::*; diff --git a/src-tauri/src/proxy/server/admin/proxy.rs b/src-tauri/src/proxy/server/admin/proxy.rs new file mode 100644 index 000000000..c745ca383 --- /dev/null +++ b/src-tauri/src/proxy/server/admin/proxy.rs @@ -0,0 +1,601 @@ +//! Proxy control admin handlers +//! +//! Handles proxy service control, session management, rate limiting, and monitoring. + +use axum::{ + extract::{Path, State}, + http::StatusCode, + response::IntoResponse, + Json, +}; + +use crate::modules::logger; +use crate::proxy::server::types::{ + AppState, ErrorResponse, LogsFilterQuery, OpencodeConfigContentRequest, OpencodeSyncRequest, + OpencodeSyncStatusRequest, UpdateMappingWrapper, +}; + +// ============================================================================ +// Proxy Service Control +// ============================================================================ + +pub async fn get_proxy_status( + State(state): State, +) -> Result)> { + let active_accounts = state.token_manager.len(); + let is_running = { *state.is_running.read().await }; + + Ok(Json(serde_json::json!({ + "running": is_running, + "port": state.port, + "base_url": format!("http://127.0.0.1:{}", state.port), + "active_accounts": active_accounts, + }))) +} + +pub async fn start_proxy_service(State(state): State) -> impl IntoResponse { + // 1. Persist config (fix #1166) + if let Ok(mut config) = crate::modules::config::load_app_config() { + config.proxy.auto_start = true; + let _ = crate::modules::config::save_app_config(&config); + } + + // 2. 
Load accounts if first start + if let Err(e) = state.token_manager.load_accounts().await { + logger::log_error(&format!("[API] Failed to load accounts on start: {}", e)); + } + + let mut running = state.is_running.write().await; + *running = true; + logger::log_info("[API] Proxy service enabled (persisted)"); + StatusCode::OK +} + +pub async fn stop_proxy_service(State(state): State) -> impl IntoResponse { + // 1. Persist config (fix #1166) + if let Ok(mut config) = crate::modules::config::load_app_config() { + config.proxy.auto_start = false; + let _ = crate::modules::config::save_app_config(&config); + } + + let mut running = state.is_running.write().await; + *running = false; + logger::log_info("[API] Proxy service disabled (Axum mode / persisted)"); + StatusCode::OK +} + +pub async fn update_model_mapping( + State(state): State, + Json(payload): Json, +) -> Result)> { + let config = payload.config; + + // 1. Hot-update memory state + { + let mut mapping = state.custom_mapping.write().await; + *mapping = config.custom_mapping.clone(); + } + + // 2. 
Persist to disk (fix #1149) + let mut app_config = crate::modules::config::load_app_config().map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + app_config.proxy.custom_mapping = config.custom_mapping; + + crate::modules::config::save_app_config(&app_config).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + logger::log_info("[API] Model mapping hot-updated and saved via API"); + Ok(StatusCode::OK) +} + +pub async fn generate_api_key() -> impl IntoResponse { + let new_key = format!("sk-{}", uuid::Uuid::new_v4().to_string().replace("-", "")); + Json(new_key) +} + +// ============================================================================ +// Session & Rate Limit Management +// ============================================================================ + +pub async fn clear_proxy_session_bindings(State(state): State) -> impl IntoResponse { + state.token_manager.clear_all_sessions(); + logger::log_info("[API] Cleared all session bindings"); + StatusCode::OK +} + +pub async fn clear_all_rate_limits(State(state): State) -> impl IntoResponse { + state.token_manager.clear_all_rate_limits(); + logger::log_info("[API] Cleared all rate limit records"); + StatusCode::OK +} + +pub async fn clear_rate_limit( + State(state): State, + Path(account_id): Path, +) -> impl IntoResponse { + let cleared = state.token_manager.clear_rate_limit(&account_id); + if cleared { + logger::log_info(&format!( + "[API] Cleared rate limit for account {}", + account_id + )); + StatusCode::OK + } else { + StatusCode::NOT_FOUND + } +} + +// ============================================================================ +// Monitor Control +// ============================================================================ + +pub async fn set_proxy_monitor_enabled( + State(state): State, + Json(payload): Json, +) -> impl IntoResponse { + let enabled = payload + .get("enabled") + .and_then(|v| 
v.as_bool()) + .unwrap_or(false); + + // [FIX #1269] Only log when state actually changes + if state.monitor.is_enabled() != enabled { + state.monitor.set_enabled(enabled); + logger::log_info(&format!("[API] Monitor state set to: {}", enabled)); + } + + StatusCode::OK +} + +pub async fn get_proxy_stats( + State(state): State, +) -> Result)> { + let stats = state.monitor.get_stats().await; + Ok(Json(stats)) +} + +// ============================================================================ +// Logs Management +// ============================================================================ + +pub async fn get_proxy_logs_filtered( + axum::extract::Query(params): axum::extract::Query, +) -> Result)> { + let res = tokio::task::spawn_blocking(move || { + crate::modules::proxy_db::get_logs_filtered( + ¶ms.filter, + params.errors_only, + params.limit, + params.offset, + ) + }) + .await; + + match res { + Ok(Ok(logs)) => Ok(Json(logs)), + Ok(Err(e)) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + )), + Err(e) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + )), + } +} + +pub async fn get_proxy_logs_count_filtered( + axum::extract::Query(params): axum::extract::Query, +) -> Result)> { + let res = tokio::task::spawn_blocking(move || { + crate::modules::proxy_db::get_logs_count_filtered(¶ms.filter, params.errors_only) + }) + .await; + + match res { + Ok(Ok(count)) => Ok(Json(count)), + Ok(Err(e)) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + )), + Err(e) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + )), + } +} + +pub async fn clear_proxy_logs() -> impl IntoResponse { + let _ = tokio::task::spawn_blocking(|| { + if let Err(e) = crate::modules::proxy_db::clear_logs() { + logger::log_error(&format!("[API] Failed to clear proxy logs: {}", e)); + } + }) + .await; + logger::log_info("[API] Cleared all 
proxy logs"); + StatusCode::OK +} + +pub async fn get_proxy_log_detail( + Path(log_id): Path, +) -> Result)> { + let res = tokio::task::spawn_blocking(move || { + crate::modules::proxy_db::get_log_detail(&log_id) + }) + .await; + + match res { + Ok(Ok(log)) => Ok(Json(log)), + Ok(Err(e)) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + )), + Err(e) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + )), + } +} + +// ============================================================================ +// z.ai Integration +// ============================================================================ + +pub async fn fetch_zai_models( + Path(_id): Path, + Json(payload): Json, +) -> Result)> { + let zai_config = payload.get("zai").ok_or_else(|| { + ( + StatusCode::BAD_REQUEST, + Json(ErrorResponse { + error: "Missing zai config".to_string(), + }), + ) + })?; + + let api_key = zai_config + .get("api_key") + .and_then(|v| v.as_str()) + .unwrap_or(""); + let base_url = zai_config + .get("base_url") + .and_then(|v| v.as_str()) + .unwrap_or("https://api.z.ai"); + + let client = reqwest::Client::new(); + let resp = client + .get(format!("{}/v1/models", base_url)) + .header("Authorization", format!("Bearer {}", api_key)) + .send() + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + ) + })?; + + let data: serde_json::Value = resp.json().await.map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + ) + })?; + + let models = data + .get("data") + .and_then(|v| v.as_array()) + .map(|arr| { + arr.iter() + .filter_map(|m| m.get("id").and_then(|id| id.as_str().map(|s| s.to_string()))) + .collect::>() + }) + .unwrap_or_default(); + + Ok(Json(models)) +} + +// ============================================================================ +// Cloudflared Handlers +// 
============================================================================ + +pub async fn cloudflared_get_status( + State(state): State, +) -> Result)> { + state + .cloudflared_state + .ensure_manager() + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + let lock = state.cloudflared_state.manager.read().await; + if let Some(manager) = lock.as_ref() { + let (installed, version) = manager.check_installed().await; + let mut status = manager.get_status().await; + status.installed = installed; + status.version = version; + if !installed { + status.running = false; + status.url = None; + } + Ok(Json(status)) + } else { + Ok(Json( + crate::modules::cloudflared::CloudflaredStatus::default(), + )) + } +} + +pub async fn cloudflared_install( + State(state): State, +) -> Result)> { + state + .cloudflared_state + .ensure_manager() + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + let lock = state.cloudflared_state.manager.read().await; + if let Some(manager) = lock.as_ref() { + let status = manager.install().await.map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(Json(status)) + } else { + Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: "Manager not initialized".to_string(), + }), + )) + } +} + +pub async fn cloudflared_start( + State(state): State, + Json(payload): Json, +) -> Result)> { + state + .cloudflared_state + .ensure_manager() + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + let lock = state.cloudflared_state.manager.read().await; + if let Some(manager) = lock.as_ref() { + let status = manager.start(payload.config).await.map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(Json(status)) + } else { + Err(( + 
StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: "Manager not initialized".to_string(), + }), + )) + } +} + +pub async fn cloudflared_stop( + State(state): State, +) -> Result)> { + state + .cloudflared_state + .ensure_manager() + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + let lock = state.cloudflared_state.manager.read().await; + if let Some(manager) = lock.as_ref() { + let status = manager.stop().await.map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(Json(status)) + } else { + Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: "Manager not initialized".to_string(), + }), + )) + } +} + +// ============================================================================ +// CLI Sync Handlers +// ============================================================================ + +use crate::proxy::server::types::{ + CliConfigContentRequest, CliRestoreRequest, CliSyncRequest, CliSyncStatusRequest, +}; + +pub async fn get_cli_sync_status( + Json(payload): Json, +) -> Result)> { + crate::proxy::cli_sync::get_cli_sync_status(payload.app_type, payload.proxy_url) + .await + .map(Json) + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + }) +} + +pub async fn execute_cli_sync( + Json(payload): Json, +) -> Result)> { + crate::proxy::cli_sync::execute_cli_sync(payload.app_type, payload.proxy_url, payload.api_key) + .await + .map(|_| StatusCode::OK) + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + }) +} + +pub async fn execute_cli_restore( + Json(payload): Json, +) -> Result)> { + crate::proxy::cli_sync::execute_cli_restore(payload.app_type) + .await + .map(|_| StatusCode::OK) + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + }) +} + +pub async fn 
get_cli_config_content( + Json(payload): Json, +) -> Result)> { + crate::proxy::cli_sync::get_cli_config_content(payload.app_type, payload.file_name) + .await + .map(Json) + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + }) +} + +pub async fn get_opencode_sync_status( + Json(payload): Json, +) -> Result)> { + crate::proxy::opencode_sync::get_opencode_sync_status(payload.proxy_url) + .await + .map(Json) + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + }) +} + +pub async fn execute_opencode_sync( + Json(payload): Json, +) -> Result)> { + crate::proxy::opencode_sync::execute_opencode_sync( + payload.proxy_url, + payload.api_key, + Some(payload.sync_accounts), + ) + .await + .map(|_| StatusCode::OK) + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + }) +} + +pub async fn execute_opencode_restore( +) -> Result)> { + crate::proxy::opencode_sync::execute_opencode_restore() + .await + .map(|_| StatusCode::OK) + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + }) +} + +pub async fn get_opencode_config_content( + Json(payload): Json, +) -> Result)> { + crate::proxy::opencode_sync::get_opencode_config_content(payload.file_name) + .await + .map(Json) + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + }) +} + +// ============================================================================ +// [FIX #820] Preferred Account Handlers +// ============================================================================ + +pub async fn get_preferred_account(State(state): State) -> impl IntoResponse { + let pref = state.token_manager.get_preferred_account().await; + Json(pref) +} + +#[derive(serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SetPreferredAccountRequest { + pub account_id: Option, +} + +pub async fn 
set_preferred_account( + State(state): State, + Json(payload): Json, +) -> impl IntoResponse { + state + .token_manager + .set_preferred_account(payload.account_id) + .await; + StatusCode::OK +} diff --git a/src-tauri/src/proxy/server/admin/stats.rs b/src-tauri/src/proxy/server/admin/stats.rs new file mode 100644 index 000000000..bae510caf --- /dev/null +++ b/src-tauri/src/proxy/server/admin/stats.rs @@ -0,0 +1,240 @@ +//! Token statistics admin handlers +//! +//! Handles all token usage statistics endpoints. + +use axum::{http::StatusCode, response::IntoResponse, Json}; + +use crate::modules::{logger, token_stats}; +use crate::proxy::server::types::{ErrorResponse, StatsPeriodQuery}; + +// ============================================================================ +// Token Statistics +// ============================================================================ + +pub async fn get_token_stats_hourly( + axum::extract::Query(p): axum::extract::Query, +) -> Result)> { + let hours = p.hours.unwrap_or(24); + let res = tokio::task::spawn_blocking(move || token_stats::get_hourly_stats(hours)).await; + + match res { + Ok(Ok(stats)) => Ok(Json(stats)), + Ok(Err(e)) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + )), + Err(e) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + )), + } +} + +pub async fn get_token_stats_daily( + axum::extract::Query(p): axum::extract::Query, +) -> Result)> { + let days = p.days.unwrap_or(7); + let res = tokio::task::spawn_blocking(move || token_stats::get_daily_stats(days)).await; + + match res { + Ok(Ok(stats)) => Ok(Json(stats)), + Ok(Err(e)) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + )), + Err(e) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + )), + } +} + +pub async fn get_token_stats_weekly( + axum::extract::Query(p): axum::extract::Query, +) -> Result)> { + 
let weeks = p.weeks.unwrap_or(4); + let res = tokio::task::spawn_blocking(move || token_stats::get_weekly_stats(weeks)).await; + + match res { + Ok(Ok(stats)) => Ok(Json(stats)), + Ok(Err(e)) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + )), + Err(e) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + )), + } +} + +pub async fn get_token_stats_by_account( + axum::extract::Query(p): axum::extract::Query, +) -> Result)> { + let hours = p.hours.unwrap_or(168); + let res = tokio::task::spawn_blocking(move || token_stats::get_account_stats(hours)).await; + + match res { + Ok(Ok(stats)) => Ok(Json(stats)), + Ok(Err(e)) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + )), + Err(e) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + )), + } +} + +pub async fn get_token_stats_summary( + axum::extract::Query(p): axum::extract::Query, +) -> Result)> { + let hours = p.hours.unwrap_or(168); + let res = tokio::task::spawn_blocking(move || token_stats::get_summary_stats(hours)).await; + + match res { + Ok(Ok(stats)) => Ok(Json(stats)), + Ok(Err(e)) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + )), + Err(e) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + )), + } +} + +pub async fn get_token_stats_by_model( + axum::extract::Query(p): axum::extract::Query, +) -> Result)> { + let hours = p.hours.unwrap_or(168); + let res = tokio::task::spawn_blocking(move || token_stats::get_model_stats(hours)).await; + + match res { + Ok(Ok(stats)) => Ok(Json(stats)), + Ok(Err(e)) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + )), + Err(e) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + )), + } +} + +pub async fn get_token_stats_model_trend_hourly( +) -> 
Result)> { + let res = + tokio::task::spawn_blocking(|| token_stats::get_model_trend_hourly(24)).await; + + match res { + Ok(Ok(stats)) => Ok(Json(stats)), + Ok(Err(e)) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + )), + Err(e) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + )), + } +} + +pub async fn get_token_stats_model_trend_daily( +) -> Result)> { + let res = + tokio::task::spawn_blocking(|| token_stats::get_model_trend_daily(7)).await; + + match res { + Ok(Ok(stats)) => Ok(Json(stats)), + Ok(Err(e)) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + )), + Err(e) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + )), + } +} + +pub async fn get_token_stats_account_trend_hourly( +) -> Result)> { + let res = + tokio::task::spawn_blocking(|| token_stats::get_account_trend_hourly(24)).await; + + match res { + Ok(Ok(stats)) => Ok(Json(stats)), + Ok(Err(e)) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + )), + Err(e) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + )), + } +} + +pub async fn get_token_stats_account_trend_daily( +) -> Result)> { + let res = + tokio::task::spawn_blocking(|| token_stats::get_account_trend_daily(7)).await; + + match res { + Ok(Ok(stats)) => Ok(Json(stats)), + Ok(Err(e)) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + )), + Err(e) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + )), + } +} + +pub async fn clear_token_stats() -> impl IntoResponse { + let res = tokio::task::spawn_blocking(|| { + // Clear databases (brute force) + if let Ok(path) = token_stats::get_db_path() { + let _ = std::fs::remove_file(path); + } + let _ = token_stats::init_db(); + }) + .await; + + match res { + Ok(_) => { + 
logger::log_info("[API] Cleared all token statistics"); + StatusCode::OK + } + Err(e) => { + logger::log_error(&format!("[API] Failed to clear token stats: {}", e)); + StatusCode::INTERNAL_SERVER_ERROR + } + } +} diff --git a/src-tauri/src/proxy/server/admin/system.rs b/src-tauri/src/proxy/server/admin/system.rs new file mode 100644 index 000000000..000e6d162 --- /dev/null +++ b/src-tauri/src/proxy/server/admin/system.rs @@ -0,0 +1,295 @@ +//! System and configuration admin handlers +//! +//! Handles configuration management, update checks, autostart, and file operations. + +use axum::{ + extract::State, + http::StatusCode, + response::IntoResponse, + Json, +}; +use std::path::Path; + +use crate::proxy::server::types::{AppState, ErrorResponse, SaveConfigWrapper, SaveFileRequest}; + +fn validate_save_path(path: &str) -> Result<(), String> { + if path.trim().is_empty() { + return Err("File path cannot be empty".to_string()); + } + + let normalized = path.replace('\\', "/").to_ascii_lowercase(); + if normalized.contains("../") || normalized.contains("..\\") || normalized.ends_with("/..") { + return Err("Path traversal is not allowed".to_string()); + } + + let forbidden_prefixes = [ + "/etc/", + "/proc/", + "/sys/", + "/dev/", + "/root/", + "/var/spool/cron", + "c:/windows", + "c:/programdata", + ]; + + if forbidden_prefixes + .iter() + .any(|prefix| normalized.starts_with(prefix)) + { + return Err("Access to system-sensitive path is denied".to_string()); + } + + if let Some(parent) = Path::new(path).parent() { + if !parent.exists() { + return Err("Target directory does not exist".to_string()); + } + } + + Ok(()) +} + +// ============================================================================ +// Configuration +// ============================================================================ + +pub async fn get_config() -> Result)> { + let cfg = crate::modules::config::load_app_config().map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { 
error: e }), + ) + })?; + Ok(Json(cfg)) +} + +pub async fn save_config( + State(state): State, + Json(payload): Json, +) -> Result)> { + let new_config = payload.config; + + // 1. Persist to disk + crate::modules::config::save_app_config(&new_config).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + // 2. Hot-update memory state + // Update model mapping + { + let mut mapping = state.custom_mapping.write().await; + *mapping = new_config.clone().proxy.custom_mapping; + } + + // Update upstream proxy + { + let mut proxy = state.upstream_proxy.write().await; + *proxy = new_config.clone().proxy.upstream_proxy; + } + + // Update security policy + { + let mut security = state.security.write().await; + *security = crate::proxy::ProxySecurityConfig::from_proxy_config(&new_config.proxy); + } + + // Update z.ai config + { + let mut zai = state.zai.write().await; + *zai = new_config.clone().proxy.zai; + } + + // Update experimental config + { + let mut exp = state.experimental.write().await; + *exp = new_config.clone().proxy.experimental; + } + + Ok(StatusCode::OK) +} + +// ============================================================================ +// Update Management +// ============================================================================ + +pub async fn get_update_settings() -> impl IntoResponse { + match crate::modules::update_checker::load_update_settings() { + Ok(s) => Json(serde_json::to_value(s).unwrap_or_default()), + Err(_) => Json(serde_json::json!({ + "auto_check": true, + "last_check_time": 0, + "check_interval_hours": 24 + })), + } +} + +pub async fn should_check_updates( +) -> Result)> { + let settings = crate::modules::update_checker::load_update_settings().map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + ) + })?; + let should = crate::modules::update_checker::should_check_for_updates(&settings); + Ok(Json(should)) +} + +pub async fn 
check_for_updates( +) -> Result)> { + let info = crate::modules::update_checker::check_for_updates() + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(Json(info)) +} + +pub async fn update_last_check_time( +) -> Result)> { + crate::modules::update_checker::update_last_check_time().map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(StatusCode::OK) +} + +pub async fn save_update_settings(Json(settings): Json) -> impl IntoResponse { + if let Ok(s) = + serde_json::from_value::(settings) + { + let _ = crate::modules::update_checker::save_update_settings(&s); + StatusCode::OK + } else { + StatusCode::BAD_REQUEST + } +} + +// ============================================================================ +// Autostart +// ============================================================================ + +pub async fn is_auto_launch_enabled() -> impl IntoResponse { + // Note: Autostart requires tauri::AppHandle, not available in Axum State easily. + // Return false in Web mode. + Json(false) +} + +pub async fn toggle_auto_launch(Json(_payload): Json) -> impl IntoResponse { + // Note: Autostart requires tauri::AppHandle. 
+ StatusCode::NOT_IMPLEMENTED +} + +// ============================================================================ +// HTTP API Settings +// ============================================================================ + +pub async fn get_http_api_settings() -> impl IntoResponse { + Json(serde_json::json!({ "enabled": true, "port": 8045 })) +} + +pub async fn save_http_api_settings( + Json(payload): Json, +) -> Result)> { + crate::modules::http_api::save_settings(&payload).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + Ok(StatusCode::OK) +} + +// ============================================================================ +// File Operations +// ============================================================================ + +pub async fn get_data_dir_path() -> impl IntoResponse { + match crate::modules::account::get_data_dir() { + Ok(p) => Json(p.to_string_lossy().to_string()), + Err(e) => Json(format!("Error: {}", e)), + } +} + +pub async fn save_text_file( + Json(payload): Json, +) -> Result)> { + if let Err(e) = validate_save_path(&payload.path) { + return Err((StatusCode::BAD_REQUEST, Json(ErrorResponse { error: e }))); + } + + let res = tokio::task::spawn_blocking(move || { + std::fs::write(&payload.path, &payload.content) + }) + .await; + + match res { + Ok(Ok(_)) => Ok(StatusCode::OK), + Ok(Err(e)) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + )), + Err(e) => Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + )), + } +} + +pub async fn open_folder() -> Result)> { + crate::commands::system::open_data_folder() + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + ) + })?; + Ok(StatusCode::OK) +} + +pub async fn get_antigravity_path( +) -> Result)> { + let path = crate::commands::system::get_antigravity_path(Some(true)) + .await + 
.map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + ) + })?; + Ok(Json(path)) +} + +pub async fn get_antigravity_args( +) -> Result)> { + let args = crate::commands::system::get_antigravity_args() + .await + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: e.to_string(), + }), + ) + })?; + Ok(Json(args)) +} diff --git a/src-tauri/src/proxy/server/mod.rs b/src-tauri/src/proxy/server/mod.rs new file mode 100644 index 000000000..0b7842105 --- /dev/null +++ b/src-tauri/src/proxy/server/mod.rs @@ -0,0 +1,437 @@ +//! Axum Server Module +//! +//! This module provides the HTTP server implementation for the proxy service. +//! It has been refactored from a monolithic 2000+ line file into organized submodules. + +pub mod admin; +pub mod oauth; +pub mod routes; +pub mod types; + +// Re-export main types for external use +pub use types::AppState; + +use crate::proxy::TokenManager; +use dashmap::DashSet; +use std::sync::atomic::AtomicUsize; +use std::sync::Arc; +use std::sync::OnceLock; +use tokio::sync::oneshot; +use tokio::sync::RwLock; +use tracing::{debug, error}; + +// ============================================================================= +// [FIX] Global queue for pending account reloads +// When update_account_quota updates protected_models, account ID is added here +// TokenManager checks and processes these accounts in get_token +// +// [PERF] Using DashSet instead of std::sync::RwLock to avoid blocking tokio workers +// ============================================================================= + +static PENDING_RELOAD_ACCOUNTS: OnceLock> = OnceLock::new(); + +fn get_pending_reload_accounts() -> &'static DashSet { + PENDING_RELOAD_ACCOUNTS.get_or_init(DashSet::new) +} + +/// Trigger account reload signal (called by update_account_quota) +pub fn trigger_account_reload(account_id: &str) { + let pending = get_pending_reload_accounts(); + 
pending.insert(account_id.to_string()); + tracing::debug!( + "[Quota] Queued account {} for TokenManager reload", + account_id + ); +} + +/// Get and clear pending reload accounts (called by TokenManager) +pub fn take_pending_reload_accounts() -> Vec { + let pending = get_pending_reload_accounts(); + let accounts: Vec = pending.iter().map(|r| r.clone()).collect(); + if !accounts.is_empty() { + pending.clear(); + tracing::debug!( + "[Quota] Taking {} pending accounts for reload", + accounts.len() + ); + } + accounts +} + +/// Axum server instance +#[derive(Clone)] +pub struct AxumServer { + shutdown_tx: Arc>>>, + custom_mapping: Arc>>, + proxy_state: Arc>, + security_state: Arc>, + pub security_monitor_state: Arc>, + zai_state: Arc>, + experimental: Arc>, + debug_logging: Arc>, + pub cloudflared_state: Arc, + pub is_running: Arc>, + pub upstream: Arc, + /// [FIX] Exposed TokenManager for proxy service reuse + pub token_manager: Arc, +} + +impl AxumServer { + /// Update model mapping (hot-reload) + pub async fn update_mapping(&self, config: &crate::proxy::config::ProxyConfig) { + { + let mut m = self.custom_mapping.write().await; + *m = config.custom_mapping.clone(); + } + tracing::debug!("Model mapping (Custom) hot-reloaded"); + } + + /// Update upstream proxy configuration + pub async fn update_proxy(&self, new_config: crate::proxy::config::UpstreamProxyConfig) { + let mut proxy = self.proxy_state.write().await; + *proxy = new_config.clone(); + + // [FIX] Also update underlying reqwest Client + self.upstream.rebuild_client(Some(new_config)).await; + + tracing::info!("Upstream proxy config hot-reloaded (including HTTP Client)"); + } + + /// Update security configuration + pub async fn update_security(&self, config: &crate::proxy::config::ProxyConfig) { + let mut sec = self.security_state.write().await; + *sec = crate::proxy::ProxySecurityConfig::from_proxy_config(config); + tracing::info!("Proxy security config hot-reloaded"); + } + + /// Update z.ai configuration + 
pub async fn update_zai(&self, config: &crate::proxy::config::ProxyConfig) { + let mut zai = self.zai_state.write().await; + *zai = config.zai.clone(); + tracing::info!("z.ai config hot-reloaded"); + } + + /// Update experimental configuration + pub async fn update_experimental(&self, config: &crate::proxy::config::ProxyConfig) { + let mut exp = self.experimental.write().await; + *exp = config.experimental.clone(); + tracing::info!("Experimental config hot-reloaded"); + } + + /// Update debug logging configuration + pub async fn update_debug_logging(&self, config: &crate::proxy::config::ProxyConfig) { + let mut dbg_cfg = self.debug_logging.write().await; + *dbg_cfg = config.debug_logging.clone(); + tracing::info!("Debug logging config hot-reloaded"); + } + + /// Update security monitor config (IP blacklist/whitelist) + pub async fn update_security_monitor(&self, config: &crate::proxy::config::ProxyConfig) { + let mut sec_mon = self.security_monitor_state.write().await; + *sec_mon = config.security_monitor.clone(); + tracing::info!("[Security] IP filtering config hot-reloaded"); + } + + /// Update User-Agent configuration (hot-reload) + pub async fn update_user_agent(&self, config: &crate::proxy::config::ProxyConfig) { + self.upstream + .set_user_agent_override(config.user_agent_override.clone()) + .await; + tracing::info!("User-Agent config hot-reloaded: {:?}", config.user_agent_override); + } + + /// Set running state + pub async fn set_running(&self, running: bool) { + let mut r = self.is_running.write().await; + *r = running; + tracing::info!("Proxy service running state updated to: {}", running); + } + + /// Start the Axum server + pub async fn start( + host: String, + port: u16, + token_manager: Arc, + custom_mapping: std::collections::HashMap, + _request_timeout: u64, + upstream_proxy: crate::proxy::config::UpstreamProxyConfig, + user_agent_override: Option, + security_config: crate::proxy::ProxySecurityConfig, + zai_config: crate::proxy::ZaiConfig, + 
monitor: Arc, + experimental_config: crate::proxy::config::ExperimentalConfig, + debug_logging: crate::proxy::config::DebugLoggingConfig, + integration: crate::modules::integration::SystemManager, + cloudflared_state: Arc, + ) -> Result<(Self, tokio::task::JoinHandle<()>), String> { + let custom_mapping_state = Arc::new(tokio::sync::RwLock::new(custom_mapping)); + let proxy_state = Arc::new(tokio::sync::RwLock::new(upstream_proxy.clone())); + let security_state = Arc::new(RwLock::new(security_config)); + let zai_state = Arc::new(RwLock::new(zai_config)); + let provider_rr = Arc::new(AtomicUsize::new(0)); + let zai_vision_mcp_state = + Arc::new(crate::proxy::zai_vision_mcp::ZaiVisionMcpState::new()); + let experimental_state = Arc::new(RwLock::new(experimental_config)); + let debug_logging_state = Arc::new(RwLock::new(debug_logging)); + let is_running_state = Arc::new(RwLock::new(true)); + + // Create upstream client once and share between AppState and AxumServer + let upstream_client = Arc::new(crate::proxy::upstream::client::UpstreamClient::new(Some( + upstream_proxy.clone(), + ))); + + // Initialize User-Agent override if configured + if user_agent_override.is_some() { + upstream_client.set_user_agent_override(user_agent_override).await; + } + + let state = AppState { + token_manager: token_manager.clone(), + custom_mapping: custom_mapping_state.clone(), + request_timeout: 300, // 5 minutes + thought_signature_map: Arc::new(tokio::sync::Mutex::new( + std::collections::HashMap::new(), + )), + upstream_proxy: proxy_state.clone(), + upstream: upstream_client.clone(), + zai: zai_state.clone(), + provider_rr: provider_rr.clone(), + zai_vision_mcp: zai_vision_mcp_state, + monitor: monitor.clone(), + experimental: experimental_state.clone(), + debug_logging: debug_logging_state.clone(), + switching: Arc::new(RwLock::new(false)), + integration: integration.clone(), + account_service: Arc::new(crate::modules::account_service::AccountService::new( + integration.clone(), + 
)), + security: security_state.clone(), + cloudflared_state: cloudflared_state.clone(), + is_running: is_running_state.clone(), + port, + }; + + // Build routes + use crate::proxy::middleware::{ + admin_auth_middleware, auth_middleware, cors_layer, ip_filter_middleware, + monitor_middleware, service_status_middleware, SecurityState, + }; + + // Create security monitor state for IP filtering + let security_monitor_state: SecurityState = Arc::new(RwLock::new( + crate::proxy::config::SecurityMonitorConfig::default(), + )); + + // Initialize security database + if let Err(e) = crate::modules::security_db::init_db() { + tracing::warn!("[Security] Failed to initialize security database: {}", e); + } + + // 1. Build proxy routes (AI endpoints with auth) + let proxy_routes = routes::build_proxy_routes() + .layer(axum::middleware::from_fn_with_state( + state.clone(), + auth_middleware, + )) + .layer(axum::middleware::from_fn_with_state( + state.clone(), + monitor_middleware, + )); + + // 2. Build admin routes (forced auth) + let admin_routes = routes::build_admin_routes().layer( + axum::middleware::from_fn_with_state(state.clone(), admin_auth_middleware), + ); + + // 3. 
Combine and apply global layers + let max_body_size: usize = std::env::var("ABV_MAX_BODY_SIZE") + .ok() + .and_then(|s| s.parse().ok()) + .unwrap_or(100 * 1024 * 1024); // Default 100MB + tracing::info!("Request body size limit: {} MB", max_body_size / 1024 / 1024); + + let app = axum::Router::new() + .nest("/api", admin_routes) + .merge(proxy_routes) + // Public routes (no auth) + .route("/auth/callback", axum::routing::get(oauth::handle_oauth_callback)) + // Health check endpoint (no IP filter) + .route("/healthz", axum::routing::get(routes::health_check)) + // Apply global monitoring and status layers + .layer(axum::middleware::from_fn(ip_filter_middleware)) + .layer(axum::Extension(security_monitor_state.clone())) + .layer(axum::middleware::from_fn_with_state( + state.clone(), + service_status_middleware, + )) + .layer(cors_layer()) + .layer(axum::extract::DefaultBodyLimit::max(max_body_size)) + .with_state(state.clone()); + + // Static file hosting (for Headless/Docker mode) + let dist_path = std::env::var("ABV_DIST_PATH").unwrap_or_else(|_| "dist".to_string()); + let app = if std::path::Path::new(&dist_path).exists() { + tracing::info!("Hosting static assets from: {}", dist_path); + app.fallback_service( + tower_http::services::ServeDir::new(&dist_path).fallback( + tower_http::services::ServeFile::new(format!("{}/index.html", dist_path)), + ), + ) + } else { + app + }; + + // Bind address + let addr = format!("{}:{}", host, port); + let listener = tokio::net::TcpListener::bind(&addr) + .await + .map_err(|e| format!("Failed to bind address {}: {}", addr, e))?; + + tracing::info!("Proxy server started on http://{}", addr); + + // Create shutdown channel + let (shutdown_tx, mut shutdown_rx) = oneshot::channel::<()>(); + + let server_instance = Self { + shutdown_tx: Arc::new(tokio::sync::Mutex::new(Some(shutdown_tx))), + custom_mapping: custom_mapping_state.clone(), + proxy_state, + security_state, + security_monitor_state: security_monitor_state.clone(), + 
zai_state, + experimental: experimental_state.clone(), + debug_logging: debug_logging_state.clone(), + cloudflared_state, + is_running: is_running_state, + upstream: upstream_client, + token_manager: token_manager.clone(), + }; + + // [PERF] Connection limiter to prevent resource exhaustion under high load + // Default: 10K concurrent connections (configurable via ABV_MAX_CONNECTIONS env) + let max_connections: usize = std::env::var("ABV_MAX_CONNECTIONS") + .ok() + .and_then(|s| s.parse().ok()) + .unwrap_or(10_000); + let connection_semaphore = Arc::new(tokio::sync::Semaphore::new(max_connections)); + tracing::info!( + "Connection limiter initialized: max {} concurrent connections", + max_connections + ); + + // Start server in a new task + let handle = tokio::spawn(async move { + use hyper_util::rt::{TokioExecutor, TokioIo}; + use hyper_util::service::TowerToHyperService; + + // [PERF] Track active connections for monitoring + let active_connections = Arc::new(AtomicUsize::new(0)); + + loop { + tokio::select! 
{ + res = listener.accept() => { + match res { + Ok((stream, remote_addr)) => { + // [PERF] Acquire semaphore permit before spawning + let permit = match connection_semaphore.clone().try_acquire_owned() { + Ok(p) => p, + Err(_) => { + // Connection limit reached - reject gracefully + tracing::warn!( + "Connection limit reached ({} active), rejecting new connection from {}", + active_connections.load(std::sync::atomic::Ordering::Relaxed), + remote_addr + ); + // Drop stream immediately to reject connection + drop(stream); + continue; + } + }; + + let io = TokioIo::new(stream); + let active_count = active_connections.clone(); + active_count.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + + // [FIX] Inject ConnectInfo for real IP extraction + use tower::util::ServiceExt; + use hyper::body::Incoming; + let app_with_info = app.clone().map_request(move |mut req: axum::http::Request| { + req.extensions_mut().insert(axum::extract::ConnectInfo(remote_addr)); + req + }); + + let service = TowerToHyperService::new(app_with_info); + + tokio::task::spawn(async move { + // [PERF] Try HTTP/2 first via auto-detection, fallback to HTTP/1.1 + // Using hyper's auto HTTP version detection + let result = hyper_util::server::conn::auto::Builder::new(TokioExecutor::new()) + .http1() + .keep_alive(true) + .http2() + .max_concurrent_streams(250) + .serve_connection_with_upgrades(io, service) + .await; + + if let Err(err) = result { + debug!("Connection handler finished or errored: {:?}", err); + } + + // [PERF] Release connection count and permit + active_count.fetch_sub(1, std::sync::atomic::Ordering::Relaxed); + drop(permit); + }); + } + Err(e) => { + error!("Failed to accept connection: {:?}", e); + } + } + } + _ = &mut shutdown_rx => { + tracing::info!("Proxy server stopped listening, waiting for active connections to drain..."); + + // [PERF] Graceful shutdown: wait for active connections to complete + // Maximum wait time: 30 seconds + let drain_start = 
std::time::Instant::now(); + let max_drain_time = std::time::Duration::from_secs(30); + + loop { + let active = active_connections.load(std::sync::atomic::Ordering::Relaxed); + if active == 0 { + tracing::info!("All connections drained successfully"); + break; + } + + if drain_start.elapsed() > max_drain_time { + tracing::warn!( + "Graceful shutdown timeout reached with {} active connections, forcing shutdown", + active + ); + break; + } + + tracing::debug!("Waiting for {} active connections to drain...", active); + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + } + + break; + } + } + } + }); + + Ok((server_instance, handle)) + } + + /// Stop the server with graceful connection draining + pub fn stop(&self) { + let tx_mutex = self.shutdown_tx.clone(); + tokio::spawn(async move { + let mut lock = tx_mutex.lock().await; + if let Some(tx) = lock.take() { + let _ = tx.send(()); + tracing::info!("Axum server stop signal sent (graceful shutdown initiated)"); + } + }); + } +} diff --git a/src-tauri/src/proxy/server/oauth.rs b/src-tauri/src/proxy/server/oauth.rs new file mode 100644 index 000000000..c017e0286 --- /dev/null +++ b/src-tauri/src/proxy/server/oauth.rs @@ -0,0 +1,240 @@ +//! OAuth callback handlers +//! +//! Handles OAuth callback processing and Web-based OAuth flow. 
+ +use axum::{ + extract::{Query, State}, + http::{HeaderMap, StatusCode}, + response::{Html, Json}, +}; +use tracing::error; + +use crate::proxy::server::types::{AppState, ErrorResponse}; + +// ============================================================================ +// OAuth Types +// ============================================================================ + +#[derive(serde::Deserialize)] +pub struct OAuthParams { + pub code: String, + pub state: Option, + #[allow(dead_code)] + pub scope: Option, +} + +// ============================================================================ +// OAuth Callback Handler +// ============================================================================ + +pub async fn handle_oauth_callback( + Query(params): Query, + headers: HeaderMap, + State(state): State, +) -> Result, StatusCode> { + let code = params.code; + + // Exchange token + let port = state.security.read().await.port; + let host = headers.get("host").and_then(|h| h.to_str().ok()); + let proto = headers.get("x-forwarded-proto").and_then(|h| h.to_str().ok()); + let redirect_uri = get_oauth_redirect_uri(port, host, proto); + + match state.token_manager.exchange_code(&code, &redirect_uri).await { + Ok(refresh_token) => { + match state.token_manager.get_user_info(&refresh_token).await { + Ok(user_info) => { + let email = user_info.email; + if let Err(e) = state.token_manager.add_account(&email, &refresh_token).await { + error!("Failed to add account: {}", e); + return Ok(Html(format!( + r#"

Authorization Failed

Failed to save account: {}

"#, + e + ))); + } + } + Err(e) => { + error!("Failed to get user info: {}", e); + return Ok(Html(format!( + r#"

Authorization Failed

Failed to get user info: {}

"#, + e + ))); + } + } + + // Success HTML + Ok(Html(format!(r#" + + + + Authorization Successful + + + +
+
OK
+

Authorization Successful

+

You can close this window now. The application should refresh automatically.

+ +
+ Did it not refresh? + If the application is running in a container or remote environment, you may need to manually copy the link below: + +
+
+ + + + "#))) + } + Err(e) => { + error!("OAuth exchange failed: {}", e); + Ok(Html(format!( + r#"

Authorization Failed

Error: {}

"#, + e + ))) + } + } +} + +// ============================================================================ +// Web OAuth URL Preparation +// ============================================================================ + +pub async fn prepare_oauth_url_web( + headers: HeaderMap, + State(state): State, +) -> Result, (StatusCode, Json)> { + let port = state.security.read().await.port; + let host = headers.get("host").and_then(|h| h.to_str().ok()); + let proto = headers.get("x-forwarded-proto").and_then(|h| h.to_str().ok()); + let redirect_uri = get_oauth_redirect_uri(port, host, proto); + + let state_str = uuid::Uuid::new_v4().to_string(); + + // Initialize OAuth flow state and background handler + let (auth_url, mut code_rx) = + crate::modules::oauth_server::prepare_oauth_flow_manually(redirect_uri.clone(), state_str.clone()) + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + // Start background task to handle callback/manual code submission + let token_manager = state.token_manager.clone(); + let redirect_uri_clone = redirect_uri.clone(); + tokio::spawn(async move { + match code_rx.recv().await { + Some(Ok(code)) => { + crate::modules::logger::log_info( + "Consuming manually submitted OAuth code in background", + ); + // Simplified backend flow for web callback + match crate::modules::oauth::exchange_code(&code, &redirect_uri_clone).await { + Ok(token_resp) => { + // Success! 
Now add/upsert account + if let Some(refresh_token) = &token_resp.refresh_token { + match token_manager.get_user_info(refresh_token).await { + Ok(user_info) => { + if let Err(e) = token_manager + .add_account(&user_info.email, refresh_token) + .await + { + crate::modules::logger::log_error(&format!( + "Failed to save account in background OAuth: {}", + e + )); + } else { + crate::modules::logger::log_info(&format!( + "Successfully added account {} via background OAuth", + user_info.email + )); + } + } + Err(e) => { + crate::modules::logger::log_error(&format!( + "Failed to fetch user info in background OAuth: {}", + e + )); + } + } + } else { + crate::modules::logger::log_error( + "Background OAuth error: Google did not return a refresh_token.", + ); + } + } + Err(e) => { + crate::modules::logger::log_error(&format!( + "Background OAuth exchange failed: {}", + e + )); + } + } + } + Some(Err(e)) => { + crate::modules::logger::log_error(&format!("Background OAuth flow error: {}", e)); + } + None => { + crate::modules::logger::log_info("Background OAuth flow channel closed"); + } + } + }); + + Ok(Json(serde_json::json!({ + "url": auth_url, + "state": state_str + }))) +} + +// ============================================================================ +// Helper Functions +// ============================================================================ + +/// Get OAuth redirect URI +/// Forces localhost to bypass Google 2.0 policy restrictions on IP addresses and non-HTTPS. +/// Only uses external address when ABV_PUBLIC_URL is explicitly set (e.g., user configured HTTPS domain). +pub fn get_oauth_redirect_uri(port: u16, _host: Option<&str>, _proto: Option<&str>) -> String { + if let Ok(public_url) = std::env::var("ABV_PUBLIC_URL") { + let base = public_url.trim_end_matches('/'); + format!("{}/auth/callback", base) + } else { + // Force localhost. For remote deployments, users can complete auth via fallback feature. 
+ format!("http://localhost:{}/auth/callback", port) + } +} diff --git a/src-tauri/src/proxy/server/routes.rs b/src-tauri/src/proxy/server/routes.rs new file mode 100644 index 000000000..0f7b7f4ca --- /dev/null +++ b/src-tauri/src/proxy/server/routes.rs @@ -0,0 +1,244 @@ +//! Route definitions for the Axum server +//! +//! This module defines all API routes and builds the router. + +use axum::{ + routing::{any, delete, get, post}, + Router, +}; + +use crate::proxy::server::admin; +use crate::proxy::server::oauth; +use crate::proxy::server::types::AppState; + +/// Build the admin API routes (requires authentication) +pub fn build_admin_routes() -> Router { + Router::new() + // Health check + .route("/health", get(health_check)) + // Account management + .route("/accounts", get(admin::list_accounts).post(admin::add_account)) + .route("/accounts/current", get(admin::get_current_account)) + .route("/accounts/switch", post(admin::switch_account)) + .route("/accounts/refresh", post(admin::refresh_all_quotas)) + .route("/accounts/:accountId", delete(admin::delete_account)) + .route("/accounts/:accountId/bind-device", post(admin::bind_device)) + .route("/accounts/:accountId/device-profiles", get(admin::get_device_profiles)) + .route("/accounts/:accountId/device-versions", get(admin::list_device_versions)) + .route("/accounts/device-preview", post(admin::preview_generate_profile)) + .route( + "/accounts/:accountId/bind-device-profile", + post(admin::bind_device_profile_with_profile), + ) + .route("/accounts/restore-original", post(admin::restore_original_device)) + .route( + "/accounts/:accountId/device-versions/:versionId/restore", + post(admin::restore_device_version), + ) + .route( + "/accounts/:accountId/device-versions/:versionId", + delete(admin::delete_device_version), + ) + // Import + .route("/accounts/import/v1", post(admin::import_v1_accounts)) + .route("/accounts/import/db", post(admin::import_from_db)) + .route("/accounts/import/db-custom", 
post(admin::import_custom_db)) + .route("/accounts/sync/db", post(admin::sync_account_from_db)) + // Statistics (legacy paths) + .route("/stats/summary", get(admin::get_token_stats_summary)) + .route("/stats/hourly", get(admin::get_token_stats_hourly)) + .route("/stats/daily", get(admin::get_token_stats_daily)) + .route("/stats/weekly", get(admin::get_token_stats_weekly)) + .route("/stats/accounts", get(admin::get_token_stats_by_account)) + .route("/stats/models", get(admin::get_token_stats_by_model)) + // Configuration + .route("/config", get(admin::get_config).post(admin::save_config)) + // CLI sync + .route("/proxy/cli/status", post(admin::get_cli_sync_status)) + .route("/proxy/cli/sync", post(admin::execute_cli_sync)) + .route("/proxy/cli/restore", post(admin::execute_cli_restore)) + .route("/proxy/cli/config", post(admin::get_cli_config_content)) + // OpenCode sync + .route("/proxy/opencode/status", post(admin::get_opencode_sync_status)) + .route("/proxy/opencode/sync", post(admin::execute_opencode_sync)) + .route( + "/proxy/opencode/restore", + post(admin::execute_opencode_restore), + ) + .route( + "/proxy/opencode/config", + post(admin::get_opencode_config_content), + ) + // Proxy control + .route("/proxy/status", get(admin::get_proxy_status)) + .route("/proxy/start", post(admin::start_proxy_service)) + .route("/proxy/stop", post(admin::stop_proxy_service)) + .route("/proxy/mapping", post(admin::update_model_mapping)) + .route("/proxy/api-key/generate", post(admin::generate_api_key)) + .route("/proxy/session-bindings/clear", post(admin::clear_proxy_session_bindings)) + .route("/proxy/rate-limits", delete(admin::clear_all_rate_limits)) + .route("/proxy/rate-limits/:accountId", delete(admin::clear_rate_limit)) + // [FIX #820] Preferred account + .route( + "/proxy/preferred-account", + get(admin::get_preferred_account).post(admin::set_preferred_account), + ) + // OAuth (Admin endpoints) + .route("/accounts/oauth/prepare", post(admin::prepare_oauth_url)) + 
.route("/accounts/oauth/start", post(admin::start_oauth_login)) + .route("/accounts/oauth/complete", post(admin::complete_oauth_login)) + .route("/accounts/oauth/cancel", post(admin::cancel_oauth_login)) + .route("/accounts/oauth/submit-code", post(admin::submit_oauth_code)) + // z.ai + .route("/zai/models/fetch", post(admin::fetch_zai_models)) + // Monitor + .route("/proxy/monitor/toggle", post(admin::set_proxy_monitor_enabled)) + // Cloudflared + .route("/proxy/cloudflared/status", get(admin::cloudflared_get_status)) + .route("/proxy/cloudflared/install", post(admin::cloudflared_install)) + .route("/proxy/cloudflared/start", post(admin::cloudflared_start)) + .route("/proxy/cloudflared/stop", post(admin::cloudflared_stop)) + // System + .route("/system/open-folder", post(admin::open_folder)) + .route("/proxy/stats", get(admin::get_proxy_stats)) + // Logs + .route("/logs", get(admin::get_proxy_logs_filtered)) + .route("/logs/count", get(admin::get_proxy_logs_count_filtered)) + .route("/logs/clear", post(admin::clear_proxy_logs)) + .route("/logs/:logId", get(admin::get_proxy_log_detail)) + // Token stats (new paths) + .route("/stats/token/clear", post(admin::clear_token_stats)) + .route("/stats/token/hourly", get(admin::get_token_stats_hourly)) + .route("/stats/token/daily", get(admin::get_token_stats_daily)) + .route("/stats/token/weekly", get(admin::get_token_stats_weekly)) + .route("/stats/token/by-account", get(admin::get_token_stats_by_account)) + .route("/stats/token/summary", get(admin::get_token_stats_summary)) + .route("/stats/token/by-model", get(admin::get_token_stats_by_model)) + .route( + "/stats/token/model-trend/hourly", + get(admin::get_token_stats_model_trend_hourly), + ) + .route( + "/stats/token/model-trend/daily", + get(admin::get_token_stats_model_trend_daily), + ) + .route( + "/stats/token/account-trend/hourly", + get(admin::get_token_stats_account_trend_hourly), + ) + .route( + "/stats/token/account-trend/daily", + 
get(admin::get_token_stats_account_trend_daily), + ) + // Account bulk operations + .route("/accounts/bulk-delete", post(admin::delete_accounts)) + .route("/accounts/export", post(admin::export_accounts)) + .route("/accounts/reorder", post(admin::reorder_accounts)) + .route("/accounts/:accountId/quota", get(admin::fetch_account_quota)) + .route("/accounts/:accountId/toggle-proxy", post(admin::toggle_proxy_status)) + // Warmup + .route("/accounts/warmup", post(admin::warm_up_all_accounts)) + .route("/accounts/:accountId/warmup", post(admin::warm_up_account)) + // System paths + .route("/system/data-dir", get(admin::get_data_dir_path)) + .route("/system/save-file", post(admin::save_text_file)) + .route("/system/updates/settings", get(admin::get_update_settings)) + .route("/system/updates/check-status", get(admin::should_check_updates)) + .route("/system/updates/check", post(admin::check_for_updates)) + .route("/system/updates/touch", post(admin::update_last_check_time)) + .route("/system/updates/save", post(admin::save_update_settings)) + .route("/system/autostart/status", get(admin::is_auto_launch_enabled)) + .route("/system/autostart/toggle", post(admin::toggle_auto_launch)) + .route( + "/system/http-api/settings", + get(admin::get_http_api_settings).post(admin::save_http_api_settings), + ) + .route("/system/antigravity/path", get(admin::get_antigravity_path)) + .route("/system/antigravity/args", get(admin::get_antigravity_args)) + // OAuth (Web) - Admin interface + .route("/auth/url", get(oauth::prepare_oauth_url_web)) +} + +/// Build the proxy API routes (AI endpoints) +pub fn build_proxy_routes() -> Router { + use crate::proxy::handlers; + + Router::new() + // OpenAI Protocol + .route("/v1/models", get(handlers::openai::handle_list_models)) + .route( + "/v1/chat/completions", + post(handlers::openai::handle_chat_completions), + ) + .route( + "/v1/completions", + post(handlers::openai::handle_completions), + ) + .route("/v1/responses", 
post(handlers::openai::handle_completions)) // Codex CLI compat + .route( + "/v1/images/generations", + post(handlers::openai::handle_images_generations), + ) + .route( + "/v1/images/edits", + post(handlers::openai::handle_images_edits), + ) + .route( + "/v1/audio/transcriptions", + post(handlers::audio::handle_audio_transcription), + ) + // Claude Protocol + .route("/v1/messages", post(handlers::claude::handle_messages)) + .route( + "/v1/messages/count_tokens", + post(handlers::claude::handle_count_tokens), + ) + .route( + "/v1/models/claude", + get(handlers::claude::handle_list_models), + ) + // z.ai MCP (optional reverse-proxy) + .route( + "/mcp/web_search_prime/mcp", + any(handlers::mcp::handle_web_search_prime), + ) + .route( + "/mcp/web_reader/mcp", + any(handlers::mcp::handle_web_reader), + ) + .route( + "/mcp/zai-mcp-server/mcp", + any(handlers::mcp::handle_zai_mcp_server), + ) + // Gemini Protocol (Native) + .route("/v1beta/models", get(handlers::gemini::handle_list_models)) + .route( + "/v1beta/models/:model", + get(handlers::gemini::handle_get_model).post(handlers::gemini::handle_generate), + ) + .route( + "/v1beta/models/:model/countTokens", + post(handlers::gemini::handle_count_tokens), + ) + // Common endpoints + .route("/v1/models/detect", post(handlers::common::handle_detect_model)) + .route("/internal/warmup", post(handlers::warmup::handle_warmup)) + // Telemetry intercept + .route("/v1/api/event_logging/batch", post(silent_ok)) + .route("/v1/api/event_logging", post(silent_ok)) +} + +/// Health check handler +pub async fn health_check() -> axum::response::Response { + axum::Json(serde_json::json!({ + "status": "ok" + })) + .into_response() +} + +/// Silent OK handler (for telemetry intercept) +pub async fn silent_ok() -> axum::response::Response { + axum::http::StatusCode::OK.into_response() +} + +use axum::response::IntoResponse; diff --git a/src-tauri/src/proxy/server/types.rs b/src-tauri/src/proxy/server/types.rs new file mode 100644 index 
000000000..566d8fa0b --- /dev/null +++ b/src-tauri/src/proxy/server/types.rs @@ -0,0 +1,283 @@ +//! Server types and response structures +//! +//! This module contains all shared types used across the admin API handlers. + +use crate::proxy::TokenManager; +use serde::{Deserialize, Serialize}; +use std::sync::atomic::AtomicUsize; +use std::sync::Arc; +use tokio::sync::RwLock; + +/// Axum application state shared across all handlers +#[derive(Clone)] +pub struct AppState { + pub token_manager: Arc, + pub custom_mapping: Arc>>, + #[allow(dead_code)] + pub request_timeout: u64, + #[allow(dead_code)] + pub thought_signature_map: + Arc>>, + #[allow(dead_code)] + pub upstream_proxy: Arc>, + pub upstream: Arc, + pub zai: Arc>, + pub provider_rr: Arc, + pub zai_vision_mcp: Arc, + pub monitor: Arc, + pub experimental: Arc>, + pub debug_logging: Arc>, + pub switching: Arc>, + pub integration: crate::modules::integration::SystemManager, + pub account_service: Arc, + pub security: Arc>, + pub cloudflared_state: Arc, + pub is_running: Arc>, + pub port: u16, +} + +// Implement FromRef for security state extraction in middleware +impl axum::extract::FromRef for Arc> { + fn from_ref(state: &AppState) -> Self { + state.security.clone() + } +} + +// ============================================================================ +// Response Types +// ============================================================================ + +#[derive(Serialize)] +pub struct ErrorResponse { + pub error: String, +} + +#[derive(Serialize)] +pub struct AccountResponse { + pub id: String, + pub email: String, + pub name: Option, + pub is_current: bool, + pub disabled: bool, + pub disabled_reason: Option, + pub disabled_at: Option, + pub proxy_disabled: bool, + pub proxy_disabled_reason: Option, + pub proxy_disabled_at: Option, + pub protected_models: Vec, + pub quota: Option, + pub device_bound: bool, + pub last_used: i64, +} + +#[derive(Serialize)] +pub struct QuotaResponse { + pub models: Vec, + pub 
last_updated: i64, + pub subscription_tier: Option, + pub is_forbidden: bool, +} + +#[derive(Serialize)] +pub struct ModelQuota { + pub name: String, + pub percentage: i32, + pub reset_time: String, +} + +#[derive(Serialize)] +pub struct AccountListResponse { + pub accounts: Vec, + pub current_account_id: Option, +} + +// ============================================================================ +// Request Types +// ============================================================================ + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct AddAccountRequest { + pub refresh_token: String, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SwitchRequest { + pub account_id: String, +} + +#[derive(Deserialize)] +pub struct BindDeviceRequest { + #[serde(default = "default_bind_mode")] + pub mode: String, +} + +fn default_bind_mode() -> String { + "generate".to_string() +} + +#[derive(Deserialize, Debug, Default)] +#[serde(rename_all = "camelCase")] +pub struct LogsFilterQuery { + #[serde(default)] + pub filter: String, + #[serde(default)] + pub errors_only: bool, + #[serde(default)] + pub limit: usize, + #[serde(default)] + pub offset: usize, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SaveConfigWrapper { + pub config: crate::models::AppConfig, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UpdateMappingWrapper { + pub config: crate::proxy::config::ProxyConfig, +} + +#[derive(Deserialize, Debug, Default)] +#[serde(rename_all = "camelCase")] +pub struct StatsPeriodQuery { + pub hours: Option, + pub days: Option, + pub weeks: Option, +} + +#[derive(Deserialize)] +pub struct BulkDeleteRequest { + #[serde(rename = "accountIds")] + pub account_ids: Vec, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ReorderRequest { + pub account_ids: Vec, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct 
ToggleProxyRequest { + pub enable: bool, + pub reason: Option, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SaveFileRequest { + pub path: String, + pub content: String, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CloudflaredStartRequest { + pub config: crate::modules::cloudflared::CloudflaredConfig, +} + +#[derive(Deserialize)] +pub struct CustomDbRequest { + pub path: String, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliSyncStatusRequest { + pub app_type: crate::proxy::cli_sync::CliApp, + pub proxy_url: String, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliSyncRequest { + pub app_type: crate::proxy::cli_sync::CliApp, + pub proxy_url: String, + pub api_key: String, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliRestoreRequest { + pub app_type: crate::proxy::cli_sync::CliApp, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliConfigContentRequest { + pub app_type: crate::proxy::cli_sync::CliApp, + pub file_name: Option, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct OpencodeSyncStatusRequest { + pub proxy_url: String, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct OpencodeSyncRequest { + pub proxy_url: String, + pub api_key: String, + #[serde(default)] + pub sync_accounts: bool, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct OpencodeConfigContentRequest { + pub file_name: Option, +} + +#[derive(Deserialize)] +pub struct SubmitCodeRequest { + pub code: String, + pub state: Option, +} + +// ============================================================================ +// Helper Functions +// ============================================================================ + +/// Convert Account model to AccountResponse +pub fn to_account_response( + account: &crate::models::account::Account, + 
current_id: &Option, +) -> AccountResponse { + AccountResponse { + id: account.id.clone(), + email: account.email.clone(), + name: account.name.clone(), + is_current: current_id.as_ref() == Some(&account.id), + disabled: account.disabled, + disabled_reason: account.disabled_reason.clone(), + disabled_at: account.disabled_at, + proxy_disabled: account.proxy_disabled, + proxy_disabled_reason: account.proxy_disabled_reason.clone(), + proxy_disabled_at: account.proxy_disabled_at, + protected_models: account.protected_models.iter().cloned().collect(), + quota: account.quota.as_ref().map(|q| QuotaResponse { + models: q + .models + .iter() + .map(|m| ModelQuota { + name: m.name.clone(), + percentage: m.percentage, + reset_time: m.reset_time.clone(), + }) + .collect(), + last_updated: q.last_updated, + subscription_tier: q.subscription_tier.clone(), + is_forbidden: q.is_forbidden, + }), + device_bound: account.device_profile.is_some(), + last_used: account.last_used, + } +} diff --git a/src-tauri/src/proxy/signature_cache.rs b/src-tauri/src/proxy/signature_cache.rs index 4d52fec59..714624f05 100644 --- a/src-tauri/src/proxy/signature_cache.rs +++ b/src-tauri/src/proxy/signature_cache.rs @@ -239,7 +239,6 @@ impl SignatureCache { None } - /// 删除指定会话的缓存签名 pub fn delete_session_signature(&self, session_id: &str) { if let Ok(mut cache) = self.session_signatures.lock() { if cache.remove(session_id).is_some() { diff --git a/src-tauri/src/proxy/sticky_config.rs b/src-tauri/src/proxy/sticky_config.rs index 643c402e9..ca9c8ed4e 100644 --- a/src-tauri/src/proxy/sticky_config.rs +++ b/src-tauri/src/proxy/sticky_config.rs @@ -9,6 +9,10 @@ pub enum SchedulingMode { Balance, /// 性能优先 (Performance-first): 纯轮询模式 (Round-robin),账号负载最均衡,但不利用缓存 PerformanceFirst, + /// 指定账号 (Selected): 仅在指定的账号列表中进行负载均衡 + Selected, + /// Power-of-2-Choices: 随机选2个账号,选负载最低的,减少热点问题 + P2C, } impl Default for SchedulingMode { @@ -25,6 +29,16 @@ pub struct StickySessionConfig { pub mode: SchedulingMode, /// 
缓存优先模式下的最大等待时间 (秒) pub max_wait_seconds: u64, + /// 指定模式下使用的账号 ID 列表 + pub selected_accounts: Vec, + /// 指定模式下每个账号允许的模型列表 (AccountID -> [Model Names]) + #[serde(default)] + pub selected_models: std::collections::HashMap>, + /// [NEW] 严格模式:如果选中的账号全部不可用,返回错误而不是 fallback 到其他账号 + /// - true: 严格模式,仅使用 selected_accounts,不可用时报错 + /// - false: 宽松模式,selected_accounts 不可用时 fallback 到其他账号 + #[serde(default)] + pub strict_selected: bool, } impl Default for StickySessionConfig { @@ -32,6 +46,9 @@ impl Default for StickySessionConfig { Self { mode: SchedulingMode::Balance, max_wait_seconds: 60, + selected_accounts: Vec::new(), + selected_models: std::collections::HashMap::new(), + strict_selected: false, } } } diff --git a/src-tauri/src/proxy/tests/comprehensive.rs b/src-tauri/src/proxy/tests/comprehensive.rs deleted file mode 100644 index df4dd8004..000000000 --- a/src-tauri/src/proxy/tests/comprehensive.rs +++ /dev/null @@ -1,134 +0,0 @@ -#[cfg(test)] -mod tests { - use crate::proxy::mappers::claude::models::{ - ClaudeRequest, Message, MessageContent, ContentBlock, ThinkingConfig - }; - use crate::proxy::mappers::claude::request::transform_claude_request_in; - use crate::proxy::mappers::claude::thinking_utils::{analyze_conversation_state, close_tool_loop_for_thinking}; - use serde_json::json; - - - // ================================================================================== - // 场景一:首次 Thinking 请求 (P0-2 Fix) - // 验证在没有历史签名的情况下,首次发起 Thinking 请求是否被放行 (Perimssive Mode) - // ================================================================================== - #[test] - fn test_first_thinking_request_permissive_mode() { - // 1. 
构造一个全新的请求 (无历史消息) - let req = ClaudeRequest { - model: "claude-3-7-sonnet-20250219".to_string(), - messages: vec![ - Message { - role: "user".to_string(), - content: MessageContent::String("Hello, please think.".to_string()), - } - ], - system: None, - tools: None, // 无工具调用 - stream: false, - max_tokens: None, - temperature: None, - top_p: None, - top_k: None, - thinking: Some(ThinkingConfig { - type_: "enabled".to_string(), - budget_tokens: Some(1024), - }), - metadata: None, - output_config: None, - size: None, - quality: None, - }; - - // 2. 执行转换 - // 如果修复生效,这里应该成功返回,且 thinkingConfig 被保留 - let result = transform_claude_request_in(&req, "test-project", false); - assert!(result.is_ok(), "First thinking request should be allowed"); - - let body = result.unwrap(); - let request = &body["request"]; - - // 验证 thinkingConfig 是否存在 (即 thinking 模式未被禁用) - let has_thinking_config = request.get("generationConfig") - .and_then(|g| g.get("thinkingConfig")) - .is_some(); - - assert!(has_thinking_config, "Thinking config should be preserved for first request without tool calls"); - } - - // ================================================================================== - // 场景二:工具循环恢复 (P1-4 Fix) - // 验证当历史消息中丢失 Thinking 块导致死循环时,是否会自动注入合成消息来闭环 - // ================================================================================== - #[test] - fn test_tool_loop_recovery() { - // 1. 
构造一个 "Broken Tool Loop" 场景 - // Assistant (ToolUse) -> User (ToolResult) - // 但 Assistant 消息中缺少 Thinking 块 (模拟被 stripping) - let mut messages = vec![ - Message { - role: "user".to_string(), - content: MessageContent::String("Check weather".to_string()), - }, - Message { - role: "assistant".to_string(), - content: MessageContent::Array(vec![ - // 只有 ToolUse,没有 Thinking (Broken State) - ContentBlock::ToolUse { - id: "call_1".to_string(), - name: "get_weather".to_string(), - input: json!({"location": "Beijing"}), - signature: None, - cache_control: None, - } - ]), - }, - Message { - role: "user".to_string(), - content: MessageContent::Array(vec![ - ContentBlock::ToolResult { - tool_use_id: "call_1".to_string(), - content: json!("Sunny"), - is_error: None, - } - ]), - } - ]; - - // 2. 分析当前状态 - let state = analyze_conversation_state(&messages); - assert!(state.in_tool_loop, "Should detect tool loop"); - - // 3. 执行恢复逻辑 - close_tool_loop_for_thinking(&mut messages); - - // 4. 验证是否注入了合成消息 - assert_eq!(messages.len(), 5, "Should have injected 2 synthetic messages"); - - // 验证倒数第二条是 Assistant 的 "Completed" 消息 - let injected_assistant = &messages[3]; - assert_eq!(injected_assistant.role, "assistant"); - - // 验证最后一条是 User 的 "Proceed" 消息 - let injected_user = &messages[4]; - assert_eq!(injected_user.role, "user"); - - // 这样当前状态就不再是 "in_tool_loop" (最后一条是 User Text),模型可以开始新的 Thinking - let new_state = analyze_conversation_state(&messages); - assert!(!new_state.in_tool_loop, "Tool loop should be broken/closed"); - } - - // ================================================================================== - // 场景三:跨模型兼容性 (P1-5 Fix) - 模拟 - // 由于 request.rs 中的 is_model_compatible 是私有的,我们通过集成测试验证效果 - // ================================================================================== - /* - 注意:由于 is_model_compatible 和缓存逻辑深度集成在 transform_claude_request_in 中, - 且依赖全局单例 SignatureCache,单元测试较难模拟 "缓存了旧签名但切换了模型" 的状态。 - 这里主要通过验证 "不兼容签名被丢弃" 的副作用(即 thoughtSignature 字段消息)来测试。 - 但由于 
SignatureCache 是全局的,我们无法在测试中轻易预置状态。 - 因此,此场景主要依赖 Verification Guide 中的手动测试。 - 或者,我们可以测试 request.rs 中公开的某些 helper (如果有的话),但目前没有。 - */ - -} diff --git a/src-tauri/src/proxy/tests/mod.rs b/src-tauri/src/proxy/tests/mod.rs deleted file mode 100644 index 212ce9264..000000000 --- a/src-tauri/src/proxy/tests/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod comprehensive; -pub mod security_ip_tests; -pub mod security_integration_tests; -pub mod quota_protection; diff --git a/src-tauri/src/proxy/tests/quota_protection.rs b/src-tauri/src/proxy/tests/quota_protection.rs deleted file mode 100644 index d2d97b51a..000000000 --- a/src-tauri/src/proxy/tests/quota_protection.rs +++ /dev/null @@ -1,1144 +0,0 @@ -// ================================================================================== -// 配额保护功能完整测试 -// 验证从账号创建到配额保护策略执行的完整流程 -// ================================================================================== - -#[cfg(test)] -mod tests { - use std::path::PathBuf; - - use crate::models::QuotaProtectionConfig; - use crate::proxy::common::model_mapping::normalize_to_standard_id; - use crate::proxy::token_manager::ProxyToken; - - // ================================================================================== - // 辅助函数:创建模拟账号 - // ================================================================================== - - fn create_mock_token( - account_id: &str, - email: &str, - protected_models: Vec<&str>, - remaining_quota: Option, - ) -> ProxyToken { - ProxyToken { - account_id: account_id.to_string(), - access_token: format!("mock_access_token_{}", account_id), - refresh_token: format!("mock_refresh_token_{}", account_id), - expires_in: 3600, - timestamp: chrono::Utc::now().timestamp() + 3600, - email: email.to_string(), - account_path: PathBuf::from(format!("/tmp/test_accounts/{}.json", account_id)), - project_id: Some("test-project".to_string()), - subscription_tier: Some("PRO".to_string()), - remaining_quota, - protected_models: protected_models.iter().map(|s| 
s.to_string()).collect(), - health_score: 1.0, - reset_time: None, - validation_blocked: false, - validation_blocked_until: 0, - model_quotas: std::collections::HashMap::new(), - } - } - - // ================================================================================== - // 测试 1: normalize_to_standard_id 函数正确性 - // 验证各种 Claude 模型名称都能正确归一化 - // ================================================================================== - - #[test] - fn test_normalize_to_standard_id_claude_models() { - // Claude Sonnet 系列 - assert_eq!( - normalize_to_standard_id("claude-sonnet-4-5"), - Some("claude-sonnet-4-5".to_string()) - ); - assert_eq!( - normalize_to_standard_id("claude-sonnet-4-5-thinking"), - Some("claude-sonnet-4-5".to_string()) - ); - - // Claude Opus 系列 - 这是关键的测试! - assert_eq!( - normalize_to_standard_id("claude-opus-4-5-thinking"), - Some("claude-sonnet-4-5".to_string()), - "claude-opus-4-5-thinking 应该归一化为 claude-sonnet-4-5" - ); - - // Gemini 系列 - assert_eq!( - normalize_to_standard_id("gemini-3-flash"), - Some("gemini-3-flash".to_string()) - ); - assert_eq!( - normalize_to_standard_id("gemini-3-pro-high"), - Some("gemini-3-pro-high".to_string()) - ); - assert_eq!( - normalize_to_standard_id("gemini-3-pro-low"), - Some("gemini-3-pro-high".to_string()) - ); - - // 不支持的模型应返回 None - assert_eq!(normalize_to_standard_id("gpt-4"), None); - assert_eq!(normalize_to_standard_id("unknown-model"), None); - } - - // ================================================================================== - // 测试 2: 配额保护模型匹配逻辑 - // 验证 protected_models.contains() 在归一化后能正确匹配 - // ================================================================================== - - #[test] - fn test_protected_models_matching() { - // 创建一个账号,protected_models 中有 claude-sonnet-4-5 - let token = create_mock_token( - "account-1", - "test@example.com", - vec!["claude-sonnet-4-5"], - Some(50), - ); - - // 测试:请求 claude-opus-4-5-thinking 时应该被保护 - let target_model = "claude-opus-4-5-thinking"; - let 
normalized = - normalize_to_standard_id(target_model).unwrap_or_else(|| target_model.to_string()); - - assert_eq!(normalized, "claude-sonnet-4-5"); - assert!( - token.protected_models.contains(&normalized), - "claude-opus-4-5-thinking 归一化后应该匹配 protected_models 中的 claude-sonnet-4-5" - ); - - // 测试:请求 claude-sonnet-4-5-thinking 时也应该被保护 - let target_model_2 = "claude-sonnet-4-5-thinking"; - let normalized_2 = - normalize_to_standard_id(target_model_2).unwrap_or_else(|| target_model_2.to_string()); - - assert!( - token.protected_models.contains(&normalized_2), - "claude-sonnet-4-5-thinking 归一化后应该匹配 protected_models" - ); - - // 测试:请求 gemini-3-flash 时不应该被保护(因为 protected_models 中没有) - let target_model_3 = "gemini-3-flash"; - let normalized_3 = - normalize_to_standard_id(target_model_3).unwrap_or_else(|| target_model_3.to_string()); - - assert!( - !token.protected_models.contains(&normalized_3), - "gemini-3-flash 不应该匹配 claude-sonnet-4-5" - ); - } - - // ================================================================================== - // 测试 3: 多账号轮询时的配额保护过滤 - // 模拟多个账号,验证被保护的账号会被跳过 - // ================================================================================== - - #[test] - fn test_multi_account_quota_protection_filtering() { - // 创建 3 个账号 - let tokens = vec![ - // 账号 1: claude-sonnet-4-5 被保护(配额低) - create_mock_token( - "account-1", - "user1@example.com", - vec!["claude-sonnet-4-5"], - Some(20), - ), - // 账号 2: 没有被保护 - create_mock_token("account-2", "user2@example.com", vec![], Some(80)), - // 账号 3: gemini-3-flash 被保护 - create_mock_token( - "account-3", - "user3@example.com", - vec!["gemini-3-flash"], - Some(30), - ), - ]; - - // 模拟请求 claude-opus-4-5-thinking - let target_model = "claude-opus-4-5-thinking"; - let normalized_target = - normalize_to_standard_id(target_model).unwrap_or_else(|| target_model.to_string()); - - // 过滤掉被保护的账号 - let available_accounts: Vec<_> = tokens - .iter() - .filter(|t| !t.protected_models.contains(&normalized_target)) - .collect(); 
- - // 验证:账号 1 被过滤(因为 claude-sonnet-4-5 被保护) - // 账号 2 和 3 可用 - assert_eq!(available_accounts.len(), 2); - assert!(available_accounts - .iter() - .any(|t| t.account_id == "account-2")); - assert!(available_accounts - .iter() - .any(|t| t.account_id == "account-3")); - assert!(!available_accounts - .iter() - .any(|t| t.account_id == "account-1")); - - // 模拟请求 gemini-3-flash - let target_model_2 = "gemini-3-flash"; - let normalized_target_2 = - normalize_to_standard_id(target_model_2).unwrap_or_else(|| target_model_2.to_string()); - - let available_accounts_2: Vec<_> = tokens - .iter() - .filter(|t| !t.protected_models.contains(&normalized_target_2)) - .collect(); - - // 验证:账号 3 被过滤(因为 gemini-3-flash 被保护) - // 账号 1 和 2 可用 - assert_eq!(available_accounts_2.len(), 2); - assert!(available_accounts_2 - .iter() - .any(|t| t.account_id == "account-1")); - assert!(available_accounts_2 - .iter() - .any(|t| t.account_id == "account-2")); - assert!(!available_accounts_2 - .iter() - .any(|t| t.account_id == "account-3")); - } - - // ================================================================================== - // 测试 4: 所有账号都被保护时的行为 - // 验证当所有账号的目标模型都被保护时,返回错误 - // ================================================================================== - - #[test] - fn test_all_accounts_protected_returns_error() { - // 创建 3 个账号,全部对 claude-sonnet-4-5 进行保护 - let tokens = vec![ - create_mock_token( - "account-1", - "user1@example.com", - vec!["claude-sonnet-4-5"], - Some(10), - ), - create_mock_token( - "account-2", - "user2@example.com", - vec!["claude-sonnet-4-5"], - Some(15), - ), - create_mock_token( - "account-3", - "user3@example.com", - vec!["claude-sonnet-4-5"], - Some(5), - ), - ]; - - let target_model = "claude-opus-4-5-thinking"; - let normalized_target = - normalize_to_standard_id(target_model).unwrap_or_else(|| target_model.to_string()); - - let available_accounts: Vec<_> = tokens - .iter() - .filter(|t| !t.protected_models.contains(&normalized_target)) - .collect(); - 
- // 所有账号都被过滤,应该返回 0 - assert_eq!(available_accounts.len(), 0); - - // 在实际代码中,这会导致 "All accounts failed or unhealthy" 错误 - } - - // ================================================================================== - // 测试 5: monitored_models 配置与归一化一致性 - // 验证配置中的 monitored_models 能正确匹配归一化后的模型名 - // ================================================================================== - - #[test] - fn test_monitored_models_normalization_consistency() { - let config = QuotaProtectionConfig { - enabled: true, - threshold_percentage: 60, - monitored_models: vec![ - "claude-sonnet-4-5".to_string(), - "gemini-3-pro-high".to_string(), - "gemini-3-flash".to_string(), - ], - }; - - // 测试各种模型名归一化后是否在 monitored_models 中 - let test_cases = vec![ - ("claude-opus-4-5-thinking", true), // 归一化为 claude-sonnet-4-5 - ("claude-sonnet-4-5-thinking", true), // 归一化为 claude-sonnet-4-5 - ("claude-sonnet-4-5", true), // 直接匹配 - ("gemini-3-pro-high", true), // 直接匹配 - ("gemini-3-pro-low", true), // 归一化为 gemini-3-pro-high - ("gemini-3-flash", true), // 直接匹配 - ("gpt-4", false), // 不支持的模型 - ("gemini-2.5-flash", false), // 不在监控列表中 - ]; - - for (model_name, expected_monitored) in test_cases { - let standard_id = normalize_to_standard_id(model_name); - - let is_monitored = match &standard_id { - Some(id) => config.monitored_models.contains(id), - None => false, - }; - - assert_eq!( - is_monitored, expected_monitored, - "模型 {} (归一化为 {:?}) 的监控状态应为 {}", - model_name, standard_id, expected_monitored - ); - } - } - - // ================================================================================== - // 测试 6: 配额阈值触发逻辑 - // 验证配额低于阈值时触发保护,高于阈值时恢复 - // ================================================================================== - - #[test] - fn test_quota_threshold_trigger_logic() { - let threshold = 60; // 60% 阈值 - - // 模拟 quota 数据 - let quota_data = vec![ - ("claude-opus-4-5-thinking", 50, true), // 50% <= 60%, 应触发保护 - ("claude-sonnet-4-5-thinking", 60, true), // 60% <= 60%, 应触发保护(边界情况) - 
("gemini-3-flash", 61, false), // 61% > 60%, 不触发保护 - ("gemini-3-pro-high", 100, false), // 100% > 60%, 不触发保护 - ]; - - for (model_name, percentage, should_protect) in quota_data { - let should_trigger = percentage <= threshold; - - assert_eq!( - should_trigger, - should_protect, - "模型 {} 配额 {}% (阈值 {}%) 应 {} 触发保护", - model_name, - percentage, - threshold, - if should_protect { "" } else { "不" } - ); - } - } - - // ================================================================================== - // 测试 7: 账号优先级排序后的保护过滤 - // 验证高配额账号被保护后,会回退到低配额账号 - // ================================================================================== - - #[test] - fn test_priority_fallback_when_protected() { - // 创建 3 个账号,按配额排序 - let mut tokens = vec![ - create_mock_token( - "account-high", - "high@example.com", - vec!["claude-sonnet-4-5"], - Some(90), - ), - create_mock_token("account-mid", "mid@example.com", vec![], Some(60)), - create_mock_token("account-low", "low@example.com", vec![], Some(30)), - ]; - - // 按配额降序排序(高配额优先) - tokens.sort_by(|a, b| { - let qa = a.remaining_quota.unwrap_or(0); - let qb = b.remaining_quota.unwrap_or(0); - qb.cmp(&qa) - }); - - // 验证排序正确 - assert_eq!(tokens[0].account_id, "account-high"); - assert_eq!(tokens[1].account_id, "account-mid"); - assert_eq!(tokens[2].account_id, "account-low"); - - // 模拟请求 claude-opus-4-5-thinking - let target_model = "claude-opus-4-5-thinking"; - let normalized_target = - normalize_to_standard_id(target_model).unwrap_or_else(|| target_model.to_string()); - - // 按顺序选择第一个可用账号 - let selected = tokens - .iter() - .find(|t| !t.protected_models.contains(&normalized_target)); - - // 验证:account-high 被跳过,选择 account-mid - assert!(selected.is_some()); - assert_eq!( - selected.unwrap().account_id, - "account-mid", - "高配额账号被保护后,应该回退到 account-mid" - ); - } - - // ================================================================================== - // 测试 8: 模型级别保护(同一账号不同模型) - // 验证一个账号可以对某些模型保护,对其他模型不保护 - // 
================================================================================== - - #[test] - fn test_model_level_protection_granularity() { - // 账号对 claude-sonnet-4-5 保护,但对 gemini-3-flash 不保护 - let token = create_mock_token( - "account-1", - "user@example.com", - vec!["claude-sonnet-4-5"], - Some(50), - ); - - // 请求 claude-opus-4-5-thinking -> 被保护 - let normalized_claude = normalize_to_standard_id("claude-opus-4-5-thinking") - .unwrap_or_else(|| "claude-opus-4-5-thinking".to_string()); - assert!( - token.protected_models.contains(&normalized_claude), - "Claude 请求应该被保护" - ); - - // 请求 gemini-3-flash -> 不被保护 - let normalized_gemini = normalize_to_standard_id("gemini-3-flash") - .unwrap_or_else(|| "gemini-3-flash".to_string()); - assert!( - !token.protected_models.contains(&normalized_gemini), - "Gemini 请求不应该被保护" - ); - } - - // ================================================================================== - // 测试 9: 配额保护启用/禁用开关 - // 验证当 quota_protection.enabled = false 时,保护逻辑不生效 - // ================================================================================== - - #[test] - fn test_quota_protection_enabled_flag() { - let config_enabled = QuotaProtectionConfig { - enabled: true, - threshold_percentage: 60, - monitored_models: vec!["claude-sonnet-4-5".to_string()], - }; - - let config_disabled = QuotaProtectionConfig { - enabled: false, - threshold_percentage: 60, - monitored_models: vec!["claude-sonnet-4-5".to_string()], - }; - - let token = create_mock_token( - "account-1", - "user@example.com", - vec!["claude-sonnet-4-5"], - Some(50), - ); - - let target_model = "claude-opus-4-5-thinking"; - let normalized_target = - normalize_to_standard_id(target_model).unwrap_or_else(|| target_model.to_string()); - - // 启用配额保护时,账号应该被过滤 - let is_protected_when_enabled = - config_enabled.enabled && token.protected_models.contains(&normalized_target); - assert!(is_protected_when_enabled, "启用时应该被保护"); - - // 禁用配额保护时,即使 protected_models 中有值,也不过滤 - let 
is_protected_when_disabled = - config_disabled.enabled && token.protected_models.contains(&normalized_target); - assert!(!is_protected_when_disabled, "禁用时不应该被保护"); - } - - // ================================================================================== - // 测试 10: 完整流程模拟(集成测试风格) - // 模拟多账号、配额保护配置、请求轮询的完整流程 - // ================================================================================== - - #[test] - fn test_full_quota_protection_flow() { - // 1. 配置配额保护 - let config = QuotaProtectionConfig { - enabled: true, - threshold_percentage: 60, - monitored_models: vec![ - "claude-sonnet-4-5".to_string(), - "gemini-3-flash".to_string(), - ], - }; - - // 2. 创建多个账号,模拟不同配额状态 - let accounts = vec![ - // 账号 A: Claude 配额低(50%),应该被保护 - create_mock_token( - "account-a", - "a@example.com", - vec!["claude-sonnet-4-5"], - Some(50), - ), - // 账号 B: Claude 配额正常(80%),不被保护 - create_mock_token("account-b", "b@example.com", vec![], Some(80)), - // 账号 C: Claude 和 Gemini 都被保护 - create_mock_token( - "account-c", - "c@example.com", - vec!["claude-sonnet-4-5", "gemini-3-flash"], - Some(30), - ), - // 账号 D: 只有 Gemini 被保护 - create_mock_token( - "account-d", - "d@example.com", - vec!["gemini-3-flash"], - Some(40), - ), - ]; - - // 3. 
模拟多次请求,验证账号选择逻辑 - - // 请求 1: claude-opus-4-5-thinking - let target_claude = normalize_to_standard_id("claude-opus-4-5-thinking") - .unwrap_or_else(|| "claude-opus-4-5-thinking".to_string()); - - let available_for_claude: Vec<_> = accounts - .iter() - .filter(|a| !config.enabled || !a.protected_models.contains(&target_claude)) - .collect(); - - // 账号 A 和 C 被过滤,B 和 D 可用 - assert_eq!(available_for_claude.len(), 2); - let claude_account_ids: Vec<_> = available_for_claude - .iter() - .map(|a| a.account_id.as_str()) - .collect(); - assert!(claude_account_ids.contains(&"account-b")); - assert!(claude_account_ids.contains(&"account-d")); - - // 请求 2: gemini-3-flash - let target_gemini = normalize_to_standard_id("gemini-3-flash") - .unwrap_or_else(|| "gemini-3-flash".to_string()); - - let available_for_gemini: Vec<_> = accounts - .iter() - .filter(|a| !config.enabled || !a.protected_models.contains(&target_gemini)) - .collect(); - - // 账号 C 和 D 被过滤,A 和 B 可用 - assert_eq!(available_for_gemini.len(), 2); - let gemini_account_ids: Vec<_> = available_for_gemini - .iter() - .map(|a| a.account_id.as_str()) - .collect(); - assert!(gemini_account_ids.contains(&"account-a")); - assert!(gemini_account_ids.contains(&"account-b")); - - // 请求 3: 未被监控的模型 (gemini-2.5-flash) - let target_unmonitored = normalize_to_standard_id("gemini-2.5-flash") - .unwrap_or_else(|| "gemini-2.5-flash".to_string()); - - let available_for_unmonitored: Vec<_> = accounts - .iter() - .filter(|a| !config.enabled || !a.protected_models.contains(&target_unmonitored)) - .collect(); - - // 未被监控的模型,所有账号都可用 - assert_eq!(available_for_unmonitored.len(), 4); - } - - // ================================================================================== - // 测试 11: 边界情况 - 空 protected_models - // ================================================================================== - - #[test] - fn test_empty_protected_models() { - let token = create_mock_token( - "account-1", - "user@example.com", - vec![], // 没有被保护的模型 - 
Some(50), - ); - - let target = normalize_to_standard_id("claude-opus-4-5-thinking") - .unwrap_or_else(|| "claude-opus-4-5-thinking".to_string()); - - assert!( - !token.protected_models.contains(&target), - "空 protected_models 不应该匹配任何模型" - ); - } - - // ================================================================================== - // 测试 12: 边界情况 - 大小写敏感性 - // ================================================================================== - - #[test] - fn test_model_name_case_sensitivity() { - // normalize_to_standard_id 应该是大小写不敏感的 - assert_eq!( - normalize_to_standard_id("Claude-Opus-4-5-Thinking"), - Some("claude-sonnet-4-5".to_string()) - ); - assert_eq!( - normalize_to_standard_id("CLAUDE-OPUS-4-5-THINKING"), - Some("claude-sonnet-4-5".to_string()) - ); - assert_eq!( - normalize_to_standard_id("GEMINI-3-FLASH"), - Some("gemini-3-flash".to_string()) - ); - } - - // ================================================================================== - // 测试 13: 端到端场景 - 会话中途配额保护生效后的路由切换 - // 模拟:请求1 -> 绑定账号A -> 请求2 -> 继续用A -> 刷新配额 -> A被保护 -> 请求3 -> 切换到B - // ================================================================================== - - #[test] - fn test_sticky_session_quota_protection_mid_session_single_account() { - // 场景:只有一个账号,会话绑定后配额保护生效 - // 预期:返回配额保护错误 - - let session_id = "session-12345"; - let target_model = "claude-opus-4-5-thinking"; - let normalized_target = - normalize_to_standard_id(target_model).unwrap_or_else(|| target_model.to_string()); - - // 初始状态:账号 A 没有被保护 - let mut account_a = create_mock_token( - "account-a", - "a@example.com", - vec![], // 初始没有保护 - Some(70), - ); - - // 模拟会话绑定表 - let mut session_bindings: std::collections::HashMap = - std::collections::HashMap::new(); - - // === 请求 1: 绑定到账号 A === - session_bindings.insert(session_id.to_string(), account_a.account_id.clone()); - - // 验证请求 1 成功 - let bound_account = session_bindings.get(session_id); - assert_eq!(bound_account, Some(&"account-a".to_string())); - - // === 请求 2: 
继续使用账号 A === - // 账号 A 仍然可用 - assert!(!account_a.protected_models.contains(&normalized_target)); - - // === 系统触发配额刷新,发现账号 A 配额低于阈值 === - // 模拟配额刷新后,account_a 的 claude-sonnet-4-5 被加入保护列表 - account_a - .protected_models - .insert("claude-sonnet-4-5".to_string()); - - // === 请求 3: 尝试使用账号 A,但被配额保护 === - let accounts = vec![account_a.clone()]; // 只有一个账号 - - // 检查绑定的账号是否被保护 - let bound_id = session_bindings.get(session_id).unwrap(); - let bound_account = accounts.iter().find(|a| &a.account_id == bound_id).unwrap(); - let is_protected = bound_account.protected_models.contains(&normalized_target); - - assert!(is_protected, "账号 A 应该被配额保护"); - - // 尝试找其他可用账号 - let available_accounts: Vec<_> = accounts - .iter() - .filter(|a| !a.protected_models.contains(&normalized_target)) - .collect(); - - // 没有可用账号 - assert_eq!(available_accounts.len(), 0, "应该没有可用账号"); - - // 在实际实现中,这会返回错误消息 - // 验证应该返回配额保护相关的错误 - let error_message = if available_accounts.is_empty() { - if accounts - .iter() - .all(|a| a.protected_models.contains(&normalized_target)) - { - format!( - "All accounts quota-protected for model {}", - normalized_target - ) - } else { - "All accounts failed or unhealthy.".to_string() - } - } else { - "OK".to_string() - }; - - assert!( - error_message.contains("quota-protected"), - "错误消息应该包含 quota-protected: {}", - error_message - ); - } - - #[test] - fn test_sticky_session_quota_protection_mid_session_multi_account() { - // 场景:多个账号,会话绑定的账号配额保护生效后,应该路由到其他账号 - - let session_id = "session-67890"; - let target_model = "claude-opus-4-5-thinking"; - let normalized_target = - normalize_to_standard_id(target_model).unwrap_or_else(|| target_model.to_string()); - - // 初始状态:账号 A 和 B 都没有被保护 - let mut account_a = create_mock_token("account-a", "a@example.com", vec![], Some(70)); - let account_b = create_mock_token("account-b", "b@example.com", vec![], Some(80)); - - let mut session_bindings: std::collections::HashMap = - std::collections::HashMap::new(); - - // === 请求 1: 绑定到账号 A === - 
session_bindings.insert(session_id.to_string(), account_a.account_id.clone()); - - // === 请求 2: 继续使用账号 A === - assert!(!account_a.protected_models.contains(&normalized_target)); - - // === 系统触发配额刷新,账号 A 被保护 === - account_a - .protected_models - .insert("claude-sonnet-4-5".to_string()); - - // === 请求 3: 账号 A 被保护,应该解绑并切换到账号 B === - let accounts = vec![account_a.clone(), account_b.clone()]; - - // 检查绑定的账号 - let bound_id = session_bindings.get(session_id).unwrap(); - let bound_account = accounts.iter().find(|a| &a.account_id == bound_id).unwrap(); - let is_protected = bound_account.protected_models.contains(&normalized_target); - - assert!(is_protected, "账号 A 应该被配额保护"); - - // 模拟解绑逻辑 - if is_protected { - session_bindings.remove(session_id); - } - - // 寻找其他可用账号 - let available_accounts: Vec<_> = accounts - .iter() - .filter(|a| !a.protected_models.contains(&normalized_target)) - .collect(); - - // 应该有账号 B 可用 - assert_eq!(available_accounts.len(), 1); - assert_eq!(available_accounts[0].account_id, "account-b"); - - // 重新绑定到账号 B - let new_account = available_accounts[0]; - session_bindings.insert(session_id.to_string(), new_account.account_id.clone()); - - // 验证新绑定 - assert_eq!( - session_bindings.get(session_id), - Some(&"account-b".to_string()), - "会话应该重新绑定到账号 B" - ); - } - - // ================================================================================== - // 测试 14: 配额保护实时同步测试 - // 模拟:配额刷新后 protected_models 被更新,TokenManager 内存应该同步 - // ================================================================================== - - #[test] - fn test_quota_protection_sync_after_refresh() { - // 这个测试模拟 update_account_quota 触发 TokenManager 重新加载的场景 - - // 初始内存状态 - let mut tokens_in_memory = vec![create_mock_token( - "account-a", - "a@example.com", - vec![], - Some(70), - )]; - - // 模拟磁盘上的账号数据(配额刷新后更新) - let mut account_on_disk = create_mock_token("account-a", "a@example.com", vec![], Some(50)); - - // 模拟配额刷新:检测到配额低于阈值,触发保护 - let threshold = 60; - if 
account_on_disk.remaining_quota.unwrap_or(100) <= threshold { - account_on_disk - .protected_models - .insert("claude-sonnet-4-5".to_string()); - } - - // 验证磁盘数据已更新 - assert!( - account_on_disk - .protected_models - .contains("claude-sonnet-4-5"), - "磁盘上的账号应该已被保护" - ); - - // 此时内存数据还是旧的 - assert!( - !tokens_in_memory[0] - .protected_models - .contains("claude-sonnet-4-5"), - "内存中的账号还没被同步" - ); - - // 模拟 trigger_account_reload -> reload_account 同步 - tokens_in_memory[0] = account_on_disk.clone(); - - // 验证内存数据已同步 - assert!( - tokens_in_memory[0] - .protected_models - .contains("claude-sonnet-4-5"), - "同步后内存中的账号应该被保护" - ); - - // 现在请求应该被正确过滤 - let target = normalize_to_standard_id("claude-opus-4-5-thinking") - .unwrap_or_else(|| "claude-opus-4-5-thinking".to_string()); - - let available: Vec<_> = tokens_in_memory - .iter() - .filter(|t| !t.protected_models.contains(&target)) - .collect(); - - assert_eq!(available.len(), 0, "同步后账号应该被过滤"); - } - - // ================================================================================== - // 测试 15: 多轮请求中的配额保护动态变化 - // 模拟完整的请求序列,包括配额保护的触发和恢复 - // ================================================================================== - - #[test] - fn test_quota_protection_dynamic_changes() { - let target_model = "claude-opus-4-5-thinking"; - let normalized_target = - normalize_to_standard_id(target_model).unwrap_or_else(|| target_model.to_string()); - - // 账号池 - let mut account_a = create_mock_token("account-a", "a@example.com", vec![], Some(70)); - let mut account_b = create_mock_token("account-b", "b@example.com", vec![], Some(80)); - - // === 阶段 1: 初始状态,两个账号都可用 === - let accounts = vec![account_a.clone(), account_b.clone()]; - let available: Vec<_> = accounts - .iter() - .filter(|t| !t.protected_models.contains(&normalized_target)) - .collect(); - assert_eq!(available.len(), 2, "阶段1: 两个账号都可用"); - - // === 阶段 2: 账号 A 配额降低,触发保护 === - account_a.remaining_quota = Some(40); - account_a - .protected_models - 
.insert("claude-sonnet-4-5".to_string()); - - let accounts = vec![account_a.clone(), account_b.clone()]; - let available: Vec<_> = accounts - .iter() - .filter(|t| !t.protected_models.contains(&normalized_target)) - .collect(); - assert_eq!(available.len(), 1, "阶段2: 只有账号 B 可用"); - assert_eq!(available[0].account_id, "account-b"); - - // === 阶段 3: 账号 B 也触发保护 === - account_b.remaining_quota = Some(30); - account_b - .protected_models - .insert("claude-sonnet-4-5".to_string()); - - let accounts = vec![account_a.clone(), account_b.clone()]; - let available: Vec<_> = accounts - .iter() - .filter(|t| !t.protected_models.contains(&normalized_target)) - .collect(); - assert_eq!(available.len(), 0, "阶段3: 没有可用账号"); - - // === 阶段 4: 账号 A 配额恢复(重置),解除保护 === - account_a.remaining_quota = Some(100); - account_a.protected_models.remove("claude-sonnet-4-5"); - - let accounts = vec![account_a.clone(), account_b.clone()]; - let available: Vec<_> = accounts - .iter() - .filter(|t| !t.protected_models.contains(&normalized_target)) - .collect(); - assert_eq!(available.len(), 1, "阶段4: 账号 A 恢复可用"); - assert_eq!(available[0].account_id, "account-a"); - } - - // ================================================================================== - // 测试 16: 完整错误消息验证 - // 验证不同场景下返回的错误消息是否正确 - // ================================================================================== - - #[test] - fn test_error_messages_for_quota_protection() { - let target_model = "claude-opus-4-5-thinking"; - let normalized_target = - normalize_to_standard_id(target_model).unwrap_or_else(|| target_model.to_string()); - - // 场景 1: 所有账号都因配额保护不可用 - let all_protected = vec![ - create_mock_token("a1", "a1@example.com", vec!["claude-sonnet-4-5"], Some(30)), - create_mock_token("a2", "a2@example.com", vec!["claude-sonnet-4-5"], Some(20)), - ]; - - let all_are_quota_protected = all_protected - .iter() - .all(|a| a.protected_models.contains(&normalized_target)); - - assert!(all_are_quota_protected, "所有账号都被配额保护"); - - // 
生成错误消息 - let error = format!( - "All {} accounts are quota-protected for model '{}'. Wait for quota reset or adjust protection threshold.", - all_protected.len(), - normalized_target - ); - - assert!(error.contains("quota-protected")); - assert!(error.contains("claude-sonnet-4-5")); - - // 场景 2: 混合情况(部分限流,部分配额保护) - let mixed = vec![ - create_mock_token("a1", "a1@example.com", vec!["claude-sonnet-4-5"], Some(30)), - create_mock_token("a2", "a2@example.com", vec![], Some(20)), // 这个假设被限流 - ]; - - let quota_protected_count = mixed - .iter() - .filter(|a| a.protected_models.contains(&normalized_target)) - .count(); - - assert_eq!(quota_protected_count, 1); - } - - // ================================================================================== - // 测试 17: get_model_quota_from_json 函数正确性 - // 验证从磁盘读取特定模型 quota 而非 max(所有模型) - // ================================================================================== - - #[test] - fn test_get_model_quota_from_json_reads_correct_model() { - // 创建模拟账号 JSON 文件,包含多个模型的 quota - let account_json = serde_json::json!({ - "email": "test@example.com", - "quota": { - "models": [ - { "name": "claude-sonnet-4-5", "percentage": 60 }, - { "name": "claude-opus-4-5-thinking", "percentage": 40 }, - { "name": "gemini-3-flash", "percentage": 100 } - ] - } - }); - - // 使用 std::env::temp_dir() 创建临时文件 - let temp_dir = std::env::temp_dir(); - let account_path = temp_dir.join(format!("test_quota_{}.json", uuid::Uuid::new_v4())); - std::fs::write(&account_path, account_json.to_string()).expect("Failed to write temp file"); - - // 测试读取 claude-sonnet-4-5 的 quota - let sonnet_quota = - crate::proxy::token_manager::TokenManager::get_model_quota_from_json_for_test( - &account_path, - "claude-sonnet-4-5", - ); - assert_eq!( - sonnet_quota, - Some(60), - "claude-sonnet-4-5 应该返回 60%,而非 max(100%)" - ); - - // 测试读取 gemini-3-flash 的 quota - let gemini_quota = - crate::proxy::token_manager::TokenManager::get_model_quota_from_json_for_test( - &account_path, - 
"gemini-3-flash", - ); - assert_eq!(gemini_quota, Some(100), "gemini-3-flash 应该返回 100%"); - - // 测试读取不存在的模型 - let unknown_quota = - crate::proxy::token_manager::TokenManager::get_model_quota_from_json_for_test( - &account_path, - "unknown-model", - ); - assert_eq!(unknown_quota, None, "不存在的模型应该返回 None"); - - // 清理临时文件 - let _ = std::fs::remove_file(&account_path); - } - - // ================================================================================== - // 测试 18: 排序使用目标模型 quota 而非 max quota - // 验证修复后的排序逻辑正确性 - // ================================================================================== - - #[test] - fn test_sorting_uses_target_model_quota_not_max() { - // 使用 std::env::temp_dir() 创建临时目录 - let temp_dir = std::env::temp_dir().join(format!("test_sorting_{}", uuid::Uuid::new_v4())); - std::fs::create_dir_all(&temp_dir).expect("Failed to create temp dir"); - - // 账号 A: max=100 (gemini), sonnet=40 - let account_a_json = serde_json::json!({ - "email": "carmelioventori@example.com", - "quota": { - "models": [ - { "name": "claude-sonnet-4-5", "percentage": 40 }, - { "name": "gemini-3-flash", "percentage": 100 } - ] - } - }); - - // 账号 B: max=100 (gemini), sonnet=100 - let account_b_json = serde_json::json!({ - "email": "kiriyamaleo@example.com", - "quota": { - "models": [ - { "name": "claude-sonnet-4-5", "percentage": 100 }, - { "name": "gemini-3-flash", "percentage": 100 } - ] - } - }); - - // 账号 C: max=100 (gemini), sonnet=60 - let account_c_json = serde_json::json!({ - "email": "mizusawakai9@example.com", - "quota": { - "models": [ - { "name": "claude-sonnet-4-5", "percentage": 60 }, - { "name": "gemini-3-flash", "percentage": 100 } - ] - } - }); - - // 写入临时文件 - let path_a = temp_dir.join("account_a.json"); - let path_b = temp_dir.join("account_b.json"); - let path_c = temp_dir.join("account_c.json"); - - std::fs::write(&path_a, account_a_json.to_string()).unwrap(); - std::fs::write(&path_b, account_b_json.to_string()).unwrap(); - std::fs::write(&path_c, 
account_c_json.to_string()).unwrap(); - - // 创建 tokens,remaining_quota 使用 max 值(模拟旧逻辑) - let mut tokens = vec![ - create_mock_token_with_path("a", "carmelioventori@example.com", vec![], Some(100), path_a.clone()), - create_mock_token_with_path("b", "kiriyamaleo@example.com", vec![], Some(100), path_b.clone()), - create_mock_token_with_path("c", "mizusawakai9@example.com", vec![], Some(100), path_c.clone()), - ]; - - // 目标模型: claude-sonnet-4-5 - let target_model = "claude-sonnet-4-5"; - - // 使用修复后的排序逻辑:读取目标模型的 quota - tokens.sort_by(|a, b| { - let quota_a = crate::proxy::token_manager::TokenManager::get_model_quota_from_json_for_test( - &a.account_path, - target_model, - ) - .unwrap_or(0); - let quota_b = crate::proxy::token_manager::TokenManager::get_model_quota_from_json_for_test( - &b.account_path, - target_model, - ) - .unwrap_or(0); - quota_b.cmp("a_a) // 高 quota 优先 - }); - - // 验证排序结果:sonnet quota 100% > 60% > 40% - assert_eq!( - tokens[0].email, "kiriyamaleo@example.com", - "sonnet=100% 的账号应该排第一" - ); - assert_eq!( - tokens[1].email, "mizusawakai9@example.com", - "sonnet=60% 的账号应该排第二" - ); - assert_eq!( - tokens[2].email, "carmelioventori@example.com", - "sonnet=40% 的账号应该排第三" - ); - - // 清理临时目录 - let _ = std::fs::remove_dir_all(&temp_dir); - } - - // ================================================================================== - // 测试 19: 模型名称归一化后的 quota 匹配 - // 验证请求 claude-opus-4-5-thinking 时能正确匹配 claude-sonnet-4-5 的 quota - // ================================================================================== - - #[test] - fn test_quota_matching_with_normalized_model_name() { - // 账号 JSON:只记录标准化后的模型名 - let account_json = serde_json::json!({ - "email": "test@example.com", - "quota": { - "models": [ - { "name": "claude-sonnet-4-5", "percentage": 75 }, - { "name": "gemini-3-flash", "percentage": 90 } - ] - } - }); - - let temp_dir = std::env::temp_dir(); - let account_path = temp_dir.join(format!("test_normalized_{}.json", uuid::Uuid::new_v4())); - 
std::fs::write(&account_path, account_json.to_string()).expect("Failed to write temp file"); - - // 请求 claude-opus-4-5-thinking,应该归一化为 claude-sonnet-4-5 - let request_model = "claude-opus-4-5-thinking"; - let normalized = normalize_to_standard_id(request_model) - .unwrap_or_else(|| request_model.to_string()); - - assert_eq!(normalized, "claude-sonnet-4-5", "应该归一化为 claude-sonnet-4-5"); - - // 读取归一化后模型的 quota - let quota = crate::proxy::token_manager::TokenManager::get_model_quota_from_json_for_test( - &account_path, - &normalized, - ); - - assert_eq!( - quota, - Some(75), - "claude-opus-4-5-thinking 归一化后应该读取 claude-sonnet-4-5 的 quota (75%)" - ); - - // 清理临时文件 - let _ = std::fs::remove_file(&account_path); - } - - /// 辅助函数:创建带有自定义 account_path 的 mock token - fn create_mock_token_with_path( - account_id: &str, - email: &str, - protected_models: Vec<&str>, - remaining_quota: Option, - account_path: PathBuf, - ) -> ProxyToken { - ProxyToken { - account_id: account_id.to_string(), - access_token: format!("mock_access_token_{}", account_id), - refresh_token: format!("mock_refresh_token_{}", account_id), - expires_in: 3600, - timestamp: chrono::Utc::now().timestamp() + 3600, - email: email.to_string(), - account_path, - project_id: Some("test-project".to_string()), - subscription_tier: Some("PRO".to_string()), - remaining_quota, - protected_models: protected_models.iter().map(|s| s.to_string()).collect(), - health_score: 1.0, - reset_time: None, - validation_blocked: false, - validation_blocked_until: 0, - model_quotas: std::collections::HashMap::new(), - } - } -} diff --git a/src-tauri/src/proxy/tests/security_integration_tests.rs b/src-tauri/src/proxy/tests/security_integration_tests.rs deleted file mode 100644 index ec09bb045..000000000 --- a/src-tauri/src/proxy/tests/security_integration_tests.rs +++ /dev/null @@ -1,490 +0,0 @@ -//! IP Security Integration Tests -//! IP 安全功能的集成测试 -//! -//! 
这些测试需要启动完整的代理服务器来验证端到端的功能 - -#[cfg(test)] -mod integration_tests { - use crate::modules::security_db::{ - self, init_db, add_to_blacklist, remove_from_blacklist, - add_to_whitelist, remove_from_whitelist, get_blacklist, get_whitelist, - }; - use std::time::Duration; - - /// 辅助函数:清理测试环境 - fn cleanup_test_data() { - if let Ok(entries) = get_blacklist() { - for entry in entries { - let _ = remove_from_blacklist(&entry.id); - } - } - if let Ok(entries) = get_whitelist() { - for entry in entries { - let _ = remove_from_whitelist(&entry.id); - } - } - } - - // ============================================================================ - // 集成测试场景 1:黑名单阻止请求 - // ============================================================================ - - /// 测试场景:当 IP 在黑名单中时,请求应该被拒绝 - /// - /// 预期行为: - /// 1. 添加 IP 到黑名单 - /// 2. 该 IP 发起的请求返回 403 Forbidden - /// 3. 响应体包含封禁原因 - #[test] - fn test_scenario_blacklist_blocks_request() { - let _ = init_db(); - cleanup_test_data(); - - // 添加测试 IP 到黑名单 - let entry = add_to_blacklist( - "192.168.100.100", - Some("Integration test - malicious activity"), - None, - "integration_test", - ); - assert!(entry.is_ok(), "Should add IP to blacklist"); - - // 验证黑名单条目存在 - let blacklist = get_blacklist().unwrap(); - let found = blacklist.iter().any(|e| e.ip_pattern == "192.168.100.100"); - assert!(found, "IP should be in blacklist"); - - // 实际的 HTTP 请求测试需要启动服务器 - // 这里验证数据层正确性 - let is_blocked = security_db::is_ip_in_blacklist("192.168.100.100").unwrap(); - assert!(is_blocked, "IP should be blocked"); - - cleanup_test_data(); - } - - // ============================================================================ - // 集成测试场景 2:白名单优先模式 - // ============================================================================ - - /// 测试场景:白名单优先模式下,白名单 IP 跳过黑名单检查 - /// - /// 预期行为: - /// 1. IP 同时存在于黑名单和白名单 - /// 2. 启用 whitelist_priority 模式 - /// 3. 
请求应该被允许(白名单优先) - #[test] - fn test_scenario_whitelist_priority() { - let _ = init_db(); - cleanup_test_data(); - - // 添加 IP 到黑名单 - let _ = add_to_blacklist( - "10.0.0.50", - Some("Should be overridden by whitelist"), - None, - "test", - ); - - // 添加相同 IP 到白名单 - let _ = add_to_whitelist( - "10.0.0.50", - Some("Trusted - override blacklist"), - ); - - // 验证两个列表都包含该 IP - assert!(security_db::is_ip_in_blacklist("10.0.0.50").unwrap()); - assert!(security_db::is_ip_in_whitelist("10.0.0.50").unwrap()); - - // 在实际中间件中,whitelist_priority=true 时,会先检查白名单 - // 如果在白名单中,则跳过黑名单检查 - // 这里只验证数据正确性,中间件逻辑由 ip_filter.rs 保证 - - cleanup_test_data(); - } - - // ============================================================================ - // 集成测试场景 3:临时封禁与过期 - // ============================================================================ - - /// 测试场景:临时封禁在过期后自动解除 - /// - /// 预期行为: - /// 1. 添加临时封禁(已过期) - /// 2. 查询时自动清理过期条目 - /// 3. 请求应该被允许 - #[test] - fn test_scenario_temporary_ban_expiration() { - let _ = init_db(); - cleanup_test_data(); - - // 获取当前时间戳 - let now = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_secs() as i64; - - // 添加已过期的临时封禁 - let _ = add_to_blacklist( - "expired.ban.test", - Some("Temporary ban - should be expired"), - Some(now - 60), // 1分钟前过期 - "test", - ); - - // 查询时应该触发过期清理 - let is_blocked = security_db::is_ip_in_blacklist("expired.ban.test").unwrap(); - assert!(!is_blocked, "Expired ban should not block"); - - cleanup_test_data(); - } - - // ============================================================================ - // 集成测试场景 4:CIDR 范围封禁 - // ============================================================================ - - /// 测试场景:CIDR 范围封禁覆盖整个子网 - /// - /// 预期行为: - /// 1. 封禁 192.168.1.0/24 - /// 2. 192.168.1.x 的所有请求被拒绝 - /// 3. 
192.168.2.x 的请求正常通过 - #[test] - fn test_scenario_cidr_subnet_blocking() { - let _ = init_db(); - cleanup_test_data(); - - // 封禁整个子网 - let _ = add_to_blacklist( - "192.168.1.0/24", - Some("Entire subnet blocked"), - None, - "test", - ); - - // 验证子网内的 IP 被阻止 - for last_octet in [1, 50, 100, 200, 254] { - let ip = format!("192.168.1.{}", last_octet); - let is_blocked = security_db::is_ip_in_blacklist(&ip).unwrap(); - assert!(is_blocked, "IP {} should be blocked by CIDR", ip); - } - - // 验证子网外的 IP 不被阻止 - for last_octet in [1, 50, 100] { - let ip = format!("192.168.2.{}", last_octet); - let is_blocked = security_db::is_ip_in_blacklist(&ip).unwrap(); - assert!(!is_blocked, "IP {} should NOT be blocked", ip); - } - - cleanup_test_data(); - } - - // ============================================================================ - // 集成测试场景 5:封禁消息详情 - // ============================================================================ - - /// 测试场景:封禁响应包含详细信息 - /// - /// 预期行为: - /// 1. 添加带原因的封禁 - /// 2. 请求被拒绝时,响应包含: - /// - 封禁原因 - /// - 是否为临时/永久封禁 - /// - 剩余封禁时间(如果是临时) - #[test] - fn test_scenario_ban_message_details() { - let _ = init_db(); - cleanup_test_data(); - - let now = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_secs() as i64; - - // 添加临时封禁(2小时后过期) - let _ = add_to_blacklist( - "temp.ban.message", - Some("Rate limit exceeded"), - Some(now + 7200), // 2小时后 - "rate_limiter", - ); - - // 获取封禁详情 - let entry = security_db::get_blacklist_entry_for_ip("temp.ban.message") - .unwrap() - .unwrap(); - - assert_eq!(entry.reason.as_deref(), Some("Rate limit exceeded")); - assert!(entry.expires_at.is_some()); - - let remaining = entry.expires_at.unwrap() - now; - assert!(remaining > 0 && remaining <= 7200, "Should have ~2h remaining"); - - cleanup_test_data(); - } - - // ============================================================================ - // 集成测试场景 6:访问日志记录 - // 
============================================================================ - - /// 测试场景:被阻止的请求记录到日志 - /// - /// 预期行为: - /// 1. 黑名单 IP 发起请求 - /// 2. 请求被拒绝 - /// 3. 访问日志记录:IP、时间、状态(403)、封禁原因 - #[test] - fn test_scenario_blocked_request_logging() { - let _ = init_db(); - cleanup_test_data(); - - // 模拟保存被阻止的访问日志 - let log = security_db::IpAccessLog { - id: uuid::Uuid::new_v4().to_string(), - client_ip: "blocked.request.test".to_string(), - timestamp: std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_secs() as i64, - method: Some("POST".to_string()), - path: Some("/v1/messages".to_string()), - user_agent: Some("TestClient/1.0".to_string()), - status: Some(403), - duration: Some(0), - api_key_hash: None, - blocked: true, - block_reason: Some("IP in blacklist".to_string()), - username: None, - }; - - let save_result = security_db::save_ip_access_log(&log); - assert!(save_result.is_ok()); - - // 验证日志可以检索 - let logs = security_db::get_ip_access_logs(10, 0, None, true).unwrap(); - let found = logs.iter().any(|l| l.client_ip == "blocked.request.test"); - assert!(found, "Blocked request should be logged"); - - let _ = security_db::clear_ip_access_logs(); - } - - // ============================================================================ - // 集成测试场景 7:不影响正常请求性能 - // ============================================================================ - - /// 测试场景:安全检查不显著影响正常请求性能 - /// - /// 预期行为: - /// 1. 黑名单/白名单检查时间 < 5ms - /// 2. 
与没有安全检查的基线相比,延迟增加 < 10ms - #[test] - fn test_scenario_performance_impact() { - let _ = init_db(); - cleanup_test_data(); - - // 添加一些黑名单条目 - for i in 0..50 { - let _ = add_to_blacklist(&format!("perf.test.{}", i), None, None, "test"); - } - - // 添加一些 CIDR 规则 - for i in 0..10 { - let _ = add_to_blacklist(&format!("172.{}.0.0/16", i), None, None, "test"); - } - - // 测试查找性能 - let start = std::time::Instant::now(); - let iterations = 100; - - for _ in 0..iterations { - // 模拟正常请求的安全检查 - let _ = security_db::is_ip_in_whitelist("10.0.0.1"); - let _ = security_db::is_ip_in_blacklist("10.0.0.1"); - } - - let duration = start.elapsed(); - let avg_per_check = duration / (iterations * 2); - - println!("Average security check time: {:?}", avg_per_check); - - // 断言:平均每次检查应该在 5ms 以内 - assert!( - avg_per_check < Duration::from_millis(5), - "Security check should be fast" - ); - - cleanup_test_data(); - } - - // ============================================================================ - // 集成测试场景 8:数据持久化 - // ============================================================================ - - /// 测试场景:黑名单/白名单数据持久化 - /// - /// 预期行为: - /// 1. 添加数据后重新初始化数据库连接 - /// 2. 
数据仍然存在 - #[test] - fn test_scenario_data_persistence() { - let _ = init_db(); - cleanup_test_data(); - - // 添加数据 - let _ = add_to_blacklist("persist.test.ip", Some("Persistence test"), None, "test"); - let _ = add_to_whitelist("persist.white.ip", Some("Persistence test")); - - // 重新初始化(实际上只是验证数据仍然可读) - let _ = init_db(); - - // 验证数据仍然存在 - assert!(security_db::is_ip_in_blacklist("persist.test.ip").unwrap()); - assert!(security_db::is_ip_in_whitelist("persist.white.ip").unwrap()); - - cleanup_test_data(); - } -} - -// ============================================================================ -// 压力测试 -// ============================================================================ - -#[cfg(test)] -mod stress_tests { - use crate::modules::security_db::{ - init_db, add_to_blacklist, remove_from_blacklist, - is_ip_in_blacklist, get_blacklist, save_ip_access_log, - IpAccessLog, clear_ip_access_logs, - }; - use std::thread; - use std::time::{Duration, Instant}; - - /// 辅助函数:清理测试环境 - fn cleanup_test_data() { - if let Ok(entries) = get_blacklist() { - for entry in entries { - let _ = remove_from_blacklist(&entry.id); - } - } - let _ = clear_ip_access_logs(); - } - - /// 压力测试:大量黑名单条目 - #[test] - fn stress_test_large_blacklist() { - let _ = init_db(); - cleanup_test_data(); - - let count = 500; - - // 批量添加 - let start = Instant::now(); - for i in 0..count { - let _ = add_to_blacklist(&format!("stress.{}.{}.{}.{}", i/256, (i/16)%16, i%16, i), None, None, "stress"); - } - let add_duration = start.elapsed(); - println!("Added {} entries in {:?}", count, add_duration); - - // 随机查找测试 - let start = Instant::now(); - for i in 0..100 { - let _ = is_ip_in_blacklist(&format!("stress.{}.{}.{}.{}", i/256, (i/16)%16, i%16, i)); - } - let lookup_duration = start.elapsed(); - println!("100 lookups in large blacklist took {:?}", lookup_duration); - - // 验证性能合理 - assert!( - lookup_duration < Duration::from_secs(1), - "Lookups should be reasonably fast even with large blacklist" - ); - - 
cleanup_test_data(); - } - - /// 压力测试:大量访问日志 - #[test] - fn stress_test_access_logging() { - let _ = init_db(); - let _ = clear_ip_access_logs(); - - let count = 1000; - let now = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_secs() as i64; - - // 批量写入日志 - let start = Instant::now(); - for i in 0..count { - let log = IpAccessLog { - id: uuid::Uuid::new_v4().to_string(), - client_ip: format!("log.stress.{}", i % 100), - timestamp: now, - method: Some("POST".to_string()), - path: Some("/v1/messages".to_string()), - user_agent: Some("StressTest/1.0".to_string()), - status: Some(200), - duration: Some(100), - api_key_hash: Some("hash".to_string()), - blocked: false, - block_reason: None, - username: None, - }; - let _ = save_ip_access_log(&log); - } - let write_duration = start.elapsed(); - println!("Wrote {} access logs in {:?}", count, write_duration); - - // 验证写入性能合理 - assert!( - write_duration < Duration::from_secs(10), - "Access log writing should be reasonably fast" - ); - - let _ = clear_ip_access_logs(); - } - - /// 压力测试:并发操作 - #[test] - fn stress_test_concurrent_operations() { - let _ = init_db(); - cleanup_test_data(); - - let thread_count = 5; - let ops_per_thread = 20; - - let handles: Vec<_> = (0..thread_count) - .map(|t| { - thread::spawn(move || { - for i in 0..ops_per_thread { - // 每个线程添加-查询-删除 - let ip = format!("concurrent.{}.{}", t, i); - if let Ok(entry) = add_to_blacklist(&ip, None, None, "concurrent") { - let _ = is_ip_in_blacklist(&ip); - let _ = remove_from_blacklist(&entry.id); - } - } - }) - }) - .collect(); - - // 等待所有线程完成 - for handle in handles { - handle.join().expect("Thread should not panic"); - } - - // 验证没有遗留数据 - let remaining = get_blacklist().unwrap(); - let concurrent_remaining: Vec<_> = remaining - .iter() - .filter(|e| e.ip_pattern.starts_with("concurrent.")) - .collect(); - - assert!( - concurrent_remaining.is_empty(), - "All concurrent test data should be cleaned up" - ); - - 
cleanup_test_data(); - } -} diff --git a/src-tauri/src/proxy/tests/security_ip_tests.rs b/src-tauri/src/proxy/tests/security_ip_tests.rs deleted file mode 100644 index 4e44109f3..000000000 --- a/src-tauri/src/proxy/tests/security_ip_tests.rs +++ /dev/null @@ -1,746 +0,0 @@ -//! IP Security Module Tests -//! IP 安全监控功能的综合测试套件 -//! -//! 测试目标: -//! 1. 验证 IP 黑/白名单功能的正确性 -//! 2. 验证 CIDR 匹配逻辑 -//! 3. 验证过期时间处理 -//! 4. 验证不影响主流程性能 -//! 5. 验证数据库操作的原子性和一致性 - -#[cfg(test)] -mod security_db_tests { - use crate::modules::security_db::{ - self, IpAccessLog, IpBlacklistEntry, IpWhitelistEntry, - init_db, add_to_blacklist, remove_from_blacklist, get_blacklist, - is_ip_in_blacklist, get_blacklist_entry_for_ip, - add_to_whitelist, remove_from_whitelist, get_whitelist, - is_ip_in_whitelist, save_ip_access_log, get_ip_access_logs, - get_ip_stats, cleanup_old_ip_logs, clear_ip_access_logs, - }; - use std::time::{SystemTime, UNIX_EPOCH}; - - /// 辅助函数:获取当前时间戳 - fn now_timestamp() -> i64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs() as i64 - } - - /// 辅助函数:清理测试环境 - fn cleanup_test_data() { - // 清理黑名单 - if let Ok(entries) = get_blacklist() { - for entry in entries { - let _ = remove_from_blacklist(&entry.id); - } - } - // 清理白名单 - if let Ok(entries) = get_whitelist() { - for entry in entries { - let _ = remove_from_whitelist(&entry.id); - } - } - // 清理访问日志 - let _ = clear_ip_access_logs(); - } - - // ============================================================================ - // 测试类别 1: 数据库初始化 - // ============================================================================ - - #[test] - fn test_db_initialization() { - // 验证数据库初始化不会 panic - let result = init_db(); - assert!(result.is_ok(), "Database initialization should succeed: {:?}", result.err()); - } - - #[test] - fn test_db_multiple_initializations() { - // 验证多次初始化不会出错 (幂等性) - for _ in 0..3 { - let result = init_db(); - assert!(result.is_ok(), "Multiple DB initializations should be idempotent"); - } - } 
- - // ============================================================================ - // 测试类别 2: IP 黑名单基本操作 - // ============================================================================ - - #[test] - fn test_blacklist_add_and_check() { - let _ = init_db(); - cleanup_test_data(); - - // 添加 IP 到黑名单 - let result = add_to_blacklist("192.168.1.100", Some("Test block"), None, "test"); - assert!(result.is_ok(), "Should add IP to blacklist: {:?}", result.err()); - - // 验证 IP 在黑名单中 - let is_blocked = is_ip_in_blacklist("192.168.1.100"); - assert!(is_blocked.is_ok()); - assert!(is_blocked.unwrap(), "IP should be in blacklist"); - - // 验证其他 IP 不在黑名单中 - let is_other_blocked = is_ip_in_blacklist("192.168.1.101"); - assert!(is_other_blocked.is_ok()); - assert!(!is_other_blocked.unwrap(), "Other IP should not be in blacklist"); - - cleanup_test_data(); - } - - #[test] - fn test_blacklist_remove() { - let _ = init_db(); - cleanup_test_data(); - - // 添加 IP - let entry = add_to_blacklist("10.0.0.5", Some("Temp block"), None, "test").unwrap(); - - // 验证存在 - assert!(is_ip_in_blacklist("10.0.0.5").unwrap()); - - // 移除 - let remove_result = remove_from_blacklist(&entry.id); - assert!(remove_result.is_ok()); - - // 验证已移除 - assert!(!is_ip_in_blacklist("10.0.0.5").unwrap()); - - cleanup_test_data(); - } - - #[test] - fn test_blacklist_get_entry_details() { - let _ = init_db(); - cleanup_test_data(); - - // 添加带有详细信息的条目 - let _ = add_to_blacklist( - "172.16.0.50", - Some("Abuse detected"), - Some(now_timestamp() + 3600), // 1小时后过期 - "admin", - ); - - // 获取条目详情 - let entry_result = get_blacklist_entry_for_ip("172.16.0.50"); - assert!(entry_result.is_ok()); - - let entry = entry_result.unwrap(); - assert!(entry.is_some()); - - let entry = entry.unwrap(); - assert_eq!(entry.ip_pattern, "172.16.0.50"); - assert_eq!(entry.reason.as_deref(), Some("Abuse detected")); - assert_eq!(entry.created_by, "admin"); - assert!(entry.expires_at.is_some()); - - cleanup_test_data(); - } - - // 
============================================================================ - // 测试类别 3: CIDR 匹配 - // ============================================================================ - - #[test] - fn test_cidr_matching_basic() { - let _ = init_db(); - cleanup_test_data(); - - // 添加 CIDR 范围到黑名单 - let _ = add_to_blacklist("192.168.1.0/24", Some("Block subnet"), None, "test"); - - // 验证该子网内的 IP 都被阻止 - assert!(is_ip_in_blacklist("192.168.1.1").unwrap(), "192.168.1.1 should match /24"); - assert!(is_ip_in_blacklist("192.168.1.100").unwrap(), "192.168.1.100 should match /24"); - assert!(is_ip_in_blacklist("192.168.1.254").unwrap(), "192.168.1.254 should match /24"); - - // 验证子网外的 IP 不被阻止 - assert!(!is_ip_in_blacklist("192.168.2.1").unwrap(), "192.168.2.1 should not match"); - assert!(!is_ip_in_blacklist("10.0.0.1").unwrap(), "10.0.0.1 should not match"); - - cleanup_test_data(); - } - - #[test] - fn test_cidr_matching_various_masks() { - let _ = init_db(); - cleanup_test_data(); - - // 测试 /16 掩码 - let _ = add_to_blacklist("10.10.0.0/16", Some("Block /16"), None, "test"); - - assert!(is_ip_in_blacklist("10.10.0.1").unwrap(), "Should match /16"); - assert!(is_ip_in_blacklist("10.10.255.255").unwrap(), "Should match /16"); - assert!(!is_ip_in_blacklist("10.11.0.1").unwrap(), "Should not match /16"); - - cleanup_test_data(); - - // 测试 /32 掩码 (单个 IP) - let _ = add_to_blacklist("8.8.8.8/32", Some("Block single"), None, "test"); - - assert!(is_ip_in_blacklist("8.8.8.8").unwrap(), "Should match /32"); - assert!(!is_ip_in_blacklist("8.8.8.9").unwrap(), "Should not match /32"); - - cleanup_test_data(); - } - - #[test] - fn test_cidr_edge_cases() { - let _ = init_db(); - cleanup_test_data(); - - // 测试 /0 (所有 IP) - 边界情况 - let _ = add_to_blacklist("0.0.0.0/0", Some("Block all"), None, "test"); - - assert!(is_ip_in_blacklist("1.2.3.4").unwrap(), "Everything should match /0"); - assert!(is_ip_in_blacklist("255.255.255.255").unwrap(), "Everything should match /0"); - - cleanup_test_data(); 
- - // 测试 /8 掩码 - let _ = add_to_blacklist("10.0.0.0/8", Some("Block /8"), None, "test"); - - assert!(is_ip_in_blacklist("10.255.255.255").unwrap(), "Should match /8"); - assert!(!is_ip_in_blacklist("11.0.0.0").unwrap(), "Should not match /8"); - - cleanup_test_data(); - } - - // ============================================================================ - // 测试类别 4: 过期时间处理 - // ============================================================================ - - #[test] - fn test_blacklist_expiration() { - let _ = init_db(); - cleanup_test_data(); - - // 添加一个已过期的条目 - let _ = add_to_blacklist( - "expired.test.ip", - Some("Already expired"), - Some(now_timestamp() - 60), // 1分钟前过期 - "test", - ); - - // 过期条目应该被自动清理 - let is_blocked = is_ip_in_blacklist("expired.test.ip"); - // 注意:取决于实现,过期条目可能在查询时被清理 - // 根据 security_db.rs 的实现,get_blacklist_entry_for_ip 会先清理过期条目 - assert!(!is_blocked.unwrap(), "Expired entry should be cleaned up"); - - cleanup_test_data(); - } - - #[test] - fn test_blacklist_not_yet_expired() { - let _ = init_db(); - cleanup_test_data(); - - // 添加一个未过期的条目 - let _ = add_to_blacklist( - "not.expired.ip", - Some("Will expire later"), - Some(now_timestamp() + 3600), // 1小时后过期 - "test", - ); - - // 未过期条目应该仍然生效 - assert!(is_ip_in_blacklist("not.expired.ip").unwrap()); - - cleanup_test_data(); - } - - #[test] - fn test_permanent_blacklist() { - let _ = init_db(); - cleanup_test_data(); - - // 添加永久封禁 (无过期时间) - let _ = add_to_blacklist( - "permanent.block.ip", - Some("Permanent ban"), - None, // 无过期时间 - "test", - ); - - // 永久封禁应该始终生效 - assert!(is_ip_in_blacklist("permanent.block.ip").unwrap()); - - cleanup_test_data(); - } - - // ============================================================================ - // 测试类别 5: IP 白名单 - // ============================================================================ - - #[test] - fn test_whitelist_add_and_check() { - let _ = init_db(); - cleanup_test_data(); - - // 添加 IP 到白名单 - let result = add_to_whitelist("10.0.0.1", 
Some("Trusted server")); - assert!(result.is_ok()); - - // 验证 IP 在白名单中 - assert!(is_ip_in_whitelist("10.0.0.1").unwrap()); - assert!(!is_ip_in_whitelist("10.0.0.2").unwrap()); - - cleanup_test_data(); - } - - #[test] - fn test_whitelist_cidr() { - let _ = init_db(); - cleanup_test_data(); - - // 添加 CIDR 范围到白名单 - let _ = add_to_whitelist("192.168.0.0/16", Some("Internal network")); - - // 验证子网内的 IP 都被允许 - assert!(is_ip_in_whitelist("192.168.1.1").unwrap()); - assert!(is_ip_in_whitelist("192.168.255.255").unwrap()); - - // 验证子网外的 IP 不在白名单 - assert!(!is_ip_in_whitelist("10.0.0.1").unwrap()); - - cleanup_test_data(); - } - - // ============================================================================ - // 测试类别 6: IP 访问日志 - // ============================================================================ - - #[test] - fn test_access_log_save_and_retrieve() { - let _ = init_db(); - cleanup_test_data(); - - // 保存访问日志 - let log = IpAccessLog { - id: uuid::Uuid::new_v4().to_string(), - client_ip: "test.log.ip".to_string(), - timestamp: now_timestamp(), - method: Some("POST".to_string()), - path: Some("/v1/messages".to_string()), - user_agent: Some("TestClient/1.0".to_string()), - status: Some(200), - duration: Some(150), - api_key_hash: Some("hash123".to_string()), - blocked: false, - block_reason: None, - username: None, - }; - - let save_result = save_ip_access_log(&log); - assert!(save_result.is_ok(), "Should save access log: {:?}", save_result.err()); - - // 检索日志 - let logs = get_ip_access_logs(10, 0, Some("test.log.ip"), false); - assert!(logs.is_ok()); - - let logs = logs.unwrap(); - assert!(!logs.is_empty(), "Should retrieve saved log"); - assert_eq!(logs[0].client_ip, "test.log.ip"); - - cleanup_test_data(); - } - - #[test] - fn test_access_log_blocked_filter() { - let _ = init_db(); - cleanup_test_data(); - - // 保存正常日志 - let normal_log = IpAccessLog { - id: uuid::Uuid::new_v4().to_string(), - client_ip: "normal.access.ip".to_string(), - timestamp: now_timestamp(), 
- method: Some("GET".to_string()), - path: Some("/healthz".to_string()), - user_agent: None, - status: Some(200), - duration: Some(10), - api_key_hash: None, - blocked: false, - block_reason: None, - username: None, - }; - let _ = save_ip_access_log(&normal_log); - - // 保存被阻止的日志 - let blocked_log = IpAccessLog { - id: uuid::Uuid::new_v4().to_string(), - client_ip: "blocked.access.ip".to_string(), - timestamp: now_timestamp(), - method: Some("POST".to_string()), - path: Some("/v1/messages".to_string()), - user_agent: None, - status: Some(403), - duration: Some(0), - api_key_hash: None, - blocked: true, - block_reason: Some("IP in blacklist".to_string()), - username: None, - }; - let _ = save_ip_access_log(&blocked_log); - - // 只检索被阻止的日志 - let blocked_only = get_ip_access_logs(10, 0, None, true).unwrap(); - assert_eq!(blocked_only.len(), 1); - assert_eq!(blocked_only[0].client_ip, "blocked.access.ip"); - assert!(blocked_only[0].blocked); - - cleanup_test_data(); - } - - // ============================================================================ - // 测试类别 7: 统计功能 - // ============================================================================ - - #[test] - fn test_ip_stats() { - let _ = init_db(); - cleanup_test_data(); - - // 添加一些测试数据 - for i in 0..5 { - let log = IpAccessLog { - id: uuid::Uuid::new_v4().to_string(), - client_ip: format!("stats.test.{}", i % 3), // 3 个唯一 IP - timestamp: now_timestamp(), - method: Some("POST".to_string()), - path: Some("/v1/messages".to_string()), - user_agent: None, - status: Some(200), - duration: Some(100), - api_key_hash: None, - blocked: i == 4, // 最后一个被阻止 - block_reason: if i == 4 { Some("Test".to_string()) } else { None }, - username: None, - }; - let _ = save_ip_access_log(&log); - } - - // 添加黑名单和白名单条目 - let _ = add_to_blacklist("stats.black.1", None, None, "test"); - let _ = add_to_blacklist("stats.black.2", None, None, "test"); - let _ = add_to_whitelist("stats.white.1", None); - - // 获取统计 - let stats = get_ip_stats(); 
- assert!(stats.is_ok()); - - let stats = stats.unwrap(); - assert!(stats.total_requests >= 5, "Should have at least 5 requests"); - assert!(stats.unique_ips >= 3, "Should have at least 3 unique IPs"); - assert!(stats.blocked_count >= 1, "Should have at least 1 blocked request"); - assert_eq!(stats.blacklist_count, 2); - assert_eq!(stats.whitelist_count, 1); - - cleanup_test_data(); - } - - // ============================================================================ - // 测试类别 8: 清理功能 - // ============================================================================ - - #[test] - fn test_cleanup_old_logs() { - let _ = init_db(); - cleanup_test_data(); - - // 添加一条 "旧" 日志 (模拟 2 天前) - let old_log = IpAccessLog { - id: uuid::Uuid::new_v4().to_string(), - client_ip: "old.log.ip".to_string(), - timestamp: now_timestamp() - (2 * 24 * 3600), // 2 天前 - method: Some("GET".to_string()), - path: Some("/old".to_string()), - user_agent: None, - status: Some(200), - duration: Some(10), - api_key_hash: None, - blocked: false, - block_reason: None, - username: None, - }; - let _ = save_ip_access_log(&old_log); - - // 添加一条新日志 - let new_log = IpAccessLog { - id: uuid::Uuid::new_v4().to_string(), - client_ip: "new.log.ip".to_string(), - timestamp: now_timestamp(), - method: Some("GET".to_string()), - path: Some("/new".to_string()), - user_agent: None, - status: Some(200), - duration: Some(10), - api_key_hash: None, - blocked: false, - block_reason: None, - username: None, - }; - let _ = save_ip_access_log(&new_log); - - // 清理 1 天前的日志 - let deleted = cleanup_old_ip_logs(1); - assert!(deleted.is_ok()); - assert!(deleted.unwrap() >= 1, "Should delete at least 1 old log"); - - // 验证新日志仍然存在 - let logs = get_ip_access_logs(10, 0, Some("new.log.ip"), false).unwrap(); - assert!(!logs.is_empty(), "New log should still exist"); - - // 验证旧日志已被清理 - let old_logs = get_ip_access_logs(10, 0, Some("old.log.ip"), false).unwrap(); - assert!(old_logs.is_empty(), "Old log should be cleaned up"); - - 
cleanup_test_data(); - } - - // ============================================================================ - // 测试类别 9: 并发安全性 - // ============================================================================ - - #[test] - fn test_concurrent_access() { - use std::thread; - - let _ = init_db(); - cleanup_test_data(); - - let handles: Vec<_> = (0..10) - .map(|i| { - thread::spawn(move || { - // 每个线程添加不同的 IP - let ip = format!("concurrent.test.{}", i); - let _ = add_to_blacklist(&ip, Some("Concurrent test"), None, "test"); - - // 验证自己添加的 IP - is_ip_in_blacklist(&ip).unwrap_or(false) - }) - }) - .collect(); - - let results: Vec = handles.into_iter().map(|h| h.join().unwrap()).collect(); - - // 所有线程都应该成功 - assert!(results.iter().all(|&r| r), "All concurrent adds should succeed"); - - cleanup_test_data(); - } - - // ============================================================================ - // 测试类别 10: 边界情况和错误处理 - // ============================================================================ - - #[test] - fn test_duplicate_blacklist_entry() { - let _ = init_db(); - cleanup_test_data(); - - // 第一次添加应该成功 - let result1 = add_to_blacklist("duplicate.test.ip", Some("First"), None, "test"); - assert!(result1.is_ok()); - - // 第二次添加相同 IP 应该失败 (UNIQUE constraint) - let result2 = add_to_blacklist("duplicate.test.ip", Some("Second"), None, "test"); - assert!(result2.is_err(), "Duplicate IP should fail"); - - cleanup_test_data(); - } - - #[test] - fn test_empty_ip_pattern() { - let _ = init_db(); - cleanup_test_data(); - - // 空 IP 模式应该仍然可以添加 (取决于业务需求) - // 这里只测试不会 panic - let result = add_to_blacklist("", Some("Empty IP"), None, "test"); - // 结果可能成功或失败,但不应该 panic - let _ = result; - - cleanup_test_data(); - } - - #[test] - fn test_special_characters_in_reason() { - let _ = init_db(); - cleanup_test_data(); - - // 测试包含特殊字符的原因 - let reason = "Test with 'quotes' and \"double quotes\" and emoji 🚫"; - let result = add_to_blacklist("special.char.test", Some(reason), None, "test"); - 
assert!(result.is_ok()); - - let entry = get_blacklist_entry_for_ip("special.char.test").unwrap().unwrap(); - assert_eq!(entry.reason.as_deref(), Some(reason)); - - cleanup_test_data(); - } - - #[test] - fn test_hit_count_increment() { - let _ = init_db(); - cleanup_test_data(); - - // 添加一个黑名单条目 - let _ = add_to_blacklist("hit.count.test", Some("Count test"), None, "test"); - - // 多次查询应该增加 hit_count - for _ in 0..5 { - let _ = get_blacklist_entry_for_ip("hit.count.test"); - } - - // 检查 hit_count - let blacklist = get_blacklist().unwrap(); - let entry = blacklist.iter().find(|e| e.ip_pattern == "hit.count.test"); - assert!(entry.is_some()); - assert!(entry.unwrap().hit_count >= 5, "Hit count should be at least 5"); - - cleanup_test_data(); - } -} - -// ============================================================================ -// IP Filter 中间件测试 (单元测试) -// ============================================================================ - -#[cfg(test)] -mod ip_filter_middleware_tests { - // 注意:中间件测试需要模拟 HTTP 请求,这里提供测试框架 - // 实际的集成测试应该在启动完整服务后进行 - - /// 验证 IP 提取逻辑的正确性 - #[test] - fn test_ip_extraction_priority() { - // X-Forwarded-For 应该优先于 X-Real-IP - // X-Real-IP 应该优先于 ConnectInfo - // 这里只验证逻辑概念,实际测试需要构造 HTTP 请求 - - // 场景 1: X-Forwarded-For 有多个 IP,取第一个 - let xff_header = "203.0.113.1, 198.51.100.2, 192.0.2.3"; - let first_ip = xff_header.split(',').next().unwrap().trim(); - assert_eq!(first_ip, "203.0.113.1"); - - // 场景 2: 单个 IP - let single_ip = "10.0.0.1"; - let parsed = single_ip.split(',').next().unwrap().trim(); - assert_eq!(parsed, "10.0.0.1"); - } -} - -// ============================================================================ -// 性能基准测试 -// ============================================================================ - -#[cfg(test)] -mod performance_benchmarks { - use super::security_db_tests::*; - use crate::modules::security_db::{ - init_db, add_to_blacklist, is_ip_in_blacklist, get_blacklist, - clear_ip_access_logs, - }; - use std::time::Instant; - - 
/// 基准测试:黑名单查找性能 - #[test] - fn benchmark_blacklist_lookup() { - let _ = init_db(); - - // 清理并添加 100 个黑名单条目 - if let Ok(entries) = get_blacklist() { - for entry in entries { - let _ = crate::modules::security_db::remove_from_blacklist(&entry.id); - } - } - - for i in 0..100 { - let _ = add_to_blacklist( - &format!("bench.ip.{}", i), - Some("Benchmark"), - None, - "test", - ); - } - - // 执行 1000 次查找 - let start = Instant::now(); - for _ in 0..1000 { - let _ = is_ip_in_blacklist("bench.ip.50"); - } - let duration = start.elapsed(); - - println!("1000 blacklist lookups took: {:?}", duration); - println!("Average per lookup: {:?}", duration / 1000); - - // 性能断言:平均查找应该在 1ms 以内 - assert!( - duration.as_millis() < 5000, - "Blacklist lookup should be fast (< 5ms avg)" - ); - - // 清理 - if let Ok(entries) = get_blacklist() { - for entry in entries { - let _ = crate::modules::security_db::remove_from_blacklist(&entry.id); - } - } - } - - /// 基准测试:CIDR 匹配性能 - #[test] - fn benchmark_cidr_matching() { - let _ = init_db(); - - // 清理并添加 CIDR 规则 - if let Ok(entries) = get_blacklist() { - for entry in entries { - let _ = crate::modules::security_db::remove_from_blacklist(&entry.id); - } - } - - // 添加 20 个 CIDR 规则 - for i in 0..20 { - let _ = add_to_blacklist( - &format!("10.{}.0.0/16", i), - Some("CIDR Benchmark"), - None, - "test", - ); - } - - // 测试 CIDR 匹配性能 - let start = Instant::now(); - for _ in 0..1000 { - // 测试需要遍历 CIDR 的 IP - let _ = is_ip_in_blacklist("10.5.100.50"); - } - let duration = start.elapsed(); - - println!("1000 CIDR matches took: {:?}", duration); - println!("Average per match: {:?}", duration / 1000); - - // 性能断言:CIDR 匹配应该在合理时间内 - assert!( - duration.as_millis() < 5000, - "CIDR matching should be reasonably fast" - ); - - // 清理 - if let Ok(entries) = get_blacklist() { - for entry in entries { - let _ = crate::modules::security_db::remove_from_blacklist(&entry.id); - } - } - } -} diff --git a/src-tauri/src/proxy/token_manager.rs 
b/src-tauri/src/proxy/token_manager.rs deleted file mode 100644 index 0ec118b6f..000000000 --- a/src-tauri/src/proxy/token_manager.rs +++ /dev/null @@ -1,3028 +0,0 @@ -// 移除冗余的顶层导入,因为这些在代码中已由 full path 或局部导入处理 -use dashmap::DashMap; -use std::collections::{HashSet, HashMap}; -use std::path::PathBuf; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use tokio_util::sync::CancellationToken; - -use crate::proxy::rate_limit::RateLimitTracker; -use crate::proxy::sticky_config::StickySessionConfig; - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum OnDiskAccountState { - Enabled, - Disabled, - Unknown, -} - -#[derive(Debug, Clone)] -pub struct ProxyToken { - pub account_id: String, - pub access_token: String, - pub refresh_token: String, - pub expires_in: i64, - pub timestamp: i64, - pub email: String, - pub account_path: PathBuf, // 账号文件路径,用于更新 - pub project_id: Option, - pub subscription_tier: Option, // "FREE" | "PRO" | "ULTRA" - pub remaining_quota: Option, // [FIX #563] Remaining quota for priority sorting - pub protected_models: HashSet, // [NEW #621] - pub health_score: f32, // [NEW] 健康分数 (0.0 - 1.0) - pub reset_time: Option, // [NEW] 配额刷新时间戳(用于排序优化) - pub validation_blocked: bool, // [NEW] Check for validation block (VALIDATION_REQUIRED temporary block) - pub validation_blocked_until: i64, // [NEW] Timestamp until which the account is blocked - pub model_quotas: HashMap, // [OPTIMIZATION] In-memory cache for model-specific quotas -} - -pub struct TokenManager { - tokens: Arc>, // account_id -> ProxyToken - current_index: Arc, - last_used_account: Arc>>, - data_dir: PathBuf, - rate_limit_tracker: Arc, // 新增: 限流跟踪器 - sticky_config: Arc>, // 新增:调度配置 - session_accounts: Arc>, // 新增:会话与账号映射 (SessionID -> AccountID) - preferred_account_id: Arc>>, // [FIX #820] 优先使用的账号ID(固定账号模式) - health_scores: Arc>, // account_id -> health_score - circuit_breaker_config: Arc>, // [NEW] 熔断配置缓存 - /// 支持优雅关闭时主动 abort 后台任务 - auto_cleanup_handle: Arc>>>, - 
cancel_token: CancellationToken, -} - -impl TokenManager { - /// 创建新的 TokenManager - pub fn new(data_dir: PathBuf) -> Self { - Self { - tokens: Arc::new(DashMap::new()), - current_index: Arc::new(AtomicUsize::new(0)), - last_used_account: Arc::new(tokio::sync::Mutex::new(None)), - data_dir, - rate_limit_tracker: Arc::new(RateLimitTracker::new()), - sticky_config: Arc::new(tokio::sync::RwLock::new(StickySessionConfig::default())), - session_accounts: Arc::new(DashMap::new()), - preferred_account_id: Arc::new(tokio::sync::RwLock::new(None)), // [FIX #820] - health_scores: Arc::new(DashMap::new()), - circuit_breaker_config: Arc::new(tokio::sync::RwLock::new( - crate::models::CircuitBreakerConfig::default(), - )), - auto_cleanup_handle: Arc::new(tokio::sync::Mutex::new(None)), - cancel_token: CancellationToken::new(), - } - } - - /// 启动限流记录自动清理后台任务(每15秒检查并清除过期记录) - pub async fn start_auto_cleanup(&self) { - let tracker = self.rate_limit_tracker.clone(); - let cancel = self.cancel_token.child_token(); - - let handle = tokio::spawn(async move { - let mut interval = tokio::time::interval(std::time::Duration::from_secs(15)); - loop { - tokio::select! 
{ - _ = cancel.cancelled() => { - tracing::info!("Auto-cleanup task received cancel signal"); - break; - } - _ = interval.tick() => { - let cleaned = tracker.cleanup_expired(); - if cleaned > 0 { - tracing::info!( - "Auto-cleanup: Removed {} expired rate limit record(s)", - cleaned - ); - } - } - } - } - }); - - // 先 abort 旧任务(防止任务泄漏),再存储新 handle - let mut guard = self.auto_cleanup_handle.lock().await; - if let Some(old) = guard.take() { - old.abort(); - tracing::warn!("Aborted previous auto-cleanup task"); - } - *guard = Some(handle); - - tracing::info!("Rate limit auto-cleanup task started (interval: 15s)"); - } - - /// 从主应用账号目录加载所有账号 - pub async fn load_accounts(&self) -> Result { - let accounts_dir = self.data_dir.join("accounts"); - - if !accounts_dir.exists() { - return Err(format!("账号目录不存在: {:?}", accounts_dir)); - } - - // Reload should reflect current on-disk state (accounts can be added/removed/disabled). - self.tokens.clear(); - self.current_index.store(0, Ordering::SeqCst); - { - let mut last_used = self.last_used_account.lock().await; - *last_used = None; - } - - let entries = std::fs::read_dir(&accounts_dir) - .map_err(|e| format!("读取账号目录失败: {}", e))?; - - let mut count = 0; - - for entry in entries { - let entry = entry.map_err(|e| format!("读取目录项失败: {}", e))?; - let path = entry.path(); - - if path.extension().and_then(|s| s.to_str()) != Some("json") { - continue; - } - - // 尝试加载账号 - match self.load_single_account(&path).await { - Ok(Some(token)) => { - let account_id = token.account_id.clone(); - self.tokens.insert(account_id, token); - count += 1; - } - Ok(None) => { - // 跳过无效账号 - } - Err(e) => { - tracing::debug!("加载账号失败 {:?}: {}", path, e); - } - } - } - - Ok(count) - } - - /// 重新加载指定账号(用于配额更新后的实时同步) - pub async fn reload_account(&self, account_id: &str) -> Result<(), String> { - let path = self - .data_dir - .join("accounts") - .join(format!("{}.json", account_id)); - if !path.exists() { - return Err(format!("账号文件不存在: {:?}", path)); - } - - 
match self.load_single_account(&path).await { - Ok(Some(token)) => { - self.tokens.insert(account_id.to_string(), token); - // [NEW] 重新加载账号时自动清除该账号的限流记录 - self.clear_rate_limit(account_id); - Ok(()) - } - Ok(None) => { - // [FIX] 账号被禁用或不可用时,从内存池中彻底移除 (Issue #1565) - // load_single_account returning None means the account should be skipped in its - // current state (disabled / proxy_disabled / quota_protection / validation_blocked...). - self.remove_account(account_id); - Ok(()) - } - Err(e) => Err(format!("同步账号失败: {}", e)), - } - } - - /// 重新加载所有账号 - pub async fn reload_all_accounts(&self) -> Result { - let count = self.load_accounts().await?; - // [NEW] 重新加载所有账号时自动清除所有限流记录 - self.clear_all_rate_limits(); - Ok(count) - } - - /// 从内存中彻底移除指定账号及其关联数据 (Issue #1477) - pub fn remove_account(&self, account_id: &str) { - // 1. 从 DashMap 中移除令牌 - if self.tokens.remove(account_id).is_some() { - tracing::info!("[Proxy] Removed account {} from memory cache", account_id); - } - - // 2. 清理相关的健康分数 - self.health_scores.remove(account_id); - - // 3. 清理该账号的所有限流记录 - self.clear_rate_limit(account_id); - - // 4. 清理涉及该账号的所有会话绑定 - self.session_accounts.retain(|_, v| v != account_id); - - // 5. 如果是当前优先账号,也需要清理 - if let Ok(mut preferred) = self.preferred_account_id.try_write() { - if preferred.as_deref() == Some(account_id) { - *preferred = None; - tracing::info!("[Proxy] Cleared preferred account status for {}", account_id); - } - } - } - - /// Check if an account has been disabled on disk. - /// - /// Safety net: avoids selecting a disabled account when the in-memory pool hasn't been - /// reloaded yet (e.g. fixed account mode / sticky session). - /// - /// Note: this is intentionally tolerant to transient read/parse failures (e.g. concurrent - /// writes). Failures are reported as `Unknown` so callers can skip without purging the in-memory - /// token pool. 
- async fn get_account_state_on_disk(account_path: &std::path::PathBuf) -> OnDiskAccountState { - const MAX_RETRIES: usize = 2; - const RETRY_DELAY_MS: u64 = 5; - - for attempt in 0..=MAX_RETRIES { - let content = match tokio::fs::read_to_string(account_path).await { - Ok(c) => c, - Err(e) => { - // If the file is gone, the in-memory token is definitely stale. - if e.kind() == std::io::ErrorKind::NotFound { - return OnDiskAccountState::Disabled; - } - if attempt < MAX_RETRIES { - tokio::time::sleep(std::time::Duration::from_millis(RETRY_DELAY_MS)).await; - continue; - } - tracing::debug!( - "Failed to read account file on disk {:?}: {}", - account_path, - e - ); - return OnDiskAccountState::Unknown; - } - }; - - let account = match serde_json::from_str::(&content) { - Ok(v) => v, - Err(e) => { - if attempt < MAX_RETRIES { - tokio::time::sleep(std::time::Duration::from_millis(RETRY_DELAY_MS)).await; - continue; - } - tracing::debug!( - "Failed to parse account JSON on disk {:?}: {}", - account_path, - e - ); - return OnDiskAccountState::Unknown; - } - }; - - let disabled = account - .get("disabled") - .and_then(|v| v.as_bool()) - .unwrap_or(false) - || account - .get("proxy_disabled") - .and_then(|v| v.as_bool()) - .unwrap_or(false) - || account - .get("quota") - .and_then(|q| q.get("is_forbidden")) - .and_then(|v| v.as_bool()) - .unwrap_or(false); - - return if disabled { - OnDiskAccountState::Disabled - } else { - OnDiskAccountState::Enabled - }; - } - - OnDiskAccountState::Unknown - } - - /// 加载单个账号 - async fn load_single_account(&self, path: &PathBuf) -> Result, String> { - let content = std::fs::read_to_string(path).map_err(|e| format!("读取文件失败: {}", e))?; - - let mut account: serde_json::Value = - serde_json::from_str(&content).map_err(|e| format!("解析 JSON 失败: {}", e))?; - - // [修复 #1344] 先检查账号是否被手动禁用(非配额保护原因) - let is_proxy_disabled = account - .get("proxy_disabled") - .and_then(|v| v.as_bool()) - .unwrap_or(false); - - let disabled_reason = account - 
.get("proxy_disabled_reason") - .and_then(|v| v.as_str()) - .unwrap_or(""); - - if is_proxy_disabled && disabled_reason != "quota_protection" { - // Account manually disabled - tracing::debug!( - "Account skipped due to manual disable: {:?} (email={}, reason={})", - path, - account - .get("email") - .and_then(|v| v.as_str()) - .unwrap_or(""), - disabled_reason - ); - return Ok(None); - } - - // [NEW] Check for validation block (VALIDATION_REQUIRED temporary block) - if account - .get("validation_blocked") - .and_then(|v| v.as_bool()) - .unwrap_or(false) - { - let block_until = account - .get("validation_blocked_until") - .and_then(|v| v.as_i64()) - .unwrap_or(0); - - let now = chrono::Utc::now().timestamp(); - - if now < block_until { - // Still blocked - tracing::debug!( - "Skipping validation-blocked account: {:?} (email={}, blocked until {})", - path, - account - .get("email") - .and_then(|v| v.as_str()) - .unwrap_or(""), - chrono::DateTime::from_timestamp(block_until, 0) - .map(|dt| dt.format("%H:%M:%S").to_string()) - .unwrap_or_else(|| block_until.to_string()) - ); - return Ok(None); - } else { - // Block expired - clear it - account["validation_blocked"] = serde_json::json!(false); - account["validation_blocked_until"] = serde_json::json!(0); - account["validation_blocked_reason"] = serde_json::Value::Null; - - let updated_json = - serde_json::to_string_pretty(&account).map_err(|e| e.to_string())?; - std::fs::write(path, updated_json).map_err(|e| e.to_string())?; - tracing::info!( - "Validation block expired and cleared for account: {}", - account - .get("email") - .and_then(|v| v.as_str()) - .unwrap_or("") - ); - } - } - - // 最终检查账号主开关 - if account - .get("disabled") - .and_then(|v| v.as_bool()) - .unwrap_or(false) - { - tracing::debug!( - "Skipping disabled account file: {:?} (email={})", - path, - account - .get("email") - .and_then(|v| v.as_str()) - .unwrap_or("") - ); - return Ok(None); - } - - // Safety check: verify state on disk again to handle 
concurrent mid-parse writes - if Self::get_account_state_on_disk(path).await == OnDiskAccountState::Disabled { - tracing::debug!("Account file {:?} is disabled on disk, skipping.", path); - return Ok(None); - } - - // 配额保护检查 - 只处理配额保护逻辑 - // 这样可以在加载时自动恢复配额已恢复的账号 - if self.check_and_protect_quota(&mut account, path).await { - tracing::debug!( - "Account skipped due to quota protection: {:?} (email={})", - path, - account - .get("email") - .and_then(|v| v.as_str()) - .unwrap_or("") - ); - return Ok(None); - } - - // [兼容性] 再次确认最终状态(可能被 check_and_protect_quota 修改) - if account - .get("proxy_disabled") - .and_then(|v| v.as_bool()) - .unwrap_or(false) - { - tracing::debug!( - "Skipping proxy-disabled account file: {:?} (email={})", - path, - account - .get("email") - .and_then(|v| v.as_str()) - .unwrap_or("") - ); - return Ok(None); - } - - let account_id = account["id"].as_str() - .ok_or("缺少 id 字段")? - .to_string(); - - let email = account["email"].as_str() - .ok_or("缺少 email 字段")? - .to_string(); - - let token_obj = account["token"].as_object() - .ok_or("缺少 token 字段")?; - - let access_token = token_obj["access_token"].as_str() - .ok_or("缺少 access_token")? - .to_string(); - - let refresh_token = token_obj["refresh_token"].as_str() - .ok_or("缺少 refresh_token")? 
- .to_string(); - - let expires_in = token_obj["expires_in"].as_i64() - .ok_or("缺少 expires_in")?; - - let timestamp = token_obj["expiry_timestamp"].as_i64() - .ok_or("缺少 expiry_timestamp")?; - - // project_id 是可选的 - let project_id = token_obj - .get("project_id") - .and_then(|v| v.as_str()) - .map(|s| s.to_string()); - - // 【新增】提取订阅等级 (subscription_tier 为 "FREE" | "PRO" | "ULTRA") - let subscription_tier = account - .get("quota") - .and_then(|q| q.get("subscription_tier")) - .and_then(|v| v.as_str()) - .map(|s| s.to_string()); - - // [FIX #563] 提取最大剩余配额百分比用于优先级排序 (Option now) - let remaining_quota = account - .get("quota") - .and_then(|q| self.calculate_quota_stats(q)); - // .filter(|&r| r > 0); // 移除 >0 过滤,因为 0% 也是有效数据,只是优先级低 - - // 【新增 #621】提取受限模型列表 - let protected_models: HashSet = account - .get("protected_models") - .and_then(|v| v.as_array()) - .map(|arr| { - arr.iter() - .filter_map(|v| v.as_str()) - .map(|s| s.to_string()) - .collect() - }) - .unwrap_or_default(); - - let health_score = self.health_scores.get(&account_id).map(|v| *v).unwrap_or(1.0); - - // [NEW] 提取最近的配额刷新时间(用于排序优化:刷新时间越近优先级越高) - let reset_time = self.extract_earliest_reset_time(&account); - - // [OPTIMIZATION] 构建模型配额内存缓存,避免排序时读取磁盘 - let mut model_quotas = HashMap::new(); - if let Some(models) = account.get("quota").and_then(|q| q.get("models")).and_then(|m| m.as_array()) { - for model in models { - if let (Some(name), Some(pct)) = (model.get("name").and_then(|v| v.as_str()), model.get("percentage").and_then(|v| v.as_i64())) { - // Normalize name to standard ID - let standard_id = crate::proxy::common::model_mapping::normalize_to_standard_id(name) - .unwrap_or_else(|| name.to_string()); - model_quotas.insert(standard_id, pct as i32); - } - } - } - - Ok(Some(ProxyToken { - account_id, - access_token, - refresh_token, - expires_in, - timestamp, - email, - account_path: path.clone(), - project_id, - subscription_tier, - remaining_quota, - protected_models, - health_score, - reset_time, - 
validation_blocked: account.get("validation_blocked").and_then(|v| v.as_bool()).unwrap_or(false), - validation_blocked_until: account.get("validation_blocked_until").and_then(|v| v.as_i64()).unwrap_or(0), - model_quotas, - })) - } - - /// 检查账号是否应该被配额保护 - /// 如果配额低于阈值,自动禁用账号并返回 true - async fn check_and_protect_quota( - &self, - account_json: &mut serde_json::Value, - account_path: &PathBuf, - ) -> bool { - // 1. 加载配额保护配置 - let config = match crate::modules::config::load_app_config() { - Ok(cfg) => cfg.quota_protection, - Err(_) => return false, // 配置加载失败,跳过保护 - }; - - if !config.enabled { - return false; // 配额保护未启用 - } - - // 2. 获取配额信息 - // 注意:我们需要 clone 配额信息来遍历,避免借用冲突,但修改是针对 account_json 的 - let quota = match account_json.get("quota") { - Some(q) => q.clone(), - None => return false, // 无配额信息,跳过 - }; - - // 3. [兼容性 #621] 检查是否被旧版账号级配额保护禁用,尝试恢复并转为模型级 - let is_proxy_disabled = account_json - .get("proxy_disabled") - .and_then(|v| v.as_bool()) - .unwrap_or(false); - - let reason = account_json.get("proxy_disabled_reason") - .and_then(|v| v.as_str()) - .unwrap_or(""); - - if is_proxy_disabled && reason == "quota_protection" { - // 如果是被旧版账号级保护禁用的,尝试恢复并转为模型级 - return self - .check_and_restore_quota(account_json, account_path, "a, &config) - .await; - } - - // [修复 #1344] 不再处理其他禁用原因,让调用方负责检查手动禁用 - - // 4. 获取模型列表 - let models = match quota.get("models").and_then(|m| m.as_array()) { - Some(m) => m, - None => return false, - }; - - // 5. 
遍历受监控的模型,检查保护与恢复 - let threshold = config.threshold_percentage as i32; - - let mut changed = false; - - for model in models { - let name = model.get("name").and_then(|v| v.as_str()).unwrap_or(""); - // [FIX] 先归一化模型名,再检查是否在监控列表中 - // 这样 claude-opus-4-5-thinking 会被归一化为 claude-sonnet-4-5 进行匹配 - let standard_id = crate::proxy::common::model_mapping::normalize_to_standard_id(name) - .unwrap_or_else(|| name.to_string()); - - if !config.monitored_models.iter().any(|m| m == &standard_id) { - continue; - } - - let percentage = model - .get("percentage") - .and_then(|v| v.as_i64()) - .unwrap_or(0) as i32; - let account_id = account_json - .get("id") - .and_then(|v| v.as_str()) - .unwrap_or("unknown") - .to_string(); - - if percentage <= threshold { - // 触发保护 (Issue #621 改为模型级) - // [FIX] 使用归一化后的 standard_id 而不是原始 name - if self - .trigger_quota_protection( - account_json, - &account_id, - account_path, - percentage, - threshold, - &standard_id, - ) - .await - .unwrap_or(false) - { - changed = true; - } - } else { - // 尝试恢复 (如果之前受限) - let protected_models = account_json - .get("protected_models") - .and_then(|v| v.as_array()); - // [FIX] 使用归一化后的 standard_id 进行匹配 - let is_protected = protected_models.map_or(false, |arr| { - arr.iter().any(|m| m.as_str() == Some(&standard_id as &str)) - }); - - if is_protected { - // [FIX] 使用归一化后的 standard_id - if self - .restore_quota_protection( - account_json, - &account_id, - account_path, - &standard_id, - ) - .await - .unwrap_or(false) - { - changed = true; - } - } - } - } - - let _ = changed; // 避免 unused 警告,如果后续逻辑需要可以继续使用 - - // 我们不再因为配额原因返回 true(即不再跳过账号), - // 而是加载并在 get_token 时进行过滤。 - false - } - - /// 计算账号的最大剩余配额百分比(用于排序) - /// 返回值: Option (max_percentage) - fn calculate_quota_stats(&self, quota: &serde_json::Value) -> Option { - let models = match quota.get("models").and_then(|m| m.as_array()) { - Some(m) => m, - None => return None, - }; - - let mut max_percentage = 0; - let mut has_data = false; - - for model in models { - if let 
Some(pct) = model.get("percentage").and_then(|v| v.as_i64()) { - let pct_i32 = pct as i32; - if pct_i32 > max_percentage { - max_percentage = pct_i32; - } - has_data = true; - } - } - - if has_data { - Some(max_percentage) - } else { - None - } - } - - /// 从磁盘读取特定模型的 quota 百分比 [FIX] 排序使用目标模型的 quota 而非 max - /// - /// # 参数 - /// * `account_path` - 账号 JSON 文件路径 - /// * `model_name` - 目标模型名称(已标准化) - fn get_model_quota_from_json(account_path: &PathBuf, model_name: &str) -> Option { - let content = std::fs::read_to_string(account_path).ok()?; - let account: serde_json::Value = serde_json::from_str(&content).ok()?; - let models = account.get("quota")?.get("models")?.as_array()?; - - for model in models { - if let Some(name) = model.get("name").and_then(|v| v.as_str()) { - if crate::proxy::common::model_mapping::normalize_to_standard_id(name) - .unwrap_or_else(|| name.to_string()) - == model_name - { - return model - .get("percentage") - .and_then(|v| v.as_i64()) - .map(|p| p as i32); - } - } - } - None - } - - /// 测试辅助函数:公开访问 get_model_quota_from_json - #[cfg(test)] - pub fn get_model_quota_from_json_for_test(account_path: &PathBuf, model_name: &str) -> Option { - Self::get_model_quota_from_json(account_path, model_name) - } - - /// 触发配额保护,限制特定模型 (Issue #621) - /// 返回 true 如果发生了改变 - async fn trigger_quota_protection( - &self, - account_json: &mut serde_json::Value, - account_id: &str, - account_path: &PathBuf, - current_val: i32, - threshold: i32, - model_name: &str, - ) -> Result { - // 1. 初始化 protected_models 数组(如果不存在) - if account_json.get("protected_models").is_none() { - account_json["protected_models"] = serde_json::Value::Array(Vec::new()); - } - - let protected_models = account_json["protected_models"].as_array_mut().unwrap(); - - // 2. 
检查是否已存在 - if !protected_models - .iter() - .any(|m| m.as_str() == Some(model_name)) - { - protected_models.push(serde_json::Value::String(model_name.to_string())); - - tracing::info!( - "账号 {} 的模型 {} 因配额受限({}% <= {}%)已被加入保护列表", - account_id, - model_name, - current_val, - threshold - ); - - // 3. 写入磁盘 - std::fs::write(account_path, serde_json::to_string_pretty(account_json).unwrap()) - .map_err(|e| format!("写入文件失败: {}", e))?; - - return Ok(true); - } - - Ok(false) - } - - /// 检查并从账号级保护恢复(迁移至模型级,Issue #621) - async fn check_and_restore_quota( - &self, - account_json: &mut serde_json::Value, - account_path: &PathBuf, - quota: &serde_json::Value, - config: &crate::models::QuotaProtectionConfig, - ) -> bool { - // [兼容性] 如果该账号当前处于 proxy_disabled=true 且原因是 quota_protection, - // 我们将其 proxy_disabled 设为 false,但同时更新其 protected_models 列表。 - tracing::info!( - "正在迁移账号 {} 从全局配额保护模式至模型级保护模式", - account_json - .get("email") - .and_then(|v| v.as_str()) - .unwrap_or("unknown") - ); - - account_json["proxy_disabled"] = serde_json::Value::Bool(false); - account_json["proxy_disabled_reason"] = serde_json::Value::Null; - account_json["proxy_disabled_at"] = serde_json::Value::Null; - - let threshold = config.threshold_percentage as i32; - let mut protected_list = Vec::new(); - - if let Some(models) = quota.get("models").and_then(|m| m.as_array()) { - for model in models { - let name = model.get("name").and_then(|v| v.as_str()).unwrap_or(""); - if !config.monitored_models.iter().any(|m| m == name) { continue; } - - let percentage = model.get("percentage").and_then(|v| v.as_i64()).unwrap_or(0) as i32; - if percentage <= threshold { - protected_list.push(serde_json::Value::String(name.to_string())); - } - } - } - - account_json["protected_models"] = serde_json::Value::Array(protected_list); - - let _ = std::fs::write(account_path, serde_json::to_string_pretty(account_json).unwrap()); - - false // 返回 false 表示现在已可以尝试加载该账号(模型级过滤会在 get_token 时发生) - } - - /// 恢复特定模型的配额保护 (Issue #621) - /// 返回 
true 如果发生了改变 - async fn restore_quota_protection( - &self, - account_json: &mut serde_json::Value, - account_id: &str, - account_path: &PathBuf, - model_name: &str, - ) -> Result { - if let Some(arr) = account_json - .get_mut("protected_models") - .and_then(|v| v.as_array_mut()) - { - let original_len = arr.len(); - arr.retain(|m| m.as_str() != Some(model_name)); - - if arr.len() < original_len { - tracing::info!( - "账号 {} 的模型 {} 配额已恢复,移出保护列表", - account_id, - model_name - ); - std::fs::write( - account_path, - serde_json::to_string_pretty(account_json).unwrap(), - ) - .map_err(|e| format!("写入文件失败: {}", e))?; - return Ok(true); - } - } - - Ok(false) - } - - /// P2C 算法的候选池大小 - 从前 N 个最优候选中随机选择 - const P2C_POOL_SIZE: usize = 5; - - /// Power of 2 Choices (P2C) 选择算法 - /// 从前 5 个候选中随机选 2 个,选择配额更高的 -> 避免热点 - /// 返回选中的索引 - /// - /// # 参数 - /// * `candidates` - 已排序的候选 token 列表 - /// * `attempted` - 已尝试失败的账号 ID 集合 - /// * `normalized_target` - 归一化后的目标模型名 - /// * `quota_protection_enabled` - 是否启用配额保护 - fn select_with_p2c<'a>( - &self, - candidates: &'a [ProxyToken], - attempted: &HashSet, - normalized_target: &str, - quota_protection_enabled: bool, - ) -> Option<&'a ProxyToken> { - use rand::Rng; - - // 过滤可用 token - let available: Vec<&ProxyToken> = candidates.iter() - .filter(|t| !attempted.contains(&t.account_id)) - .filter(|t| !quota_protection_enabled || !t.protected_models.contains(normalized_target)) - .collect(); - - if available.is_empty() { return None; } - if available.len() == 1 { return Some(available[0]); } - - // P2C: 从前 min(P2C_POOL_SIZE, len) 个中随机选 2 个 - let pool_size = available.len().min(Self::P2C_POOL_SIZE); - let mut rng = rand::thread_rng(); - - let pick1 = rng.gen_range(0..pool_size); - let pick2 = rng.gen_range(0..pool_size); - // 确保选择不同的两个候选 - let pick2 = if pick2 == pick1 { - (pick1 + 1) % pool_size - } else { - pick2 - }; - - let c1 = available[pick1]; - let c2 = available[pick2]; - - // 选择配额更高的 - let selected = if c1.remaining_quota.unwrap_or(0) >= 
c2.remaining_quota.unwrap_or(0) { - c1 - } else { - c2 - }; - - tracing::debug!( - "🎲 [P2C] Selected {} ({}%) from [{}({}%), {}({}%)]", - selected.email, selected.remaining_quota.unwrap_or(0), - c1.email, c1.remaining_quota.unwrap_or(0), - c2.email, c2.remaining_quota.unwrap_or(0) - ); - - Some(selected) - } - - /// 先发送取消信号,再带超时等待任务完成 - /// - /// # 参数 - /// * `timeout` - 等待任务完成的超时时间 - pub async fn graceful_shutdown(&self, timeout: std::time::Duration) { - tracing::info!("Initiating graceful shutdown of background tasks..."); - - // 发送取消信号给所有后台任务 - self.cancel_token.cancel(); - - // 带超时等待任务完成 - match tokio::time::timeout(timeout, self.abort_background_tasks()).await { - Ok(_) => tracing::info!("All background tasks cleaned up gracefully"), - Err(_) => tracing::warn!("Graceful cleanup timed out after {:?}, tasks were force-aborted", timeout), - } - } - - /// 中止并等待所有后台任务完成 - /// abort() 仅设置取消标志,必须 await 确认清理完成 - pub async fn abort_background_tasks(&self) { - Self::abort_task(&self.auto_cleanup_handle, "Auto-cleanup task").await; - } - - /// 中止单个后台任务并记录结果 - /// - /// # 参数 - /// * `handle` - 任务句柄的 Mutex 引用 - /// * `task_name` - 任务名称(用于日志) - async fn abort_task( - handle: &tokio::sync::Mutex>>, - task_name: &str, - ) { - let Some(handle) = handle.lock().await.take() else { - return; - }; - - handle.abort(); - match handle.await { - Ok(()) => tracing::debug!("{} completed", task_name), - Err(e) if e.is_cancelled() => tracing::info!("{} aborted", task_name), - Err(e) => tracing::warn!("{} error: {}", task_name, e), - } - } - - /// 获取当前可用的 Token(支持粘性会话与智能调度) - /// 参数 `quota_group` 用于区分 "claude" vs "gemini" 组 - /// 参数 `force_rotate` 为 true 时将忽略锁定,强制切换账号 - /// 参数 `session_id` 用于跨请求维持会话粘性 - /// 参数 `target_model` 用于检查配额保护 (Issue #621) - pub async fn get_token( - &self, - quota_group: &str, - force_rotate: bool, - session_id: Option<&str>, - target_model: &str, - ) -> Result<(String, String, String, String, u64), String> { - // [FIX] 检查并处理待重新加载的账号(配额保护同步) - let pending_reload = 
crate::proxy::server::take_pending_reload_accounts(); - for account_id in pending_reload { - if let Err(e) = self.reload_account(&account_id).await { - tracing::warn!("[Quota] Failed to reload account {}: {}", account_id, e); - } else { - tracing::info!( - "[Quota] Reloaded account {} (protected_models synced)", - account_id - ); - } - } - - // [FIX #1477] 检查并处理待删除的账号(彻底清理缓存) - let pending_delete = crate::proxy::server::take_pending_delete_accounts(); - for account_id in pending_delete { - self.remove_account(&account_id); - tracing::info!( - "[Proxy] Purged deleted account {} from all caches", - account_id - ); - } - - // 【优化 Issue #284】添加 5 秒超时,防止死锁 - let timeout_duration = std::time::Duration::from_secs(5); - match tokio::time::timeout( - timeout_duration, - self.get_token_internal(quota_group, force_rotate, session_id, target_model), - ) - .await - { - Ok(result) => result, - Err(_) => Err( - "Token acquisition timeout (5s) - system too busy or deadlock detected".to_string(), - ), - } - } - - /// 内部实现:获取 Token 的核心逻辑 - async fn get_token_internal( - &self, - quota_group: &str, - force_rotate: bool, - session_id: Option<&str>, - target_model: &str, - ) -> Result<(String, String, String, String, u64), String> { - let mut tokens_snapshot: Vec = - self.tokens.iter().map(|e| e.value().clone()).collect(); - let mut total = tokens_snapshot.len(); - if total == 0 { - return Err("Token pool is empty".to_string()); - } - - // ===== 【优化】Quota-First 排序: 保护低配额账号,均衡使用 ===== - // 优先级: 目标模型配额 > 健康分 > 订阅等级 > 刷新时间 - // -> 高配额账号优先被选中,避免 PRO/ULTRA 先用完丢失5小时刷新周期 - // [FIX] 使用目标模型的 quota 而非 max(所有模型) - const RESET_TIME_THRESHOLD_SECS: i64 = 600; // 10 分钟阈值,差异小于此值视为相同 - - let normalized_target = - crate::proxy::common::model_mapping::normalize_to_standard_id(target_model) - .unwrap_or_else(|| target_model.to_string()); - - tokens_snapshot.sort_by(|a, b| { - // Priority 1: 目标模型的 quota (higher is better) -> 保护低配额账号 - // [OPTIMIZATION] 使用内存缓存,不再读取磁盘 IO - let quota_a = 
a.model_quotas.get(&normalized_target).copied() - .unwrap_or(a.remaining_quota.unwrap_or(0)); - let quota_b = b.model_quotas.get(&normalized_target).copied() - .unwrap_or(b.remaining_quota.unwrap_or(0)); - - let quota_cmp = quota_b.cmp("a_a); - if quota_cmp != std::cmp::Ordering::Equal { - return quota_cmp; - } - - // Priority 2: Health score (higher is better) - let health_cmp = b.health_score.partial_cmp(&a.health_score) - .unwrap_or(std::cmp::Ordering::Equal); - if health_cmp != std::cmp::Ordering::Equal { - return health_cmp; - } - - // Priority 3: Subscription tier (ULTRA > PRO > FREE) -> 平局时高级账号优先 - let tier_priority = |tier: &Option| { - let t = tier.as_deref().unwrap_or("").to_lowercase(); - if t.contains("ultra") { 0 } - else if t.contains("pro") { 1 } - else if t.contains("free") { 2 } - else { 3 } - }; - let tier_cmp = tier_priority(&a.subscription_tier) - .cmp(&tier_priority(&b.subscription_tier)); - if tier_cmp != std::cmp::Ordering::Equal { - return tier_cmp; - } - - // Priority 4: Reset time (earlier is better, but only if diff > 10 min) - let reset_a = a.reset_time.unwrap_or(i64::MAX); - let reset_b = b.reset_time.unwrap_or(i64::MAX); - if (reset_a - reset_b).abs() >= RESET_TIME_THRESHOLD_SECS { - reset_a.cmp(&reset_b) - } else { - std::cmp::Ordering::Equal - } - }); - - // 【调试日志】打印排序后的账号顺序(显示目标模型的 quota) - tracing::debug!( - "🔄 [Token Rotation] target={} Accounts: {:?}", - normalized_target, - tokens_snapshot.iter().map(|t| format!( - "{}(quota={}%, reset={:?}, health={:.2})", - t.email, - t.model_quotas.get(&normalized_target).copied().unwrap_or(0), - t.reset_time.map(|ts| { - let now = chrono::Utc::now().timestamp(); - let diff_secs = ts - now; - if diff_secs > 0 { - format!("{}m", diff_secs / 60) - } else { - "now".to_string() - } - }), - t.health_score - )).collect::>() - ); - - // 0. 
读取当前调度配置 - let scheduling = self.sticky_config.read().await.clone(); - use crate::proxy::sticky_config::SchedulingMode; - - // 【新增】检查配额保护是否启用(如果关闭,则忽略 protected_models 检查) - let quota_protection_enabled = crate::modules::config::load_app_config() - .map(|cfg| cfg.quota_protection.enabled) - .unwrap_or(false); - - // ===== [FIX #820] 固定账号模式:优先使用指定账号 ===== - let preferred_id = self.preferred_account_id.read().await.clone(); - if let Some(ref pref_id) = preferred_id { - // 查找优先账号 - if let Some(preferred_token) = tokens_snapshot - .iter() - .find(|t| &t.account_id == pref_id) - .cloned() - { - // 检查账号是否可用(未限流、未被配额保护) - match Self::get_account_state_on_disk(&preferred_token.account_path).await { - OnDiskAccountState::Disabled => { - tracing::warn!( - "🔒 [FIX #820] Preferred account {} is disabled on disk, purging and falling back", - preferred_token.email - ); - self.remove_account(&preferred_token.account_id); - tokens_snapshot.retain(|t| t.account_id != preferred_token.account_id); - total = tokens_snapshot.len(); - - { - let mut preferred = self.preferred_account_id.write().await; - if preferred.as_deref() == Some(pref_id.as_str()) { - *preferred = None; - } - } - - if total == 0 { - return Err("Token pool is empty".to_string()); - } - } - OnDiskAccountState::Unknown => { - tracing::warn!( - "🔒 [FIX #820] Preferred account {} state on disk is unavailable, falling back", - preferred_token.email - ); - // Don't purge on transient read/parse failures; just skip this token for this request. 
- tokens_snapshot.retain(|t| t.account_id != preferred_token.account_id); - total = tokens_snapshot.len(); - if total == 0 { - return Err("Token pool is empty".to_string()); - } - } - OnDiskAccountState::Enabled => { - let normalized_target = - crate::proxy::common::model_mapping::normalize_to_standard_id( - target_model, - ) - .unwrap_or_else(|| target_model.to_string()); - - let is_rate_limited = self - .is_rate_limited(&preferred_token.account_id, Some(&normalized_target)) - .await; - let is_quota_protected = quota_protection_enabled - && preferred_token - .protected_models - .contains(&normalized_target); - - if !is_rate_limited && !is_quota_protected { - tracing::info!( - "🔒 [FIX #820] Using preferred account: {} (fixed mode)", - preferred_token.email - ); - - // 直接使用优先账号,跳过轮询逻辑 - let mut token = preferred_token.clone(); - - // 检查 token 是否过期(提前5分钟刷新) - let now = chrono::Utc::now().timestamp(); - if now >= token.timestamp - 300 { - tracing::debug!("账号 {} 的 token 即将过期,正在刷新...", token.email); - match crate::modules::oauth::refresh_access_token(&token.refresh_token, Some(&token.account_id)) - .await - { - Ok(token_response) => { - token.access_token = token_response.access_token.clone(); - token.expires_in = token_response.expires_in; - token.timestamp = now + token_response.expires_in; - - if let Some(mut entry) = self.tokens.get_mut(&token.account_id) { - entry.access_token = token.access_token.clone(); - entry.expires_in = token.expires_in; - entry.timestamp = token.timestamp; - } - let _ = self - .save_refreshed_token(&token.account_id, &token_response) - .await; - } - Err(e) => { - tracing::warn!("Preferred account token refresh failed: {}", e); - // 继续使用旧 token,让后续逻辑处理失败 - } - } - } - - // 确保有 project_id - let project_id = if let Some(pid) = &token.project_id { - pid.clone() - } else { - match crate::proxy::project_resolver::fetch_project_id(&token.access_token) - .await - { - Ok(pid) => { - if let Some(mut entry) = self.tokens.get_mut(&token.account_id) { - 
entry.project_id = Some(pid.clone()); - } - let _ = self.save_project_id(&token.account_id, &pid).await; - pid - } - Err(_) => "bamboo-precept-lgxtn".to_string(), // fallback - } - }; - - return Ok((token.access_token, project_id, token.email, token.account_id, 0)); - } else { - if is_rate_limited { - tracing::warn!("🔒 [FIX #820] Preferred account {} is rate-limited, falling back to round-robin", preferred_token.email); - } else { - tracing::warn!("🔒 [FIX #820] Preferred account {} is quota-protected for {}, falling back to round-robin", preferred_token.email, target_model); - } - } - } - } - } else { - tracing::warn!("🔒 [FIX #820] Preferred account {} not found in pool, falling back to round-robin", pref_id); - } - } - // ===== [END FIX #820] ===== - - // 【优化 Issue #284】将锁操作移到循环外,避免重复获取锁 - // 预先获取 last_used_account 的快照,避免在循环中多次加锁 - let last_used_account_id = if quota_group != "image_gen" { - let last_used = self.last_used_account.lock().await; - last_used.clone() - } else { - None - }; - - let mut attempted: HashSet = HashSet::new(); - let mut last_error: Option = None; - let mut need_update_last_used: Option<(String, std::time::Instant)> = None; - - for attempt in 0..total { - let rotate = force_rotate || attempt > 0; - - // ===== 【核心】粘性会话与智能调度逻辑 ===== - let mut target_token: Option = None; - - // 归一化目标模型名为标准 ID,用于配额保护检查 - let normalized_target = crate::proxy::common::model_mapping::normalize_to_standard_id(target_model) - .unwrap_or_else(|| target_model.to_string()); - - // 模式 A: 粘性会话处理 (CacheFirst 或 Balance 且有 session_id) - if !rotate - && session_id.is_some() - && scheduling.mode != SchedulingMode::PerformanceFirst - { - let sid = session_id.unwrap(); - - // 1. 检查会话是否已绑定账号 - if let Some(bound_id) = self.session_accounts.get(sid).map(|v| v.clone()) { - // 【修复】先通过 account_id 找到对应的账号,获取其 email - // 2. 
转换 email -> account_id 检查绑定的账号是否限流 - if let Some(bound_token) = - tokens_snapshot.iter().find(|t| t.account_id == bound_id) - { - let key = self - .email_to_account_id(&bound_token.email) - .unwrap_or_else(|| bound_token.account_id.clone()); - // [FIX] Pass None for specific model wait time if not applicable - let reset_sec = self.rate_limit_tracker.get_remaining_wait(&key, None); - if reset_sec > 0 { - // 【修复 Issue #284】立即解绑并切换账号,不再阻塞等待 - // 原因:阻塞等待会导致并发请求时客户端 socket 超时 (UND_ERR_SOCKET) - tracing::debug!( - "Sticky Session: Bound account {} is rate-limited ({}s), unbinding and switching.", - bound_token.email, reset_sec - ); - self.session_accounts.remove(sid); - } else if !attempted.contains(&bound_id) - && !(quota_protection_enabled - && bound_token.protected_models.contains(&normalized_target)) - { - // 3. 账号可用且未被标记为尝试失败,优先复用 - tracing::debug!("Sticky Session: Successfully reusing bound account {} for session {}", bound_token.email, sid); - target_token = Some(bound_token.clone()); - } else if quota_protection_enabled - && bound_token.protected_models.contains(&normalized_target) - { - tracing::debug!("Sticky Session: Bound account {} is quota-protected for model {} [{}], unbinding and switching.", bound_token.email, normalized_target, target_model); - self.session_accounts.remove(sid); - } - } else { - // 绑定的账号已不存在(可能被删除),解绑 - tracing::debug!( - "Sticky Session: Bound account not found for session {}, unbinding", - sid - ); - self.session_accounts.remove(sid); - } - } - } - - // 模式 B: 原子化 60s 全局锁定 (针对无 session_id 情况的默认保护) - // 【修复】性能优先模式应跳过 60s 锁定; - if target_token.is_none() - && !rotate - && quota_group != "image_gen" - && scheduling.mode != SchedulingMode::PerformanceFirst - { - // 【优化】使用预先获取的快照,不再在循环内加锁 - if let Some((account_id, last_time)) = &last_used_account_id { - // [FIX #3] 60s 锁定逻辑应检查 `attempted` 集合,避免重复尝试失败的账号 - if last_time.elapsed().as_secs() < 60 && !attempted.contains(account_id) { - if let Some(found) = - tokens_snapshot.iter().find(|t| 
&t.account_id == account_id) - { - // 【修复】检查限流状态和配额保护,避免复用已被锁定的账号 - if !self - .is_rate_limited(&found.account_id, Some(&normalized_target)) - .await - && !(quota_protection_enabled - && found.protected_models.contains(&normalized_target)) - { - tracing::debug!( - "60s Window: Force reusing last account: {}", - found.email - ); - target_token = Some(found.clone()); - } else { - if self - .is_rate_limited(&found.account_id, Some(&normalized_target)) - .await - { - tracing::debug!( - "60s Window: Last account {} is rate-limited, skipping", - found.email - ); - } else { - tracing::debug!("60s Window: Last account {} is quota-protected for model {} [{}], skipping", found.email, normalized_target, target_model); - } - } - } - } - } - - // 若无锁定,则使用 P2C 选择账号 (避免热点问题) - if target_token.is_none() { - // 先过滤出未限流的账号 - let mut non_limited: Vec = Vec::new(); - for t in &tokens_snapshot { - if !self.is_rate_limited(&t.account_id, Some(&normalized_target)).await { - non_limited.push(t.clone()); - } - } - - if let Some(selected) = self.select_with_p2c( - &non_limited, &attempted, &normalized_target, quota_protection_enabled - ) { - target_token = Some(selected.clone()); - need_update_last_used = Some((selected.account_id.clone(), std::time::Instant::now())); - - // 如果是会话首次分配且需要粘性,在此建立绑定 - if let Some(sid) = session_id { - if scheduling.mode != SchedulingMode::PerformanceFirst { - self.session_accounts - .insert(sid.to_string(), selected.account_id.clone()); - tracing::debug!( - "Sticky Session: Bound new account {} to session {}", - selected.email, - sid - ); - } - } - } - } - } else if target_token.is_none() { - // 模式 C: P2C 选择 (替代纯轮询) - tracing::debug!( - "🔄 [Mode C] P2C selection from {} candidates", - total - ); - - // 先过滤出未限流的账号 - let mut non_limited: Vec = Vec::new(); - for t in &tokens_snapshot { - if !self.is_rate_limited(&t.account_id, Some(&normalized_target)).await { - non_limited.push(t.clone()); - } - } - - if let Some(selected) = self.select_with_p2c( - &non_limited, 
&attempted, &normalized_target, quota_protection_enabled - ) { - tracing::debug!(" {} - SELECTED via P2C", selected.email); - target_token = Some(selected.clone()); - - if rotate { - tracing::debug!("Force Rotation: Switched to account: {}", selected.email); - } - } - } - - let mut token = match target_token { - Some(t) => t, - None => { - let mut wait_ms = 0; - // 乐观重置策略: 双层防护机制 - // 计算最短等待时间 - let min_wait = tokens_snapshot - .iter() - .filter_map(|t| self.rate_limit_tracker.get_reset_seconds(&t.account_id)) - .min(); - - // Layer 1: 如果最短等待时间 <= 2秒,执行缓冲延迟 - if let Some(wait_sec) = min_wait { - if wait_sec <= 2 { - wait_ms = (wait_sec as f64 * 1000.0) as u64; - tracing::warn!( - "All accounts rate-limited but shortest wait is {}s. Applying {}ms buffer for state sync...", - wait_sec, wait_ms - ); - - // 缓冲延迟 - tokio::time::sleep(tokio::time::Duration::from_millis(wait_ms)).await; - - // 重新尝试选择账号 - let retry_token = tokens_snapshot.iter() - .find(|t| !attempted.contains(&t.account_id) && !self.is_rate_limited_sync(&t.account_id, None)); - - if let Some(t) = retry_token { - tracing::info!( - "✅ Buffer delay successful! Found available account: {}", - t.email - ); - t.clone() - } else { - // Layer 2: 缓冲后仍无可用账号,执行乐观重置 - tracing::warn!( - "Buffer delay failed. Executing optimistic reset for all {} accounts...", - tokens_snapshot.len() - ); - - // 清除所有限流记录 - self.rate_limit_tracker.clear_all(); - - // 再次尝试选择账号 - let final_token = tokens_snapshot - .iter() - .find(|t| !attempted.contains(&t.account_id)); - - if let Some(t) = final_token { - tracing::info!( - "✅ Optimistic reset successful! Using account: {}", - t.email - ); - t.clone() - } else { - return Err( - "All accounts failed after optimistic reset.".to_string() - ); - } - } - } else { - return Err(format!("All accounts limited. 
Wait {}s.", wait_sec)); - } - } else { - return Err("All accounts failed or unhealthy.".to_string()); - } - } - }; - - // Safety net: avoid selecting an account that has been disabled on disk but still - // exists in the in-memory snapshot (e.g. stale cache + sticky session binding). - match Self::get_account_state_on_disk(&token.account_path).await { - OnDiskAccountState::Disabled => { - tracing::warn!( - "Selected account {} is disabled on disk, purging and retrying", - token.email - ); - attempted.insert(token.account_id.clone()); - self.remove_account(&token.account_id); - continue; - } - OnDiskAccountState::Unknown => { - tracing::warn!( - "Selected account {} state on disk is unavailable, skipping", - token.email - ); - attempted.insert(token.account_id.clone()); - continue; - } - OnDiskAccountState::Enabled => {} - } - - // 3. 检查 token 是否过期(提前5分钟刷新) - let now = chrono::Utc::now().timestamp(); - if now >= token.timestamp - 300 { - tracing::debug!("账号 {} 的 token 即将过期,正在刷新...", token.email); - - // 调用 OAuth 刷新 token - match crate::modules::oauth::refresh_access_token(&token.refresh_token, Some(&token.account_id)).await { - Ok(token_response) => { - tracing::debug!("Token 刷新成功!"); - - // 更新本地内存对象供后续使用 - token.access_token = token_response.access_token.clone(); - token.expires_in = token_response.expires_in; - token.timestamp = now + token_response.expires_in; - - // 同步更新跨线程共享的 DashMap - if let Some(mut entry) = self.tokens.get_mut(&token.account_id) { - entry.access_token = token.access_token.clone(); - entry.expires_in = token.expires_in; - entry.timestamp = token.timestamp; - } - - // 同步落盘(避免重启后继续使用过期 timestamp 导致频繁刷新) - if let Err(e) = self - .save_refreshed_token(&token.account_id, &token_response) - .await - { - tracing::debug!("保存刷新后的 token 失败 ({}): {}", token.email, e); - } - } - Err(e) => { - tracing::error!("Token 刷新失败 ({}): {},尝试下一个账号", token.email, e); - if e.contains("\"invalid_grant\"") || e.contains("invalid_grant") { - tracing::error!( - 
"Disabling account due to invalid_grant ({}): refresh_token likely revoked/expired", - token.email - ); - let _ = self - .disable_account( - &token.account_id, - &format!("invalid_grant: {}", e), - ) - .await; - self.tokens.remove(&token.account_id); - } - // Avoid leaking account emails to API clients; details are still in logs. - last_error = Some(format!("Token refresh failed: {}", e)); - attempted.insert(token.account_id.clone()); - - // 【优化】标记需要清除锁定,避免在循环内加锁 - if quota_group != "image_gen" { - if matches!(&last_used_account_id, Some((id, _)) if id == &token.account_id) - { - need_update_last_used = - Some((String::new(), std::time::Instant::now())); - // 空字符串表示需要清除 - } - } - continue; - } - } - } - - // 4. 确保有 project_id - let project_id = if let Some(pid) = &token.project_id { - pid.clone() - } else { - tracing::debug!("账号 {} 缺少 project_id,尝试获取...", token.email); - match crate::proxy::project_resolver::fetch_project_id(&token.access_token).await { - Ok(pid) => { - if let Some(mut entry) = self.tokens.get_mut(&token.account_id) { - entry.project_id = Some(pid.clone()); - } - let _ = self.save_project_id(&token.account_id, &pid).await; - pid - } - Err(e) => { - tracing::error!("Failed to fetch project_id for {}: {}", token.email, e); - last_error = Some(format!( - "Failed to fetch project_id for {}: {}", - token.email, e - )); - attempted.insert(token.account_id.clone()); - - // 【优化】标记需要清除锁定,避免在循环内加锁 - if quota_group != "image_gen" { - if matches!(&last_used_account_id, Some((id, _)) if id == &token.account_id) - { - need_update_last_used = - Some((String::new(), std::time::Instant::now())); - // 空字符串表示需要清除 - } - } - continue; - } - } - }; - - // 【优化】在成功返回前,统一更新 last_used_account(如果需要) - if let Some((new_account_id, new_time)) = need_update_last_used { - if quota_group != "image_gen" { - let mut last_used = self.last_used_account.lock().await; - if new_account_id.is_empty() { - // 空字符串表示需要清除锁定 - *last_used = None; - } else { - *last_used = Some((new_account_id, 
new_time)); - } - } - } - - return Ok((token.access_token, project_id, token.email, token.account_id, 0)); - } - - Err(last_error.unwrap_or_else(|| "All accounts failed".to_string())) - } - - async fn disable_account(&self, account_id: &str, reason: &str) -> Result<(), String> { - let path = if let Some(entry) = self.tokens.get(account_id) { - entry.account_path.clone() - } else { - self.data_dir - .join("accounts") - .join(format!("{}.json", account_id)) - }; - - let mut content: serde_json::Value = serde_json::from_str( - &std::fs::read_to_string(&path).map_err(|e| format!("读取文件失败: {}", e))?, - ) - .map_err(|e| format!("解析 JSON 失败: {}", e))?; - - let now = chrono::Utc::now().timestamp(); - content["disabled"] = serde_json::Value::Bool(true); - content["disabled_at"] = serde_json::Value::Number(now.into()); - content["disabled_reason"] = serde_json::Value::String(truncate_reason(reason, 800)); - - std::fs::write(&path, serde_json::to_string_pretty(&content).unwrap()) - .map_err(|e| format!("写入文件失败: {}", e))?; - - // 【修复 Issue #3】从内存中移除禁用的账号,防止被60s锁定逻辑继续使用 - self.tokens.remove(account_id); - - tracing::warn!("Account disabled: {} ({:?})", account_id, path); - Ok(()) - } - - /// 保存 project_id 到账号文件 - async fn save_project_id(&self, account_id: &str, project_id: &str) -> Result<(), String> { - let entry = self.tokens.get(account_id) - .ok_or("账号不存在")?; - - let path = &entry.account_path; - - let mut content: serde_json::Value = serde_json::from_str( - &std::fs::read_to_string(path).map_err(|e| format!("读取文件失败: {}", e))? 
- ).map_err(|e| format!("解析 JSON 失败: {}", e))?; - - content["token"]["project_id"] = serde_json::Value::String(project_id.to_string()); - - std::fs::write(path, serde_json::to_string_pretty(&content).unwrap()) - .map_err(|e| format!("写入文件失败: {}", e))?; - - tracing::debug!("已保存 project_id 到账号 {}", account_id); - Ok(()) - } - - /// 保存刷新后的 token 到账号文件 - async fn save_refreshed_token(&self, account_id: &str, token_response: &crate::modules::oauth::TokenResponse) -> Result<(), String> { - let entry = self.tokens.get(account_id) - .ok_or("账号不存在")?; - - let path = &entry.account_path; - - let mut content: serde_json::Value = serde_json::from_str( - &std::fs::read_to_string(path).map_err(|e| format!("读取文件失败: {}", e))? - ).map_err(|e| format!("解析 JSON 失败: {}", e))?; - - let now = chrono::Utc::now().timestamp(); - - content["token"]["access_token"] = serde_json::Value::String(token_response.access_token.clone()); - content["token"]["expires_in"] = serde_json::Value::Number(token_response.expires_in.into()); - content["token"]["expiry_timestamp"] = serde_json::Value::Number((now + token_response.expires_in).into()); - - std::fs::write(path, serde_json::to_string_pretty(&content).unwrap()) - .map_err(|e| format!("写入文件失败: {}", e))?; - - tracing::debug!("已保存刷新后的 token 到账号 {}", account_id); - Ok(()) - } - - pub fn len(&self) -> usize { - self.tokens.len() - } - - /// 通过 email 获取指定账号的 Token(用于预热等需要指定账号的场景) - /// 此方法会自动刷新过期的 token - pub async fn get_token_by_email( - &self, - email: &str, - ) -> Result<(String, String, String, String, u64), String> { - // 查找账号信息 - let token_info = { - let mut found = None; - for entry in self.tokens.iter() { - let token = entry.value(); - if token.email == email { - found = Some(( - token.account_id.clone(), - token.access_token.clone(), - token.refresh_token.clone(), - token.timestamp, - token.expires_in, - chrono::Utc::now().timestamp(), - token.project_id.clone(), - )); - break; - } - } - found - }; - - let ( - account_id, - 
current_access_token, - refresh_token, - timestamp, - expires_in, - now, - project_id_opt, - ) = match token_info { - Some(info) => info, - None => return Err(format!("未找到账号: {}", email)), - }; - - let project_id = project_id_opt.unwrap_or_else(|| "bamboo-precept-lgxtn".to_string()); - - // 检查是否过期 (提前5分钟) - if now < timestamp + expires_in - 300 { - return Ok((current_access_token, project_id, email.to_string(), account_id, 0)); - } - - tracing::info!("[Warmup] Token for {} is expiring, refreshing...", email); - - // 调用 OAuth 刷新 token - match crate::modules::oauth::refresh_access_token(&refresh_token, Some(&account_id)).await { - Ok(token_response) => { - tracing::info!("[Warmup] Token refresh successful for {}", email); - let new_now = chrono::Utc::now().timestamp(); - - // 更新缓存 - if let Some(mut entry) = self.tokens.get_mut(&account_id) { - entry.access_token = token_response.access_token.clone(); - entry.expires_in = token_response.expires_in; - entry.timestamp = new_now; - } - - // 保存到磁盘 - let _ = self - .save_refreshed_token(&account_id, &token_response) - .await; - - Ok(( - token_response.access_token, - project_id, - email.to_string(), - account_id, - 0, - )) - } - Err(e) => Err(format!( - "[Warmup] Token refresh failed for {}: {}", - email, e - )), - } - } - - // ===== 限流管理方法 ===== - - /// 标记账号限流(从外部调用,通常在 handler 中) - /// 参数为 email,内部会自动转换为 account_id - pub async fn mark_rate_limited( - &self, - email: &str, - status: u16, - retry_after_header: Option<&str>, - error_body: &str, - ) { - // [NEW] 检查熔断是否启用 (使用内存缓存,极快) - let config = self.circuit_breaker_config.read().await.clone(); - if !config.enabled { - return; - } - - // 【替代方案】转换 email -> account_id - let key = self.email_to_account_id(email).unwrap_or_else(|| email.to_string()); - - self.rate_limit_tracker.parse_from_error( - &key, - status, - retry_after_header, - error_body, - None, - &config.backoff_steps, // [NEW] 传入配置 - ); - } - - /// 检查账号是否在限流中 (支持模型级) - pub async fn is_rate_limited(&self, 
account_id: &str, model: Option<&str>) -> bool { - // [NEW] 检查熔断是否启用 - let config = self.circuit_breaker_config.read().await; - if !config.enabled { - return false; - } - self.rate_limit_tracker.is_rate_limited(account_id, model) - } - - /// [NEW] 检查账号是否在限流中 (同步版本,仅用于 Iterator) - pub fn is_rate_limited_sync(&self, account_id: &str, model: Option<&str>) -> bool { - // 同步版本无法读取 async RwLock,这里使用 blocking_read - let config = self.circuit_breaker_config.blocking_read(); - if !config.enabled { - return false; - } - self.rate_limit_tracker.is_rate_limited(account_id, model) - } - - /// 获取距离限流重置还有多少秒 - #[allow(dead_code)] - pub fn get_rate_limit_reset_seconds(&self, account_id: &str) -> Option { - self.rate_limit_tracker.get_reset_seconds(account_id) - } - - /// 清除过期的限流记录 - #[allow(dead_code)] - pub fn clean_expired_rate_limits(&self) { - self.rate_limit_tracker.cleanup_expired(); - } - - /// 【替代方案】通过 email 查找对应的 account_id - /// 用于将 handlers 传入的 email 转换为 tracker 使用的 account_id - fn email_to_account_id(&self, email: &str) -> Option { - self.tokens - .iter() - .find(|entry| entry.value().email == email) - .map(|entry| entry.value().account_id.clone()) - } - - /// 清除指定账号的限流记录 - pub fn clear_rate_limit(&self, account_id: &str) -> bool { - self.rate_limit_tracker.clear(account_id) - } - - /// 清除所有限流记录 - pub fn clear_all_rate_limits(&self) { - self.rate_limit_tracker.clear_all(); - } - - /// 标记账号请求成功,重置连续失败计数 - /// - /// 在请求成功完成后调用,将该账号的失败计数归零, - /// 下次失败时从最短的锁定时间开始(智能限流)。 - pub fn mark_account_success(&self, account_id: &str) { - self.rate_limit_tracker.mark_success(account_id); - } - - /// 检查是否有可用的 Google 账号 - /// - /// 用于"仅兜底"模式的智能判断:当所有 Google 账号不可用时才使用外部提供商。 - /// - /// # 参数 - /// - `quota_group`: 配额组("claude" 或 "gemini"),暂未使用但保留用于未来扩展 - /// - `target_model`: 目标模型名称(已归一化),用于配额保护检查 - /// - /// # 返回值 - /// - `true`: 至少有一个可用账号(未限流且未被配额保护) - /// - `false`: 所有账号都不可用(被限流或被配额保护) - /// - /// # 示例 - /// ```ignore - /// // 检查是否有可用账号处理 claude-sonnet 请求 - /// let has_available = 
token_manager.has_available_account("claude", "claude-sonnet-4-20250514").await; - /// if !has_available { - /// // 切换到外部提供商 - /// } - /// ``` - pub async fn has_available_account(&self, _quota_group: &str, target_model: &str) -> bool { - // 检查配额保护是否启用 - let quota_protection_enabled = crate::modules::config::load_app_config() - .map(|cfg| cfg.quota_protection.enabled) - .unwrap_or(false); - - // 遍历所有账号,检查是否有可用的 - for entry in self.tokens.iter() { - let token = entry.value(); - - // 1. 检查是否被限流 - if self.is_rate_limited(&token.account_id, None).await { - tracing::debug!( - "[Fallback Check] Account {} is rate-limited, skipping", - token.email - ); - continue; - } - - // 2. 检查是否被配额保护(如果启用) - if quota_protection_enabled && token.protected_models.contains(target_model) { - tracing::debug!( - "[Fallback Check] Account {} is quota-protected for model {}, skipping", - token.email, - target_model - ); - continue; - } - - // 找到至少一个可用账号 - tracing::debug!( - "[Fallback Check] Found available account: {} for model {}", - token.email, - target_model - ); - return true; - } - - // 所有账号都不可用 - tracing::info!( - "[Fallback Check] No available Google accounts for model {}, fallback should be triggered", - target_model - ); - false - } - - /// 从账号文件获取配额刷新时间 - /// - /// 返回该账号最近的配额刷新时间字符串(ISO 8601 格式) - /// - /// # 参数 - /// - `account_id`: 账号 ID(用于查找账号文件) - pub fn get_quota_reset_time(&self, account_id: &str) -> Option { - // 直接用 account_id 查找账号文件(文件名是 {account_id}.json) - let account_path = self.data_dir.join("accounts").join(format!("{}.json", account_id)); - - let content = std::fs::read_to_string(&account_path).ok()?; - let account: serde_json::Value = serde_json::from_str(&content).ok()?; - - // 获取 quota.models 中最早的 reset_time(最保守的锁定策略) - account - .get("quota") - .and_then(|q| q.get("models")) - .and_then(|m| m.as_array()) - .and_then(|models| { - models.iter() - .filter_map(|m| m.get("reset_time").and_then(|r| r.as_str())) - .filter(|s| !s.is_empty()) - .min() - .map(|s| 
s.to_string()) - }) - } - - /// 使用配额刷新时间精确锁定账号 - /// - /// 当 API 返回 429 但没有 quotaResetDelay 时,尝试使用账号的配额刷新时间 - /// - /// # 参数 - /// - `account_id`: 账号 ID - /// - `reason`: 限流原因(QuotaExhausted/ServerError 等) - /// - `model`: 可选的模型名称,用于模型级别限流 - pub fn set_precise_lockout(&self, account_id: &str, reason: crate::proxy::rate_limit::RateLimitReason, model: Option) -> bool { - if let Some(reset_time_str) = self.get_quota_reset_time(account_id) { - tracing::info!("找到账号 {} 的配额刷新时间: {}", account_id, reset_time_str); - self.rate_limit_tracker.set_lockout_until_iso(account_id, &reset_time_str, reason, model) - } else { - tracing::debug!("未找到账号 {} 的配额刷新时间,将使用默认退避策略", account_id); - false - } - } - - /// 实时刷新配额并精确锁定账号 - /// - /// 当 429 发生时调用此方法: - /// 1. 实时调用配额刷新 API 获取最新的 reset_time - /// 2. 使用最新的 reset_time 精确锁定账号 - /// 3. 如果获取失败,返回 false 让调用方使用回退策略 - /// - /// # 参数 - /// - `model`: 可选的模型名称,用于模型级别限流 - pub async fn fetch_and_lock_with_realtime_quota( - &self, - email: &str, - reason: crate::proxy::rate_limit::RateLimitReason, - model: Option, - ) -> bool { - // 1. 从 tokens 中获取该账号的 access_token 和 account_id - // 同时获取 account_id,确保锁定 key 与检查 key 一致 - let (access_token, account_id) = { - let mut found: Option<(String, String)> = None; - for entry in self.tokens.iter() { - if entry.value().email == email { - found = Some(( - entry.value().access_token.clone(), - entry.value().account_id.clone(), - )); - break; - } - } - found - }.unzip(); - - let (access_token, account_id) = match (access_token, account_id) { - (Some(token), Some(id)) => (token, id), - _ => { - tracing::warn!("无法找到账号 {} 的 access_token,无法实时刷新配额", email); - return false; - } - }; - - // 2. 调用配额刷新 API - tracing::info!("账号 {} 正在实时刷新配额...", email); - match crate::modules::quota::fetch_quota(&access_token, email, Some(&account_id)).await { - Ok((quota_data, _project_id)) => { - // 3. 
从最新配额中提取 reset_time - let earliest_reset = quota_data - .models - .iter() - .filter_map(|m| { - if !m.reset_time.is_empty() { - Some(m.reset_time.as_str()) - } else { - None - } - }) - .min(); - - if let Some(reset_time_str) = earliest_reset { - tracing::info!( - "账号 {} 实时配额刷新成功,reset_time: {}", - email, - reset_time_str - ); - // [FIX] 使用 account_id 作为 key,与 is_rate_limited 检查一致 - self.rate_limit_tracker.set_lockout_until_iso(&account_id, reset_time_str, reason, model) - } else { - tracing::warn!("账号 {} 配额刷新成功但未找到 reset_time", email); - false - } - } - Err(e) => { - tracing::warn!("账号 {} 实时配额刷新失败: {:?}", email, e); - false - } - } - } - - /// 标记账号限流(异步版本,支持实时配额刷新) - /// - /// 三级降级策略: - /// 1. 优先: API 返回 quotaResetDelay → 直接使用 - /// 2. 次优: 实时刷新配额 → 获取最新 reset_time - /// 3. 保底: 使用本地缓存配额 → 读取账号文件 - /// 4. 兜底: 指数退避策略 → 默认锁定时间 - /// - /// # 参数 - /// - `email`: 账号邮箱,用于查找账号信息 - /// - `status`: HTTP 状态码(如 429、500 等) - /// - `retry_after_header`: 可选的 Retry-After 响应头 - /// - `error_body`: 错误响应体,用于解析 quotaResetDelay - /// - `model`: 可选的模型名称,用于模型级别限流 - pub async fn mark_rate_limited_async( - &self, - email: &str, - status: u16, - retry_after_header: Option<&str>, - error_body: &str, - model: Option<&str>, // 🆕 新增模型参数 - ) { - // [NEW] 检查熔断是否启用 - let config = self.circuit_breaker_config.read().await.clone(); - if !config.enabled { - return; - } - - // [FIX] Convert email to account_id for consistent tracking - let account_id = self.email_to_account_id(email).unwrap_or_else(|| email.to_string()); - - // 检查 API 是否返回了精确的重试时间 - let has_explicit_retry_time = retry_after_header.is_some() || - error_body.contains("quotaResetDelay"); - - if has_explicit_retry_time { - // API 返回了精确时间(quotaResetDelay),直接使用,无需实时刷新 - if let Some(m) = model { - tracing::debug!( - "账号 {} 的模型 {} 的 429 响应包含 quotaResetDelay,直接使用 API 返回的时间", - account_id, - m - ); - } else { - tracing::debug!( - "账号 {} 的 429 响应包含 quotaResetDelay,直接使用 API 返回的时间", - account_id - ); - } - self.rate_limit_tracker.parse_from_error( - 
&account_id, - status, - retry_after_header, - error_body, - model.map(|s| s.to_string()), - &config.backoff_steps, // [NEW] 传入配置 - ); - return; - } - - // 确定限流原因 - let reason = if error_body.to_lowercase().contains("model_capacity") { - crate::proxy::rate_limit::RateLimitReason::ModelCapacityExhausted - } else if error_body.to_lowercase().contains("exhausted") - || error_body.to_lowercase().contains("quota") - { - crate::proxy::rate_limit::RateLimitReason::QuotaExhausted - } else { - crate::proxy::rate_limit::RateLimitReason::Unknown - }; - - // API 未返回 quotaResetDelay,需要实时刷新配额获取精确锁定时间 - if let Some(m) = model { - tracing::info!( - "账号 {} 的模型 {} 的 429 响应未包含 quotaResetDelay,尝试实时刷新配额...", - account_id, - m - ); - } else { - tracing::info!( - "账号 {} 的 429 响应未包含 quotaResetDelay,尝试实时刷新配额...", - account_id - ); - } - - // [FIX] 传入 email 而不是 account_id,因为 fetch_and_lock_with_realtime_quota 期望 email - if self.fetch_and_lock_with_realtime_quota(email, reason, model.map(|s| s.to_string())).await { - tracing::info!("账号 {} 已使用实时配额精确锁定", email); - return; - } - - // 实时刷新失败,尝试使用本地缓存的配额刷新时间 - if self.set_precise_lockout(&account_id, reason, model.map(|s| s.to_string())) { - tracing::info!("账号 {} 已使用本地缓存配额锁定", account_id); - return; - } - - // 都失败了,回退到指数退避策略 - tracing::warn!("账号 {} 无法获取配额刷新时间,使用指数退避策略", account_id); - self.rate_limit_tracker.parse_from_error( - &account_id, - status, - retry_after_header, - error_body, - model.map(|s| s.to_string()), - &config.backoff_steps, // [NEW] 传入配置 - ); - } - - // ===== 调度配置相关方法 ===== - - /// 获取当前调度配置 - pub async fn get_sticky_config(&self) -> StickySessionConfig { - self.sticky_config.read().await.clone() - } - - /// 更新调度配置 - pub async fn update_sticky_config(&self, new_config: StickySessionConfig) { - let mut config = self.sticky_config.write().await; - *config = new_config; - tracing::debug!("Scheduling configuration updated: {:?}", *config); - } - - /// [NEW] 更新熔断器配置 - pub async fn update_circuit_breaker_config(&self, config: 
crate::models::CircuitBreakerConfig) { - let mut lock = self.circuit_breaker_config.write().await; - *lock = config; - tracing::debug!("Circuit breaker configuration updated"); - } - - /// [NEW] 获取熔断器配置 - pub async fn get_circuit_breaker_config(&self) -> crate::models::CircuitBreakerConfig { - self.circuit_breaker_config.read().await.clone() - } - - /// 清除特定会话的粘性映射 - #[allow(dead_code)] - pub fn clear_session_binding(&self, session_id: &str) { - self.session_accounts.remove(session_id); - } - - /// 清除所有会话的粘性映射 - pub fn clear_all_sessions(&self) { - self.session_accounts.clear(); - } - - // ===== [FIX #820] 固定账号模式相关方法 ===== - - /// 设置优先使用的账号ID(固定账号模式) - /// 传入 Some(account_id) 启用固定账号模式,传入 None 恢复轮询模式 - pub async fn set_preferred_account(&self, account_id: Option) { - let mut preferred = self.preferred_account_id.write().await; - if let Some(ref id) = account_id { - tracing::info!("🔒 [FIX #820] Fixed account mode enabled: {}", id); - } else { - tracing::info!("🔄 [FIX #820] Round-robin mode enabled (no preferred account)"); - } - *preferred = account_id; - } - - /// 获取当前优先使用的账号ID - pub async fn get_preferred_account(&self) -> Option { - self.preferred_account_id.read().await.clone() - } - - /// 使用 Authorization Code 交换 Refresh Token (Web OAuth) - pub async fn exchange_code(&self, code: &str, redirect_uri: &str) -> Result { - crate::modules::oauth::exchange_code(code, redirect_uri) - .await - .and_then(|t| { - t.refresh_token - .ok_or_else(|| "No refresh token returned by Google".to_string()) - }) - } - - /// 获取 OAuth URL (支持自定义 Redirect URI) - pub fn get_oauth_url_with_redirect(&self, redirect_uri: &str, state: &str) -> String { - crate::modules::oauth::get_auth_url(redirect_uri, state) - } - - /// 获取用户信息 (Email 等) - pub async fn get_user_info( - &self, - refresh_token: &str, - ) -> Result { - // 先获取 Access Token - let token = crate::modules::oauth::refresh_access_token(refresh_token, None) - .await - .map_err(|e| format!("刷新 Access Token 失败: {}", e))?; - - 
crate::modules::oauth::get_user_info(&token.access_token, None).await - } - - /// 添加新账号 (纯后端实现,不依赖 Tauri AppHandle) - pub async fn add_account(&self, email: &str, refresh_token: &str) -> Result<(), String> { - // 1. 获取 Access Token (验证 refresh_token 有效性) - let token_info = crate::modules::oauth::refresh_access_token(refresh_token, None) - .await - .map_err(|e| format!("Invalid refresh token: {}", e))?; - - // 2. 获取项目 ID (Project ID) - let project_id = crate::proxy::project_resolver::fetch_project_id(&token_info.access_token) - .await - .unwrap_or_else(|_| "bamboo-precept-lgxtn".to_string()); // Fallback - - // 3. 委托给 modules::account::add_account 处理 (包含文件写入、索引更新、锁) - let email_clone = email.to_string(); - let refresh_token_clone = refresh_token.to_string(); - - tokio::task::spawn_blocking(move || { - let token_data = crate::models::TokenData::new( - token_info.access_token, - refresh_token_clone, - token_info.expires_in, - Some(email_clone.clone()), - Some(project_id), - None, // session_id - ); - - crate::modules::account::upsert_account(email_clone, None, token_data) - }) - .await - .map_err(|e| format!("Task join error: {}", e))? - .map_err(|e| format!("Failed to save account: {}", e))?; - - // 4. 
重新加载 (更新内存) - self.reload_all_accounts().await.map(|_| ()) - } - - /// 记录请求成功,增加健康分 - pub fn record_success(&self, account_id: &str) { - self.health_scores - .entry(account_id.to_string()) - .and_modify(|s| *s = (*s + 0.05).min(1.0)) - .or_insert(1.0); - tracing::debug!("📈 Health score increased for account {}", account_id); - } - - /// 记录请求失败,降低健康分 - pub fn record_failure(&self, account_id: &str) { - self.health_scores - .entry(account_id.to_string()) - .and_modify(|s| *s = (*s - 0.2).max(0.0)) - .or_insert(0.8); - tracing::warn!("📉 Health score decreased for account {}", account_id); - } - - /// [NEW] 从账号配额信息中提取最近的刷新时间戳 - /// - /// Claude 模型(sonnet/opus)共用同一个刷新时间,只需取 claude 系列的 reset_time - /// 返回 Unix 时间戳(秒),用于排序时比较 - fn extract_earliest_reset_time(&self, account: &serde_json::Value) -> Option { - let models = account - .get("quota") - .and_then(|q| q.get("models")) - .and_then(|m| m.as_array())?; - - let mut earliest_ts: Option = None; - - for model in models { - // 优先取 claude 系列的 reset_time(sonnet/opus 共用) - let model_name = model.get("name").and_then(|n| n.as_str()).unwrap_or(""); - if !model_name.contains("claude") { - continue; - } - - if let Some(reset_time_str) = model.get("reset_time").and_then(|r| r.as_str()) { - if reset_time_str.is_empty() { - continue; - } - // 解析 ISO 8601 时间字符串为时间戳 - if let Ok(dt) = chrono::DateTime::parse_from_rfc3339(reset_time_str) { - let ts = dt.timestamp(); - if earliest_ts.is_none() || ts < earliest_ts.unwrap() { - earliest_ts = Some(ts); - } - } - } - } - - // 如果没有 claude 模型的时间,尝试取任意模型的最近时间 - if earliest_ts.is_none() { - for model in models { - if let Some(reset_time_str) = model.get("reset_time").and_then(|r| r.as_str()) { - if reset_time_str.is_empty() { - continue; - } - if let Ok(dt) = chrono::DateTime::parse_from_rfc3339(reset_time_str) { - let ts = dt.timestamp(); - if earliest_ts.is_none() || ts < earliest_ts.unwrap() { - earliest_ts = Some(ts); - } - } - } - } - } - - earliest_ts - } - - /// Helper to find account ID 
by email - pub fn get_account_id_by_email(&self, email: &str) -> Option { - for entry in self.tokens.iter() { - if entry.value().email == email { - return Some(entry.key().clone()); - } - } - None - } - - /// Set validation blocked status for an account (internal) - pub async fn set_validation_block(&self, account_id: &str, block_until: i64, reason: &str) -> Result<(), String> { - // 1. Update memory - if let Some(mut token) = self.tokens.get_mut(account_id) { - token.validation_blocked = true; - token.validation_blocked_until = block_until; - } - - // 2. Persist to disk - let path = self.data_dir.join("accounts").join(format!("{}.json", account_id)); - if !path.exists() { - return Err(format!("Account file not found: {:?}", path)); - } - - let content = std::fs::read_to_string(&path) - .map_err(|e| format!("Failed to read account file: {}", e))?; - - let mut account: serde_json::Value = serde_json::from_str(&content) - .map_err(|e| format!("Failed to parse account JSON: {}", e))?; - - account["validation_blocked"] = serde_json::Value::Bool(true); - account["validation_blocked_until"] = serde_json::Value::Number(serde_json::Number::from(block_until)); - account["validation_blocked_reason"] = serde_json::Value::String(reason.to_string()); - - // Clear sticky session if blocked - self.session_accounts.retain(|_, v| *v != account_id); - - let json_str = serde_json::to_string_pretty(&account) - .map_err(|e| format!("Failed to serialize account JSON: {}", e))?; - - std::fs::write(&path, json_str) - .map_err(|e| format!("Failed to write account file: {}", e))?; - - tracing::info!( - "🚫 Account {} validation blocked until {} (reason: {})", - account_id, - block_until, - reason - ); - - Ok(()) - } - - /// Public method to set validation block (called from handlers) - pub async fn set_validation_block_public(&self, account_id: &str, block_until: i64, reason: &str) -> Result<(), String> { - self.set_validation_block(account_id, block_until, reason).await - } - - /// Set 
is_forbidden status for an account (called when proxy encounters 403) - pub async fn set_forbidden(&self, account_id: &str, reason: &str) -> Result<(), String> { - // 1. Persist to disk - update quota.is_forbidden in account JSON - let path = self.data_dir.join("accounts").join(format!("{}.json", account_id)); - if !path.exists() { - return Err(format!("Account file not found: {:?}", path)); - } - - let content = std::fs::read_to_string(&path) - .map_err(|e| format!("Failed to read account file: {}", e))?; - - let mut account: serde_json::Value = serde_json::from_str(&content) - .map_err(|e| format!("Failed to parse account JSON: {}", e))?; - - // Update quota.is_forbidden - if let Some(quota) = account.get_mut("quota") { - quota["is_forbidden"] = serde_json::Value::Bool(true); - } else { - // Create quota object if not exists - account["quota"] = serde_json::json!({ - "models": [], - "last_updated": chrono::Utc::now().timestamp(), - "is_forbidden": true - }); - } - - // Clear sticky session if forbidden - self.session_accounts.retain(|_, v| *v != account_id); - - let json_str = serde_json::to_string_pretty(&account) - .map_err(|e| format!("Failed to serialize account JSON: {}", e))?; - - std::fs::write(&path, json_str) - .map_err(|e| format!("Failed to write account file: {}", e))?; - - // [FIX] 从内存池中移除账号,避免重试时再次选中 - self.remove_account(account_id); - - tracing::warn!( - "🚫 Account {} marked as forbidden (403): {}", - account_id, - truncate_reason(reason, 100) - ); - - Ok(()) - } -} - -/// 截断过长的原因字符串 -fn truncate_reason(reason: &str, max_len: usize) -> String { - if reason.len() <= max_len { - reason.to_string() - } else { - format!("{}...", &reason[..max_len - 3]) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::cmp::Ordering; - - #[tokio::test] - async fn test_reload_account_purges_cache_when_account_becomes_proxy_disabled() { - let tmp_root = std::env::temp_dir().join(format!( - "antigravity-token-manager-test-{}", - uuid::Uuid::new_v4() - )); - 
let accounts_dir = tmp_root.join("accounts"); - std::fs::create_dir_all(&accounts_dir).unwrap(); - - let account_id = "acc1"; - let email = "a@test.com"; - let now = chrono::Utc::now().timestamp(); - let account_path = accounts_dir.join(format!("{}.json", account_id)); - - let account_json = serde_json::json!({ - "id": account_id, - "email": email, - "token": { - "access_token": "atk", - "refresh_token": "rtk", - "expires_in": 3600, - "expiry_timestamp": now + 3600 - }, - "disabled": false, - "proxy_disabled": false, - "created_at": now, - "last_used": now - }); - std::fs::write(&account_path, serde_json::to_string_pretty(&account_json).unwrap()).unwrap(); - - let manager = TokenManager::new(tmp_root.clone()); - manager.load_accounts().await.unwrap(); - assert!(manager.tokens.get(account_id).is_some()); - - // Prime extra caches to ensure remove_account() is really called. - manager - .session_accounts - .insert("sid1".to_string(), account_id.to_string()); - { - let mut preferred = manager.preferred_account_id.write().await; - *preferred = Some(account_id.to_string()); - } - - // Mark account as proxy-disabled on disk (manual disable). 
- let mut disabled_json = account_json.clone(); - disabled_json["proxy_disabled"] = serde_json::Value::Bool(true); - disabled_json["proxy_disabled_reason"] = serde_json::Value::String("manual".to_string()); - disabled_json["proxy_disabled_at"] = serde_json::Value::Number(now.into()); - std::fs::write(&account_path, serde_json::to_string_pretty(&disabled_json).unwrap()).unwrap(); - - manager.reload_account(account_id).await.unwrap(); - - assert!(manager.tokens.get(account_id).is_none()); - assert!(manager.session_accounts.get("sid1").is_none()); - assert!(manager.preferred_account_id.read().await.is_none()); - - let _ = std::fs::remove_dir_all(&tmp_root); - } - - #[tokio::test] - async fn test_fixed_account_mode_skips_preferred_when_disabled_on_disk_without_reload() { - let tmp_root = std::env::temp_dir().join(format!( - "antigravity-token-manager-test-fixed-mode-{}", - uuid::Uuid::new_v4() - )); - let accounts_dir = tmp_root.join("accounts"); - std::fs::create_dir_all(&accounts_dir).unwrap(); - - let now = chrono::Utc::now().timestamp(); - - let write_account = |id: &str, email: &str, proxy_disabled: bool| { - let account_path = accounts_dir.join(format!("{}.json", id)); - let json = serde_json::json!({ - "id": id, - "email": email, - "token": { - "access_token": format!("atk-{}", id), - "refresh_token": format!("rtk-{}", id), - "expires_in": 3600, - "expiry_timestamp": now + 3600, - "project_id": format!("pid-{}", id) - }, - "disabled": false, - "proxy_disabled": proxy_disabled, - "proxy_disabled_reason": if proxy_disabled { "manual" } else { "" }, - "created_at": now, - "last_used": now - }); - std::fs::write(&account_path, serde_json::to_string_pretty(&json).unwrap()).unwrap(); - }; - - // Two accounts in pool. - write_account("acc1", "a@test.com", false); - write_account("acc2", "b@test.com", false); - - let manager = TokenManager::new(tmp_root.clone()); - manager.load_accounts().await.unwrap(); - - // Enable fixed account mode for acc1. 
- manager.set_preferred_account(Some("acc1".to_string())).await; - - // Disable acc1 on disk WITHOUT reloading the in-memory pool (simulates stale cache). - write_account("acc1", "a@test.com", true); - - let (_token, _project_id, email, account_id, _wait_ms) = manager - .get_token("gemini", false, Some("sid1"), "gemini-1.5-flash") - .await - .unwrap(); - - // Should fall back to another account instead of using the disabled preferred one. - assert_eq!(account_id, "acc2"); - assert_eq!(email, "b@test.com"); - assert!(manager.tokens.get("acc1").is_none()); - assert!(manager.get_preferred_account().await.is_none()); - - let _ = std::fs::remove_dir_all(&tmp_root); - } - - #[tokio::test] - async fn test_sticky_session_skips_bound_account_when_disabled_on_disk_without_reload() { - let tmp_root = std::env::temp_dir().join(format!( - "antigravity-token-manager-test-sticky-disabled-{}", - uuid::Uuid::new_v4() - )); - let accounts_dir = tmp_root.join("accounts"); - std::fs::create_dir_all(&accounts_dir).unwrap(); - - let now = chrono::Utc::now().timestamp(); - - let write_account = |id: &str, email: &str, percentage: i64, proxy_disabled: bool| { - let account_path = accounts_dir.join(format!("{}.json", id)); - let json = serde_json::json!({ - "id": id, - "email": email, - "token": { - "access_token": format!("atk-{}", id), - "refresh_token": format!("rtk-{}", id), - "expires_in": 3600, - "expiry_timestamp": now + 3600, - "project_id": format!("pid-{}", id) - }, - "quota": { - "models": [ - { "name": "gemini-1.5-flash", "percentage": percentage } - ] - }, - "disabled": false, - "proxy_disabled": proxy_disabled, - "proxy_disabled_reason": if proxy_disabled { "manual" } else { "" }, - "created_at": now, - "last_used": now - }); - std::fs::write(&account_path, serde_json::to_string_pretty(&json).unwrap()).unwrap(); - }; - - // Two accounts in pool. acc1 has higher quota -> should be selected and bound first. 
- write_account("acc1", "a@test.com", 90, false); - write_account("acc2", "b@test.com", 10, false); - - let manager = TokenManager::new(tmp_root.clone()); - manager.load_accounts().await.unwrap(); - - // Prime: first request should bind the session to acc1. - let (_token, _project_id, _email, account_id, _wait_ms) = manager - .get_token("gemini", false, Some("sid1"), "gemini-1.5-flash") - .await - .unwrap(); - assert_eq!(account_id, "acc1"); - assert_eq!( - manager.session_accounts.get("sid1").map(|v| v.clone()), - Some("acc1".to_string()) - ); - - // Disable acc1 on disk WITHOUT reloading the in-memory pool (simulates stale cache). - write_account("acc1", "a@test.com", 90, true); - - let (_token, _project_id, email, account_id, _wait_ms) = manager - .get_token("gemini", false, Some("sid1"), "gemini-1.5-flash") - .await - .unwrap(); - - // Should fall back to another account instead of reusing the disabled bound one. - assert_eq!(account_id, "acc2"); - assert_eq!(email, "b@test.com"); - assert!(manager.tokens.get("acc1").is_none()); - assert_ne!( - manager.session_accounts.get("sid1").map(|v| v.clone()), - Some("acc1".to_string()) - ); - - let _ = std::fs::remove_dir_all(&tmp_root); - } - - /// 创建测试用的 ProxyToken - fn create_test_token( - email: &str, - tier: Option<&str>, - health_score: f32, - reset_time: Option, - remaining_quota: Option, - ) -> ProxyToken { - ProxyToken { - account_id: email.to_string(), - access_token: "test_token".to_string(), - refresh_token: "test_refresh".to_string(), - expires_in: 3600, - timestamp: chrono::Utc::now().timestamp() + 3600, - email: email.to_string(), - account_path: PathBuf::from("/tmp/test"), - project_id: None, - subscription_tier: tier.map(|s| s.to_string()), - remaining_quota, - protected_models: HashSet::new(), - health_score, - reset_time, - validation_blocked: false, - validation_blocked_until: 0, - model_quotas: HashMap::new(), - } - } - - /// 测试排序比较函数(与 get_token_internal 中的逻辑一致) - fn compare_tokens(a: &ProxyToken, 
b: &ProxyToken) -> Ordering { - const RESET_TIME_THRESHOLD_SECS: i64 = 600; // 10 分钟阈值 - - let tier_priority = |tier: &Option| { - let t = tier.as_deref().unwrap_or("").to_lowercase(); - if t.contains("ultra") { 0 } - else if t.contains("pro") { 1 } - else if t.contains("free") { 2 } - else { 3 } - }; - - // First: compare by subscription tier - let tier_cmp = tier_priority(&a.subscription_tier).cmp(&tier_priority(&b.subscription_tier)); - if tier_cmp != Ordering::Equal { - return tier_cmp; - } - - // Second: compare by health score (higher is better) - let health_cmp = b.health_score.partial_cmp(&a.health_score).unwrap_or(Ordering::Equal); - if health_cmp != Ordering::Equal { - return health_cmp; - } - - // Third: compare by reset time (earlier/closer is better) - let reset_a = a.reset_time.unwrap_or(i64::MAX); - let reset_b = b.reset_time.unwrap_or(i64::MAX); - let reset_diff = (reset_a - reset_b).abs(); - - if reset_diff >= RESET_TIME_THRESHOLD_SECS { - let reset_cmp = reset_a.cmp(&reset_b); - if reset_cmp != Ordering::Equal { - return reset_cmp; - } - } - - // Fourth: compare by remaining quota percentage (higher is better) - let quota_a = a.remaining_quota.unwrap_or(0); - let quota_b = b.remaining_quota.unwrap_or(0); - quota_b.cmp("a_a) - } - - #[test] - fn test_sorting_tier_priority() { - // ULTRA > PRO > FREE - let ultra = create_test_token("ultra@test.com", Some("ULTRA"), 1.0, None, Some(50)); - let pro = create_test_token("pro@test.com", Some("PRO"), 1.0, None, Some(50)); - let free = create_test_token("free@test.com", Some("FREE"), 1.0, None, Some(50)); - - assert_eq!(compare_tokens(&ultra, &pro), Ordering::Less); - assert_eq!(compare_tokens(&pro, &free), Ordering::Less); - assert_eq!(compare_tokens(&ultra, &free), Ordering::Less); - assert_eq!(compare_tokens(&free, &ultra), Ordering::Greater); - } - - #[test] - fn test_sorting_health_score_priority() { - // 同等级下,健康分高的优先 - let high_health = create_test_token("high@test.com", Some("PRO"), 1.0, None, 
Some(50)); - let low_health = create_test_token("low@test.com", Some("PRO"), 0.5, None, Some(50)); - - assert_eq!(compare_tokens(&high_health, &low_health), Ordering::Less); - assert_eq!(compare_tokens(&low_health, &high_health), Ordering::Greater); - } - - #[test] - fn test_sorting_reset_time_priority() { - let now = chrono::Utc::now().timestamp(); - - // 刷新时间更近(30分钟后)的优先于更远(5小时后)的 - let soon_reset = create_test_token("soon@test.com", Some("PRO"), 1.0, Some(now + 1800), Some(50)); // 30分钟后 - let late_reset = create_test_token("late@test.com", Some("PRO"), 1.0, Some(now + 18000), Some(50)); // 5小时后 - - assert_eq!(compare_tokens(&soon_reset, &late_reset), Ordering::Less); - assert_eq!(compare_tokens(&late_reset, &soon_reset), Ordering::Greater); - } - - #[test] - fn test_sorting_reset_time_threshold() { - let now = chrono::Utc::now().timestamp(); - - // 差异小于10分钟(600秒)视为相同优先级,此时按配额排序 - let reset_a = create_test_token("a@test.com", Some("PRO"), 1.0, Some(now + 1800), Some(80)); // 30分钟后, 80%配额 - let reset_b = create_test_token("b@test.com", Some("PRO"), 1.0, Some(now + 2100), Some(50)); // 35分钟后, 50%配额 - - // 差5分钟 < 10分钟阈值,视为相同,按配额排序(80% > 50%) - assert_eq!(compare_tokens(&reset_a, &reset_b), Ordering::Less); - } - - #[test] - fn test_sorting_reset_time_beyond_threshold() { - let now = chrono::Utc::now().timestamp(); - - // 差异超过10分钟,按刷新时间排序(忽略配额) - let soon_low_quota = create_test_token("soon@test.com", Some("PRO"), 1.0, Some(now + 1800), Some(20)); // 30分钟后, 20% - let late_high_quota = create_test_token("late@test.com", Some("PRO"), 1.0, Some(now + 18000), Some(90)); // 5小时后, 90% - - // 差4.5小时 > 10分钟,刷新时间优先,30分钟 < 5小时 - assert_eq!(compare_tokens(&soon_low_quota, &late_high_quota), Ordering::Less); - } - - #[test] - fn test_sorting_quota_fallback() { - // 其他条件相同时,配额高的优先 - let high_quota = create_test_token("high@test.com", Some("PRO"), 1.0, None, Some(80)); - let low_quota = create_test_token("low@test.com", Some("PRO"), 1.0, None, Some(20)); - - 
assert_eq!(compare_tokens(&high_quota, &low_quota), Ordering::Less); - assert_eq!(compare_tokens(&low_quota, &high_quota), Ordering::Greater); - } - - #[test] - fn test_sorting_missing_reset_time() { - let now = chrono::Utc::now().timestamp(); - - // 没有 reset_time 的账号应该排在有 reset_time 的后面 - let with_reset = create_test_token("with@test.com", Some("PRO"), 1.0, Some(now + 1800), Some(50)); - let without_reset = create_test_token("without@test.com", Some("PRO"), 1.0, None, Some(50)); - - assert_eq!(compare_tokens(&with_reset, &without_reset), Ordering::Less); - } - - #[test] - fn test_full_sorting_integration() { - let now = chrono::Utc::now().timestamp(); - - let mut tokens = vec![ - create_test_token("free_high@test.com", Some("FREE"), 1.0, Some(now + 1800), Some(90)), - create_test_token("pro_low_health@test.com", Some("PRO"), 0.5, Some(now + 1800), Some(90)), - create_test_token("pro_soon@test.com", Some("PRO"), 1.0, Some(now + 1800), Some(50)), // 30分钟后 - create_test_token("pro_late@test.com", Some("PRO"), 1.0, Some(now + 18000), Some(90)), // 5小时后 - create_test_token("ultra@test.com", Some("ULTRA"), 1.0, Some(now + 36000), Some(10)), - ]; - - tokens.sort_by(compare_tokens); - - // 预期顺序: - // 1. ULTRA (最高等级,即使刷新时间最远) - // 2. PRO + 高健康分 + 30分钟后刷新 - // 3. PRO + 高健康分 + 5小时后刷新 - // 4. PRO + 低健康分 - // 5. 
FREE (最低等级,即使配额最高) - assert_eq!(tokens[0].email, "ultra@test.com"); - assert_eq!(tokens[1].email, "pro_soon@test.com"); - assert_eq!(tokens[2].email, "pro_late@test.com"); - assert_eq!(tokens[3].email, "pro_low_health@test.com"); - assert_eq!(tokens[4].email, "free_high@test.com"); - } - - #[test] - fn test_realistic_scenario() { - // 模拟用户描述的场景: - // a 账号 claude 4h55m 后刷新 - // b 账号 claude 31m 后刷新 - // 应该优先使用 b(31分钟后刷新) - let now = chrono::Utc::now().timestamp(); - - let account_a = create_test_token("a@test.com", Some("PRO"), 1.0, Some(now + 295 * 60), Some(80)); // 4h55m - let account_b = create_test_token("b@test.com", Some("PRO"), 1.0, Some(now + 31 * 60), Some(30)); // 31m - - // b 应该排在 a 前面(刷新时间更近) - assert_eq!(compare_tokens(&account_b, &account_a), Ordering::Less); - - let mut tokens = vec![account_a.clone(), account_b.clone()]; - tokens.sort_by(compare_tokens); - - assert_eq!(tokens[0].email, "b@test.com"); - assert_eq!(tokens[1].email, "a@test.com"); - } - - #[test] - fn test_extract_earliest_reset_time() { - let manager = TokenManager::new(PathBuf::from("/tmp/test")); - - // 测试包含 claude 模型的 reset_time 提取 - let account_with_claude = serde_json::json!({ - "quota": { - "models": [ - {"name": "gemini-flash", "reset_time": "2025-01-31T10:00:00Z"}, - {"name": "claude-sonnet", "reset_time": "2025-01-31T08:00:00Z"}, - {"name": "claude-opus", "reset_time": "2025-01-31T08:00:00Z"} - ] - } - }); - - let result = manager.extract_earliest_reset_time(&account_with_claude); - assert!(result.is_some()); - // 应该返回 claude 的时间(08:00)而不是 gemini 的(10:00) - let expected_ts = chrono::DateTime::parse_from_rfc3339("2025-01-31T08:00:00Z") - .unwrap() - .timestamp(); - assert_eq!(result.unwrap(), expected_ts); - } - - #[test] - fn test_extract_reset_time_no_claude() { - let manager = TokenManager::new(PathBuf::from("/tmp/test")); - - // 没有 claude 模型时,应该取任意模型的最近时间 - let account_no_claude = serde_json::json!({ - "quota": { - "models": [ - {"name": "gemini-flash", "reset_time": 
"2025-01-31T10:00:00Z"}, - {"name": "gemini-pro", "reset_time": "2025-01-31T08:00:00Z"} - ] - } - }); - - let result = manager.extract_earliest_reset_time(&account_no_claude); - assert!(result.is_some()); - let expected_ts = chrono::DateTime::parse_from_rfc3339("2025-01-31T08:00:00Z") - .unwrap() - .timestamp(); - assert_eq!(result.unwrap(), expected_ts); - } - - #[test] - fn test_extract_reset_time_missing_quota() { - let manager = TokenManager::new(PathBuf::from("/tmp/test")); - - // 没有 quota 字段时应返回 None - let account_no_quota = serde_json::json!({ - "email": "test@test.com" - }); - - assert!(manager.extract_earliest_reset_time(&account_no_quota).is_none()); - } - - // ===== P2C 算法测试 ===== - - /// 创建带 protected_models 的测试 Token - fn create_test_token_with_protected( - email: &str, - remaining_quota: Option, - protected_models: HashSet, - ) -> ProxyToken { - ProxyToken { - account_id: email.to_string(), - access_token: "test_token".to_string(), - refresh_token: "test_refresh".to_string(), - expires_in: 3600, - timestamp: chrono::Utc::now().timestamp() + 3600, - email: email.to_string(), - account_path: PathBuf::from("/tmp/test"), - project_id: None, - subscription_tier: Some("PRO".to_string()), - remaining_quota, - protected_models, - health_score: 1.0, - reset_time: None, - validation_blocked: false, - validation_blocked_until: 0, - model_quotas: HashMap::new(), - } - } - - #[test] - fn test_p2c_selects_higher_quota() { - // P2C 应选择配额更高的账号 - let manager = TokenManager::new(PathBuf::from("/tmp/test")); - - let low_quota = create_test_token("low@test.com", Some("PRO"), 1.0, None, Some(20)); - let high_quota = create_test_token("high@test.com", Some("PRO"), 1.0, None, Some(80)); - - let candidates = vec![low_quota, high_quota]; - let attempted: HashSet = HashSet::new(); - - // 运行多次确保选择高配额账号 - for _ in 0..10 { - let result = manager.select_with_p2c(&candidates, &attempted, "claude-sonnet", false); - assert!(result.is_some()); - // P2C 从两个候选中选择配额更高的 - // 
由于只有两个候选,应该总是选择 high_quota - assert_eq!(result.unwrap().email, "high@test.com"); - } - } - - #[test] - fn test_p2c_skips_attempted() { - // P2C 应跳过已尝试的账号 - let manager = TokenManager::new(PathBuf::from("/tmp/test")); - - let token_a = create_test_token("a@test.com", Some("PRO"), 1.0, None, Some(80)); - let token_b = create_test_token("b@test.com", Some("PRO"), 1.0, None, Some(50)); - - let candidates = vec![token_a, token_b]; - let mut attempted: HashSet = HashSet::new(); - attempted.insert("a@test.com".to_string()); - - let result = manager.select_with_p2c(&candidates, &attempted, "claude-sonnet", false); - assert!(result.is_some()); - assert_eq!(result.unwrap().email, "b@test.com"); - } - - #[test] - fn test_p2c_skips_protected_models() { - // P2C 应跳过对目标模型有保护的账号 (quota_protection_enabled = true) - let manager = TokenManager::new(PathBuf::from("/tmp/test")); - - let mut protected = HashSet::new(); - protected.insert("claude-sonnet".to_string()); - - let protected_account = create_test_token_with_protected("protected@test.com", Some(90), protected); - let normal_account = create_test_token_with_protected("normal@test.com", Some(50), HashSet::new()); - - let candidates = vec![protected_account, normal_account]; - let attempted: HashSet = HashSet::new(); - - let result = manager.select_with_p2c(&candidates, &attempted, "claude-sonnet", true); - assert!(result.is_some()); - assert_eq!(result.unwrap().email, "normal@test.com"); - } - - #[test] - fn test_p2c_single_candidate() { - // 单候选时直接返回 - let manager = TokenManager::new(PathBuf::from("/tmp/test")); - - let token = create_test_token("single@test.com", Some("PRO"), 1.0, None, Some(50)); - let candidates = vec![token]; - let attempted: HashSet = HashSet::new(); - - let result = manager.select_with_p2c(&candidates, &attempted, "claude-sonnet", false); - assert!(result.is_some()); - assert_eq!(result.unwrap().email, "single@test.com"); - } - - #[test] - fn test_p2c_empty_candidates() { - // 空候选返回 None - let manager = 
TokenManager::new(PathBuf::from("/tmp/test")); - - let candidates: Vec = vec![]; - let attempted: HashSet = HashSet::new(); - - let result = manager.select_with_p2c(&candidates, &attempted, "claude-sonnet", false); - assert!(result.is_none()); - } - - #[test] - fn test_p2c_all_attempted() { - // 所有账号都已尝试时返回 None - let manager = TokenManager::new(PathBuf::from("/tmp/test")); - - let token_a = create_test_token("a@test.com", Some("PRO"), 1.0, None, Some(80)); - let token_b = create_test_token("b@test.com", Some("PRO"), 1.0, None, Some(50)); - - let candidates = vec![token_a, token_b]; - let mut attempted: HashSet = HashSet::new(); - attempted.insert("a@test.com".to_string()); - attempted.insert("b@test.com".to_string()); - - let result = manager.select_with_p2c(&candidates, &attempted, "claude-sonnet", false); - assert!(result.is_none()); - } -} diff --git a/src-tauri/src/proxy/token_manager/loading.rs b/src-tauri/src/proxy/token_manager/loading.rs new file mode 100644 index 000000000..df42e1fbb --- /dev/null +++ b/src-tauri/src/proxy/token_manager/loading.rs @@ -0,0 +1,430 @@ +// Account Loading and Reloading Logic + +use super::manager::TokenManager; +use super::models::ProxyToken; +use std::collections::{HashMap, HashSet}; +use std::path::PathBuf; +use std::sync::atomic::Ordering; + +impl TokenManager { + /// Extract earliest reset time from account quota models + /// + /// Claude models (sonnet/opus) share the same reset time, so we prioritize claude series. + /// Returns Unix timestamp (seconds) for sorting comparison. 
+ fn extract_earliest_reset_time(&self, account: &serde_json::Value) -> Option { + let models = account + .get("quota") + .and_then(|q| q.get("models")) + .and_then(|m| m.as_array())?; + + let mut earliest_ts: Option = None; + + // First pass: prioritize Claude models + for model in models { + let model_name = model.get("name").and_then(|n| n.as_str()).unwrap_or(""); + if !model_name.contains("claude") { + continue; + } + + if let Some(reset_time_str) = model.get("reset_time").and_then(|r| r.as_str()) { + if reset_time_str.is_empty() { + continue; + } + // Parse ISO 8601 time string to timestamp + if let Ok(dt) = chrono::DateTime::parse_from_rfc3339(reset_time_str) { + let ts = dt.timestamp(); + if earliest_ts.is_none() || ts < earliest_ts.unwrap() { + earliest_ts = Some(ts); + } + } + } + } + + // Second pass: if no claude model time found, try any model + if earliest_ts.is_none() { + for model in models { + if let Some(reset_time_str) = model.get("reset_time").and_then(|r| r.as_str()) { + if reset_time_str.is_empty() { + continue; + } + if let Ok(dt) = chrono::DateTime::parse_from_rfc3339(reset_time_str) { + let ts = dt.timestamp(); + if earliest_ts.is_none() || ts < earliest_ts.unwrap() { + earliest_ts = Some(ts); + } + } + } + } + } + + earliest_ts + } + + /// Load all accounts from the accounts directory + /// [FIX] Only loads accounts that exist in account_index.json to prevent resurrection + pub async fn load_accounts(&self) -> Result { + let accounts_dir = self.data_dir.join("accounts"); + + if !accounts_dir.exists() { + return Err(format!("账号目录不存在: {:?}", accounts_dir)); + } + + self.tokens.clear(); + self.current_index.store(0, Ordering::SeqCst); + { + let mut last_used = self.last_used_account.lock().await; + *last_used = None; + } + + // [FIX] Load valid account IDs from index FIRST to prevent loading deleted accounts + let valid_ids: HashSet = match crate::modules::account::storage::load_account_index() { + Ok(index) => index.accounts.iter().map(|s| 
s.id.clone()).collect(), + Err(e) => { + tracing::warn!("Failed to load account index, falling back to directory scan: {}", e); + HashSet::new() + } + }; + + let use_index_filter = !valid_ids.is_empty(); + + let entries = std::fs::read_dir(&accounts_dir) + .map_err(|e| format!("读取账号目录失败: {}", e))?; + + let mut count = 0; + + for entry in entries { + let entry = entry.map_err(|e| format!("读取目录项失败: {}", e))?; + let path = entry.path(); + + if path.extension().and_then(|s| s.to_str()) != Some("json") { + continue; + } + + // [FIX] Skip files not in account_index.json (orphaned/deleted accounts) + if use_index_filter { + let file_stem = path.file_stem() + .and_then(|s| s.to_str()) + .unwrap_or(""); + + if !valid_ids.contains(file_stem) { + tracing::debug!( + "Skipping orphaned account file (not in index): {:?}", + path + ); + continue; + } + } + + match self.load_single_account(&path).await { + Ok(Some(token)) => { + let account_id = token.account_id.clone(); + self.tokens.insert(account_id, token); + count += 1; + } + Ok(None) => {} + Err(e) => { + tracing::debug!("加载账号失败 {:?}: {}", path, e); + } + } + } + + Ok(count) + } + + /// Reload a specific account + pub async fn reload_account(&self, account_id: &str) -> Result<(), String> { + let path = self + .data_dir + .join("accounts") + .join(format!("{}.json", account_id)); + if !path.exists() { + return Err(format!("账号文件不存在: {:?}", path)); + } + + match self.load_single_account(&path).await { + Ok(Some(token)) => { + self.tokens.insert(account_id.to_string(), token); + self.clear_rate_limit(account_id); + Ok(()) + } + Ok(None) => Err("账号加载失败".to_string()), + Err(e) => Err(format!("同步账号失败: {}", e)), + } + } + + /// Reload all accounts + pub async fn reload_all_accounts(&self) -> Result { + let count = self.load_accounts().await?; + self.clear_all_rate_limits(); + Ok(count) + } + + /// Load a single account from file + pub(crate) async fn load_single_account( + &self, + path: &PathBuf, + ) -> Result, String> { + let 
content = + std::fs::read_to_string(path).map_err(|e| format!("读取文件失败: {}", e))?; + + let mut account: serde_json::Value = + serde_json::from_str(&content).map_err(|e| format!("解析 JSON 失败: {}", e))?; + + // Check disabled status + if account + .get("disabled") + .and_then(|v| v.as_bool()) + .unwrap_or(false) + { + tracing::debug!( + "Skipping disabled account file: {:?} (email={})", + path, + account + .get("email") + .and_then(|v| v.as_str()) + .unwrap_or("") + ); + return Ok(None); + } + + // [FIX #1344] Check manual proxy disable BEFORE quota protection + // This ensures manually disabled accounts (non-quota reasons) are skipped first + let is_proxy_disabled = account + .get("proxy_disabled") + .and_then(|v| v.as_bool()) + .unwrap_or(false); + + let disabled_reason = account + .get("proxy_disabled_reason") + .and_then(|v| v.as_str()) + .unwrap_or(""); + + if is_proxy_disabled && disabled_reason != "quota_protection" { + // Account was manually disabled (non-quota protection reason) + tracing::debug!( + "Account skipped due to manual disable: {:?} (email={}, reason={})", + path, + account + .get("email") + .and_then(|v| v.as_str()) + .unwrap_or(""), + disabled_reason + ); + return Ok(None); + } + + // Quota protection check - only handles quota protection logic + // This allows auto-recovery of accounts whose quota has been restored + if self.check_and_protect_quota(&mut account, path).await { + tracing::debug!( + "Account skipped due to quota protection: {:?} (email={})", + path, + account + .get("email") + .and_then(|v| v.as_str()) + .unwrap_or("") + ); + return Ok(None); + } + + // [Compatibility] Re-check proxy_disabled after quota protection migration + // If account was disabled by old quota protection but quota restored, above check clears it + // This ensures we don't load accounts that are still disabled + if account + .get("proxy_disabled") + .and_then(|v| v.as_bool()) + .unwrap_or(false) + { + tracing::debug!( + "Skipping proxy-disabled account file: 
{:?} (email={})", + path, + account + .get("email") + .and_then(|v| v.as_str()) + .unwrap_or("") + ); + return Ok(None); + } + + // [FIX] Skip forbidden accounts (quota.is_forbidden) to prevent reselection after restart + if account + .get("quota") + .and_then(|q| q.get("is_forbidden")) + .and_then(|v| v.as_bool()) + .unwrap_or(false) + { + tracing::debug!( + "Skipping forbidden account file: {:?} (email={})", + path, + account + .get("email") + .and_then(|v| v.as_str()) + .unwrap_or("") + ); + return Ok(None); + } + + // Check validation block + if account + .get("validation_blocked") + .and_then(|v| v.as_bool()) + .unwrap_or(false) + { + let block_until = account + .get("validation_blocked_until") + .and_then(|v| v.as_i64()) + .unwrap_or(0); + + let now = chrono::Utc::now().timestamp(); + + if now < block_until { + tracing::debug!( + "Skipping validation-blocked account: {:?} (email={}, blocked until {})", + path, + account + .get("email") + .and_then(|v| v.as_str()) + .unwrap_or(""), + chrono::DateTime::from_timestamp(block_until, 0) + .map(|dt| dt.format("%H:%M:%S").to_string()) + .unwrap_or_else(|| block_until.to_string()) + ); + return Ok(None); + } else { + tracing::info!( + "Validation block expired for account: {:?} (email={}), clearing...", + path, + account + .get("email") + .and_then(|v| v.as_str()) + .unwrap_or("") + ); + account["validation_blocked"] = serde_json::Value::Bool(false); + account["validation_blocked_until"] = serde_json::Value::Null; + account["validation_blocked_reason"] = serde_json::Value::Null; + + if let Ok(json_str) = serde_json::to_string_pretty(&account) { + let _ = std::fs::write(path, json_str); + } + } + } + + // Extract required fields + let account_id = account["id"] + .as_str() + .ok_or("缺少 id 字段")? + .to_string(); + + let email = account["email"] + .as_str() + .ok_or("缺少 email 字段")? 
+ .to_string(); + + let token_obj = account["token"].as_object().ok_or("缺少 token 字段")?; + + let access_token = token_obj["access_token"] + .as_str() + .ok_or("缺少 access_token")? + .to_string(); + + let refresh_token = token_obj["refresh_token"] + .as_str() + .ok_or("缺少 refresh_token")? + .to_string(); + + let expires_in = token_obj["expires_in"] + .as_i64() + .ok_or("缺少 expires_in")?; + + let timestamp = token_obj["expiry_timestamp"] + .as_i64() + .ok_or("缺少 expiry_timestamp")?; + + let project_id = token_obj + .get("project_id") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + + let subscription_tier = account + .get("quota") + .and_then(|q| q.get("subscription_tier")) + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + + let remaining_quota = account + .get("quota") + .and_then(|q| self.calculate_quota_stats(q)); + + let protected_models: HashSet = account + .get("protected_models") + .and_then(|v| v.as_array()) + .map(|arr| { + arr.iter() + .filter_map(|v| v.as_str()) + .map(|s| s.to_string()) + .collect() + }) + .unwrap_or_default(); + + let mut model_quotas = HashMap::new(); + if let Some(models) = account + .get("quota") + .and_then(|q| q.get("models")) + .and_then(|m| m.as_array()) + { + for m in models { + if let (Some(name), Some(pct)) = ( + m.get("name").and_then(|v| v.as_str()), + m.get("percentage").and_then(|v| v.as_i64()), + ) { + model_quotas.insert(name.to_string(), pct as i32); + } + } + } + + let health_score = self + .health_scores + .get(&account_id) + .map(|v| *v) + .unwrap_or(1.0); + + Ok(Some(ProxyToken { + account_id, + access_token, + refresh_token, + expires_in, + timestamp, + email, + account_path: path.clone(), + project_id, + subscription_tier, + remaining_quota, + protected_models, + health_score, + model_quotas, + verification_needed: account + .get("verification_needed") + .and_then(|v| v.as_bool()) + .unwrap_or(false), + verification_url: account + .get("verification_url") + .and_then(|v| v.as_str()) + .map(|s| 
s.to_string()), + // [FIX] Extract reset_time from quota.models (not from root!) + reset_time: self.extract_earliest_reset_time(&account), + validation_blocked: account + .get("validation_blocked") + .and_then(|v| v.as_bool()) + .unwrap_or(false), + validation_blocked_until: account + .get("validation_blocked_until") + .and_then(|v| v.as_i64()) + .unwrap_or(0), + is_forbidden: account + .get("quota") + .and_then(|q| q.get("is_forbidden")) + .and_then(|v| v.as_bool()) + .unwrap_or(false), + })) + } +} diff --git a/src-tauri/src/proxy/token_manager/manager.rs b/src-tauri/src/proxy/token_manager/manager.rs new file mode 100644 index 000000000..d2802dbd9 --- /dev/null +++ b/src-tauri/src/proxy/token_manager/manager.rs @@ -0,0 +1,463 @@ +// Token Manager Core Structure + +use dashmap::DashMap; +use std::path::PathBuf; +use std::sync::atomic::AtomicUsize; +use std::sync::Arc; +use tokio_util::sync::CancellationToken; + +use super::models::ProxyToken; +use crate::proxy::rate_limit::RateLimitTracker; +use crate::proxy::sticky_config::StickySessionConfig; + +/// Central token manager for Google account pool +pub struct TokenManager { + pub(crate) tokens: Arc>, + pub(crate) current_index: Arc, + pub(crate) last_used_account: Arc>>, + pub(crate) data_dir: PathBuf, + pub(crate) rate_limit_tracker: Arc, + pub(crate) sticky_config: Arc>, + pub(crate) session_accounts: Arc>, + pub(crate) health_scores: Arc>, + pub(crate) active_requests: Arc>, + pub(crate) circuit_breaker_config: Arc>, + pub circuit_breaker: DashMap, + /// [FIX #820] Preferred account ID for fixed account mode + pub(crate) preferred_account_id: Arc>>, + /// [NEW] Cancellation token for graceful shutdown + cancel_token: CancellationToken, + /// [NEW] Handle for auto-cleanup background task + auto_cleanup_handle: Arc>>>, +} + +impl TokenManager { + /// Create a new TokenManager + pub fn new(data_dir: PathBuf) -> Self { + Self { + tokens: Arc::new(DashMap::new()), + current_index: Arc::new(AtomicUsize::new(0)), + 
last_used_account: Arc::new(tokio::sync::Mutex::new(None)), + data_dir, + rate_limit_tracker: Arc::new(RateLimitTracker::new()), + sticky_config: Arc::new(tokio::sync::RwLock::new(StickySessionConfig::default())), + session_accounts: Arc::new(DashMap::new()), + health_scores: Arc::new(DashMap::new()), + active_requests: Arc::new(DashMap::new()), + circuit_breaker_config: Arc::new(tokio::sync::RwLock::new( + crate::models::CircuitBreakerConfig::default(), + )), + circuit_breaker: DashMap::new(), + preferred_account_id: Arc::new(tokio::sync::RwLock::new(None)), + cancel_token: CancellationToken::new(), + auto_cleanup_handle: Arc::new(tokio::sync::Mutex::new(None)), + } + } + + /// Get the number of loaded tokens + pub fn len(&self) -> usize { + self.tokens.len() + } + + /// Check if token pool is empty + pub fn is_empty(&self) -> bool { + self.tokens.is_empty() + } + + /// Start auto-cleanup background task with cancellation support + pub async fn start_auto_cleanup(&self) { + let tracker = self.rate_limit_tracker.clone(); + let session_map = self.session_accounts.clone(); + let circuit_breaker_clone = self.circuit_breaker.clone(); + let cancel = self.cancel_token.child_token(); + + let handle = tokio::spawn(async move { + let mut interval = tokio::time::interval(std::time::Duration::from_secs(15)); + let mut session_cleanup_interval = 0; + + loop { + tokio::select! 
{ + _ = cancel.cancelled() => { + tracing::info!("Auto-cleanup task received cancel signal"); + break; + } + _ = interval.tick() => { + let cleaned = tracker.cleanup_expired(); + if cleaned > 0 { + tracing::info!( + "🧹 Auto-cleanup: Removed {} expired rate limit record(s)", + cleaned + ); + } + + // Clean expired circuit breaker records + let now = std::time::Instant::now(); + let mut cb_cleaned = 0; + circuit_breaker_clone.retain(|_, (fail_time, _)| { + if now.duration_since(*fail_time).as_secs() > 600 { + cb_cleaned += 1; + false + } else { + true + } + }); + if cb_cleaned > 0 { + tracing::info!( + "🔓 Circuit Breaker: Unblocked {} recovered accounts", + cb_cleaned + ); + } + + // Session cleanup every 10 mins + session_cleanup_interval += 1; + if session_cleanup_interval >= 40 { + session_cleanup_interval = 0; + let now = std::time::Instant::now(); + let expiry = std::time::Duration::from_secs(24 * 3600); + let mut removed_sessions = 0; + + session_map.retain(|_, (_, ts)| { + if now.duration_since(*ts) > expiry { + removed_sessions += 1; + false + } else { + true + } + }); + + if removed_sessions > 0 { + tracing::info!( + "🧹 Session Cleanup: Removed {} expired sessions", + removed_sessions + ); + } + } + } + } + } + }); + + // Abort old task if exists (prevent task leak), then store new handle + let mut guard = self.auto_cleanup_handle.lock().await; + if let Some(old) = guard.take() { + old.abort(); + tracing::warn!("Aborted previous auto-cleanup task"); + } + *guard = Some(handle); + + tracing::info!("✅ Rate limit & Session auto-cleanup task started"); + } + + /// Graceful shutdown with timeout + /// + /// # Arguments + /// * `timeout` - Maximum time to wait for tasks to complete + pub async fn graceful_shutdown(&self, timeout: std::time::Duration) { + tracing::info!("Initiating graceful shutdown of background tasks..."); + + // Send cancel signal to all background tasks + self.cancel_token.cancel(); + + // Wait for tasks to complete with timeout + match 
tokio::time::timeout(timeout, self.abort_background_tasks()).await { + Ok(_) => tracing::info!("All background tasks cleaned up gracefully"), + Err(_) => tracing::warn!( + "Graceful cleanup timed out after {:?}, tasks were force-aborted", + timeout + ), + } + } + + /// Abort and wait for all background tasks to complete + pub async fn abort_background_tasks(&self) { + Self::abort_task(&self.auto_cleanup_handle, "Auto-cleanup task").await; + } + + /// Abort a single background task and log the result + /// + /// # Arguments + /// * `handle` - Mutex reference to the task handle + /// * `task_name` - Task name for logging + async fn abort_task( + handle: &tokio::sync::Mutex>>, + task_name: &str, + ) { + let Some(handle) = handle.lock().await.take() else { + return; + }; + + handle.abort(); + match handle.await { + Ok(()) => tracing::debug!("{} completed", task_name), + Err(e) if e.is_cancelled() => tracing::info!("{} aborted", task_name), + Err(e) => tracing::warn!("{} error: {}", task_name, e), + } + } + + /// Update circuit breaker configuration at runtime + pub async fn update_circuit_breaker_config(&self, config: crate::models::CircuitBreakerConfig) { + let mut w = self.circuit_breaker_config.write().await; + *w = config; + tracing::info!("🛡️ Circuit Breaker config updated: enabled={}", w.enabled); + } + + /// Get circuit breaker configuration + pub async fn get_circuit_breaker_config(&self) -> crate::models::CircuitBreakerConfig { + self.circuit_breaker_config.read().await.clone() + } + + /// Report account failure for circuit breaker + pub fn report_account_failure(&self, account_id: &str, status_code: u16, error_msg: &str) { + let should_block = matches!(status_code, 402 | 429 | 401); + + if should_block { + let now = std::time::Instant::now(); + self.circuit_breaker.insert( + account_id.to_string(), + (now, format!("Error {}: {}", status_code, error_msg)), + ); + tracing::warn!( + "🚫 [Circuit Breaker] Blocking account {} due to error {}: {}", + account_id, + 
status_code, + error_msg + ); + } + } + + /// Report account needing validation (Gemini 403 VALIDATION_REQUIRED) + pub fn report_account_validation_required(&self, account_id: &str, verification_url: &str) { + // [FIX] Check if account exists in index before writing + let exists = match crate::modules::account::storage::load_account_index() { + Ok(index) => index.accounts.iter().any(|s| s.id == account_id), + Err(_) => false, + }; + + if !exists { + tracing::warn!("report_account_validation_required: Account {} not in index, skipping", account_id); + self.tokens.remove(account_id); + return; + } + + if let Some(mut token) = self.tokens.get_mut(account_id) { + token.verification_needed = true; + token.verification_url = Some(verification_url.to_string()); + tracing::warn!("⚠️ Account {} marked as needing verification", token.email); + + let path = token.account_path.clone(); + drop(token); + + let url = verification_url.to_string(); + let aid = account_id.to_string(); + + tokio::task::spawn_blocking(move || { + match std::fs::read_to_string(&path) { + Ok(content) => { + if let Ok(mut json) = serde_json::from_str::(&content) { + json["proxy_disabled"] = serde_json::Value::Bool(true); + json["proxy_disabled_reason"] = + serde_json::Value::String("verification_required".to_string()); + json["verification_needed"] = serde_json::Value::Bool(true); + json["verification_url"] = serde_json::Value::String(url); + + if let Ok(new_content) = serde_json::to_string_pretty(&json) { + let _ = std::fs::write(&path, new_content); + tracing::info!( + "💾 Account {} updated on disk (Verification Required)", + aid + ); + } + } + } + Err(e) => { + tracing::error!("Failed to update account file for verification: {}", e) + } + } + }); + } + } + + /// Record successful request - increase health score + pub fn record_success(&self, account_id: &str) { + self.health_scores + .entry(account_id.to_string()) + .and_modify(|s| *s = (*s + 0.05).min(1.0)) + .or_insert(1.0); + tracing::debug!("📈 
Health score increased for account {}", account_id); + } + + /// Record failed request - decrease health score + pub fn record_failure(&self, account_id: &str) { + self.health_scores + .entry(account_id.to_string()) + .and_modify(|s| *s = (*s - 0.2).max(0.0)) + .or_insert(0.8); + tracing::warn!("📉 Health score decreased for account {}", account_id); + } + + /// Report 429 penalty - heavily decrease health score + pub fn report_429_penalty(&self, account_id: &str) { + if let Some(mut score) = self.health_scores.get_mut(account_id) { + let old_score = *score; + *score = (*score * 0.5).max(0.01); + tracing::warn!( + "⚠️ Account {} hit 429! Health penalty: {:.2} -> {:.2}", + account_id, + old_score, + *score + ); + } + } + + /// Get account ID by email + pub fn get_account_id_by_email(&self, email: &str) -> Option { + for entry in self.tokens.iter() { + if entry.email == email { + return Some(entry.account_id.clone()); + } + } + None + } + + /// Convert email to account_id (internal helper) + pub(crate) fn email_to_account_id(&self, email: &str) -> Option { + self.tokens + .iter() + .find(|entry| entry.value().email == email) + .map(|entry| entry.value().account_id.clone()) + } + + /// Get effective account count (considering scheduling mode) + pub async fn effective_len(&self) -> usize { + let config = self.sticky_config.read().await; + if matches!( + config.mode, + crate::proxy::sticky_config::SchedulingMode::Selected + ) { + config.selected_accounts.len() + } else { + self.tokens.len() + } + } + + // ========================================================================= + // [FIX #820] Preferred Account Management + // ========================================================================= + + /// Set preferred account ID (fixed account mode) + pub async fn set_preferred_account(&self, account_id: Option) { + let mut preferred = self.preferred_account_id.write().await; + if let Some(ref id) = account_id { + tracing::info!("[FIX #820] Preferred account set to: 
{}", id); + } else { + tracing::info!("[FIX #820] Preferred account cleared"); + } + *preferred = account_id; + } + + /// Get current preferred account ID + pub async fn get_preferred_account(&self) -> Option { + self.preferred_account_id.read().await.clone() + } + + // ========================================================================= + // [FIX] Account Removal - Prevent resurrection after delete + // ========================================================================= + + /// Remove account from TokenManager completely + /// + /// This must be called BEFORE deleting the account file to prevent + /// race conditions where persist_token() could recreate the account. + pub fn remove_account(&self, account_id: &str) { + // Remove from token pool + if self.tokens.remove(account_id).is_some() { + tracing::info!("🗑️ Removed account {} from token pool", account_id); + } + + // Remove health score + self.health_scores.remove(account_id); + + // Remove from circuit breaker + self.circuit_breaker.remove(account_id); + + // Remove active request counter + self.active_requests.remove(account_id); + + // Clear any session bindings to this account + self.session_accounts.retain(|_, (aid, _)| aid != account_id); + } + + /// Remove multiple accounts from TokenManager + pub fn remove_accounts(&self, account_ids: &[String]) { + for account_id in account_ids { + self.remove_account(account_id); + } + tracing::info!("🗑️ Batch removed {} accounts from token pool", account_ids.len()); + } + + /// Check if account exists in token pool + pub fn has_account(&self, account_id: &str) -> bool { + self.tokens.contains_key(account_id) + } + + // ========================================================================= + // [FIX #1585] Forbidden Account Management (403 handling) + // ========================================================================= + + /// Set is_forbidden status for an account (called when proxy encounters 403) + /// This marks the account as forbidden and 
clears any sticky session bindings + pub async fn set_forbidden(&self, account_id: &str, reason: &str) -> Result<(), String> { + // 1. Persist to disk - update quota.is_forbidden in account JSON + let path = self.data_dir.join("accounts").join(format!("{}.json", account_id)); + if !path.exists() { + return Err(format!("Account file not found: {:?}", path)); + } + + let content = std::fs::read_to_string(&path) + .map_err(|e| format!("Failed to read account file: {}", e))?; + + let mut account: serde_json::Value = serde_json::from_str(&content) + .map_err(|e| format!("Failed to parse account JSON: {}", e))?; + + // Update quota.is_forbidden + if let Some(quota) = account.get_mut("quota") { + quota["is_forbidden"] = serde_json::Value::Bool(true); + } else { + // Create quota object if not exists + account["quota"] = serde_json::json!({ + "models": [], + "last_updated": chrono::Utc::now().timestamp(), + "is_forbidden": true + }); + } + + // 2. Clear sticky session bindings for this account + self.session_accounts.retain(|_, (aid, _)| aid != account_id); + + // 3. 
Remove from active token pool immediately to prevent any further selection + self.remove_account(account_id); + + let json_str = serde_json::to_string_pretty(&account) + .map_err(|e| format!("Failed to serialize account JSON: {}", e))?; + + std::fs::write(&path, json_str) + .map_err(|e| format!("Failed to write account file: {}", e))?; + + tracing::warn!( + "🚫 Account {} marked as forbidden (403): {}", + account_id, + truncate_reason(reason, 100) + ); + + Ok(()) + } +} + +/// Truncate long reason strings +pub(crate) fn truncate_reason(reason: &str, max_len: usize) -> String { + if reason.len() <= max_len { + reason.to_string() + } else { + format!("{}...", &reason[..max_len - 3]) + } +} diff --git a/src-tauri/src/proxy/token_manager/mod.rs b/src-tauri/src/proxy/token_manager/mod.rs new file mode 100644 index 000000000..b05d04203 --- /dev/null +++ b/src-tauri/src/proxy/token_manager/mod.rs @@ -0,0 +1,14 @@ +// Token Manager Module +// Handles OAuth token lifecycle, account selection, rate limiting, and scheduling + +mod models; +mod manager; +mod loading; +mod quota; +mod selection; // Now a directory module with submodules +mod rate_limiting; +mod persistence; +mod scheduling; + +// Re-export main types +pub use manager::TokenManager; diff --git a/src-tauri/src/proxy/token_manager/models.rs b/src-tauri/src/proxy/token_manager/models.rs new file mode 100644 index 000000000..76286d890 --- /dev/null +++ b/src-tauri/src/proxy/token_manager/models.rs @@ -0,0 +1,54 @@ +// Token Manager Data Models + +use dashmap::DashMap; +use std::collections::{HashMap, HashSet}; +use std::path::PathBuf; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; + +/// RAII Guard for token lease - automatically releases connection on drop +#[derive(Debug)] +pub struct TokenLease { + pub access_token: String, + pub project_id: String, + pub email: String, + pub account_id: String, + pub(crate) active_requests: Arc>, +} + +impl Drop for TokenLease { + fn drop(&mut self) { + if 
let Some(counter) = self.active_requests.get(&self.account_id) { + counter.fetch_sub(1, Ordering::SeqCst); + tracing::debug!( + "⬇️ Connection released: {} (active: {})", + self.email, + counter.load(Ordering::SeqCst) + ); + } + } +} + +/// Represents a proxy-enabled Google account token +#[derive(Debug, Clone)] +pub struct ProxyToken { + pub account_id: String, + pub access_token: String, + pub refresh_token: String, + pub expires_in: i64, + pub timestamp: i64, + pub email: String, + pub account_path: PathBuf, + pub project_id: Option, + pub subscription_tier: Option, // "FREE" | "PRO" | "ULTRA" + pub remaining_quota: Option, + pub protected_models: HashSet, + pub health_score: f32, + pub model_quotas: HashMap, + pub verification_needed: bool, + pub verification_url: Option, + pub reset_time: Option, // [FIX] Quota reset timestamp for priority sorting + pub validation_blocked: bool, // [FIX] Temporary block for VALIDATION_REQUIRED + pub validation_blocked_until: i64, // [FIX] Timestamp until which account is blocked + pub is_forbidden: bool, +} diff --git a/src-tauri/src/proxy/token_manager/persistence.rs b/src-tauri/src/proxy/token_manager/persistence.rs new file mode 100644 index 000000000..e1561ec1a --- /dev/null +++ b/src-tauri/src/proxy/token_manager/persistence.rs @@ -0,0 +1,379 @@ +// Token Persistence Logic + +use super::manager::{truncate_reason, TokenManager}; + +impl TokenManager { + /// [FIX] Check if account exists in index before any file operation + fn account_exists_in_index(account_id: &str) -> bool { + match crate::modules::account::storage::load_account_index() { + Ok(index) => index.accounts.iter().any(|s| s.id == account_id), + Err(_) => false, + } + } + + /// Save project ID to account file + pub(crate) async fn save_project_id( + &self, + account_id: &str, + project_id: &str, + ) -> Result<(), String> { + // [FIX] Check if account exists in index before saving + if !Self::account_exists_in_index(account_id) { + 
tracing::warn!("save_project_id: Account {} not in index, skipping", account_id); + return Ok(()); + } + + let entry = self.tokens.get(account_id).ok_or("账号不存在")?; + + let path = &entry.account_path; + + let mut content: serde_json::Value = serde_json::from_str( + &std::fs::read_to_string(path).map_err(|e| format!("读取文件失败: {}", e))?, + ) + .map_err(|e| format!("解析 JSON 失败: {}", e))?; + + content["token"]["project_id"] = serde_json::Value::String(project_id.to_string()); + + let json_str = serde_json::to_string_pretty(&content) + .map_err(|e| format!("序列化 JSON 失败: {}", e))?; + + std::fs::write(path, json_str).map_err(|e| format!("写入文件失败: {}", e))?; + + tracing::debug!("已保存 project_id 到账号 {}", account_id); + Ok(()) + } + + /// Clear cached project ID from memory and account file + pub async fn clear_project_id_cache(&self, account_id: &str) -> Result<(), String> { + if !Self::account_exists_in_index(account_id) { + tracing::warn!( + "clear_project_id_cache: Account {} not in index, skipping", + account_id + ); + self.tokens.remove(account_id); + return Ok(()); + } + + if let Some(mut entry) = self.tokens.get_mut(account_id) { + entry.project_id = None; + } + + let path = if let Some(entry) = self.tokens.get(account_id) { + entry.account_path.clone() + } else { + self.data_dir + .join("accounts") + .join(format!("{}.json", account_id)) + }; + + let mut content: serde_json::Value = serde_json::from_str( + &std::fs::read_to_string(&path).map_err(|e| format!("读取文件失败: {}", e))?, + ) + .map_err(|e| format!("解析 JSON 失败: {}", e))?; + + if let Some(token_obj) = content.get_mut("token").and_then(|v| v.as_object_mut()) { + token_obj.insert("project_id".to_string(), serde_json::Value::Null); + } else { + return Err("账号文件缺少 token 对象,无法清理 project_id 缓存".to_string()); + } + + let json_str = serde_json::to_string_pretty(&content) + .map_err(|e| format!("序列化 JSON 失败: {}", e))?; + + std::fs::write(&path, json_str).map_err(|e| format!("写入文件失败: {}", e))?; + tracing::warn!("Cleared 
cached project_id for account {} ({:?})", account_id, path); + Ok(()) + } + + /// Save refreshed token to account file + pub(crate) async fn save_refreshed_token( + &self, + account_id: &str, + token_response: &crate::modules::oauth::TokenResponse, + ) -> Result<(), String> { + // [FIX] Check if account exists in index before saving + if !Self::account_exists_in_index(account_id) { + tracing::warn!("save_refreshed_token: Account {} not in index, skipping", account_id); + return Ok(()); + } + + let entry = self.tokens.get(account_id).ok_or("账号不存在")?; + + let path = &entry.account_path; + + let mut content: serde_json::Value = serde_json::from_str( + &std::fs::read_to_string(path).map_err(|e| format!("读取文件失败: {}", e))?, + ) + .map_err(|e| format!("解析 JSON 失败: {}", e))?; + + let now = chrono::Utc::now().timestamp(); + + content["token"]["access_token"] = + serde_json::Value::String(token_response.access_token.clone()); + content["token"]["expires_in"] = + serde_json::Value::Number(token_response.expires_in.into()); + content["token"]["expiry_timestamp"] = + serde_json::Value::Number((now + token_response.expires_in).into()); + + let json_str = serde_json::to_string_pretty(&content) + .map_err(|e| format!("序列化 JSON 失败: {}", e))?; + + std::fs::write(path, json_str).map_err(|e| format!("写入文件失败: {}", e))?; + + tracing::debug!("已保存刷新后的 token 到账号 {}", account_id); + Ok(()) + } + + /// Disable an account + pub(crate) async fn disable_account( + &self, + account_id: &str, + reason: &str, + ) -> Result<(), String> { + // [FIX] Check if account exists in index before saving + if !Self::account_exists_in_index(account_id) { + tracing::warn!("disable_account: Account {} not in index, skipping", account_id); + self.tokens.remove(account_id); + return Ok(()); + } + + let path = if let Some(entry) = self.tokens.get(account_id) { + entry.account_path.clone() + } else { + self.data_dir + .join("accounts") + .join(format!("{}.json", account_id)) + }; + + let mut content: 
serde_json::Value = serde_json::from_str( + &std::fs::read_to_string(&path).map_err(|e| format!("读取文件失败: {}", e))?, + ) + .map_err(|e| format!("解析 JSON 失败: {}", e))?; + + let now = chrono::Utc::now().timestamp(); + content["disabled"] = serde_json::Value::Bool(true); + content["disabled_at"] = serde_json::Value::Number(now.into()); + content["disabled_reason"] = serde_json::Value::String(truncate_reason(reason, 800)); + + let json_str = serde_json::to_string_pretty(&content) + .map_err(|e| format!("序列化 JSON 失败: {}", e))?; + + std::fs::write(&path, json_str).map_err(|e| format!("写入文件失败: {}", e))?; + + self.tokens.remove(account_id); + + tracing::warn!("Account disabled: {} ({:?})", account_id, path); + Ok(()) + } + + /// Set validation block for an account + pub async fn set_validation_block_public( + &self, + account_id: &str, + block_until: i64, + reason: &str, + ) -> Result<(), String> { + self.set_validation_block(account_id, block_until, reason) + .await + } + + /// Internal validation block setter + async fn set_validation_block( + &self, + account_id: &str, + block_until: i64, + reason: &str, + ) -> Result<(), String> { + // [FIX] Check if account exists in index before saving + if !Self::account_exists_in_index(account_id) { + tracing::warn!("set_validation_block: Account {} not in index, skipping", account_id); + self.tokens.remove(account_id); + return Ok(()); + } + + let path = if let Some(entry) = self.tokens.get(account_id) { + entry.account_path.clone() + } else { + self.data_dir + .join("accounts") + .join(format!("{}.json", account_id)) + }; + + let mut content: serde_json::Value = serde_json::from_str( + &std::fs::read_to_string(&path).map_err(|e| format!("读取文件失败: {}", e))?, + ) + .map_err(|e| format!("解析 JSON 失败: {}", e))?; + + content["validation_blocked"] = serde_json::Value::Bool(true); + content["validation_blocked_until"] = serde_json::Value::Number(block_until.into()); + content["validation_blocked_reason"] = + 
serde_json::Value::String(truncate_reason(reason, 500)); + + let json_str = serde_json::to_string_pretty(&content) + .map_err(|e| format!("序列化 JSON 失败: {}", e))?; + + std::fs::write(&path, json_str).map_err(|e| format!("写入文件失败: {}", e))?; + + self.tokens.remove(account_id); + + tracing::warn!( + "Account validation blocked until {}: {} ({:?})", + chrono::DateTime::from_timestamp(block_until, 0) + .map(|dt| dt.format("%Y-%m-%d %H:%M:%S").to_string()) + .unwrap_or_else(|| block_until.to_string()), + account_id, + path + ); + Ok(()) + } + + /// Get token by email (for warmup scenarios) + pub async fn get_token_by_email( + &self, + email: &str, + ) -> Result<(String, String, String, u64), String> { + let token_info = { + let mut found = None; + for entry in self.tokens.iter() { + let token = entry.value(); + if token.email == email { + found = Some(( + token.account_id.clone(), + token.access_token.clone(), + token.refresh_token.clone(), + token.timestamp, + token.expires_in, + chrono::Utc::now().timestamp(), + token.project_id.clone(), + )); + break; + } + } + found + }; + + let (account_id, current_access_token, refresh_token, timestamp, expires_in, now, project_id_opt) = + match token_info { + Some(info) => info, + None => return Err(format!("未找到账号: {}", email)), + }; + + let project_id = project_id_opt + .unwrap_or_else(|| crate::proxy::project_resolver::DEFAULT_PROJECT_ID.to_string()); + + if now < timestamp + expires_in - 300 { + return Ok((current_access_token, project_id, email.to_string(), 0)); + } + + tracing::info!("[Warmup] Token for {} is expiring, refreshing...", email); + + match crate::modules::oauth::refresh_access_token(&refresh_token, Some(&account_id)).await { + Ok(token_response) => { + tracing::info!("[Warmup] Token refresh successful for {}", email); + let new_now = chrono::Utc::now().timestamp(); + + if let Some(mut entry) = self.tokens.get_mut(&account_id) { + entry.access_token = token_response.access_token.clone(); + entry.expires_in = 
token_response.expires_in; + entry.timestamp = new_now; + } + + let _ = self + .save_refreshed_token(&account_id, &token_response) + .await; + + Ok(( + token_response.access_token, + project_id, + email.to_string(), + 0, + )) + } + Err(e) => Err(format!( + "[Warmup] Token refresh failed for {}: {}", + email, e + )), + } + } + + /// Add a new account + pub async fn add_account(&self, email: &str, refresh_token: &str) -> Result<(), String> { + // [FIX] Check if account exists in the index before adding + // This prevents resurrection of deleted accounts via persist_token race condition + let index = crate::modules::account::storage::load_account_index() + .map_err(|e| format!("Failed to load account index: {}", e))?; + + // If account with this email was recently deleted (not in index), skip + let email_exists = index.accounts.iter().any(|s| s.email == email); + if !email_exists { + // Check if we're trying to add a completely new account or resurrect a deleted one + // If email is not in index and we have no token for it, this is a new account - allow + // If email is not in index but we had a token, this might be resurrection - check carefully + let had_token = self.tokens.iter().any(|entry| entry.value().email == email); + if had_token { + tracing::warn!( + "[FIX] Skipping add_account for deleted email: {} (token still in memory)", + email + ); + return Ok(()); + } + } + + let token_info = crate::modules::oauth::refresh_access_token(refresh_token, None) + .await + .map_err(|e| format!("Invalid refresh token: {}", e))?; + + let project_id = + crate::proxy::project_resolver::fetch_project_id(&token_info.access_token) + .await + .unwrap_or_else(|_| crate::proxy::project_resolver::DEFAULT_PROJECT_ID.to_string()); + + let email_clone = email.to_string(); + let refresh_token_clone = refresh_token.to_string(); + + tokio::task::spawn_blocking(move || { + let token_data = crate::models::TokenData::new( + token_info.access_token, + refresh_token_clone, + 
token_info.expires_in, + Some(email_clone.clone()), + Some(project_id), + None, + ); + + crate::modules::account::upsert_account(email_clone, None, token_data) + }) + .await + .map_err(|e| format!("Task join error: {}", e))? + .map_err(|e| format!("Failed to save account: {}", e))?; + + self.reload_all_accounts().await.map(|_| ()) + } + + /// Exchange OAuth code for refresh token + pub async fn exchange_code(&self, code: &str, redirect_uri: &str) -> Result { + crate::modules::oauth::exchange_code(code, redirect_uri) + .await + .and_then(|t| { + t.refresh_token + .ok_or_else(|| "No refresh token returned by Google".to_string()) + }) + } + + /// Get OAuth URL with custom redirect + pub fn get_oauth_url_with_redirect(&self, redirect_uri: &str, state: &str) -> String { + crate::modules::oauth::get_auth_url(redirect_uri, state) + } + + /// Get user info from refresh token + pub async fn get_user_info( + &self, + refresh_token: &str, + ) -> Result { + let token = crate::modules::oauth::refresh_access_token(refresh_token, None) + .await + .map_err(|e| format!("刷新 Access Token 失败: {}", e))?; + + crate::modules::oauth::get_user_info(&token.access_token, None).await + } +} diff --git a/src-tauri/src/proxy/token_manager/quota.rs b/src-tauri/src/proxy/token_manager/quota.rs new file mode 100644 index 000000000..79cfe9dd4 --- /dev/null +++ b/src-tauri/src/proxy/token_manager/quota.rs @@ -0,0 +1,353 @@ +// Quota Protection Logic + +use super::manager::TokenManager; +use std::path::PathBuf; + +impl TokenManager { + /// [FIX] Check if account exists in index by extracting ID from path + fn account_exists_by_path(account_path: &PathBuf) -> bool { + let account_id = account_path + .file_stem() + .and_then(|s| s.to_str()) + .unwrap_or(""); + + if account_id.is_empty() { + return false; + } + + match crate::modules::account::storage::load_account_index() { + Ok(index) => index.accounts.iter().any(|s| s.id == account_id), + Err(_) => false, + } + } + + /// Check if account should be 
quota protected
+    pub(crate) async fn check_and_protect_quota(
+        &self,
+        account_json: &mut serde_json::Value,
+        account_path: &PathBuf,
+    ) -> bool {
+        // [FIX] Check if account exists in index before any operations
+        if !Self::account_exists_by_path(account_path) {
+            tracing::warn!("check_and_protect_quota: Account {:?} not in index, skipping", account_path);
+            return false;
+        }
+
+        let config = match crate::modules::config::load_app_config() {
+            Ok(cfg) => cfg.quota_protection,
+            Err(_) => return false,
+        };
+
+        if !config.enabled {
+            return false;
+        }
+
+        let quota = match account_json.get("quota") {
+            Some(q) => q.clone(),
+            None => return false,
+        };
+
+        let is_proxy_disabled = account_json
+            .get("proxy_disabled")
+            .and_then(|v| v.as_bool())
+            .unwrap_or(false);
+
+        let reason = account_json
+            .get("proxy_disabled_reason")
+            .and_then(|v| v.as_str())
+            .unwrap_or("");
+
+        if is_proxy_disabled {
+            if reason == "quota_protection" {
+                return self
+                    .check_and_restore_quota(account_json, account_path, &quota, &config)
+                    .await;
+            }
+            return true;
+        }
+
+        let models = match quota.get("models").and_then(|m| m.as_array()) {
+            Some(m) => m,
+            None => return false,
+        };
+
+        let threshold = config.threshold_percentage as i32;
+        let mut changed = false;
+
+        for model in models {
+            let name = model.get("name").and_then(|v| v.as_str()).unwrap_or("");
+            // [FIX] Normalize model name to standard ID for proper matching
+            let standard_id = crate::proxy::common::model_mapping::normalize_to_standard_id(name)
+                .unwrap_or_else(|| name.to_string());
+            if !config.monitored_models.iter().any(|m| m == &standard_id) {
+                continue;
+            }
+
+            let percentage = model
+                .get("percentage")
+                .and_then(|v| v.as_i64())
+                .unwrap_or(0) as i32;
+            let account_id = account_json
+                .get("id")
+                .and_then(|v| v.as_str())
+                .unwrap_or("unknown")
+                .to_string();
+
+            if percentage <= threshold {
+                // [FIX] Pass normalized standard_id instead of raw name for consistency
+                if self
.trigger_quota_protection(
+                        account_json,
+                        &account_id,
+                        account_path,
+                        percentage,
+                        threshold,
+                        &standard_id,
+                    )
+                    .await
+                    .unwrap_or(false)
+                {
+                    changed = true;
+                }
+            } else {
+                // [FIX] Use normalized standard_id for consistency with trigger
+                let protected_models = account_json
+                    .get("protected_models")
+                    .and_then(|v| v.as_array());
+                let is_protected = protected_models
+                    .map_or(false, |arr| arr.iter().any(|m| m.as_str() == Some(&standard_id)));
+
+                if is_protected {
+                    if self
+                        .restore_quota_protection(account_json, &account_id, account_path, &standard_id)
+                        .await
+                        .unwrap_or(false)
+                    {
+                        changed = true;
+                    }
+                }
+            }
+        }
+
+        let _ = changed;
+        false
+    }
+
+    /// Calculate max remaining quota percentage
+    pub(crate) fn calculate_quota_stats(&self, quota: &serde_json::Value) -> Option<i32> {
+        let models = match quota.get("models").and_then(|m| m.as_array()) {
+            Some(m) => m,
+            None => return None,
+        };
+
+        let mut max_percentage = 0;
+        let mut has_data = false;
+
+        for model in models {
+            if let Some(pct) = model.get("percentage").and_then(|v| v.as_i64()) {
+                let pct_i32 = pct as i32;
+                if pct_i32 > max_percentage {
+                    max_percentage = pct_i32;
+                }
+                has_data = true;
+            }
+        }
+
+        if has_data {
+            Some(max_percentage)
+        } else {
+            None
+        }
+    }
+
+    /// Trigger quota protection for a specific model
+    async fn trigger_quota_protection(
+        &self,
+        account_json: &mut serde_json::Value,
+        account_id: &str,
+        account_path: &PathBuf,
+        current_val: i32,
+        threshold: i32,
+        model_name: &str,
+    ) -> Result<bool, String> {
+        // [FIX] Check if account exists in index before writing
+        if !Self::account_exists_by_path(account_path) {
+            tracing::warn!("trigger_quota_protection: Account {} not in index, skipping", account_id);
+            return Ok(false);
+        }
+
+        if account_json.get("protected_models").is_none() {
+            account_json["protected_models"] = serde_json::Value::Array(Vec::new());
+        }
+
+        let protected_models = account_json["protected_models"].as_array_mut().unwrap();
+
+        if
!protected_models + .iter() + .any(|m| m.as_str() == Some(model_name)) + { + protected_models.push(serde_json::Value::String(model_name.to_string())); + + tracing::info!( + "账号 {} 的模型 {} 因配额受限({}% <= {}%)已被加入保护列表", + account_id, + model_name, + current_val, + threshold + ); + + let json_str = serde_json::to_string_pretty(account_json) + .map_err(|e| format!("序列化 JSON 失败: {}", e))?; + + // [FIX] Use tokio::fs::write instead of blocking std::fs::write + tokio::fs::write(account_path, json_str) + .await + .map_err(|e| format!("写入文件失败: {}", e))?; + + return Ok(true); + } + + Ok(false) + } + + /// Check and restore quota from account-level protection + async fn check_and_restore_quota( + &self, + account_json: &mut serde_json::Value, + account_path: &PathBuf, + quota: &serde_json::Value, + config: &crate::models::QuotaProtectionConfig, + ) -> bool { + // [FIX] Check if account exists in index before writing + if !Self::account_exists_by_path(account_path) { + tracing::warn!("check_and_restore_quota: Account {:?} not in index, skipping", account_path); + return false; + } + + tracing::info!( + "正在迁移账号 {} 从全局配额保护模式至模型级保护模式", + account_json + .get("email") + .and_then(|v| v.as_str()) + .unwrap_or("unknown") + ); + + account_json["proxy_disabled"] = serde_json::Value::Bool(false); + account_json["proxy_disabled_reason"] = serde_json::Value::Null; + account_json["proxy_disabled_at"] = serde_json::Value::Null; + + let threshold = config.threshold_percentage as i32; + let mut protected_list = Vec::new(); + + if let Some(models) = quota.get("models").and_then(|m| m.as_array()) { + for model in models { + let name = model.get("name").and_then(|v| v.as_str()).unwrap_or(""); + // [FIX] Normalize model name before comparing with monitored_models + let standard_id = crate::proxy::common::model_mapping::normalize_to_standard_id(name) + .unwrap_or_else(|| name.to_string()); + if !config.monitored_models.iter().any(|m| m == &standard_id) { + continue; + } + + let percentage = model + 
.get("percentage") + .and_then(|v| v.as_i64()) + .unwrap_or(0) as i32; + if percentage <= threshold { + protected_list.push(serde_json::Value::String(standard_id)); + } + } + } + + account_json["protected_models"] = serde_json::Value::Array(protected_list); + + if let Ok(json_str) = serde_json::to_string_pretty(account_json) { + // [FIX] Use tokio::fs::write instead of blocking std::fs::write + if let Err(e) = tokio::fs::write(account_path, json_str).await { + tracing::error!( + "[check_and_restore_quota] Failed to write account file: {}", + e + ); + } + } else { + tracing::error!("[check_and_restore_quota] Failed to serialize account json"); + } + + false + } + + /// Restore quota protection for a specific model + async fn restore_quota_protection( + &self, + account_json: &mut serde_json::Value, + account_id: &str, + account_path: &PathBuf, + model_name: &str, + ) -> Result { + // [FIX] Check if account exists in index before writing + if !Self::account_exists_by_path(account_path) { + tracing::warn!("restore_quota_protection: Account {} not in index, skipping", account_id); + return Ok(false); + } + + if let Some(arr) = account_json + .get_mut("protected_models") + .and_then(|v| v.as_array_mut()) + { + let original_len = arr.len(); + arr.retain(|m| m.as_str() != Some(model_name)); + + if arr.len() < original_len { + tracing::info!( + "账号 {} 的模型 {} 配额已恢复,移出保护列表", + account_id, + model_name + ); + let json_str = serde_json::to_string_pretty(account_json) + .map_err(|e| format!("序列化 JSON 失败: {}", e))?; + + // [FIX] Use tokio::fs::write instead of blocking std::fs::write + tokio::fs::write(account_path, json_str) + .await + .map_err(|e| format!("写入文件失败: {}", e))?; + return Ok(true); + } + } + + Ok(false) + } + + /// Read quota percentage for a specific model from JSON file + /// Used for precise sorting by target model's quota instead of max + /// + /// # Arguments + /// * `account_path` - Path to account JSON file + /// * `model_name` - Target model name (already 
normalized)
+    #[allow(dead_code)]
+    pub fn get_model_quota_from_json(account_path: &PathBuf, model_name: &str) -> Option<i32> {
+        let content = std::fs::read_to_string(account_path).ok()?;
+        let account: serde_json::Value = serde_json::from_str(&content).ok()?;
+        let models = account.get("quota")?.get("models")?.as_array()?;
+
+        for model in models {
+            if let Some(name) = model.get("name").and_then(|v| v.as_str()) {
+                if crate::proxy::common::model_mapping::normalize_to_standard_id(name)
+                    .unwrap_or_else(|| name.to_string())
+                    == model_name
+                {
+                    return model
+                        .get("percentage")
+                        .and_then(|v| v.as_i64())
+                        .map(|p| p as i32);
+                }
+            }
+        }
+        None
+    }
+
+    /// Test helper: public access to get_model_quota_from_json
+    #[cfg(test)]
+    pub fn get_model_quota_from_json_for_test(account_path: &PathBuf, model_name: &str) -> Option<i32> {
+        Self::get_model_quota_from_json(account_path, model_name)
+    }
+}
diff --git a/src-tauri/src/proxy/token_manager/rate_limiting.rs b/src-tauri/src/proxy/token_manager/rate_limiting.rs
new file mode 100644
index 000000000..1ad354efc
--- /dev/null
+++ b/src-tauri/src/proxy/token_manager/rate_limiting.rs
@@ -0,0 +1,286 @@
+// Rate Limiting Logic
+
+use super::manager::TokenManager;
+
+impl TokenManager {
+    /// Mark account as rate limited (sync version)
+    pub async fn mark_rate_limited(
+        &self,
+        email: &str,
+        status: u16,
+        retry_after_header: Option<&str>,
+        error_body: &str,
+    ) {
+        let config = self.circuit_breaker_config.read().await.clone();
+        if !config.enabled {
+            return;
+        }
+
+        let key = self
+            .email_to_account_id(email)
+            .unwrap_or_else(|| email.to_string());
+
+        self.rate_limit_tracker.parse_from_error(
+            &key,
+            status,
+            retry_after_header,
+            error_body,
+            None,
+            &config.backoff_steps,
+        );
+    }
+
+    /// Mark account as rate limited (async version with real-time quota refresh)
+    pub async fn mark_rate_limited_async(
+        &self,
+        email: &str,
+        status: u16,
+        retry_after_header: Option<&str>,
+        error_body: &str,
+        model: Option<&str>,
) { + let config = self.circuit_breaker_config.read().await.clone(); + if !config.enabled { + return; + } + + let account_id = self + .email_to_account_id(email) + .unwrap_or_else(|| email.to_string()); + + let has_explicit_retry_time = + retry_after_header.is_some() || error_body.contains("quotaResetDelay"); + + if has_explicit_retry_time { + if let Some(m) = model { + tracing::debug!( + "账号 {} 的模型 {} 的 429 响应包含 quotaResetDelay", + account_id, + m + ); + } + self.rate_limit_tracker.parse_from_error( + &account_id, + status, + retry_after_header, + error_body, + model.map(|s| s.to_string()), + &config.backoff_steps, + ); + return; + } + + let reason = if error_body.to_lowercase().contains("model_capacity") { + crate::proxy::rate_limit::RateLimitReason::ModelCapacityExhausted + } else if error_body.to_lowercase().contains("exhausted") + || error_body.to_lowercase().contains("quota") + { + crate::proxy::rate_limit::RateLimitReason::QuotaExhausted + } else { + crate::proxy::rate_limit::RateLimitReason::Unknown + }; + + if let Some(m) = model { + tracing::info!( + "账号 {} 的模型 {} 的 429 响应未包含 quotaResetDelay,尝试实时刷新配额...", + account_id, + m + ); + } + + if self + .fetch_and_lock_with_realtime_quota(&account_id, reason, model.map(|s| s.to_string())) + .await + { + tracing::info!("账号 {} 已使用实时配额精确锁定", account_id); + return; + } + + if self.set_precise_lockout(&account_id, reason, model.map(|s| s.to_string())) { + tracing::info!("账号 {} 已使用本地缓存配额锁定", account_id); + return; + } + + tracing::warn!("账号 {} 无法获取配额刷新时间,使用指数退避策略", account_id); + self.rate_limit_tracker.parse_from_error( + &account_id, + status, + retry_after_header, + error_body, + model.map(|s| s.to_string()), + &config.backoff_steps, + ); + } + + /// Check if account is rate limited (async) + pub async fn is_rate_limited(&self, account_id: &str, model: Option<&str>) -> bool { + let config = self.circuit_breaker_config.read().await; + if !config.enabled { + return false; + } + 
self.rate_limit_tracker.is_rate_limited(account_id, model) + } + + /// Check if account is rate limited (sync version for iterators) + pub fn is_rate_limited_sync(&self, account_id: &str, model: Option<&str>) -> bool { + let config = self.circuit_breaker_config.blocking_read(); + if !config.enabled { + return false; + } + self.rate_limit_tracker.is_rate_limited(account_id, model) + } + + /// Get remaining wait time for rate limit reset + #[allow(dead_code)] + pub fn get_rate_limit_reset_seconds(&self, account_id: &str) -> Option { + self.rate_limit_tracker.get_reset_seconds(account_id) + } + + /// Clean expired rate limit records + #[allow(dead_code)] + pub fn clean_expired_rate_limits(&self) { + self.rate_limit_tracker.cleanup_expired(); + } + + /// Clear rate limit for specific account + pub fn clear_rate_limit(&self, account_id: &str) -> bool { + self.rate_limit_tracker.clear(account_id) + } + + /// Clear all rate limits + pub fn clear_all_rate_limits(&self) { + self.rate_limit_tracker.clear_all(); + } + + /// Mark account request as successful + pub fn mark_account_success(&self, email: &str, model: Option<&str>) { + if let Some(account_id) = self.email_to_account_id(email) { + self.rate_limit_tracker.mark_success(&account_id, model); + } else { + self.rate_limit_tracker.mark_success(email, model); + } + } + + /// Get quota reset time from account file + pub fn get_quota_reset_time(&self, email: &str) -> Option { + let accounts_dir = self.data_dir.join("accounts"); + + if let Ok(entries) = std::fs::read_dir(&accounts_dir) { + for entry in entries.flatten() { + if let Ok(content) = std::fs::read_to_string(entry.path()) { + if let Ok(account) = serde_json::from_str::(&content) { + if account.get("email").and_then(|e| e.as_str()) == Some(email) { + if let Some(models) = account + .get("quota") + .and_then(|q| q.get("models")) + .and_then(|m| m.as_array()) + { + let mut earliest_reset: Option<&str> = None; + for model in models { + if let Some(reset_time) = + 
model.get("reset_time").and_then(|r| r.as_str()) + { + if !reset_time.is_empty() { + match earliest_reset { + Some(current_min) => { + if reset_time < current_min { + earliest_reset = Some(reset_time); + } + } + None => { + earliest_reset = Some(reset_time); + } + } + } + } + } + if let Some(reset) = earliest_reset { + return Some(reset.to_string()); + } + } + } + } + } + } + } + None + } + + /// Set precise lockout using quota reset time + pub fn set_precise_lockout( + &self, + email: &str, + reason: crate::proxy::rate_limit::RateLimitReason, + model: Option, + ) -> bool { + if let Some(reset_time_str) = self.get_quota_reset_time(email) { + tracing::info!("找到账号 {} 的配额刷新时间: {}", email, reset_time_str); + self.rate_limit_tracker + .set_lockout_until_iso(email, &reset_time_str, reason, model) + } else { + tracing::debug!("未找到账号 {} 的配额刷新时间", email); + false + } + } + + /// Fetch and lock with real-time quota refresh + pub async fn fetch_and_lock_with_realtime_quota( + &self, + email: &str, + reason: crate::proxy::rate_limit::RateLimitReason, + model: Option, + ) -> bool { + let access_token = { + let mut found_token: Option = None; + for entry in self.tokens.iter() { + if entry.value().email == email { + found_token = Some(entry.value().access_token.clone()); + break; + } + } + found_token + }; + + let access_token = match access_token { + Some(t) => t, + None => { + tracing::warn!("无法找到账号 {} 的 access_token", email); + return false; + } + }; + + tracing::info!("账号 {} 正在实时刷新配额...", email); + match crate::modules::quota::fetch_quota(&access_token, email).await { + Ok((quota_data, _project_id)) => { + let earliest_reset = quota_data + .models + .iter() + .filter_map(|m| { + if !m.reset_time.is_empty() { + Some(m.reset_time.as_str()) + } else { + None + } + }) + .min(); + + if let Some(reset_time_str) = earliest_reset { + tracing::info!( + "账号 {} 实时配额刷新成功,reset_time: {}", + email, + reset_time_str + ); + self.rate_limit_tracker + .set_lockout_until_iso(email, 
reset_time_str, reason, model) + } else { + tracing::warn!("账号 {} 配额刷新成功但未找到 reset_time", email); + false + } + } + Err(e) => { + tracing::warn!("账号 {} 实时配额刷新失败: {:?}", email, e); + false + } + } + } +} diff --git a/src-tauri/src/proxy/token_manager/scheduling.rs b/src-tauri/src/proxy/token_manager/scheduling.rs new file mode 100644 index 000000000..410785f91 --- /dev/null +++ b/src-tauri/src/proxy/token_manager/scheduling.rs @@ -0,0 +1,29 @@ +// Scheduling and Session Management + +use super::manager::TokenManager; +use crate::proxy::sticky_config::StickySessionConfig; + +impl TokenManager { + /// Get current scheduling configuration + pub async fn get_sticky_config(&self) -> StickySessionConfig { + self.sticky_config.read().await.clone() + } + + /// Update scheduling configuration + pub async fn update_sticky_config(&self, new_config: StickySessionConfig) { + let mut config = self.sticky_config.write().await; + *config = new_config; + tracing::debug!("Scheduling configuration updated: {:?}", *config); + } + + /// Clear session binding for a specific session + #[allow(dead_code)] + pub fn clear_session_binding(&self, session_id: &str) { + self.session_accounts.remove(session_id); + } + + /// Clear all session bindings + pub fn clear_all_sessions(&self) { + self.session_accounts.clear(); + } +} diff --git a/src-tauri/src/proxy/token_manager/selection/mod.rs b/src-tauri/src/proxy/token_manager/selection/mod.rs new file mode 100644 index 000000000..f26dab6c0 --- /dev/null +++ b/src-tauri/src/proxy/token_manager/selection/mod.rs @@ -0,0 +1,792 @@ +// Token Selection Module +// Handles token acquisition, scheduling, and rotation logic + +mod scoring; +mod sticky; +mod round_robin; +mod token_ops; +mod p2c; + +use super::manager::TokenManager; +use super::models::{ProxyToken, TokenLease}; +use std::collections::HashSet; +use std::sync::atomic::{AtomicUsize, Ordering}; + +impl TokenManager { + /// Get a token with timeout protection + pub async fn get_token( + &self, + 
quota_group: &str, + force_rotate: bool, + session_id: Option<&str>, + target_model: &str, + ) -> Result { + // [FIX] Timeout for deadlock detection - reduced from 120s to 5s + const TOKEN_ACQUISITION_TIMEOUT_SECS: u64 = 5; + let timeout_duration = std::time::Duration::from_secs(TOKEN_ACQUISITION_TIMEOUT_SECS); + match tokio::time::timeout( + timeout_duration, + self.get_token_internal(quota_group, force_rotate, session_id, target_model), + ) + .await + { + Ok(result) => result, + Err(_) => Err(format!( + "Token acquisition timeout ({}s) - system too busy or deadlock detected", + TOKEN_ACQUISITION_TIMEOUT_SECS + )), + } + } + + /// Internal token selection logic + async fn get_token_internal( + &self, + quota_group: &str, + force_rotate: bool, + session_id: Option<&str>, + target_model: &str, + ) -> Result { + // [FIX] Process pending reload accounts from quota protection + let pending_accounts = crate::proxy::server::take_pending_reload_accounts(); + for account_id in pending_accounts { + if let Err(e) = self.reload_account(&account_id).await { + tracing::warn!("[Quota] Failed to reload account {}: {}", account_id, e); + } + } + + let mut tokens_snapshot: Vec = + self.tokens.iter().map(|e| e.value().clone()).collect(); + let total = tokens_snapshot.len(); + if total == 0 { + return Err("Token pool is empty".to_string()); + } + + // Normalize target model + let normalized_target = + crate::proxy::common::model_mapping::normalize_to_standard_id(target_model) + .unwrap_or_else(|| target_model.to_string()); + + // Check quota protection config + let quota_protection_enabled = crate::modules::config::load_app_config() + .map(|cfg| cfg.quota_protection.enabled) + .unwrap_or(false); + + // ===== [FIX #820] Fixed Account Mode: Prioritize preferred account ===== + if let Some(token_lease) = self.try_preferred_account(&tokens_snapshot, &normalized_target, quota_protection_enabled).await { + return Ok(token_lease); + } + // ===== [END FIX #820] ===== + + // Check circuit 
breaker config + let cb_enabled = self.circuit_breaker_config.read().await.enabled; + + // Filter tokens based on quota and circuit breaker + self.filter_tokens(&mut tokens_snapshot, target_model, &normalized_target, cb_enabled); + + if tokens_snapshot.is_empty() { + return Err(format!( + "No accounts available with remaining quota > 0 for model '{}'", + target_model + )); + } + + // Sort tokens by priority + self.sort_tokens(&mut tokens_snapshot); + + // Log top candidates + tracing::debug!( + "🔄 [Token Rotation] Candidates (Top 5): {:?}", + tokens_snapshot + .iter() + .take(5) + .map(|t| { + let active = self + .active_requests + .get(&t.account_id) + .map(|c| c.load(Ordering::SeqCst)) + .unwrap_or(0); + format!( + "{} [Active:{}, T:{:?}, Q:{:?}]", + t.email, active, t.subscription_tier, t.remaining_quota + ) + }) + .collect::>() + ); + + // Apply scheduling mode filters + let scheduling = self.sticky_config.read().await.clone(); + tracing::info!( + "🔍 [Debug] get_token_internal | Mode: {:?} | Selected Accs: {} | Target: {}", + scheduling.mode, + scheduling.selected_accounts.len(), + target_model + ); + + // Apply selected mode filtering + self.apply_selected_mode_filter(&mut tokens_snapshot, target_model, &normalized_target, &scheduling)?; + + let total = tokens_snapshot.len(); + let last_used_account_id = if quota_group != "image_gen" { + let last_used = self.last_used_account.lock().await; + last_used.clone() + } else { + None + }; + + let mut attempted: HashSet = HashSet::new(); + let mut last_error: Option = None; + let mut need_update_last_used: Option<(String, std::time::Instant)> = None; + + for attempt in 0..total { + let rotate = force_rotate || attempt > 0; + let mut target_token: Option = None; + + // Sticky session handling + if !rotate + && session_id.is_some() + && scheduling.mode != crate::proxy::sticky_config::SchedulingMode::PerformanceFirst + { + target_token = self + .try_sticky_session( + session_id.unwrap(), + &tokens_snapshot, + &attempted, 
+ &normalized_target, + quota_protection_enabled, + &scheduling, + ) + .await; + } + + // 60s lock handling + if target_token.is_none() + && !rotate + && quota_group != "image_gen" + && scheduling.mode != crate::proxy::sticky_config::SchedulingMode::PerformanceFirst + { + target_token = self.try_60s_lock( + &tokens_snapshot, + &attempted, + &normalized_target, + quota_protection_enabled, + &last_used_account_id, + ).await; + + // Round-robin or P2C selection based on scheduling mode + if target_token.is_none() { + target_token = self.select_by_mode( + &tokens_snapshot, + &mut attempted, + &normalized_target, + quota_protection_enabled, + session_id, + &scheduling, + &mut need_update_last_used, + ).await; + } + } else if target_token.is_none() { + // Pure round-robin or P2C + target_token = self.select_by_mode( + &tokens_snapshot, + &mut attempted, + &normalized_target, + quota_protection_enabled, + session_id, + &scheduling, + &mut need_update_last_used, + ).await; + } + + let mut token = match target_token { + Some(t) => t, + None => { + match self.try_optimistic_reset(&tokens_snapshot, &attempted).await { + Ok(t) => t, + Err(e) => return Err(e), + } + } + }; + + // Refresh token if needed + if let Err(e) = self.try_refresh_token(&mut token, &mut attempted, &mut last_error, quota_group, &last_used_account_id, &mut need_update_last_used).await { + if e == "continue" { + continue; + } + return Err(e); + } + + // Ensure project ID + let project_id = match self.ensure_project_id(&mut token, &mut attempted, &mut last_error, quota_group, &last_used_account_id, &mut need_update_last_used).await { + Ok(pid) => pid, + Err(e) => { + if e == "continue" { + continue; + } + return Err(e); + } + }; + + // Update last used if needed + if let Some((new_account_id, new_time)) = need_update_last_used { + if quota_group != "image_gen" { + let mut last_used = self.last_used_account.lock().await; + if new_account_id.is_empty() { + *last_used = None; + } else { + *last_used = 
Some((new_account_id, new_time)); + } + } + } + + // Increment active requests + self.active_requests + .entry(token.account_id.clone()) + .or_insert(AtomicUsize::new(0)) + .fetch_add(1, Ordering::SeqCst); + + let active_count = self + .active_requests + .get(&token.account_id) + .unwrap() + .load(Ordering::SeqCst); + tracing::debug!( + "⬆️ Connection acquired: {} (active: {})", + token.email, + active_count + ); + + return Ok(TokenLease { + access_token: token.access_token, + project_id, + email: token.email, + account_id: token.account_id.clone(), + active_requests: self.active_requests.clone(), + }); + } + + Err(last_error.unwrap_or_else(|| "All accounts failed".to_string())) + } + + /// Try to use preferred account (Fixed Account Mode) + async fn try_preferred_account( + &self, + tokens_snapshot: &[ProxyToken], + normalized_target: &str, + quota_protection_enabled: bool, + ) -> Option { + let preferred_id = self.preferred_account_id.read().await.clone(); + if let Some(ref pref_id) = preferred_id { + if let Some(preferred_token) = tokens_snapshot.iter().find(|t| &t.account_id == pref_id) + { + let is_rate_limited = self + .is_rate_limited(&preferred_token.account_id, Some(normalized_target)) + .await; + let is_quota_protected = quota_protection_enabled + && preferred_token + .protected_models + .contains(normalized_target); + + if !is_rate_limited && !is_quota_protected { + tracing::info!( + "🔒 [FIX #820] Using preferred account: {} (fixed mode)", + preferred_token.email + ); + + let mut token = preferred_token.clone(); + + // Refresh token if needed (5 min before expiry) + let now = chrono::Utc::now().timestamp(); + if now >= token.timestamp - 300 { + tracing::debug!("Preferred account {} token expiring, refreshing...", token.email); + // [FIX #1583] Pass account_id for proper context + match crate::modules::oauth::refresh_access_token(&token.refresh_token, Some(&token.account_id)).await { + Ok(token_response) => { + token.access_token = 
token_response.access_token.clone(); + token.expires_in = token_response.expires_in; + token.timestamp = now + token_response.expires_in; + + if let Some(mut entry) = self.tokens.get_mut(&token.account_id) { + entry.access_token = token.access_token.clone(); + entry.expires_in = token.expires_in; + entry.timestamp = token.timestamp; + } + let _ = self.save_refreshed_token(&token.account_id, &token_response).await; + } + Err(e) => { + tracing::warn!("Preferred account token refresh failed: {}", e); + } + } + } + + // Ensure project_id exists (and purge legacy random mock IDs) + let project_id = if let Some(pid) = &token.project_id { + if crate::proxy::project_resolver::is_legacy_mock_project_id(pid) { + tracing::warn!( + "Preferred account {} has legacy mock project_id, resetting cache", + token.email + ); + if let Some(mut entry) = self.tokens.get_mut(&token.account_id) { + entry.project_id = None; + } + let _ = self.clear_project_id_cache(&token.account_id).await; + + match crate::proxy::project_resolver::fetch_project_id(&token.access_token).await { + Ok(new_pid) => { + if let Some(mut entry) = self.tokens.get_mut(&token.account_id) { + entry.project_id = Some(new_pid.clone()); + } + let _ = self.save_project_id(&token.account_id, &new_pid).await; + new_pid + } + Err(_) => { + let fallback = crate::proxy::project_resolver::DEFAULT_PROJECT_ID.to_string(); + if let Some(mut entry) = self.tokens.get_mut(&token.account_id) { + entry.project_id = Some(fallback.clone()); + } + let _ = self.save_project_id(&token.account_id, &fallback).await; + fallback + } + } + } else { + pid.clone() + } + } else { + match crate::proxy::project_resolver::fetch_project_id(&token.access_token).await { + Ok(pid) => { + if let Some(mut entry) = self.tokens.get_mut(&token.account_id) { + entry.project_id = Some(pid.clone()); + } + let _ = self.save_project_id(&token.account_id, &pid).await; + pid + } + Err(_) => { + let fallback = 
crate::proxy::project_resolver::DEFAULT_PROJECT_ID.to_string(); + if let Some(mut entry) = self.tokens.get_mut(&token.account_id) { + entry.project_id = Some(fallback.clone()); + } + let _ = self.save_project_id(&token.account_id, &fallback).await; + fallback + } + } + }; + + // Increment active requests + self.active_requests + .entry(token.account_id.clone()) + .or_insert(AtomicUsize::new(0)) + .fetch_add(1, Ordering::SeqCst); + + return Some(TokenLease { + access_token: token.access_token, + project_id, + email: token.email, + account_id: token.account_id.clone(), + active_requests: self.active_requests.clone(), + }); + } else { + if is_rate_limited { + tracing::warn!("🔒 [FIX #820] Preferred account {} is rate-limited, falling back to round-robin", preferred_token.email); + } else { + tracing::warn!("🔒 [FIX #820] Preferred account {} is quota-protected for {}, falling back to round-robin", preferred_token.email, normalized_target); + } + } + } else { + tracing::warn!("🔒 [FIX #820] Preferred account {} not found in pool, falling back to round-robin", pref_id); + } + } + None + } + + fn filter_tokens( + &self, + tokens_snapshot: &mut Vec, + target_model: &str, + normalized_target: &str, + cb_enabled: bool, + ) { + let initial_count = tokens_snapshot.len(); + + // [DIAG] Log initial state before filtering + tracing::info!( + "🔍 [Filter START] Model '{}' (normalized: '{}') | {} accounts in pool | CB enabled: {}", + target_model, + normalized_target, + initial_count, + cb_enabled + ); + + // [DIAG] Log details of each account (CHANGED TO INFO FOR DEBUGGING) + for t in tokens_snapshot.iter() { + tracing::info!( + " 📋 {} | forbidden:{} | v_needed:{} | v_blocked:{} | quotas: {:?} | CB: {}", + t.email, + t.is_forbidden, + t.verification_needed, + t.validation_blocked, + t.model_quotas, + self.circuit_breaker.contains_key(&t.account_id) + ); + } + + tokens_snapshot.retain(|t| { + if t.is_forbidden { + tracing::info!( + " ⛔ {} - SKIP: Forbidden account", + t.email + ); + 
return false; + } + + // [NEW] Verification required check (permanent block until manual verification) + if t.verification_needed { + tracing::info!( + " ⛔ {} - SKIP: Verification required (permanent)", + t.email + ); + return false; + } + + // [FIX] Validation blocked check (VALIDATION_REQUIRED temporary block) + if t.validation_blocked { + let now = chrono::Utc::now().timestamp(); + if now < t.validation_blocked_until { + tracing::info!( + " ⛔ {} - SKIP: Validation blocked until {}", + t.email, + t.validation_blocked_until + ); + return false; + } + } + + // NOTE: remaining_quota check removed - it was too aggressive + // The model_quotas check below handles per-model quota correctly + // remaining_quota is max percentage across ALL models, which can be 0 + // even if the target model has available quota + + // NOTE: Rate limit check is done later in select_round_robin() and try_60s_lock() + // Cannot use is_rate_limited_sync() here as it causes blocking_read() deadlock in async context + + // Circuit breaker check + if cb_enabled { + if let Some(fail_entry) = self.circuit_breaker.get(&t.account_id) { + let (fail_time, reason) = fail_entry.value(); + if fail_time.elapsed().as_secs() < 600 { + tracing::info!( + " ⛔ {} - SKIP: Circuit Breaker blocked ({}) [{}s remaining]", + t.email, + reason, + 600 - fail_time.elapsed().as_secs() + ); + return false; + } else { + drop(fail_entry); + self.circuit_breaker.remove(&t.account_id); + } + } + } + + // Model quota check + if let Some(&pct) = t.model_quotas.get(target_model) { + if pct <= 0 { + tracing::info!( + " ⛔ {} - SKIP: Zero quota for target model '{}' (pct={})", + t.email, target_model, pct + ); + return false; + } + } + + if normalized_target != target_model { + if let Some(&pct) = t.model_quotas.get(normalized_target) { + if pct <= 0 { + tracing::info!( + " ⛔ {} - SKIP: Zero quota for normalized model '{}' (pct={})", + t.email, normalized_target, pct + ); + return false; + } + } + } + + // Fuzzy match for related 
models + // [FIX] Only block if quota_model is MORE SPECIFIC (longer) than target + // This prevents "claude-opus-4: 0%" from blocking "claude-opus-4-5-thinking" + // But allows "claude-opus-4-5-thinking: 0%" to block "claude-opus-4-5-thinking" + if !t.model_quotas.is_empty() { + // Check if target is a prefix of quota_model (quota_model is more specific) + let is_more_specific_variant = |quota_model: &str, target: &str| -> bool { + quota_model.len() > target.len() + && quota_model.starts_with(target) + && quota_model.chars() + .nth(target.len()) + .map_or(false, |c| c == '-' || c == '.' || c == ':') + }; + + for (quota_model, &pct) in &t.model_quotas { + if pct <= 0 { + // Direct match always blocks + if quota_model == target_model || quota_model == normalized_target { + tracing::info!( + " ⛔ {} - SKIP: Zero quota for exact model '{}' (pct={})", + t.email, quota_model, pct + ); + return false; + } + + // Only block if quota_model is MORE specific variant of target + // e.g., "gemini-2.5-pro-preview: 0%" blocks "gemini-2.5-pro" + // But "gemini-2.5: 0%" does NOT block "gemini-2.5-pro" + if is_more_specific_variant(quota_model, target_model) + || is_more_specific_variant(quota_model, normalized_target) + { + tracing::info!( + " ⛔ {} - SKIP: Zero quota for more-specific variant '{}' (target: '{}')", + t.email, quota_model, target_model + ); + return false; + } + } + } + } + + true + }); + + // [FIX] Log filtering results for diagnostics + let filtered_count = initial_count - tokens_snapshot.len(); + if filtered_count > 0 { + tracing::info!( + "🔍 [Filter] Model '{}': {} of {} accounts filtered out, {} remaining", + target_model, + filtered_count, + initial_count, + tokens_snapshot.len() + ); + } + } + + /// Apply selected mode filtering + fn apply_selected_mode_filter( + &self, + tokens_snapshot: &mut Vec, + target_model: &str, + normalized_target: &str, + scheduling: &crate::proxy::sticky_config::StickySessionConfig, + ) -> Result<(), String> { + use 
crate::proxy::sticky_config::SchedulingMode; + + // [FIX] Store original tokens for potential fallback when strict_selected=false + let all_tokens_backup = if scheduling.mode == SchedulingMode::Selected && !scheduling.strict_selected { + Some(tokens_snapshot.clone()) + } else { + None + }; + + if scheduling.mode == SchedulingMode::Selected { + let selected_set: HashSet<&String> = scheduling.selected_accounts.iter().collect(); + + tokens_snapshot.retain(|t| { + if !selected_set.contains(&t.account_id) { + return false; + } + + if let Some(allowed_models) = scheduling.selected_models.get(&t.account_id) { + if !allowed_models.is_empty() { + let is_allowed = allowed_models.iter().any(|m| { + m == target_model + || m == normalized_target + || target_model.contains(m) + || m.contains(target_model) + }); + + if !is_allowed { + return false; + } + } + } + + true + }); + + if tokens_snapshot.is_empty() { + // [FIX] Handle strict_selected logic + if scheduling.strict_selected { + // Strict mode: fail immediately, no fallback + return Err(format!( + "Selected mode (strict) is active but no valid accounts match the selection for model '{}'. 
No fallback allowed.", + target_model + )); + } else if let Some(backup) = all_tokens_backup { + // Non-strict mode: fallback to all available accounts + tracing::warn!( + "🔄 [Selected Mode] No selected accounts available for model '{}', falling back to all {} accounts", + target_model, + backup.len() + ); + *tokens_snapshot = backup; + } else { + return Err(format!( + "Selected mode is active but no valid accounts match the selection for model '{}'.", + target_model + )); + } + } else { + tracing::debug!( + "🎯 [Selected Mode] Using subset of {} accounts for model {}{}", + tokens_snapshot.len(), + target_model, + if scheduling.strict_selected { " (strict)" } else { "" } + ); + } + } + + Ok(()) + } + + /// Try 60-second lock on last used account + async fn try_60s_lock( + &self, + tokens_snapshot: &[ProxyToken], + attempted: &HashSet, + normalized_target: &str, + quota_protection_enabled: bool, + last_used_account_id: &Option<(String, std::time::Instant)>, + ) -> Option { + if let Some((account_id, last_time)) = last_used_account_id { + if last_time.elapsed().as_secs() < 60 && !attempted.contains(account_id) { + if let Some(found) = + tokens_snapshot.iter().find(|t| &t.account_id == account_id) + { + if !self + .is_rate_limited(&found.account_id, Some(normalized_target)) + .await + && !(quota_protection_enabled + && found.protected_models.contains(normalized_target)) + { + tracing::debug!( + "60s Window: Force reusing last account: {}", + found.email + ); + return Some(found.clone()); + } + } + } + } + None + } + + /// Select token based on scheduling mode (P2C or Round-Robin) + async fn select_by_mode( + &self, + tokens_snapshot: &[ProxyToken], + attempted: &mut HashSet, + normalized_target: &str, + quota_protection_enabled: bool, + session_id: Option<&str>, + scheduling: &crate::proxy::sticky_config::StickySessionConfig, + need_update_last_used: &mut Option<(String, std::time::Instant)>, + ) -> Option { + use crate::proxy::sticky_config::SchedulingMode; + + if 
scheduling.mode == SchedulingMode::P2C { + // Pre-filter rate limited accounts for P2C (async context) + let mut available_for_p2c: Vec = Vec::new(); + for t in tokens_snapshot.iter() { + if !self.is_rate_limited(&t.account_id, Some(normalized_target)).await { + available_for_p2c.push(t.clone()); + } + } + + if let Some(selected) = self.select_with_p2c( + &available_for_p2c, + attempted, + normalized_target, + quota_protection_enabled, + ) { + *need_update_last_used = Some((selected.account_id.clone(), std::time::Instant::now())); + return Some(selected.clone()); + } + } else { + return self + .select_round_robin( + tokens_snapshot, + attempted, + normalized_target, + quota_protection_enabled, + session_id, + scheduling, + need_update_last_used, + ) + .await; + } + None + } + + /// Try optimistic reset when all accounts are rate-limited + async fn try_optimistic_reset( + &self, + tokens_snapshot: &[ProxyToken], + attempted: &HashSet, + ) -> Result { + // Optimistic reset strategy + let min_wait = tokens_snapshot + .iter() + .filter_map(|t| self.rate_limit_tracker.get_reset_seconds(&t.account_id)) + .min(); + + if let Some(wait_sec) = min_wait { + if wait_sec <= 2 { + let wait_ms = (wait_sec as f64 * 1000.0) as u64; + tracing::warn!( + "All accounts rate-limited but shortest wait is {}s. Applying {}ms buffer...", + wait_sec, wait_ms + ); + + tokio::time::sleep(tokio::time::Duration::from_millis(wait_ms)).await; + + let retry_token = tokens_snapshot.iter().find(|t| { + !attempted.contains(&t.account_id) + && !self.is_rate_limited_sync(&t.account_id, None) + }); + + if let Some(t) = retry_token { + tracing::info!( + "✅ Buffer delay successful! Found available account: {}", + t.email + ); + return Ok(t.clone()); + } else { + // [FIX] Use clear_expired_with_buffer instead of clear_all + // This only clears records expiring within 5s, preserving + // long-term QUOTA_EXHAUSTED locks to prevent cascade 429s + tracing::warn!( + "Buffer delay failed. 
Executing safe optimistic reset (5s buffer)..." + ); + let cleared = self.rate_limit_tracker.clear_expired_with_buffer(5); + + let final_token = tokens_snapshot + .iter() + .find(|t| !attempted.contains(&t.account_id)); + + if let Some(t) = final_token { + tracing::info!( + "✅ Optimistic reset successful! Cleared {} record(s), using account: {}", + cleared, + t.email + ); + return Ok(t.clone()); + } else { + return Err( + "All accounts failed after optimistic reset.".to_string() + ); + } + } + } else { + return Err(format!("All accounts limited. Wait {}s.", wait_sec)); + } + } else { + return Err("All accounts failed or unhealthy.".to_string()); + } + } +} diff --git a/src-tauri/src/proxy/token_manager/selection/p2c.rs b/src-tauri/src/proxy/token_manager/selection/p2c.rs new file mode 100644 index 000000000..c354ff35a --- /dev/null +++ b/src-tauri/src/proxy/token_manager/selection/p2c.rs @@ -0,0 +1,103 @@ +// Power of 2 Choices (P2C) Selection Algorithm + +use super::super::manager::TokenManager; +use super::super::models::ProxyToken; +use std::collections::HashSet; + +impl TokenManager { + /// P2C pool size - select from top N candidates + pub(crate) const P2C_POOL_SIZE: usize = 5; + + /// Check if there are available accounts for a model + pub async fn has_available_account(&self, _quota_group: &str, target_model: &str) -> bool { + let quota_protection_enabled = crate::modules::config::load_app_config() + .map(|cfg| cfg.quota_protection.enabled) + .unwrap_or(false); + + for entry in self.tokens.iter() { + let token = entry.value(); + + if self.is_rate_limited(&token.account_id, None).await { + continue; + } + + if quota_protection_enabled && token.protected_models.contains(target_model) { + continue; + } + + return true; + } + + tracing::info!( + "[Fallback Check] No available Google accounts for model {}", + target_model + ); + false + } + + /// Power of 2 Choices (P2C) selection algorithm + /// Randomly selects 2 from top 5 candidates, returns the one with 
higher quota + /// This avoids "hot spot" issues where all requests go to the same account + /// + /// # Arguments + /// * `candidates` - Pre-sorted candidate token list + /// * `attempted` - Set of already-attempted account IDs + /// * `normalized_target` - Normalized target model name + /// * `quota_protection_enabled` - Whether quota protection is enabled + pub(crate) fn select_with_p2c<'a>( + &self, + candidates: &'a [ProxyToken], + attempted: &HashSet, + normalized_target: &str, + quota_protection_enabled: bool, + ) -> Option<&'a ProxyToken> { + use rand::Rng; + + // Filter available tokens + let available: Vec<&ProxyToken> = candidates + .iter() + .filter(|t| !attempted.contains(&t.account_id)) + .filter(|t| !quota_protection_enabled || !t.protected_models.contains(normalized_target)) + .collect(); + + if available.is_empty() { + return None; + } + if available.len() == 1 { + return Some(available[0]); + } + + // P2C: randomly select 2 from top min(P2C_POOL_SIZE, len) candidates + let pool_size = available.len().min(Self::P2C_POOL_SIZE); + let mut rng = rand::thread_rng(); + + let pick1 = rng.gen_range(0..pool_size); + let mut pick2 = rng.gen_range(0..pool_size); + // Ensure we pick two different candidates + if pick2 == pick1 { + pick2 = (pick1 + 1) % pool_size; + } + + let c1 = available[pick1]; + let c2 = available[pick2]; + + // Select the one with higher quota + let selected = if c1.remaining_quota.unwrap_or(0) >= c2.remaining_quota.unwrap_or(0) { + c1 + } else { + c2 + }; + + tracing::debug!( + "🎲 [P2C] Selected {} ({}%) from [{}({}%), {}({}%)]", + selected.email, + selected.remaining_quota.unwrap_or(0), + c1.email, + c1.remaining_quota.unwrap_or(0), + c2.email, + c2.remaining_quota.unwrap_or(0) + ); + + Some(selected) + } +} diff --git a/src-tauri/src/proxy/token_manager/selection/round_robin.rs b/src-tauri/src/proxy/token_manager/selection/round_robin.rs new file mode 100644 index 000000000..987250a4a --- /dev/null +++ 
b/src-tauri/src/proxy/token_manager/selection/round_robin.rs new file mode 100644 index 000000000..987250a4a --- /dev/null +++ b/src-tauri/src/proxy/token_manager/selection/round_robin.rs @@ -0,0 +1,72 @@ +// Round-Robin Selection Logic + +use super::super::manager::TokenManager; +use super::super::models::ProxyToken; +use std::collections::HashSet; +use std::sync::atomic::Ordering; + +impl TokenManager { + /// Select token using round-robin + pub(crate) async fn select_round_robin( + &self, + tokens_snapshot: &[ProxyToken], + attempted: &mut HashSet<String>, + normalized_target: &str, + quota_protection_enabled: bool, + session_id: Option<&str>, + scheduling: &crate::proxy::sticky_config::StickySessionConfig, + need_update_last_used: &mut Option<(String, std::time::Instant)>, + ) -> Option<ProxyToken> { + use crate::proxy::sticky_config::SchedulingMode; + + let total = tokens_snapshot.len(); + if total == 0 { + return None; + } + + // [FIX] Safe modulo operation to prevent race condition when pool size changes + // Use wrapping arithmetic to handle index overflow gracefully + let raw_index = self.current_index.fetch_add(1, Ordering::SeqCst); + let start_idx = raw_index % total; + + for offset in 0..total { + let idx = (start_idx + offset) % total; + let candidate = &tokens_snapshot[idx]; + + if attempted.contains(&candidate.account_id) { + continue; + } + + if quota_protection_enabled && candidate.protected_models.contains(normalized_target) { + continue; + } + + if self + .is_rate_limited(&candidate.account_id, Some(normalized_target)) + .await + { + continue; + } + + *need_update_last_used = Some((candidate.account_id.clone(), std::time::Instant::now())); + + if let Some(sid) = session_id { + if scheduling.mode != SchedulingMode::PerformanceFirst { + self.session_accounts.insert( + sid.to_string(), + (candidate.account_id.clone(), std::time::Instant::now()), + ); + tracing::debug!( + "Sticky Session: Bound new account {} to session {}", + candidate.email, + sid + ); + } + } + + return Some(candidate.clone()); + } + + None + } +} diff --git a/src-tauri/src/proxy/token_manager/selection/scoring.rs 
b/src-tauri/src/proxy/token_manager/selection/scoring.rs new file mode 100644 index 000000000..9f7db550d --- /dev/null +++ b/src-tauri/src/proxy/token_manager/selection/scoring.rs @@ -0,0 +1,97 @@ +// Token Scoring and Sorting Logic + +use super::super::manager::TokenManager; +use super::super::models::ProxyToken; +use std::sync::atomic::Ordering; + +impl TokenManager { + /// Sort tokens by priority (tier, health, reset_time, connections, quota) + pub(crate) fn sort_tokens(&self, tokens: &mut Vec) { + // [FIX] Reset time threshold: differences < 10 minutes are considered equal priority + const RESET_TIME_THRESHOLD_SECS: i64 = 600; + + tokens.sort_by(|a, b| { + let get_concurrency_limit = |tier: &Option| -> usize { + match tier.as_deref() { + Some(t) if t.contains("ultra") => 8, + Some(t) if t.contains("pro") => 3, + Some(_) => 1, + None => 1, + } + }; + + let limit_a = get_concurrency_limit(&a.subscription_tier); + let limit_b = get_concurrency_limit(&b.subscription_tier); + + let active_a = self + .active_requests + .get(&a.account_id) + .map(|c| c.load(Ordering::SeqCst)) + .unwrap_or(0); + let active_b = self + .active_requests + .get(&b.account_id) + .map(|c| c.load(Ordering::SeqCst)) + .unwrap_or(0); + + let overloaded_a = active_a >= limit_a; + let overloaded_b = active_b >= limit_b; + + // 1. Overloaded accounts go last + if overloaded_a != overloaded_b { + if overloaded_a { + return std::cmp::Ordering::Greater; + } else { + return std::cmp::Ordering::Less; + } + } + + // 2. Compare by subscription tier (ULTRA > PRO > FREE) + let tier_priority = |tier: &Option| match tier.as_deref() { + Some(t) if t.contains("ultra") || t.contains("ULTRA") => 0, + Some(t) if t.contains("pro") || t.contains("PRO") => 1, + Some(t) if t.contains("free") || t.contains("FREE") => 2, + _ => 3, + }; + + let tier_cmp = + tier_priority(&a.subscription_tier).cmp(&tier_priority(&b.subscription_tier)); + if tier_cmp != std::cmp::Ordering::Equal { + return tier_cmp; + } + + // 3. 
Compare by health score (higher is better) + let health_cmp = b + .health_score + .partial_cmp(&a.health_score) + .unwrap_or(std::cmp::Ordering::Equal); + if health_cmp != std::cmp::Ordering::Equal { + return health_cmp; + } + + // 4. [FIX] Compare by reset time (earlier/closer is better) + // Differences < 10 minutes are considered equal priority to avoid frequent switching + let reset_a = a.reset_time.unwrap_or(i64::MAX); + let reset_b = b.reset_time.unwrap_or(i64::MAX); + let reset_diff = (reset_a - reset_b).abs(); + + if reset_diff >= RESET_TIME_THRESHOLD_SECS { + let reset_cmp = reset_a.cmp(&reset_b); + if reset_cmp != std::cmp::Ordering::Equal { + return reset_cmp; + } + } + + // 5. Compare by active connections (fewer is better) + let active_cmp = active_a.cmp(&active_b); + if active_cmp != std::cmp::Ordering::Equal { + return active_cmp; + } + + // 6. Compare by remaining quota (higher is better) + let quota_a = a.remaining_quota.unwrap_or(0); + let quota_b = b.remaining_quota.unwrap_or(0); + quota_b.cmp(&quota_a) + }); + } +} diff --git a/src-tauri/src/proxy/token_manager/selection/sticky.rs b/src-tauri/src/proxy/token_manager/selection/sticky.rs new file mode 100644 index 000000000..536e0d50a --- /dev/null +++ b/src-tauri/src/proxy/token_manager/selection/sticky.rs @@ -0,0 +1,84 @@ +// Sticky Session Logic + +use super::super::manager::TokenManager; +use super::super::models::ProxyToken; +use std::collections::HashSet; + +impl TokenManager { + /// Try to use sticky session + pub(crate) async fn try_sticky_session( + &self, + session_id: &str, + tokens_snapshot: &[ProxyToken], + attempted: &HashSet<String>, + normalized_target: &str, + quota_protection_enabled: bool, + scheduling: &crate::proxy::sticky_config::StickySessionConfig, + ) -> Option<ProxyToken> { + use crate::proxy::sticky_config::SchedulingMode; + + if let Some(bound_entry) = self.session_accounts.get(session_id) { + let (bound_id, _) = bound_entry.value(); + let bound_id = bound_id.clone(); + drop(bound_entry); + + 
if let Some(bound_token) = tokens_snapshot.iter().find(|t| t.account_id == bound_id) { + let key = self + .email_to_account_id(&bound_token.email) + .unwrap_or_else(|| bound_token.account_id.clone()); + let reset_sec = self + .rate_limit_tracker + .get_remaining_wait(&key, Some(normalized_target)); + + if reset_sec > 0 + && scheduling.mode == SchedulingMode::CacheFirst + && reset_sec <= scheduling.max_wait_seconds + { + tracing::info!( + "Sticky Session: Account {} limited ({}s), waiting...", + bound_token.email, + reset_sec + ); + tokio::time::sleep(std::time::Duration::from_secs(reset_sec)).await; + } + + let reset_sec_after_wait = self + .rate_limit_tracker + .get_remaining_wait(&key, Some(normalized_target)); + + if reset_sec_after_wait > 0 { + tracing::debug!( + "Sticky Session: Bound account {} is rate-limited, unbinding.", + bound_token.email + ); + self.session_accounts.remove(session_id); + } else if !attempted.contains(&bound_id) + && !(quota_protection_enabled + && bound_token.protected_models.contains(normalized_target)) + { + tracing::debug!( + "Sticky Session: Reusing bound account {} for session {}", + bound_token.email, + session_id + ); + if let Some(mut entry) = self.session_accounts.get_mut(session_id) { + entry.value_mut().1 = std::time::Instant::now(); + } + return Some(bound_token.clone()); + } else if quota_protection_enabled + && bound_token.protected_models.contains(normalized_target) + { + tracing::debug!( + "Sticky Session: Bound account {} is quota-protected, unbinding.", + bound_token.email + ); + self.session_accounts.remove(session_id); + } + } else { + tracing::debug!("Sticky Session: Bound account not found, unbinding"); + self.session_accounts.remove(session_id); + } + } + None + } +} diff --git a/src-tauri/src/proxy/token_manager/selection/token_ops.rs b/src-tauri/src/proxy/token_manager/selection/token_ops.rs new file mode 100644 index 000000000..11ef9d66c --- /dev/null +++ 
b/src-tauri/src/proxy/token_manager/selection/token_ops.rs @@ -0,0 +1,134 @@ +// Token Operations: Refresh and Project ID + +use super::super::manager::TokenManager; +use super::super::models::ProxyToken; +use std::collections::HashSet; + +impl TokenManager { + /// Try to refresh token if needed + pub(crate) async fn try_refresh_token( + &self, + token: &mut ProxyToken, + attempted: &mut HashSet, + last_error: &mut Option, + quota_group: &str, + last_used_account_id: &Option<(String, std::time::Instant)>, + need_update_last_used: &mut Option<(String, std::time::Instant)>, + ) -> Result<(), String> { + let now = chrono::Utc::now().timestamp(); + if now >= token.timestamp - 300 { + tracing::debug!("账号 {} 的 token 即将过期,正在刷新...", token.email); + + match crate::modules::oauth::refresh_access_token(&token.refresh_token, Some(&token.account_id)).await { + Ok(token_response) => { + tracing::debug!("Token 刷新成功!"); + token.access_token = token_response.access_token.clone(); + token.expires_in = token_response.expires_in; + token.timestamp = now + token_response.expires_in; + + if let Some(mut entry) = self.tokens.get_mut(&token.account_id) { + entry.access_token = token.access_token.clone(); + entry.expires_in = token.expires_in; + entry.timestamp = token.timestamp; + } + + if let Err(e) = self + .save_refreshed_token(&token.account_id, &token_response) + .await + { + tracing::debug!("保存刷新后的 token 失败 ({}): {}", token.email, e); + } + } + Err(e) => { + tracing::error!("Token 刷新失败 ({}): {}", token.email, e); + if e.contains("\"invalid_grant\"") || e.contains("invalid_grant") { + tracing::error!( + "Disabling account due to invalid_grant ({})", + token.email + ); + let _ = self + .disable_account(&token.account_id, &format!("invalid_grant: {}", e)) + .await; + self.tokens.remove(&token.account_id); + } + *last_error = Some(format!("Token refresh failed: {}", e)); + attempted.insert(token.account_id.clone()); + + if quota_group != "image_gen" { + if matches!(last_used_account_id, 
Some((id, _)) if id == &token.account_id) + { + *need_update_last_used = + Some((String::new(), std::time::Instant::now())); + } + } + return Err("continue".to_string()); + } + } + } + Ok(()) + } + + /// Ensure token has project ID + pub(crate) async fn ensure_project_id( + &self, + token: &mut ProxyToken, + attempted: &mut HashSet, + last_error: &mut Option, + quota_group: &str, + last_used_account_id: &Option<(String, std::time::Instant)>, + need_update_last_used: &mut Option<(String, std::time::Instant)>, + ) -> Result { + if let Some(pid) = &token.project_id { + if crate::proxy::project_resolver::is_legacy_mock_project_id(pid) { + tracing::warn!( + "Detected legacy mock project_id for {}, clearing cache and re-resolving", + token.email + ); + if let Some(mut entry) = self.tokens.get_mut(&token.account_id) { + entry.project_id = None; + } + let _ = self.clear_project_id_cache(&token.account_id).await; + } else { + return Ok(pid.clone()); + } + } + + tracing::debug!("账号 {} 缺少 project_id,尝试获取...", token.email); + match crate::proxy::project_resolver::fetch_project_id(&token.access_token).await { + Ok(pid) => { + if let Some(mut entry) = self.tokens.get_mut(&token.account_id) { + entry.project_id = Some(pid.clone()); + } + let _ = self.save_project_id(&token.account_id, &pid).await; + Ok(pid) + } + Err(e) => { + tracing::warn!( + "Failed to fetch project_id for {}: {}, fallback to default {}", + token.email, + e, + crate::proxy::project_resolver::DEFAULT_PROJECT_ID + ); + + let fallback = crate::proxy::project_resolver::DEFAULT_PROJECT_ID.to_string(); + if let Some(mut entry) = self.tokens.get_mut(&token.account_id) { + entry.project_id = Some(fallback.clone()); + } + let _ = self.save_project_id(&token.account_id, &fallback).await; + + if quota_group != "image_gen" + && matches!(last_used_account_id, Some((id, _)) if id == &token.account_id) + { + *need_update_last_used = Some((token.account_id.clone(), std::time::Instant::now())); + } + + // Keep account 
available with stable fallback instead of dropping it from current round. + *last_error = Some(format!( + "Using fallback project_id for {} after resolver error: {}", + token.email, e + )); + Ok(fallback) + } + } + } +} diff --git a/src-tauri/src/proxy/upstream/client.rs b/src-tauri/src/proxy/upstream/client.rs index e766567e4..fcaf7eb4e 100644 --- a/src-tauri/src/proxy/upstream/client.rs +++ b/src-tauri/src/proxy/upstream/client.rs @@ -1,56 +1,78 @@ // 上游客户端实现 // 基于高性能通讯接口封装 -use std::sync::Arc; -use dashmap::DashMap; use reqwest::{header, Client, Response, StatusCode}; use serde_json::Value; use tokio::time::Duration; use tokio::sync::RwLock; // Cloud Code v1internal endpoints (fallback order: Sandbox → Daily → Prod) -// 优先使用 Sandbox/Daily 环境以避免 Prod环境的 429 错误 (Ref: Issue #1176) +// 优先使用 Sandbox/Daily 环境以避免 Prod环境的 429/403 错误 (Ref: Issue #1176) const V1_INTERNAL_BASE_URL_PROD: &str = "https://cloudcode-pa.googleapis.com/v1internal"; const V1_INTERNAL_BASE_URL_DAILY: &str = "https://daily-cloudcode-pa.googleapis.com/v1internal"; const V1_INTERNAL_BASE_URL_SANDBOX: &str = "https://daily-cloudcode-pa.sandbox.googleapis.com/v1internal"; +use rand::Rng; // [FIX] Add rand for jitter + const V1_INTERNAL_BASE_URL_FALLBACKS: [&str; 3] = [ - V1_INTERNAL_BASE_URL_SANDBOX, // 优先级 1: Sandbox (已知有效且稳定) + V1_INTERNAL_BASE_URL_SANDBOX, // 优先级 1: Sandbox (已知有效且稳定,避免 VALIDATION_REQUIRED) V1_INTERNAL_BASE_URL_DAILY, // 优先级 2: Daily (备用) V1_INTERNAL_BASE_URL_PROD, // 优先级 3: Prod (仅作为兜底) ]; +use std::sync::atomic::{AtomicUsize, Ordering}; + pub struct UpstreamClient { - default_client: Client, - proxy_pool: Option>, - client_cache: DashMap, // proxy_id -> Client + http_client: RwLock, user_agent_override: RwLock>, + preferred_endpoint_index: AtomicUsize, // [NEW] Sticky endpoint index } impl UpstreamClient { - pub fn new( - proxy_config: Option, - proxy_pool: Option>, - ) -> Self { - let default_client = Self::build_client_internal(proxy_config) - .expect("Failed to create default 
HTTP client"); - + pub fn new(proxy_config: Option) -> Self { + let client = Self::build_http_client(proxy_config); Self { - default_client, - proxy_pool, - client_cache: DashMap::new(), + http_client: RwLock::new(client), user_agent_override: RwLock::new(None), + preferred_endpoint_index: AtomicUsize::new(0), } } - /// Internal helper to build a client with optional upstream proxy config - fn build_client_internal(proxy_config: Option) -> Result { + /// [NEW] 设置动态 User-Agent 覆盖 + pub async fn set_user_agent_override(&self, ua: Option) { + let mut lock = self.user_agent_override.write().await; + *lock = ua.clone(); + tracing::info!("UpstreamClient User-Agent override updated: {:?}", ua); + } + + /// [NEW] 获取当前生效的 User-Agent + async fn get_effective_user_agent(&self) -> String { + let ua_override = self.user_agent_override.read().await; + ua_override.as_ref().cloned().unwrap_or_else(|| crate::constants::USER_AGENT.clone()) + } + + /// [NEW] 重建并热更新内部 HTTP 客户端 + pub async fn rebuild_client(&self, proxy_config: Option) { + let new_client = Self::build_http_client(proxy_config); + let mut writer = self.http_client.write().await; + *writer = new_client; + tracing::info!("UpstreamClient underlying HTTP client has been reloaded"); + } + + /// 内部构建 HTTP Client 的逻辑 + fn build_http_client(proxy_config: Option) -> Client { + // [PERF] Connection pool size configurable via env, default 64 for high-load scenarios + let pool_size: usize = std::env::var("ABV_POOL_SIZE") + .ok() + .and_then(|s| s.parse().ok()) + .unwrap_or(64); + let mut builder = Client::builder() - // Connection settings (优化连接复用,减少建立开销) + // Connection settings (optimized for high concurrency) .connect_timeout(Duration::from_secs(20)) - .pool_max_idle_per_host(16) // 每主机最多 16 个空闲连接 - .pool_idle_timeout(Duration::from_secs(90)) // 空闲连接保持 90 秒 - .tcp_keepalive(Duration::from_secs(60)) // TCP 保活探测 60 秒 + .pool_max_idle_per_host(pool_size) // [PERF] Increased from 16 to 64 + 
.pool_idle_timeout(Duration::from_secs(90)) // Keep idle connections for 90s + .tcp_keepalive(Duration::from_secs(60)) // TCP keepalive probe every 60s .timeout(Duration::from_secs(600)) .user_agent(crate::constants::USER_AGENT.as_str()); @@ -63,74 +85,12 @@ impl UpstreamClient { } } - builder.build() - } - - /// Build a client with a specific PoolProxyConfig (from ProxyPool) - fn build_client_with_proxy(&self, proxy_config: crate::proxy::proxy_pool::PoolProxyConfig) -> Result { - // Reuse base settings similar to default client but with specific proxy - Client::builder() - .connect_timeout(Duration::from_secs(20)) - .pool_max_idle_per_host(16) - .pool_idle_timeout(Duration::from_secs(90)) - .tcp_keepalive(Duration::from_secs(60)) - .timeout(Duration::from_secs(600)) - .user_agent(crate::constants::USER_AGENT.as_str()) - .proxy(proxy_config.proxy) // Apply the specific proxy - .build() - } - - /// Set dynamic User-Agent override - pub async fn set_user_agent_override(&self, ua: Option) { - let mut lock = self.user_agent_override.write().await; - *lock = ua; - tracing::debug!("UpstreamClient User-Agent override updated: {:?}", lock); - } - - /// Get current User-Agent - pub async fn get_user_agent(&self) -> String { - let ua_override = self.user_agent_override.read().await; - ua_override.as_ref().cloned().unwrap_or_else(|| crate::constants::USER_AGENT.clone()) - } - - /// Get client for a specific account (or default if no proxy bound) - pub async fn get_client(&self, account_id: Option<&str>) -> Client { - if let Some(pool) = &self.proxy_pool { - if let Some(acc_id) = account_id { - // Try to get per-account proxy - match pool.get_proxy_for_account(acc_id).await { - Ok(Some(proxy_cfg)) => { - // Check cache - if let Some(client) = self.client_cache.get(&proxy_cfg.entry_id) { - return client.clone(); - } - // Build new client and cache it - match self.build_client_with_proxy(proxy_cfg.clone()) { - Ok(client) => { - 
self.client_cache.insert(proxy_cfg.entry_id.clone(), client.clone()); - tracing::info!("Using ProxyPool proxy ID: {} for account: {}", proxy_cfg.entry_id, acc_id); - return client; - } - Err(e) => { - tracing::error!("Failed to build client for proxy {}: {}, falling back to default", proxy_cfg.entry_id, e); - } - } - } - Ok(None) => { - // No proxy found or required for this account, use default - } - Err(e) => { - tracing::error!("Error getting proxy for account {}: {}, falling back to default", acc_id, e); - } - } - } - } - // Fallback to default client - self.default_client.clone() + builder.build().expect("Failed to create HTTP client") } - - /// Build v1internal URL + /// 构建 v1internal URL + /// + /// 构建 API 请求地址 fn build_url(base_url: &str, method: &str, query_string: Option<&str>) -> String { if let Some(qs) = query_string { format!("{}:{}?{}", base_url, method, qs) @@ -139,7 +99,13 @@ impl UpstreamClient { } } - /// Determine if we should try next endpoint (fallback logic) + /// 判断是否应尝试下一个端点 + /// + /// 当遇到以下错误时,尝试切换到备用端点: + /// - 429 Too Many Requests(限流) + /// - 408 Request Timeout(超时) + /// - 404 Not Found(端点不存在) + /// - 5xx Server Error(服务器错误) fn should_try_next_endpoint(status: StatusCode) -> bool { status == StatusCode::TOO_MANY_REQUESTS || status == StatusCode::REQUEST_TIMEOUT @@ -147,19 +113,17 @@ impl UpstreamClient { || status.is_server_error() } - /// Call v1internal API (Basic Method) + /// 调用 v1internal API(基础方法) /// - /// Initiates a basic network request, supporting multi-endpoint auto-fallback. - /// [UPDATED] Takes optional account_id for per-account proxy selection. 
+ /// 发起基础网络请求,支持多端点自动 Fallback pub async fn call_v1_internal( &self, method: &str, access_token: &str, body: Value, query_string: Option<&str>, - account_id: Option<&str>, // [NEW] Account ID for proxy selection ) -> Result { - self.call_v1_internal_with_headers(method, access_token, body, query_string, std::collections::HashMap::new(), account_id).await + self.call_v1_internal_with_headers(method, access_token, body, query_string, std::collections::HashMap::new()).await } /// [FIX #765] 调用 v1internal API,支持透传额外的 Headers @@ -170,11 +134,7 @@ impl UpstreamClient { body: Value, query_string: Option<&str>, extra_headers: std::collections::HashMap, - account_id: Option<&str>, // [NEW] Account ID ) -> Result { - // [NEW] Get client based on account (cached in proxy pool manager) - let client = self.get_client(account_id).await; - // 构建 Headers (所有端点复用) let mut headers = header::HeaderMap::new(); headers.insert( @@ -186,11 +146,12 @@ impl UpstreamClient { header::HeaderValue::from_str(&format!("Bearer {}", access_token)) .map_err(|e| e.to_string())?, ); - - // [NEW] 支持自定义 User-Agent 覆盖 + + // [NEW] Используем динамический User-Agent с поддержкой override + let effective_ua = self.get_effective_user_agent().await; headers.insert( header::USER_AGENT, - header::HeaderValue::from_str(&self.get_user_agent().await) + header::HeaderValue::from_str(&effective_ua) .unwrap_or_else(|e| { tracing::warn!("Invalid User-Agent header value, using fallback: {}", e); header::HeaderValue::from_static("antigravity") @@ -208,12 +169,30 @@ impl UpstreamClient { let mut last_err: Option = None; + // 获取 Client 读锁 + let client_guard = self.http_client.read().await; + + // [FIX] Adaptive Routing: Start with preferred endpoint + // Create an ordered list of indices: [preferred, others...] 
+ let current_preferred = self.preferred_endpoint_index.load(Ordering::Relaxed); + let mut indices: Vec = (0..V1_INTERNAL_BASE_URL_FALLBACKS.len()).collect(); + + // Move preferred to front if valid + if current_preferred < indices.len() { + if let Some(pos) = indices.iter().position(|&x| x == current_preferred) { + indices.remove(pos); + indices.insert(0, current_preferred); + } + } + // 遍历所有端点,失败时自动切换 - for (idx, base_url) in V1_INTERNAL_BASE_URL_FALLBACKS.iter().enumerate() { + for (attempt_idx, &fallback_idx) in indices.iter().enumerate() { + let base_url = V1_INTERNAL_BASE_URL_FALLBACKS[fallback_idx]; let url = Self::build_url(base_url, method, query_string); - let has_next = idx + 1 < V1_INTERNAL_BASE_URL_FALLBACKS.len(); + // Has next if this is not the last attempt + let has_next = attempt_idx + 1 < indices.len(); - let response = client + let response = client_guard .post(&url) .headers(headers.clone()) .json(&body) @@ -224,12 +203,24 @@ impl UpstreamClient { Ok(resp) => { let status = resp.status(); if status.is_success() { - if idx > 0 { + // [FIX] Update preferred endpoint on success if different + let current = self.preferred_endpoint_index.load(Ordering::Relaxed); + if current != fallback_idx { + self.preferred_endpoint_index.store(fallback_idx, Ordering::Relaxed); + tracing::info!( + "✨ Adaptive Routing: Switched preferred endpoint to {} (was {})", + base_url, + V1_INTERNAL_BASE_URL_FALLBACKS[current] + ); + } + + if attempt_idx > 0 { tracing::info!( - "✓ Upstream fallback succeeded | Endpoint: {} | Status: {} | Next endpoints available: {}", + "✓ Upstream fallback succeeded | Endpoint: {} | Status: {} | Attempt: {}/{}", base_url, status, - V1_INTERNAL_BASE_URL_FALLBACKS.len() - idx - 1 + attempt_idx + 1, + indices.len() ); } else { tracing::debug!("✓ Upstream request succeeded | Endpoint: {} | Status: {}", base_url, status); @@ -246,6 +237,11 @@ impl UpstreamClient { method ); last_err = Some(format!("Upstream {} returned {}", base_url, status)); + 
+ // [FIX] Smart Jitter (50-250ms) to prevent thundering herd + let jitter = rand::thread_rng().gen_range(50..250); + tokio::time::sleep(Duration::from_millis(jitter)).await; + continue; } @@ -261,6 +257,11 @@ impl UpstreamClient { if !has_next { break; } + + // [FIX] Smart Jitter (50-250ms) + let jitter = rand::thread_rng().gen_range(50..250); + tokio::time::sleep(Duration::from_millis(jitter)).await; + continue; } } @@ -269,33 +270,11 @@ impl UpstreamClient { Err(last_err.unwrap_or_else(|| "All endpoints failed".to_string())) } - /// 调用 v1internal API(带 429 重试,支持闭包) - /// - /// 带容错和重试的核心请求逻辑 - /// - /// # Arguments - /// * `method` - API method (e.g., "generateContent") - /// * `query_string` - Optional query string (e.g., "?alt=sse") - /// * `get_credentials` - 闭包,获取凭证(支持账号轮换) - /// * `build_body` - 闭包,接收 project_id 构建请求体 - /// * `max_attempts` - 最大重试次数 - /// - /// # Returns - /// HTTP Response - // 已移除弃用的重试方法 (call_v1_internal_with_retry) - - // 已移除弃用的辅助方法 (parse_retry_delay) - - // 已移除弃用的辅助方法 (parse_duration_ms) - /// 获取可用模型列表 /// /// 获取远端模型列表,支持多端点自动 Fallback #[allow(dead_code)] // API ready for future model discovery feature - pub async fn fetch_available_models(&self, access_token: &str, account_id: Option<&str>) -> Result { - // [NEW] Get client based on account - let client = self.get_client(account_id).await; - + pub async fn fetch_available_models(&self, access_token: &str) -> Result { let mut headers = header::HeaderMap::new(); headers.insert( header::CONTENT_TYPE, @@ -306,11 +285,12 @@ impl UpstreamClient { header::HeaderValue::from_str(&format!("Bearer {}", access_token)) .map_err(|e| e.to_string())?, ); - - // [NEW] 支持自定义 User-Agent 覆盖 + + // [NEW] Используем динамический User-Agent с поддержкой override + let effective_ua = self.get_effective_user_agent().await; headers.insert( header::USER_AGENT, - header::HeaderValue::from_str(&self.get_user_agent().await) + header::HeaderValue::from_str(&effective_ua) .unwrap_or_else(|e| { 
tracing::warn!("Invalid User-Agent header value, using fallback: {}", e); header::HeaderValue::from_static("antigravity") @@ -318,12 +298,25 @@ impl UpstreamClient { ); let mut last_err: Option = None; + let client_guard = self.http_client.read().await; + + // [FIX] Adaptive Routing for models + let current_preferred = self.preferred_endpoint_index.load(Ordering::Relaxed); + let mut indices: Vec = (0..V1_INTERNAL_BASE_URL_FALLBACKS.len()).collect(); + + if current_preferred < indices.len() { + if let Some(pos) = indices.iter().position(|&x| x == current_preferred) { + indices.remove(pos); + indices.insert(0, current_preferred); + } + } // 遍历所有端点,失败时自动切换 - for (idx, base_url) in V1_INTERNAL_BASE_URL_FALLBACKS.iter().enumerate() { + for (attempt_idx, &fallback_idx) in indices.iter().enumerate() { + let base_url = V1_INTERNAL_BASE_URL_FALLBACKS[fallback_idx]; let url = Self::build_url(base_url, "fetchAvailableModels", None); - let response = client + let response = client_guard .post(&url) .headers(headers.clone()) .json(&serde_json::json!({})) @@ -334,7 +327,13 @@ impl UpstreamClient { Ok(resp) => { let status = resp.status(); if status.is_success() { - if idx > 0 { + // Update preferred on success + let current = self.preferred_endpoint_index.load(Ordering::Relaxed); + if current != fallback_idx { + self.preferred_endpoint_index.store(fallback_idx, Ordering::Relaxed); + } + + if attempt_idx > 0 { tracing::info!( "✓ Upstream fallback succeeded for fetchAvailableModels | Endpoint: {} | Status: {}", base_url, @@ -351,7 +350,7 @@ impl UpstreamClient { } // 如果有下一个端点且当前错误可重试,则切换 - let has_next = idx + 1 < V1_INTERNAL_BASE_URL_FALLBACKS.len(); + let has_next = attempt_idx + 1 < indices.len(); if has_next && Self::should_try_next_endpoint(status) { tracing::warn!( "fetchAvailableModels returned {} at {}, trying next endpoint", @@ -359,6 +358,11 @@ impl UpstreamClient { base_url ); last_err = Some(format!("Upstream error: {}", status)); + + // [FIX] Smart Jitter (50-250ms) + 
let jitter = rand::thread_rng().gen_range(50..250); + tokio::time::sleep(Duration::from_millis(jitter)).await; + continue; } @@ -371,9 +375,14 @@ impl UpstreamClient { last_err = Some(msg); // 如果是最后一个端点,退出循环 - if idx + 1 >= V1_INTERNAL_BASE_URL_FALLBACKS.len() { + if attempt_idx + 1 >= indices.len() { break; } + + // [FIX] Smart Jitter (50-250ms) + let jitter = rand::thread_rng().gen_range(50..250); + tokio::time::sleep(Duration::from_millis(jitter)).await; + continue; } } diff --git a/src-tauri/src/proxy/upstream/retry.rs b/src-tauri/src/proxy/upstream/retry.rs index 452ae7ef8..372908cff 100644 --- a/src-tauri/src/proxy/upstream/retry.rs +++ b/src-tauri/src/proxy/upstream/retry.rs @@ -1,11 +1,11 @@ // 429 重试策略 // Duration 解析 -use regex::Regex; use once_cell::sync::Lazy; +use regex::Regex; static DURATION_RE: Lazy = Lazy::new(|| { - Regex::new(r"([\d.]+)\s*(ms|s|m|h)").unwrap() + Regex::new(r"([\d.]+)\s*(ms|s|m|h)").expect("Failed to compile static DURATION_RE") }); /// 解析 Duration 字符串 (e.g., "1.5s", "200ms", "1h16m0.667s") diff --git a/src-tauri/src/shared/db_pool.rs b/src-tauri/src/shared/db_pool.rs new file mode 100644 index 000000000..b75528e46 --- /dev/null +++ b/src-tauri/src/shared/db_pool.rs @@ -0,0 +1,254 @@ +// File: src-tauri/src/shared/db_pool.rs +//! SQLite connection pooling using r2d2 +//! 
Eliminates "Too many open files" errors and improves performance + +use std::path::PathBuf; +use std::sync::OnceLock; +use std::time::Duration; + +use r2d2::{Pool, PooledConnection as R2D2PooledConnection}; +use r2d2_sqlite::SqliteConnectionManager; +use rusqlite::OpenFlags; + +use crate::error::{AppError, AppResult}; + +// ============================================================================ +// Type Aliases +// ============================================================================ + +/// SQLite connection pool type +pub type DbPool = Pool; + +/// Pooled connection type +pub type PooledConnection = R2D2PooledConnection; + +// ============================================================================ +// Global Pool Registry +// ============================================================================ + +/// Registry of database pools by path +static POOL_REGISTRY: OnceLock> = OnceLock::new(); + +fn get_registry() -> &'static dashmap::DashMap { + POOL_REGISTRY.get_or_init(dashmap::DashMap::new) +} + +// ============================================================================ +// Pool Configuration +// ============================================================================ + +/// Configuration for database pool +#[derive(Debug, Clone)] +pub struct PoolConfig { + /// Maximum number of connections in the pool + pub max_size: u32, + /// Minimum number of idle connections + pub min_idle: Option, + /// Connection timeout + pub connection_timeout: Duration, + /// Maximum lifetime of a connection + pub max_lifetime: Option, + /// Idle timeout for connections + pub idle_timeout: Option, +} + +impl Default for PoolConfig { + fn default() -> Self { + Self { + max_size: 10, + min_idle: Some(2), + connection_timeout: Duration::from_secs(30), + max_lifetime: Some(Duration::from_secs(3600)), // 1 hour + idle_timeout: Some(Duration::from_secs(600)), // 10 minutes + } + } +} + +// 
============================================================================ +// Pool Creation +// ============================================================================ + +/// Create a new connection pool for the given database path +pub fn create_pool(db_path: &PathBuf, config: PoolConfig) -> AppResult { + let manager = SqliteConnectionManager::file(db_path) + .with_flags( + OpenFlags::SQLITE_OPEN_READ_WRITE + | OpenFlags::SQLITE_OPEN_CREATE + | OpenFlags::SQLITE_OPEN_NO_MUTEX, + ) + .with_init(|conn| { + // Enable WAL mode for better concurrent access + conn.execute_batch( + "PRAGMA journal_mode = WAL; + PRAGMA synchronous = NORMAL; + PRAGMA foreign_keys = ON; + PRAGMA busy_timeout = 5000;", + )?; + Ok(()) + }); + + let pool = Pool::builder() + .max_size(config.max_size) + .min_idle(config.min_idle) + .connection_timeout(config.connection_timeout) + .max_lifetime(config.max_lifetime) + .idle_timeout(config.idle_timeout) + .build(manager) + .map_err(|e| AppError::DatabasePool(format!("Failed to create pool: {}", e)))?; + + Ok(pool) +} + +/// Get or create a pool for the given database path +pub fn get_pool(db_path: &PathBuf) -> AppResult { + let key = db_path.to_string_lossy().to_string(); + let registry = get_registry(); + + if let Some(pool) = registry.get(&key) { + return Ok(pool.clone()); + } + + let pool = create_pool(db_path, PoolConfig::default())?; + registry.insert(key, pool.clone()); + Ok(pool) +} + +/// Get a connection from the pool for the given database path +pub fn get_connection(db_path: &PathBuf) -> AppResult { + let pool = get_pool(db_path)?; + pool.get() + .map_err(|e| AppError::DatabasePool(format!("Failed to get connection: {}", e))) +} + +/// Execute a function with a pooled connection +pub fn with_connection(db_path: &PathBuf, f: F) -> AppResult +where + F: FnOnce(&PooledConnection) -> AppResult, +{ + let conn = get_connection(db_path)?; + f(&conn) +} + +/// Execute a function with a mutable pooled connection +pub fn 
with_connection_mut(db_path: &PathBuf, f: F) -> AppResult +where + F: FnOnce(&mut PooledConnection) -> AppResult, +{ + let mut conn = get_connection(db_path)?; + f(&mut conn) +} + +// ============================================================================ +// Pool Management +// ============================================================================ + +/// Get pool statistics +#[derive(Debug, Clone, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct PoolStats { + pub path: String, + pub connections: u32, + pub idle_connections: u32, + pub max_size: u32, +} + +/// Get statistics for all pools +pub fn get_all_pool_stats() -> Vec { + let registry = get_registry(); + registry + .iter() + .map(|entry| { + let pool = entry.value(); + let state = pool.state(); + PoolStats { + path: entry.key().clone(), + connections: state.connections, + idle_connections: state.idle_connections, + max_size: pool.max_size(), + } + }) + .collect() +} + +/// Close all pools (for graceful shutdown) +pub fn close_all_pools() { + let registry = get_registry(); + registry.clear(); + tracing::info!("All database pools closed"); +} + +/// Remove a specific pool from registry +pub fn remove_pool(db_path: &PathBuf) { + let key = db_path.to_string_lossy().to_string(); + get_registry().remove(&key); +} + +// ============================================================================ +// Convenience Functions for Common Databases +// ============================================================================ + +/// Get connection to the main application database +pub fn get_app_db_connection() -> AppResult { + let data_dir = crate::modules::account::get_data_dir() + .map_err(|e| AppError::Config(format!("Failed to get data dir: {}", e)))?; + let db_path = data_dir.join("antigravity.db"); + get_connection(&db_path) +} + +/// Get connection to the token stats database +pub fn get_stats_db_connection() -> AppResult { + let data_dir = crate::modules::account::get_data_dir() + 
.map_err(|e| AppError::Config(format!("Failed to get data dir: {}", e)))?; + let db_path = data_dir.join("token_stats.db"); + get_connection(&db_path) +} + +/// Get connection to the proxy logs database +pub fn get_proxy_logs_db_connection() -> AppResult { + let data_dir = crate::modules::account::get_data_dir() + .map_err(|e| AppError::Config(format!("Failed to get data dir: {}", e)))?; + let db_path = data_dir.join("proxy_logs.db"); + get_connection(&db_path) +} + +/// Get connection to the security database +pub fn get_security_db_connection() -> AppResult { + let data_dir = crate::modules::account::get_data_dir() + .map_err(|e| AppError::Config(format!("Failed to get data dir: {}", e)))?; + let db_path = data_dir.join("security.db"); + get_connection(&db_path) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::env::temp_dir; + + #[test] + fn test_pool_creation() { + let temp_path = temp_dir().join("test_pool.db"); + let pool = create_pool(&temp_path, PoolConfig::default()); + assert!(pool.is_ok()); + + // Cleanup + let _ = std::fs::remove_file(&temp_path); + } + + #[test] + fn test_with_connection() { + let temp_path = temp_dir().join("test_with_conn.db"); + + let result = with_connection(&temp_path, |conn| { + conn.execute("CREATE TABLE IF NOT EXISTS test (id INTEGER)", []) + .map_err(AppError::Database)?; + Ok(42) + }); + + assert!(result.is_ok()); + assert_eq!(result.unwrap(), 42); + + // Cleanup + remove_pool(&temp_path); + let _ = std::fs::remove_file(&temp_path); + } +} diff --git a/src-tauri/src/shared/mod.rs b/src-tauri/src/shared/mod.rs new file mode 100644 index 000000000..83b9e8961 --- /dev/null +++ b/src-tauri/src/shared/mod.rs @@ -0,0 +1,7 @@ +// File: src-tauri/src/shared/mod.rs +//! Shared infrastructure modules for Antigravity Manager +//! Contains cross-cutting concerns: database pooling, utilities, etc. 
+ +pub mod db_pool; + +pub use db_pool::{DbPool, PooledConnection, get_pool, with_connection}; diff --git a/src-tauri/src/utils/crypto.rs b/src-tauri/src/utils/crypto.rs deleted file mode 100644 index 75b46badc..000000000 --- a/src-tauri/src/utils/crypto.rs +++ /dev/null @@ -1,63 +0,0 @@ -use aes_gcm::{ - aead::{Aead, KeyInit}, - Aes256Gcm, Nonce, -}; -use base64::{Engine as _, engine::general_purpose}; -use sha2::Digest; - -/// 生成加密密钥 (基于设备 ID) -fn get_encryption_key() -> [u8; 32] { - // 使用设备唯一标识生成密钥 - let device_id = machine_uid::get().unwrap_or_else(|_| "default".to_string()); - let mut key = [0u8; 32]; - let hash = sha2::Sha256::digest(device_id.as_bytes()); - key.copy_from_slice(&hash); - key -} - -use serde::{Deserialize, Deserializer, Serializer}; - -pub fn serialize_password(password: &str, serializer: S) -> Result -where - S: Serializer, -{ - let encrypted = encrypt_string(password).map_err(serde::ser::Error::custom)?; - serializer.serialize_str(&encrypted) -} - -pub fn deserialize_password<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let encrypted = String::deserialize(deserializer)?; - decrypt_string(&encrypted).map_err(serde::de::Error::custom) -} - -pub fn encrypt_string(password: &str) -> Result { - let key = get_encryption_key(); - let cipher = Aes256Gcm::new(&key.into()); - // In production, we should use a random nonce and prepend it to the ciphertext - // For simplicity in this demo, we use a fixed nonce (NOT SECURE for repeats) - // improving security: use random nonce - let nonce = Nonce::from_slice(b"antigravity_salt"); - - let ciphertext = cipher.encrypt(nonce, password.as_bytes()) - .map_err(|e| format!("Encryption failed: {}", e))?; - - Ok(general_purpose::STANDARD.encode(ciphertext)) -} - -pub fn decrypt_string(encrypted: &str) -> Result { - let key = get_encryption_key(); - let cipher = Aes256Gcm::new(&key.into()); - let nonce = Nonce::from_slice(b"antigravity_salt"); - - let ciphertext = 
general_purpose::STANDARD.decode(encrypted) - .map_err(|e| format!("Base64 decode failed: {}", e))?; - - let plaintext = cipher.decrypt(nonce, ciphertext.as_ref()) - .map_err(|e| format!("Decryption failed: {}", e))?; - - String::from_utf8(plaintext) - .map_err(|e| format!("UTF-8 conversion failed: {}", e)) -} diff --git a/src-tauri/src/utils/mod.rs b/src-tauri/src/utils/mod.rs index 9407d5094..ea7c15df7 100755 --- a/src-tauri/src/utils/mod.rs +++ b/src-tauri/src/utils/mod.rs @@ -1,3 +1,2 @@ pub mod http; pub mod protobuf; -pub mod crypto; diff --git a/src-tauri/src/utils/protobuf.rs b/src-tauri/src/utils/protobuf.rs index 55e1e4f93..dd5e67703 100755 --- a/src-tauri/src/utils/protobuf.rs +++ b/src-tauri/src/utils/protobuf.rs @@ -173,7 +173,6 @@ pub fn create_oauth_field(access_token: &str, refresh_token: &str, expiry: i64) field6 } - /// Create Email (Field 2) pub fn create_email_field(email: &str) -> Vec { let tag = (2 << 3) | 2; @@ -183,7 +182,7 @@ pub fn create_email_field(email: &str) -> Vec { f } -/// 编码长度分隔字段 (wire_type = 2) +/// Encode length-delimited field (wire_type = 2) pub fn encode_len_delim_field(field_num: u32, data: &[u8]) -> Vec { let tag = (field_num << 3) | 2; let mut f = encode_varint(tag as u64); @@ -192,12 +191,12 @@ pub fn encode_len_delim_field(field_num: u32, data: &[u8]) -> Vec { f } -/// 编码字符串字段 (wire_type = 2) +/// Encode string field (wire_type = 2) pub fn encode_string_field(field_num: u32, value: &str) -> Vec { encode_len_delim_field(field_num, value.as_bytes()) } -/// 创建 OAuthTokenInfo 消息(不包含 Field 6 包装,用于新格式) +/// Create OAuthTokenInfo message (without Field 6 wrapper, for new format) pub fn create_oauth_info(access_token: &str, refresh_token: &str, expiry: i64) -> Vec { // Field 1: access_token let field1 = encode_string_field(1, access_token); @@ -208,13 +207,12 @@ pub fn create_oauth_info(access_token: &str, refresh_token: &str, expiry: i64) - // Field 3: refresh_token let field3 = encode_string_field(3, refresh_token); - // 
Field 4: expiry (嵌套的 Timestamp 消息) + // Field 4: expiry (nested Timestamp message) let timestamp_tag = (1 << 3) | 0; let mut timestamp_msg = encode_varint(timestamp_tag); timestamp_msg.extend(encode_varint(expiry as u64)); let field4 = encode_len_delim_field(4, ×tamp_msg); - // 合并所有字段为 OAuthTokenInfo 消息 + // Merge all fields into OAuthTokenInfo message [field1, field2, field3, field4].concat() } - diff --git a/src-tauri/tauri.conf.dev.json b/src-tauri/tauri.conf.dev.json new file mode 100644 index 000000000..cf2f4aa9d --- /dev/null +++ b/src-tauri/tauri.conf.dev.json @@ -0,0 +1,52 @@ +{ + "$schema": "https://schema.tauri.app/config/2", + "productName": "Antigravity Tools DEV", + "version": "5.1.0", + "identifier": "com.lbjlaq.antigravity-tools-dev", + "build": { + "beforeDevCommand": "npm run dev:standalone", + "devUrl": "http://localhost:1421", + "beforeBuildCommand": "npm run build", + "frontendDist": "../dist" + }, + "app": { + "withGlobalTauri": false, + "windows": [ + { + "title": "Antigravity Tools [DEV]", + "width": 1024, + "height": 700, + "titleBarStyle": "Overlay", + "hiddenTitle": true, + "transparent": true, + "visible": false + } + ], + "security": { + "csp": "default-src 'self'; img-src 'self' asset: data:; style-src 'self' 'unsafe-inline'; script-src 'self' 'unsafe-inline' 'unsafe-eval'; connect-src ipc: http://ipc.localhost" + } + }, + "bundle": { + "active": true, + "targets": "all", + "icon": [ + "icons/32x32.png", + "icons/128x128.png", + "icons/128x128@2x.png", + "icons/icon.icns", + "icons/icon.ico" + ], + "macOS": { + "entitlements": "Entitlements.plist" + } + }, + "plugins": { + "updater": { + "active": false + }, + "process": null, + "fs": null, + "dialog": null, + "opener": null + } +} diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json index 5f2382371..1a13034ec 100755 --- a/src-tauri/tauri.conf.json +++ b/src-tauri/tauri.conf.json @@ -1,7 +1,7 @@ { "$schema": "https://schema.tauri.app/config/2", "productName": "Antigravity 
Tools", - "version": "4.1.8", + "version": "5.1.0", "identifier": "com.lbjlaq.antigravity-tools", "build": { "beforeDevCommand": "npm run dev", @@ -29,7 +29,6 @@ "bundle": { "active": true, "targets": "all", - "createUpdaterArtifacts": true, "icon": [ "icons/32x32.png", "icons/128x128.png", @@ -43,12 +42,12 @@ }, "plugins": { "updater": { - "active": false, + "active": true, "endpoints": [ - "https://github.com/lbjlaq/Antigravity-Manager/releases/latest/download/updater.json" + "https://github.com/GofMan5/Antigravity-Manager/releases/latest/download/latest.json" ], "dialog": true, - "pubkey": "dW50cnVzdGVkIGNvbW1lbnQ6IG1pbmlzaWduIHB1YmxpYyBrZXk6IEJFRjVDRjdCREE1Rjg2NkYKUldSdmhsL2FlOC8xdnJuTzBtaTRhZkk1VWJ1cW5QWGU3ZWEwU20yZHRlZStxMnRWcUIzc3FwT2IK" + "pubkey": "dW50cnVzdGVkIGNvbW1lbnQ6IG1pbmlzaWduIHB1YmxpYyBrZXk6IDVDMTQ4NzQwMjUyMjg0NUUKUldSZWhDSWxRSWNVWExVd05NSmE3MS9XT0txN2dBUnNpQUNiaVhIVWI5VHlqMVpIcHdnOE14OHUK" }, "process": null, "fs": null, @@ -56,4 +55,4 @@ "opener": null, "single-instance": null } -} \ No newline at end of file +} diff --git a/src/App.tsx b/src/App.tsx deleted file mode 100755 index e2d0f89db..000000000 --- a/src/App.tsx +++ /dev/null @@ -1,158 +0,0 @@ -import { createBrowserRouter, RouterProvider } from 'react-router-dom'; - -import Layout from './components/layout/Layout'; -import Dashboard from './pages/Dashboard'; -import Accounts from './pages/Accounts'; -import Settings from './pages/Settings'; -import ApiProxy from './pages/ApiProxy'; -import Monitor from './pages/Monitor'; -import TokenStats from './pages/TokenStats'; -import Security from './pages/Security'; -import ThemeManager from './components/common/ThemeManager'; -import UserToken from './pages/UserToken'; -import { UpdateNotification } from './components/UpdateNotification'; -import DebugConsole from './components/debug/DebugConsole'; -import { useEffect, useState } from 'react'; -import { useConfigStore } from './stores/useConfigStore'; -import { useAccountStore } from 
'./stores/useAccountStore'; -import { useTranslation } from 'react-i18next'; -import { listen } from '@tauri-apps/api/event'; -import { isTauri } from './utils/env'; -import { request as invoke } from './utils/request'; -import { AdminAuthGuard } from './components/common/AdminAuthGuard'; - -const router = createBrowserRouter([ - { - path: '/', - element: , - children: [ - { - index: true, - element: , - }, - { - path: 'accounts', - element: , - }, - { - path: 'api-proxy', - element: , - }, - { - path: 'monitor', - element: , - }, - { - path: 'token-stats', - element: , - }, - { - path: 'user-token', - element: , - }, - { - path: 'security', - element: , - }, - { - path: 'settings', - element: , - }, - ], - }, -]); - -function App() { - const { config, loadConfig } = useConfigStore(); - const { fetchCurrentAccount, fetchAccounts } = useAccountStore(); - const { i18n } = useTranslation(); - - useEffect(() => { - loadConfig(); - }, [loadConfig]); - - // Sync language from config - useEffect(() => { - if (config?.language) { - i18n.changeLanguage(config.language); - // Support RTL - if (config.language === 'ar') { - document.documentElement.dir = 'rtl'; - } else { - document.documentElement.dir = 'ltr'; - } - } - }, [config?.language, i18n]); - - // Listen for tray events - useEffect(() => { - if (!isTauri()) return; - const unlistenPromises: Promise<() => void>[] = []; - - // 监听托盘切换账号事件 - unlistenPromises.push( - listen('tray://account-switched', () => { - console.log('[App] Tray account switched, refreshing...'); - fetchCurrentAccount(); - fetchAccounts(); - }) - ); - - // 监听托盘刷新事件 - unlistenPromises.push( - listen('tray://refresh-current', () => { - console.log('[App] Tray refresh triggered, refreshing...'); - fetchCurrentAccount(); - fetchAccounts(); - }) - ); - - // Cleanup - return () => { - Promise.all(unlistenPromises).then(unlisteners => { - unlisteners.forEach(unlisten => unlisten()); - }); - }; - }, [fetchCurrentAccount, fetchAccounts]); - - // Update 
notification state - const [showUpdateNotification, setShowUpdateNotification] = useState(false); - - // Check for updates on startup - useEffect(() => { - const checkUpdates = async () => { - try { - console.log('[App] Checking if we should check for updates...'); - const shouldCheck = await invoke('should_check_updates'); - console.log('[App] Should check updates:', shouldCheck); - - if (shouldCheck) { - setShowUpdateNotification(true); - // 我们这里只负责显示通知组件,通知组件内部会去调用 check_for_updates - // 我们在显示组件后,标记已经检查过了(即便失败或无更新,组件内部也会处理) - await invoke('update_last_check_time'); - console.log('[App] Update check cycle initiated and last check time updated.'); - } - } catch (error) { - console.error('Failed to check update settings:', error); - } - }; - - // Delay check to avoid blocking initial render - const timer = setTimeout(checkUpdates, 2000); - return () => clearTimeout(timer); - }, []); - - return ( - - - - {showUpdateNotification && ( - setShowUpdateNotification(false)} /> - )} - - - ); -} - -export default App; \ No newline at end of file diff --git a/src/app/App.tsx b/src/app/App.tsx new file mode 100644 index 000000000..674f6cefb --- /dev/null +++ b/src/app/App.tsx @@ -0,0 +1,129 @@ +// File: src/app/App.tsx +// Main application component with all providers and global effects + +import { useEffect, useState } from 'react'; +import { RouterProvider } from 'react-router-dom'; +import { listen } from '@tauri-apps/api/event'; +import { useQueryClient } from '@tanstack/react-query'; + +import { router } from './router'; +import { QueryProvider, I18nProvider } from './providers'; + +// FSD imports +import { useConfigStore } from '@/entities/config'; +import { useDebugConsole } from '@/widgets/debug-console'; +import { isTauri } from '@/shared/lib'; +import { invoke } from '@/shared/api'; +import { showToast } from '@/shared/ui'; +import { accountKeys } from '@/features/accounts'; + +// Global components +import { UpdateNotification } from '@/widgets/update-notification'; 
+import { ThemeManager, AdminAuthGuard } from '@/app/providers'; +import { DebugConsole } from '@/widgets/debug-console'; + +function AppContent() { + const { config, loadConfig } = useConfigStore(); + const checkDebugConsoleEnabled = useDebugConsole(s => s.checkEnabled); + const queryClient = useQueryClient(); + + // Invalidate accounts queries (replaces fetchCurrentAccount/fetchAccounts) + const refreshAccounts = () => { + queryClient.invalidateQueries({ queryKey: accountKeys.all }); + }; + + // Load config on mount + useEffect(() => { + loadConfig(); + checkDebugConsoleEnabled(); + }, [loadConfig, checkDebugConsoleEnabled]); + + // Listen for tray events + useEffect(() => { + if (!isTauri()) return; + const unlistenPromises: Promise<() => void>[] = []; + + // Listen for tray account switch + unlistenPromises.push( + listen('tray://account-switched', () => { + console.log('[App] Tray account switched, refreshing...'); + refreshAccounts(); + }) + ); + + // Listen for tray refresh + unlistenPromises.push( + listen('tray://refresh-current', () => { + console.log('[App] Tray refresh triggered, refreshing...'); + refreshAccounts(); + }) + ); + + // Listen for account validation blocked event + unlistenPromises.push( + listen<{ account_id: string; email: string; blocked_until: number; reason: string }>('account-validation-blocked', (event) => { + console.log('[App] Account validation blocked:', event.payload); + const { email, blocked_until } = event.payload; + const blockedUntilDate = new Date(blocked_until * 1000); + const timeStr = blockedUntilDate.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' }); + showToast( + `Account ${email} temporarily blocked until ${timeStr} (verification required)`, + 'warning' + ); + refreshAccounts(); + }) + ); + + return () => { + Promise.all(unlistenPromises).then(unlisteners => { + unlisteners.forEach(unlisten => unlisten()); + }); + }; + }, [queryClient]); + + // Update notification state + const [showUpdateNotification, 
setShowUpdateNotification] = useState(false); + + // Check for updates on startup + useEffect(() => { + const checkUpdates = async () => { + try { + console.log('[App] Checking if we should check for updates...'); + const shouldCheck = await invoke('should_check_updates'); + console.log('[App] Should check updates:', shouldCheck); + + if (shouldCheck) { + setShowUpdateNotification(true); + await invoke('update_last_check_time'); + console.log('[App] Update check cycle initiated and last check time updated.'); + } + } catch (error) { + console.error('Failed to check update settings:', error); + } + }; + + const timer = setTimeout(checkUpdates, 2000); + return () => clearTimeout(timer); + }, []); + + return ( + + + + + {showUpdateNotification && ( + setShowUpdateNotification(false)} /> + )} + + + + ); +} + +export default function App() { + return ( + + + + ); +} diff --git a/src/app/index.ts b/src/app/index.ts new file mode 100644 index 000000000..e1eadd04a --- /dev/null +++ b/src/app/index.ts @@ -0,0 +1,6 @@ +// File: src/app/index.ts +// App layer barrel export + +export { default as App } from './App'; +export { router } from './router'; +export { QueryProvider, I18nProvider } from './providers'; diff --git a/src/components/common/AdminAuthGuard.tsx b/src/app/providers/AdminAuthGuard.tsx similarity index 63% rename from src/components/common/AdminAuthGuard.tsx rename to src/app/providers/AdminAuthGuard.tsx index f15077c74..236058992 100644 --- a/src/components/common/AdminAuthGuard.tsx +++ b/src/app/providers/AdminAuthGuard.tsx @@ -1,7 +1,7 @@ import React, { useState, useEffect } from 'react'; -import { Lock, Key, Globe, AlertCircle, Loader2 } from 'lucide-react'; +import { Lock, Key, Globe } from 'lucide-react'; import { useTranslation } from 'react-i18next'; -import { isTauri } from '../../utils/env'; +import { isTauri } from '@/shared/lib'; /** * AdminAuthGuard @@ -13,13 +13,10 @@ export const AdminAuthGuard: React.FC<{ children: React.ReactNode }> = ({ 
childr const [isAuthenticated, setIsAuthenticated] = useState(isTauri()); const [apiKey, setApiKey] = useState(''); const [showLangMenu, setShowLangMenu] = useState(false); - const [isLoading, setIsLoading] = useState(false); - const [error, setError] = useState(''); useEffect(() => { if (isTauri()) return; - // 检查 Session 存储 (优先) const sessionKey = sessionStorage.getItem('abv_admin_api_key'); if (sessionKey) { setIsAuthenticated(true); @@ -27,20 +24,18 @@ export const AdminAuthGuard: React.FC<{ children: React.ReactNode }> = ({ childr return; } - // 检查本地存储 (迁移逻辑) const savedKey = localStorage.getItem('abv_admin_api_key'); if (savedKey) { - // 迁移到 sessionStorage 并清理 localStorage sessionStorage.setItem('abv_admin_api_key', savedKey); localStorage.removeItem('abv_admin_api_key'); - setIsAuthenticated(true); setApiKey(savedKey); + setIsAuthenticated(true); } // 监听全局 401 事件 const handleUnauthorized = () => { sessionStorage.removeItem('abv_admin_api_key'); - localStorage.removeItem('abv_admin_api_key'); // 双重清理确保万一 + localStorage.removeItem('abv_admin_api_key'); setIsAuthenticated(false); }; @@ -48,48 +43,13 @@ export const AdminAuthGuard: React.FC<{ children: React.ReactNode }> = ({ childr return () => window.removeEventListener('abv-unauthorized', handleUnauthorized); }, []); - const handleLogin = async (e: React.FormEvent) => { + const handleLogin = (e: React.FormEvent) => { e.preventDefault(); - const trimmedKey = apiKey.trim(); - if (!trimmedKey) return; - - setIsLoading(true); - setError(''); - - try { - // 先临时存储 key,用于验证请求 - sessionStorage.setItem('abv_admin_api_key', trimmedKey); - - // 调用一个需要认证的 API 来验证密码是否正确 - const response = await fetch('/api/accounts', { - method: 'GET', - headers: { - 'Content-Type': 'application/json', - 'Authorization': `Bearer ${trimmedKey}`, - 'x-api-key': trimmedKey - } - }); - - if (response.ok || response.status === 204) { - // 验证成功 - localStorage.removeItem('abv_admin_api_key'); - setIsAuthenticated(true); - 
window.location.reload(); - } else if (response.status === 401) { - // 密码错误 - sessionStorage.removeItem('abv_admin_api_key'); - setError(t('login.error_invalid_key')); - } else { - // 其他错误,但可能密码是对的 - setIsAuthenticated(true); - window.location.reload(); - } - } catch (err) { - // 网络错误等 - sessionStorage.removeItem('abv_admin_api_key'); - setError(t('login.error_network')); - } finally { - setIsLoading(false); + if (apiKey.trim()) { + sessionStorage.setItem('abv_admin_api_key', apiKey.trim()); + localStorage.removeItem('abv_admin_api_key'); + setIsAuthenticated(true); + window.location.reload(); } }; @@ -108,9 +68,6 @@ export const AdminAuthGuard: React.FC<{ children: React.ReactNode }> = ({ childr { code: 'tr', name: 'Türkçe' }, { code: 'vi', name: 'Tiếng Việt' }, { code: 'pt', name: 'Português' }, - { code: 'ar', name: 'العربية' }, - { code: 'es', name: 'Español' }, - { code: 'my', name: 'Bahasa Melayu' }, ]; if (isAuthenticated) { @@ -161,32 +118,17 @@ export const AdminAuthGuard: React.FC<{ children: React.ReactNode }> = ({ childr { setApiKey(e.target.value); setError(''); }} + onChange={(e) => setApiKey(e.target.value)} autoFocus - disabled={isLoading} /> - {error && ( -
- - {error} -
- )} diff --git a/src/components/common/BackgroundTaskRunner.tsx b/src/app/providers/BackgroundTaskRunner.tsx old mode 100755 new mode 100644 similarity index 73% rename from src/components/common/BackgroundTaskRunner.tsx rename to src/app/providers/BackgroundTaskRunner.tsx index 3d9680783..5e3a4dfc4 --- a/src/components/common/BackgroundTaskRunner.tsx +++ b/src/app/providers/BackgroundTaskRunner.tsx @@ -1,10 +1,13 @@ +// Background task runner for auto-refresh and auto-sync + import { useEffect, useRef } from 'react'; -import { useConfigStore } from '../../stores/useConfigStore'; -import { useAccountStore } from '../../stores/useAccountStore'; +import { useConfigStore } from '@/entities/config'; +import { useRefreshAllQuotas, useSyncAccountFromDb } from '@/features/accounts'; -function BackgroundTaskRunner() { +export function BackgroundTaskRunner() { const { config } = useConfigStore(); - const { refreshAllQuotas } = useAccountStore(); + const refreshAllQuotasMutation = useRefreshAllQuotas(); + const syncAccountMutation = useSyncAccountFromDb(); // Use refs to track previous state to detect "off -> on" transitions const prevAutoRefreshRef = useRef(false); @@ -20,7 +23,7 @@ function BackgroundTaskRunner() { // Check if we just turned it on if (auto_refresh && !prevAutoRefreshRef.current) { console.log('[BackgroundTask] Auto-refresh enabled, executing immediately...'); - refreshAllQuotas(); + refreshAllQuotasMutation.mutate(); } prevAutoRefreshRef.current = auto_refresh; @@ -28,7 +31,7 @@ function BackgroundTaskRunner() { console.log(`[BackgroundTask] Starting auto-refresh quota timer: ${refresh_interval} mins`); intervalId = setInterval(() => { console.log('[BackgroundTask] Auto-refreshing all quotas...'); - refreshAllQuotas(); + refreshAllQuotasMutation.mutate(); }, refresh_interval * 60 * 1000); } @@ -38,7 +41,7 @@ function BackgroundTaskRunner() { clearInterval(intervalId); } }; - }, [config?.auto_refresh, config?.refresh_interval]); + }, [config?.auto_refresh, 
config?.refresh_interval, refreshAllQuotasMutation]); // Auto Sync Current Account Effect useEffect(() => { @@ -46,20 +49,19 @@ function BackgroundTaskRunner() { let intervalId: ReturnType | null = null; const { auto_sync, sync_interval } = config; - const { syncAccountFromDb } = useAccountStore.getState(); // Check if we just turned it on if (auto_sync && !prevAutoSyncRef.current) { console.log('[BackgroundTask] Auto-sync enabled, executing immediately...'); - syncAccountFromDb(); + syncAccountMutation.mutate(); } prevAutoSyncRef.current = auto_sync; if (auto_sync && sync_interval > 0) { console.log(`[BackgroundTask] Starting auto-sync account timer: ${sync_interval} seconds`); intervalId = setInterval(() => { - console.log('[BackgroundTask] Auto-syncing current account from DB...'); - syncAccountFromDb(); + console.log('[BackgroundTask] Auto-syncing account from DB...'); + syncAccountMutation.mutate(); }, sync_interval * 1000); } @@ -69,10 +71,7 @@ function BackgroundTaskRunner() { clearInterval(intervalId); } }; - }, [config?.auto_sync, config?.sync_interval]); + }, [config?.auto_sync, config?.sync_interval, syncAccountMutation]); - // Render nothing return null; } - -export default BackgroundTaskRunner; diff --git a/src/app/providers/I18nProvider.tsx b/src/app/providers/I18nProvider.tsx new file mode 100644 index 000000000..ad0d1dfeb --- /dev/null +++ b/src/app/providers/I18nProvider.tsx @@ -0,0 +1,25 @@ +// File: src/app/providers/I18nProvider.tsx +// I18n initialization provider + +import { useEffect } from 'react'; +import { useTranslation } from 'react-i18next'; +import '@/shared/config/i18n'; // Initialize i18n + +interface I18nProviderProps { + children: React.ReactNode; + language?: string; +} + +export function I18nProvider({ children, language }: I18nProviderProps) { + const { i18n } = useTranslation(); + + useEffect(() => { + if (language) { + i18n.changeLanguage(language); + // Support RTL languages + document.documentElement.dir = language === 'ar' 
? 'rtl' : 'ltr'; + } + }, [language, i18n]); + + return <>{children}; +} diff --git a/src/app/providers/QueryProvider.tsx b/src/app/providers/QueryProvider.tsx new file mode 100644 index 000000000..82a012a68 --- /dev/null +++ b/src/app/providers/QueryProvider.tsx @@ -0,0 +1,20 @@ +// File: src/app/providers/QueryProvider.tsx +// React Query provider wrapper + +import { QueryClientProvider } from '@tanstack/react-query'; +import { ReactQueryDevtools } from '@tanstack/react-query-devtools'; +import { queryClient } from '@/shared/api'; +import type { ReactNode } from 'react'; + +interface QueryProviderProps { + children: ReactNode; +} + +export function QueryProvider({ children }: QueryProviderProps) { + return ( + + {children} + + + ); +} diff --git a/src/components/common/ThemeManager.tsx b/src/app/providers/ThemeManager.tsx old mode 100755 new mode 100644 similarity index 95% rename from src/components/common/ThemeManager.tsx rename to src/app/providers/ThemeManager.tsx index 5b666e7b9..7b3e27318 --- a/src/components/common/ThemeManager.tsx +++ b/src/app/providers/ThemeManager.tsx @@ -1,11 +1,10 @@ - import { useEffect } from 'react'; -import { useConfigStore } from '../../stores/useConfigStore'; +import { useConfigStore } from '@/entities/config'; import { getCurrentWindow } from '@tauri-apps/api/window'; -import { isLinux } from '../../utils/env'; +import { isLinux } from '@/shared/lib'; -export default function ThemeManager() { +export function ThemeManager() { const { config, loadConfig } = useConfigStore(); // Load config on mount diff --git a/src/app/providers/index.ts b/src/app/providers/index.ts new file mode 100644 index 000000000..253ad929f --- /dev/null +++ b/src/app/providers/index.ts @@ -0,0 +1,5 @@ +export { QueryProvider } from './QueryProvider'; +export { I18nProvider } from './I18nProvider'; +export { ThemeManager } from './ThemeManager'; +export { BackgroundTaskRunner } from './BackgroundTaskRunner'; +export { AdminAuthGuard } from 
'./AdminAuthGuard'; diff --git a/src/app/router/index.ts b/src/app/router/index.ts new file mode 100644 index 000000000..4f7c426ca --- /dev/null +++ b/src/app/router/index.ts @@ -0,0 +1,4 @@ +// File: src/app/router/index.ts +// Router barrel export + +export { router } from './routes'; diff --git a/src/app/router/routes.tsx b/src/app/router/routes.tsx new file mode 100644 index 000000000..693b920f0 --- /dev/null +++ b/src/app/router/routes.tsx @@ -0,0 +1,73 @@ +// File: src/app/router/routes.tsx +// Application routes configuration + +import { lazy, Suspense } from 'react'; +import { createBrowserRouter } from 'react-router-dom'; + +// Layout (FSD) +import { Layout } from '@/widgets/layout'; + +// Pages (FSD) +import { DashboardPage } from '@/pages/dashboard'; +import { AccountsPage } from '@/pages/accounts'; +import { SettingsPage } from '@/pages/settings'; +import { ApiProxyPage } from '@/pages/api-proxy'; +import { SecurityPage } from '@/pages/security'; +import { TokenStatsPage } from '@/pages/token-stats'; +import { MonitorPage } from '@/pages/monitor'; +import { LogsPage } from '@/pages/logs'; + +// Lazy loaded pages (heavy components) +const ConsolePage = lazy(() => import('@/pages/console/ui/ConsolePage')); + +// Loading fallback for lazy pages +const PageLoader = () => ( +
+
+
+); + +export const router = createBrowserRouter([ + { + path: '/', + element: , + children: [ + { + index: true, + element: , + }, + { + path: 'accounts', + element: , + }, + { + path: 'api-proxy', + element: , + }, + { + path: 'monitor', + element: , + }, + { + path: 'logs', + element: , + }, + { + path: 'token-stats', + element: , + }, + { + path: 'console', + element: }>, + }, + { + path: 'security', + element: , + }, + { + path: 'settings', + element: , + }, + ], + }, +]); diff --git a/src/App.css b/src/app/styles/global.css old mode 100755 new mode 100644 similarity index 82% rename from src/App.css rename to src/app/styles/global.css index 7a9e82a96..1a43d198d --- a/src/App.css +++ b/src/app/styles/global.css @@ -2,7 +2,7 @@ @tailwind components; @tailwind utilities; -/* 禁止过度滚动和橡皮筋效果 */ +/* Disable overscroll and rubber band effect */ html, body { overscroll-behavior: none; @@ -21,7 +21,7 @@ html.dark { background-color: #1d232a; } -/* 全局样式 */ +/* Global styles */ body { margin: 0; font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', @@ -32,10 +32,9 @@ body { background-color: #FAFBFC; } -/* Dark mode override for body strictly */ +/* Dark mode override for body */ .dark body { background-color: #1d232a; - /* matches base-300 commonly used */ } #root { @@ -45,17 +44,17 @@ body { overscroll-behavior: none; } -/* 移除默认的 tap 高亮 */ +/* Remove default tap highlight */ * { -webkit-tap-highlight-color: transparent; } -/* 只移除链接的默认下划线,不强制颜色 */ +/* Remove link underlines only */ a { text-decoration: none; } -/* 滚动条优化 - 彻底隐藏但保留功能 */ +/* Scrollbar optimization - hidden but functional */ ::-webkit-scrollbar { width: 0px; background: transparent; @@ -77,7 +76,7 @@ a { background-color: rgba(0, 0, 0, 0.3); } -/* View Transitions API 主题切换动画 */ +/* View Transitions API theme switch animation */ ::view-transition-old(root), ::view-transition-new(root) { animation: none; @@ -100,7 +99,7 @@ a { z-index: 1; } -/* 暗色模式下 select 下拉菜单样式 */ +/* Dark mode 
select dropdown styles */ .dark select, .dark select option, .dark select optgroup { @@ -117,35 +116,35 @@ a { @font-face { font-family: 'Effra'; src: url('/font/Effra/Effra-Regular.ttf') format('truetype'); - font-weight: 400; /* Regular */ + font-weight: 400; font-style: normal; } @font-face { font-family: 'Effra'; src: url('/font/Effra/Effra-Medium.ttf') format('truetype'); - font-weight: 500; /* Medium */ + font-weight: 500; font-style: normal; } @font-face { font-family: 'Effra'; src: url('/font/Effra/Effra-SemiBold.ttf') format('truetype'); - font-weight: 600; /* SemiBold */ + font-weight: 600; font-style: normal; } @font-face { font-family: 'Effra'; src: url('/font/Effra/Effra-Bold.ttf') format('truetype'); - font-weight: 700; /* Bold */ + font-weight: 700; font-style: normal; } @font-face { font-family: 'Effra'; src: url('/font/Effra/Effra-ExtraBold.ttf') format('truetype'); - font-weight: 800; /* ExtraBold */ + font-weight: 800; font-style: normal; } @@ -154,4 +153,4 @@ a { font-family: 'Effra', -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', sans-serif; -} \ No newline at end of file +} diff --git a/src/components/UpdateNotification.tsx b/src/components/UpdateNotification.tsx deleted file mode 100644 index 190a05a97..000000000 --- a/src/components/UpdateNotification.tsx +++ /dev/null @@ -1,241 +0,0 @@ -import React, { useEffect, useState } from 'react'; -import { X, Download, Sparkles, ArrowRight, Loader2, CheckCircle } from 'lucide-react'; -import { request as invoke } from '../utils/request'; -import { useTranslation } from 'react-i18next'; -import { check as tauriCheck } from '@tauri-apps/plugin-updater'; -import { relaunch as tauriRelaunch } from '@tauri-apps/plugin-process'; -import { isTauri } from '../utils/env'; -import { showToast } from './common/ToastContainer'; - -interface UpdateInfo { - has_update: boolean; - latest_version: string; - current_version: 
string; - download_url: string; - source?: string; -} - -type UpdateState = 'checking' | 'available' | 'downloading' | 'ready' | 'none'; - -interface UpdateNotificationProps { - onClose: () => void; -} - -export const UpdateNotification: React.FC = ({ onClose }) => { - const { t } = useTranslation(); - const [updateInfo, setUpdateInfo] = useState(null); - const [isVisible, setIsVisible] = useState(false); - const [isClosing, setIsClosing] = useState(false); - const [updateState, setUpdateState] = useState('checking'); - const [downloadProgress, setDownloadProgress] = useState(0); - - useEffect(() => { - checkForUpdates(); - }, []); - - const checkForUpdates = async () => { - try { - const info = await invoke('check_for_updates'); - if (info.has_update) { - setUpdateInfo(info); - setUpdateState('available'); - setTimeout(() => setIsVisible(true), 100); - } else { - onClose(); - } - } catch (error) { - console.error('Failed to check for updates:', error); - onClose(); - } - }; - - const handleAutoUpdate = async () => { - if (!isTauri()) { - handleManualDownload(); - return; - } - - setUpdateState('downloading'); - try { - const update = await tauriCheck(); - if (update) { - let downloaded = 0; - let contentLength = 0; - - await update.downloadAndInstall((event) => { - switch (event.event) { - case 'Started': - contentLength = event.data.contentLength || 0; - break; - case 'Progress': - downloaded += event.data.chunkLength; - if (contentLength > 0) { - setDownloadProgress(Math.round((downloaded / contentLength) * 100)); - } - break; - case 'Finished': - setUpdateState('ready'); - break; - } - }); - - setUpdateState('ready'); - setTimeout(async () => { - await tauriRelaunch(); - }, 1500); - } else { - // Native updater found no update (e.g. 
draft release or updater.json not ready) - // Fallback to manual download - console.warn('Native updater returned null, falling back to manual download'); - showToast(t('update_notification.toast.not_ready'), 'info'); - setUpdateState('available'); - handleManualDownload(); - } - } catch (error) { - console.error('Auto update failed:', error); - showToast(t('update_notification.toast.failed'), 'error'); - setUpdateState('available'); // Revert state so user can try again - handleManualDownload(); - } - }; - - const handleManualDownload = () => { - if (updateInfo?.download_url) { - window.open(updateInfo.download_url, '_blank'); - handleClose(); - } - }; - - const handleClose = () => { - setIsClosing(true); - setIsVisible(false); - setTimeout(onClose, 400); - }; - - if (!updateInfo && updateState !== 'checking') { - return null; - } - - return ( -
-
-
-
- -
-
-
-
- {updateState === 'ready' ? ( - - ) : ( - - )} -
-
-

- {updateState === 'ready' - ? t('update_notification.ready') - : t('update_notification.title')} -

- {updateInfo && ( -
-

- v{updateInfo.latest_version} -

- {updateInfo.source && updateInfo.source !== 'GitHub API' && ( -

- via {updateInfo.source} -

- )} -
- )} -
-
- - {updateState !== 'downloading' && updateState !== 'ready' && ( - - )} -
- -
-

- {updateState === 'downloading' && t('update_notification.downloading')} - {updateState === 'ready' && t('update_notification.restarting')} - {updateState === 'available' && updateInfo && t('update_notification.message', { current: updateInfo.current_version })} -

-
- - {updateState === 'downloading' && ( -
-
-
-
-

{downloadProgress}%

-
- )} - - {updateState === 'available' && ( -
- -
- )} - - {(updateState === 'downloading' || updateState === 'ready') && ( -
- {updateState === 'downloading' && } - {updateState === 'ready' && } -
- )} -
-
-
- ); -}; diff --git a/src/components/accounts/AccountCard.tsx b/src/components/accounts/AccountCard.tsx deleted file mode 100755 index 02ab337d2..000000000 --- a/src/components/accounts/AccountCard.tsx +++ /dev/null @@ -1,358 +0,0 @@ -import { useMemo, useState } from 'react'; -import { ArrowRightLeft, RefreshCw, Trash2, Download, Info, Lock, Ban, Diamond, Gem, Circle, ToggleLeft, ToggleRight, Fingerprint, Sparkles, Tag, X, Check } from 'lucide-react'; -import { Account } from '../../types/account'; -import { cn } from '../../utils/cn'; -import { useTranslation } from 'react-i18next'; -import { useConfigStore } from '../../stores/useConfigStore'; -import { QuotaItem } from './QuotaItem'; -import { MODEL_CONFIG, sortModels } from '../../config/modelConfig'; - -interface AccountCardProps { - account: Account; - selected: boolean; - onSelect: () => void; - isCurrent: boolean; - isRefreshing: boolean; - isSwitching?: boolean; - onSwitch: () => void; - onRefresh: () => void; - onViewDevice: () => void; - onViewDetails: () => void; - onExport: () => void; - onDelete: () => void; - onToggleProxy: () => void; - onWarmup?: () => void; - onUpdateLabel?: (label: string) => void; -} - -// 使用统一的模型配置 -const DEFAULT_MODELS = Object.entries(MODEL_CONFIG).map(([id, config]) => ({ - id, - label: config.label, - protectedKey: config.protectedKey, - Icon: config.Icon -})); - -function AccountCard({ account, selected, onSelect, isCurrent: propIsCurrent, isRefreshing, isSwitching = false, onSwitch, onRefresh, onViewDetails, onExport, onDelete, onToggleProxy, onViewDevice, onWarmup, onUpdateLabel }: AccountCardProps) { - const { t } = useTranslation(); - const { config, showAllQuotas } = useConfigStore(); - const isDisabled = Boolean(account.disabled); - - // 自定义标签编辑状态 - const [isEditingLabel, setIsEditingLabel] = useState(false); - const [labelInput, setLabelInput] = useState(account.custom_label || ''); - - // Use the prop directly from parent component - const isCurrent = 
propIsCurrent; - - const handleSaveLabel = () => { - if (onUpdateLabel) { - onUpdateLabel(labelInput.trim()); - } - setIsEditingLabel(false); - }; - - const handleCancelLabel = () => { - setLabelInput(account.custom_label || ''); - setIsEditingLabel(false); - }; - - const handleKeyDown = (e: React.KeyboardEvent) => { - if (e.key === 'Enter') { - handleSaveLabel(); - } else if (e.key === 'Escape') { - handleCancelLabel(); - } - }; - - const displayModels = useMemo(() => { - // Build map of friendly labels and icons from DEFAULT_MODELS - const iconMap = new Map(DEFAULT_MODELS.map(m => [m.id, m.Icon])); - - // Get all models from account (source of truth) - const accountModels = account.quota?.models?.map(m => { - // 注意:DEFAULT_MODELS 现在应该包含 shortLabel,我们需要确保它被正确映射 - // 但 DEFAULT_MODELS 是从 MODEL_CONFIG 生成的,我们需要确保它包含 shortLabel - // 这里为了安全,直接从 MODEL_CONFIG 获取 - const fullConfig = MODEL_CONFIG[m.name.toLowerCase()]; - return { - id: m.name, - label: fullConfig?.shortLabel || fullConfig?.label || m.name, - protectedKey: fullConfig?.protectedKey, - Icon: iconMap.get(m.name), - data: m - }; - }) || []; - - let models: typeof accountModels; - - if (showAllQuotas) { - models = accountModels; - } else { - // Filter for pinned or defaults - const pinned = config?.pinned_quota_models?.models; - if (pinned && pinned.length > 0) { - models = accountModels.filter(m => pinned.includes(m.id)); - } else { - // Default fallback: show known default models - models = accountModels.filter(m => DEFAULT_MODELS.some(d => d.id === m.id)); - } - } - - // 应用排序 - return sortModels(models); - }, [config, account, showAllQuotas]); - - const isModelProtected = (key?: string) => { - if (!key) return false; - return account.protected_models?.includes(key); - }; - - return ( -
- - {/* Header: Checkbox + Email + Badges */} -
- onSelect()} - onClick={(e) => e.stopPropagation()} - /> -
-

- {account.email} -

-
-
- {isCurrent && ( - - {t('accounts.current').toUpperCase()} - - )} - {isDisabled && ( - - - {t('accounts.disabled').toUpperCase()} - - )} - {account.proxy_disabled && ( - - - {t('accounts.proxy_disabled').toUpperCase()} - - )} - {account.quota?.is_forbidden && ( - - - {t('accounts.forbidden').toUpperCase()} - - )} - {/* 订阅类型徽章 */} - {account.quota?.subscription_tier && (() => { - const tier = account.quota.subscription_tier.toLowerCase(); - if (tier.includes('ultra')) { - return ( - - - ULTRA - - ); - } else if (tier.includes('pro')) { - return ( - - - PRO - - ); - } else { - return ( - - - FREE - - ); - } - })()} - {/* 自定义标签 */} - {account.custom_label && ( - - - {account.custom_label} - - )} -
- - {new Date(account.last_used * 1000).toLocaleString([], { year: 'numeric', month: '2-digit', day: '2-digit', hour: '2-digit', minute: '2-digit' })} - -
-
-
- - {/* 配额展示 */} -
- {account.quota?.is_forbidden ? ( -
- - {t('accounts.forbidden_msg')} -
- ) : ( -
- {displayModels.map((model) => ( - - ))} -
- )} -
- - {/* Footer: Actions Only */} -
- {/* 标签编辑弹出框 */} - {isEditingLabel && ( -
-
- setLabelInput(e.target.value)} - onKeyDown={handleKeyDown} - autoFocus - maxLength={15} - /> - - -
-
- )} -
- - - {/* 自定义标签按钮 */} - {onUpdateLabel && ( - - )} - - {onWarmup && ( - - )} - - - - -
-
-
- ); -} - -export default AccountCard; diff --git a/src/components/accounts/AccountDetailsDialog.tsx b/src/components/accounts/AccountDetailsDialog.tsx deleted file mode 100755 index 4f6921cee..000000000 --- a/src/components/accounts/AccountDetailsDialog.tsx +++ /dev/null @@ -1,143 +0,0 @@ -import { X, Clock, AlertCircle } from 'lucide-react'; -import { createPortal } from 'react-dom'; -import { Account } from '../../types/account'; -import { formatDate } from '../../utils/format'; -import { useTranslation } from 'react-i18next'; -import { MODEL_CONFIG, sortModels } from '../../config/modelConfig'; - -interface AccountDetailsDialogProps { - account: Account | null; - onClose: () => void; -} - -export default function AccountDetailsDialog({ account, onClose }: AccountDetailsDialogProps) { - const { t } = useTranslation(); - if (!account) return null; - - return createPortal( -
- {/* Draggable Top Region */} -
- -
- {/* Header */} -
-
-

{t('accounts.details.title')}

-
- {account.email} -
- {account.quota?.subscription_tier && ( -
- {account.quota.subscription_tier} -
- )} -
- -
- - {/* Status Alerts */} - {(account.disabled || account.proxy_disabled) && ( -
- {account.disabled && ( -
- - {t('accounts.status.disabled')}: - {account.disabled_reason || t('common.unknown')} -
- )} - {account.proxy_disabled && ( -
- - {t('accounts.status.proxy_disabled')}: - {account.proxy_disabled_reason || t('common.unknown')} -
- )} -
- )} - - {/* Content */} -
- {/* Protected Models Section */} - {account.protected_models && account.protected_models.length > 0 && ( -
-

- - {t('accounts.details.protected_models')} -

-
- {account.protected_models.map(model => ( - - {model} - - ))} -
-
- )} - -

{t('accounts.details.model_quota')}

-
- {sortModels( - (account.quota?.models || []).map(model => ({ - id: model.name.toLowerCase(), - model - })) - ).map(({ model }) => ( -
-
-
- {(() => { - const Icon = MODEL_CONFIG[model.name.toLowerCase()]?.Icon; - return Icon ? : null; - })()} - - {MODEL_CONFIG[model.name.toLowerCase()]?.label || model.name} - -
- = 50 ? 'bg-green-50 text-green-700 dark:bg-green-900/30 dark:text-green-400' : - model.percentage >= 20 ? 'bg-orange-50 text-orange-700 dark:bg-orange-900/30 dark:text-orange-400' : - 'bg-red-50 text-red-700 dark:bg-red-900/30 dark:text-red-400' - }`} - > - {model.percentage}% - -
- - {/* Progress Bar */} -
-
= 50 ? 'bg-emerald-500' : - model.percentage >= 20 ? 'bg-orange-400' : - 'bg-red-500' - }`} - style={{ width: `${model.percentage}%` }} - >
-
- -
- - {t('accounts.reset_time')}: {formatDate(model.reset_time) || t('common.unknown')} -
-
- )) || ( -
- - {t('accounts.no_data')} -
- )} -
-
-
-
-
, - document.body - ); -} diff --git a/src/components/accounts/AccountGrid.tsx b/src/components/accounts/AccountGrid.tsx deleted file mode 100755 index 44cb143b8..000000000 --- a/src/components/accounts/AccountGrid.tsx +++ /dev/null @@ -1,61 +0,0 @@ -import { useTranslation } from 'react-i18next'; -import { Account } from '../../types/account'; -import AccountCard from './AccountCard'; - -interface AccountGridProps { - accounts: Account[]; - selectedIds: Set; - refreshingIds: Set; - onToggleSelect: (id: string) => void; - currentAccountId: string | null; - switchingAccountId: string | null; - onSwitch: (accountId: string) => void; - onRefresh: (accountId: string) => void; - onViewDevice: (accountId: string) => void; - onViewDetails: (accountId: string) => void; - onExport: (accountId: string) => void; - onDelete: (accountId: string) => void; - onToggleProxy: (accountId: string) => void; - onWarmup?: (accountId: string) => void; - onUpdateLabel?: (accountId: string, label: string) => void; -} - - -function AccountGrid({ accounts, selectedIds, refreshingIds, onToggleSelect, currentAccountId, switchingAccountId, onSwitch, onRefresh, onViewDetails, onExport, onDelete, onToggleProxy, onViewDevice, onWarmup, onUpdateLabel }: AccountGridProps) { - const { t } = useTranslation(); - if (accounts.length === 0) { - return ( -
-

{t('accounts.empty.title')}

-

{t('accounts.empty.desc')}

-
- ); - } - - return ( -
- {accounts.map((account) => ( - onToggleSelect(account.id)} - isCurrent={account.id === currentAccountId} - isSwitching={account.id === switchingAccountId} - onSwitch={() => onSwitch(account.id)} - onRefresh={() => onRefresh(account.id)} - onViewDevice={() => onViewDevice(account.id)} - onViewDetails={() => onViewDetails(account.id)} - onExport={() => onExport(account.id)} - onDelete={() => onDelete(account.id)} - onToggleProxy={() => onToggleProxy(account.id)} - onWarmup={onWarmup ? () => onWarmup(account.id) : undefined} - onUpdateLabel={onUpdateLabel ? (label: string) => onUpdateLabel(account.id, label) : undefined} - /> - ))} -
- ); -} - -export default AccountGrid; diff --git a/src/components/accounts/AccountRow.tsx b/src/components/accounts/AccountRow.tsx deleted file mode 100755 index 0fb1b353f..000000000 --- a/src/components/accounts/AccountRow.tsx +++ /dev/null @@ -1,353 +0,0 @@ -import { ArrowRightLeft, RefreshCw, Trash2, Download, Info, Lock, Ban, Diamond, Gem, Circle, Clock, ToggleLeft, ToggleRight, Fingerprint } from 'lucide-react'; -import { Account } from '../../types/account'; -import { getQuotaColor, formatTimeRemaining, getTimeRemainingColor } from '../../utils/format'; -import { cn } from '../../utils/cn'; -import { useTranslation } from 'react-i18next'; - -interface AccountRowProps { - account: Account; - selected: boolean; - onSelect: () => void; - isCurrent: boolean; - isRefreshing: boolean; - isSwitching?: boolean; - onSwitch: () => void; - onRefresh: () => void; - onViewDevice: () => void; - onViewDetails: () => void; - onExport: () => void; - onDelete: () => void; - onToggleProxy: () => void; -} - - - -function AccountRow({ account, selected, onSelect, isCurrent, isRefreshing, isSwitching = false, onSwitch, onRefresh, onViewDetails, onExport, onDelete, onToggleProxy, onViewDevice }: AccountRowProps) { - const { t } = useTranslation(); - const geminiProModel = account.quota?.models.find(m => m.name.toLowerCase() === 'gemini-3-pro-high'); - const geminiFlashModel = account.quota?.models.find(m => m.name.toLowerCase() === 'gemini-3-flash'); - const geminiImageModel = account.quota?.models.find(m => m.name.toLowerCase() === 'gemini-3-pro-image'); - const claudeModel = account.quota?.models.find(m => m.name.toLowerCase() === 'claude-sonnet-4-5-thinking'); - const isDisabled = Boolean(account.disabled); - - // 颜色映射,避免动态类名被 Tailwind purge - const getColorClass = (percentage: number) => { - const color = getQuotaColor(percentage); - switch (color) { - case 'success': return 'bg-emerald-500'; - case 'warning': return 'bg-amber-500'; - case 'error': return 'bg-rose-500'; - 
default: return 'bg-gray-500'; - } - }; - - const getTimeColorClass = (resetTime: string | undefined) => { - const color = getTimeRemainingColor(resetTime); - switch (color) { - case 'success': return 'text-emerald-500 dark:text-emerald-400'; - case 'warning': return 'text-amber-500 dark:text-amber-400'; - default: return 'text-gray-400 dark:text-gray-500 opacity-60'; - } - }; - - return ( - - {/* 序号 */} - - onSelect()} - onClick={(e) => e.stopPropagation()} - /> - - - {/* 邮箱 */} - -
- - {account.email} - - -
- {isCurrent && ( - - {t('accounts.current').toUpperCase()} - - )} - - {isDisabled && ( - - - {t('accounts.disabled')} - - )} - - {account.proxy_disabled && ( - - - {t('accounts.proxy_disabled')} - - )} - - {account.quota?.is_forbidden && ( - - - {t('accounts.forbidden')} - - )} - - {/* 订阅类型徽章 */} - {account.quota?.subscription_tier && (() => { - const tier = account.quota.subscription_tier.toLowerCase(); - if (tier.includes('ultra')) { - return ( - - - ULTRA - - ); - } else if (tier.includes('pro')) { - return ( - - - PRO - - ); - } else { - return ( - - - FREE - - ); - } - })()} -
-
- - - {/* 模型配额 */} - - {account.quota?.is_forbidden ? ( -
- - {t('accounts.forbidden_msg')} -
- ) : ( -
- {/* Gemini Pro */} -
- {geminiProModel && ( -
- )} -
- G3 Pro -
- {geminiProModel?.reset_time ? ( - - - {formatTimeRemaining(geminiProModel.reset_time)} - - ) : ( - N/A - )} -
- - {geminiProModel ? `${geminiProModel.percentage}%` : '-'} - -
-
- - {/* Gemini Flash */} -
- {geminiFlashModel && ( -
- )} -
- G3 Flash -
- {geminiFlashModel?.reset_time ? ( - - - {formatTimeRemaining(geminiFlashModel.reset_time)} - - ) : ( - N/A - )} -
- - {geminiFlashModel ? `${geminiFlashModel.percentage}%` : '-'} - -
-
- - {/* Gemini Image */} -
- {geminiImageModel && ( -
- )} -
- G3 Image -
- {geminiImageModel?.reset_time ? ( - - - {formatTimeRemaining(geminiImageModel.reset_time)} - - ) : ( - N/A - )} -
- - {geminiImageModel ? `${geminiImageModel.percentage}%` : '-'} - -
-
- - {/* Claude */} -
- {claudeModel && ( -
- )} -
- Claude 4.5 -
- {claudeModel?.reset_time ? ( - - - {formatTimeRemaining(claudeModel.reset_time)} - - ) : ( - N/A - )} -
- - {claudeModel ? `${claudeModel.percentage}%` : '-'} - -
-
-
- )} - - - {/* 最后使用 */} - -
- - {new Date(account.last_used * 1000).toLocaleDateString()} - - - {new Date(account.last_used * 1000).toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })} - -
- - - {/* 操作 */} - -
- - - - - - - -
- - - ); -} - -export default AccountRow; diff --git a/src/components/accounts/AccountTable.tsx b/src/components/accounts/AccountTable.tsx deleted file mode 100755 index 6f27e9d16..000000000 --- a/src/components/accounts/AccountTable.tsx +++ /dev/null @@ -1,769 +0,0 @@ -/** - * 账号表格组件 - * 支持拖拽排序功能,用户可以通过拖拽行来调整账号顺序 - */ -import { useMemo, useState } from 'react'; -import { - DndContext, - closestCenter, - KeyboardSensor, - PointerSensor, - useSensor, - useSensors, - DragEndEvent, - DragStartEvent, - DragOverlay, -} from '@dnd-kit/core'; -import { - arrayMove, - SortableContext, - sortableKeyboardCoordinates, - useSortable, - verticalListSortingStrategy, -} from '@dnd-kit/sortable'; -import { CSS } from '@dnd-kit/utilities'; -import { - GripVertical, - ArrowRightLeft, - RefreshCw, - Trash2, - Download, - Fingerprint, - Info, - Lock, - Ban, - Diamond, - Gem, - Circle, - ToggleLeft, - ToggleRight, - Sparkles, - Tag, - X, - Check, -} from 'lucide-react'; -import { Account } from '../../types/account'; -import { useTranslation } from 'react-i18next'; -import { cn } from '../../utils/cn'; - -import { useConfigStore } from '../../stores/useConfigStore'; -import { QuotaItem } from './QuotaItem'; -import { MODEL_CONFIG, sortModels } from '../../config/modelConfig'; - -// ============================================================================ -// 类型定义 -// ============================================================================ - -interface AccountTableProps { - accounts: Account[]; - selectedIds: Set; - refreshingIds: Set; - onToggleSelect: (id: string) => void; - onToggleAll: () => void; - currentAccountId: string | null; - switchingAccountId: string | null; - onSwitch: (accountId: string) => void; - onRefresh: (accountId: string) => void; - onViewDevice: (accountId: string) => void; - onViewDetails: (accountId: string) => void; - onExport: (accountId: string) => void; - onDelete: (accountId: string) => void; - onToggleProxy: (accountId: string) => void; - 
onWarmup?: (accountId: string) => void; - onUpdateLabel?: (accountId: string, label: string) => void; - /** 拖拽排序回调,当用户完成拖拽时触发 */ - onReorder?: (accountIds: string[]) => void; -} - -interface SortableRowProps { - account: Account; - selected: boolean; - isRefreshing: boolean; - isCurrent: boolean; - isSwitching: boolean; - isDragging?: boolean; - onSelect: () => void; - onSwitch: () => void; - onRefresh: () => void; - onViewDevice: () => void; - onViewDetails: () => void; - onExport: () => void; - onDelete: () => void; - onToggleProxy: () => void; - onWarmup?: () => void; - onUpdateLabel?: (label: string) => void; -} - -interface AccountRowContentProps { - account: Account; - isCurrent: boolean; - isRefreshing: boolean; - isSwitching: boolean; - onSwitch: () => void; - onRefresh: () => void; - onViewDevice: () => void; - onViewDetails: () => void; - onExport: () => void; - onDelete: () => void; - onToggleProxy: () => void; - onWarmup?: () => void; - onUpdateLabel?: (label: string) => void; -} - -// ============================================================================ -// 辅助函数 -// ============================================================================ - - - -// ============================================================================ -// 模型分组配置 -// ============================================================================ - -const MODEL_GROUPS = { - CLAUDE: [ - 'claude-sonnet-4-5', - 'claude-sonnet-4-5-thinking', - 'claude-opus-4-5-thinking' - ], - GEMINI_PRO: [ - 'gemini-3-pro-high', - 'gemini-3-pro-low', - 'gemini-3-pro-preview' - ], - GEMINI_FLASH: [ - 'gemini-3-flash' - ] -}; - -function isModelProtected(protectedModels: string[] | undefined, modelName: string): boolean { - if (!protectedModels || protectedModels.length === 0) return false; - const lowerName = modelName.toLowerCase(); - - // Helper to check if any model in the group is protected - const isGroupProtected = (group: string[]) => { - return group.some(m => 
protectedModels.includes(m)); - }; - - // UI Column Keys Mapping (for backward compatibility with hardcoded UI calls) - if (lowerName === 'gemini-pro') return isGroupProtected(MODEL_GROUPS.GEMINI_PRO); - if (lowerName === 'gemini-flash') return isGroupProtected(MODEL_GROUPS.GEMINI_FLASH); - if (lowerName === 'claude-sonnet') return isGroupProtected(MODEL_GROUPS.CLAUDE); - - // 1. Gemini Pro Group - if (MODEL_GROUPS.GEMINI_PRO.some(m => lowerName === m)) { - return isGroupProtected(MODEL_GROUPS.GEMINI_PRO); - } - - // 2. Claude Group - if (MODEL_GROUPS.CLAUDE.some(m => lowerName === m)) { - return isGroupProtected(MODEL_GROUPS.CLAUDE); - } - - // 3. Gemini Flash Group - if (MODEL_GROUPS.GEMINI_FLASH.some(m => lowerName === m)) { - return isGroupProtected(MODEL_GROUPS.GEMINI_FLASH); - } - - // 兜底直接检查 (Strict check for exact match or normalized ID) - return protectedModels.includes(lowerName); -} - -// ============================================================================ -// 子组件 -// ============================================================================ - -/** - * 可拖拽的表格行组件 - * 使用 @dnd-kit/sortable 实现拖拽功能 - */ -function SortableAccountRow({ - account, - selected, - isRefreshing, - isCurrent, - isSwitching, - isDragging, - onSelect, - onSwitch, - onRefresh, - onViewDevice, - onViewDetails, - onExport, - onDelete, - onToggleProxy, - onWarmup, - onUpdateLabel, -}: SortableRowProps) { - const { t } = useTranslation(); - const { - attributes, - listeners, - setNodeRef, - transform, - transition, - isDragging: isSortableDragging, - } = useSortable({ id: account.id }); - - const style = { - transform: CSS.Transform.toString(transform), - transition, - opacity: isSortableDragging ? 0.5 : 1, - zIndex: isSortableDragging ? 1000 : 'auto', - }; - - return ( - - {/* 拖拽手柄 */} - -
- -
- - {/* 复选框 */} - - e.stopPropagation()} - /> - - - - ); -} - -/** - * 账号行内容组件 - * 渲染邮箱、配额、最后使用时间和操作按钮等列 - */ -function AccountRowContent({ - account, - isCurrent, - isRefreshing, - isSwitching, - onSwitch, - onRefresh, - onViewDevice, - onViewDetails, - onExport, - onDelete, - onToggleProxy, - onWarmup, - onUpdateLabel, -}: AccountRowContentProps) { - const { t } = useTranslation(); - const { config, showAllQuotas } = useConfigStore(); - - // 自定义标签编辑状态 - const [isEditingLabel, setIsEditingLabel] = useState(false); - const [labelInput, setLabelInput] = useState(account.custom_label || ''); - - const handleSaveLabel = () => { - if (onUpdateLabel) { - onUpdateLabel(labelInput.trim()); - } - setIsEditingLabel(false); - }; - - const handleCancelLabel = () => { - setLabelInput(account.custom_label || ''); - setIsEditingLabel(false); - }; - - const handleKeyDown = (e: React.KeyboardEvent) => { - if (e.key === 'Enter') { - handleSaveLabel(); - } else if (e.key === 'Escape') { - handleCancelLabel(); - } - }; - - // 使用统一的模型配置 - - // 获取要显示的模型列表 - const pinnedModels = config?.pinned_quota_models?.models || Object.keys(MODEL_CONFIG); - - // 根据 show_all 状态决定显示哪些模型 - const displayModels = sortModels( - showAllQuotas - ? (account.quota?.models || []).map(m => { - const config = MODEL_CONFIG[m.name.toLowerCase()]; - return { - id: m.name.toLowerCase(), - label: config?.shortLabel || config?.label || m.name, - protectedKey: config?.protectedKey || m.name.toLowerCase(), - data: m - }; - }) - : pinnedModels.filter(modelId => MODEL_CONFIG[modelId]).map(modelId => { - const config = MODEL_CONFIG[modelId]; - return { - id: modelId, - label: config.shortLabel || config.label, - protectedKey: config.protectedKey, - data: account.quota?.models.find(m => m.name.toLowerCase() === modelId) - }; - }) - ); - - const isDisabled = Boolean(account.disabled); - - return ( - <> - {/* 邮箱列 */} - -
- - {account.email} - - -
- {isCurrent && ( - - {t('accounts.current').toUpperCase()} - - )} - - {isDisabled && ( - - - {t('accounts.disabled')} - - )} - - {account.proxy_disabled && ( - - - {t('accounts.proxy_disabled')} - - )} - - {account.quota?.is_forbidden && ( - - - {t('accounts.forbidden')} - - )} - - {/* 订阅类型徽章 */} - {account.quota?.subscription_tier && (() => { - const tier = account.quota.subscription_tier.toLowerCase(); - if (tier.includes('ultra')) { - return ( - - - {t('accounts.ultra')} - - ); - } else if (tier.includes('pro')) { - return ( - - - {t('accounts.pro')} - - ); - } else { - return ( - - - {t('accounts.free')} - - ); - } - })()} - {/* 自定义标签 */} - {account.custom_label && !isEditingLabel && ( - - - {account.custom_label} - - )} - {/* 标签编辑输入框 */} - {isEditingLabel && ( -
- setLabelInput(e.target.value)} - onKeyDown={handleKeyDown} - autoFocus - maxLength={15} - onClick={(e) => e.stopPropagation()} - /> - - -
- )} -
-
- - - {/* 模型配额列 */} - - {account.quota?.is_forbidden ? ( -
- - {t('accounts.forbidden_msg')} -
- ) : ( -
- {displayModels.map((model) => { - const modelData = model.data; - - return ( - - ); - })} -
- )} - - - {/* 最后使用时间列 */} - -
- - {new Date(account.last_used * 1000).toLocaleDateString()} - - - {new Date(account.last_used * 1000).toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })} - -
- - - {/* 操作列 */} - -
- - - {/* 自定义标签按钮 */} - {onUpdateLabel && ( - - )} - - {onWarmup && ( - - )} - - - - -
- - - ); -} - -// ============================================================================ -// 主组件 -// ============================================================================ - -/** - * 账号表格组件 - * 支持拖拽排序、多选、批量操作等功能 - */ -function AccountTable({ - accounts, - selectedIds, - refreshingIds, - onToggleSelect, - onToggleAll, - currentAccountId, - switchingAccountId, - onSwitch, - onRefresh, - onViewDevice, - onViewDetails, - onExport, - onDelete, - onToggleProxy, - onReorder, - onWarmup, - onUpdateLabel, -}: AccountTableProps) { - const { t } = useTranslation(); - - const [activeId, setActiveId] = useState(null); - // showAllQuotas 已经在 useConfigStore 中解构获取 - - // 配置拖拽传感器 - const sensors = useSensors( - useSensor(PointerSensor, { - activationConstraint: { distance: 8 }, // 需要移动 8px 才触发拖拽 - }), - useSensor(KeyboardSensor, { - coordinateGetter: sortableKeyboardCoordinates, - }) - ); - - const accountIds = useMemo(() => accounts.map(a => a.id), [accounts]); - const activeAccount = useMemo(() => accounts.find(a => a.id === activeId), [accounts, activeId]); - - const handleDragStart = (event: DragStartEvent) => { - setActiveId(event.active.id as string); - }; - - const handleDragEnd = (event: DragEndEvent) => { - const { active, over } = event; - setActiveId(null); - - if (over && active.id !== over.id) { - const oldIndex = accountIds.indexOf(active.id as string); - const newIndex = accountIds.indexOf(over.id as string); - - if (oldIndex !== -1 && newIndex !== -1 && onReorder) { - onReorder(arrayMove(accountIds, oldIndex, newIndex)); - } - } - }; - - if (accounts.length === 0) { - return ( -
-

{t('accounts.empty.title')}

-

{t('accounts.empty.desc')}

-
- ); - } - - return ( - -
- - - - - - - - - - - - - - {accounts.map((account) => ( - onToggleSelect(account.id)} - onSwitch={() => onSwitch(account.id)} - onRefresh={() => onRefresh(account.id)} - onViewDevice={() => onViewDevice(account.id)} - onViewDetails={() => onViewDetails(account.id)} - onExport={() => onExport(account.id)} - onDelete={() => onDelete(account.id)} - onToggleProxy={() => onToggleProxy(account.id)} - onWarmup={onWarmup ? () => onWarmup(account.id) : undefined} - onUpdateLabel={onUpdateLabel ? (label: string) => onUpdateLabel(account.id, label) : undefined} - /> - ))} - - -
- {t('accounts.drag_to_reorder')} - - 0 && selectedIds.size === accounts.length} - onChange={onToggleAll} - /> - {t('accounts.table.email')} - {t('accounts.table.quota')} - {t('accounts.table.last_used')}{t('accounts.table.actions')}
-
- - {/* 拖拽悬浮预览层 */} - - { - activeAccount ? ( - - - - - - { }} - onRefresh={() => { }} - onViewDevice={() => { }} - onViewDetails={() => { }} - onExport={() => { }} - onDelete={() => { }} - onToggleProxy={() => { }} - /> - - -
-
- -
-
- -
- ) : null - } -
-
- ); -} - -export default AccountTable; diff --git a/src/components/accounts/AddAccountDialog.tsx b/src/components/accounts/AddAccountDialog.tsx deleted file mode 100755 index 5f1925ec8..000000000 --- a/src/components/accounts/AddAccountDialog.tsx +++ /dev/null @@ -1,725 +0,0 @@ -import { useState, useEffect, useRef } from 'react'; -import { createPortal } from 'react-dom'; -import { Plus, Database, Globe, FileClock, Loader2, CheckCircle2, XCircle, Copy, Check, Info, Link2 } from 'lucide-react'; -import { useAccountStore } from '../../stores/useAccountStore'; -import { useTranslation } from 'react-i18next'; -import { listen } from '@tauri-apps/api/event'; -import { open } from '@tauri-apps/plugin-dialog'; -import { request as invoke } from '../../utils/request'; -import { isTauri } from '../../utils/env'; -import { copyToClipboard } from '../../utils/clipboard'; - -interface AddAccountDialogProps { - onAdd: (email: string, refreshToken: string) => Promise; - showText?: boolean; -} - -type Status = 'idle' | 'loading' | 'success' | 'error'; - -function AddAccountDialog({ onAdd, showText = true }: AddAccountDialogProps) { - const { t } = useTranslation(); - const fetchAccounts = useAccountStore(state => state.fetchAccounts); - const [isOpen, setIsOpen] = useState(false); - const [activeTab, setActiveTab] = useState<'oauth' | 'token' | 'import'>(isTauri() ? 
'oauth' : 'token'); - const [refreshToken, setRefreshToken] = useState(''); - const [oauthUrl, setOauthUrl] = useState(''); - const [oauthUrlCopied, setOauthUrlCopied] = useState(false); - const [manualCode, setManualCode] = useState(''); - - // UI State - const [status, setStatus] = useState('idle'); - const [message, setMessage] = useState(''); - - const { startOAuthLogin, completeOAuthLogin, cancelOAuthLogin, importFromDb, importV1Accounts, importFromCustomDb } = useAccountStore(); - - const oauthUrlRef = useRef(oauthUrl); - const statusRef = useRef(status); - const activeTabRef = useRef(activeTab); - const isOpenRef = useRef(isOpen); - - useEffect(() => { - oauthUrlRef.current = oauthUrl; - statusRef.current = status; - activeTabRef.current = activeTab; - isOpenRef.current = isOpen; - }, [oauthUrl, status, activeTab, isOpen]); - - // Reset state when dialog opens or tab changes - useEffect(() => { - if (isOpen) { - resetState(); - } - }, [isOpen, activeTab]); - - // Listen for OAuth URL - useEffect(() => { - if (!isTauri()) return; - let unlisten: (() => void) | undefined; - - const setupListener = async () => { - unlisten = await listen('oauth-url-generated', (event) => { - setOauthUrl(event.payload as string); - // 自动复制到剪贴板? 
可选,这里只设置状态让用户手动复制 - }); - }; - - setupListener(); - - return () => { - if (unlisten) unlisten(); - }; - }, []); - - // Listen for OAuth callback completion (user may open the URL manually without clicking Start) - useEffect(() => { - if (!isTauri()) return; - let unlisten: (() => void) | undefined; - - const setupListener = async () => { - unlisten = await listen('oauth-callback-received', async () => { - if (!isOpenRef.current) return; - if (activeTabRef.current !== 'oauth') return; - if (statusRef.current === 'loading' || statusRef.current === 'success') return; - if (!oauthUrlRef.current) return; - - // Auto-complete: exchange code and save account (no browser open) - setStatus('loading'); - setMessage(`${t('accounts.add.tabs.oauth')}...`); - - try { - await completeOAuthLogin(); - setStatus('success'); - setMessage(`${t('accounts.add.tabs.oauth')} ${t('common.success')}!`); - setTimeout(() => { - setIsOpen(false); - resetState(); - }, 1500); - } catch (error) { - setStatus('error'); - let errorMsg = String(error); - if (errorMsg.includes('Refresh Token') || errorMsg.includes('refresh_token')) { - setMessage(errorMsg); - } else if (errorMsg.includes('Tauri') || errorMsg.toLowerCase().includes('environment') || errorMsg.includes('环境')) { - setMessage(t('common.environment_error', { error: errorMsg })); - } else { - setMessage(`${t('accounts.add.tabs.oauth')} ${t('common.error')}: ${errorMsg}`); - } - } - }); - }; - - setupListener(); - - return () => { - if (unlisten) unlisten(); - }; - }, [completeOAuthLogin, t]); - - // Pre-generate OAuth URL when dialog opens on OAuth tab (so URL is shown BEFORE "Start OAuth") - useEffect(() => { - if (!isOpen) return; - if (activeTab !== 'oauth') return; - if (oauthUrl) return; - - invoke('prepare_oauth_url') - .then((res) => { - const url = typeof res === 'string' ? 
res : res?.url; - if (url && url.length > 0) setOauthUrl(url); - }) - .catch((e) => { - console.error('Failed to prepare OAuth URL:', e); - }); - }, [isOpen, activeTab, oauthUrl]); - - // If user navigates away from OAuth tab, cancel prepared flow to release the port. - useEffect(() => { - if (!isOpen) return; - if (activeTab === 'oauth') return; - if (!oauthUrl) return; - - cancelOAuthLogin().catch(() => { }); - setOauthUrl(''); - setOauthUrlCopied(false); - }, [isOpen, activeTab]); - - const resetState = () => { - setStatus('idle'); - setMessage(''); - setRefreshToken(''); - setOauthUrl(''); - setOauthUrlCopied(false); - }; - - const handleAction = async ( - actionName: string, - actionFn: () => Promise, - options?: { clearOauthUrl?: boolean } - ) => { - setStatus('loading'); - setMessage(`${actionName}...`); - if (options?.clearOauthUrl !== false) { - setOauthUrl(''); // Clear previous URL - } - try { - await actionFn(); - setStatus('success'); - setMessage(`${actionName} ${t('common.success')}!`); - - // 延迟关闭,让用户看到成功状态 - setTimeout(() => { - setIsOpen(false); - resetState(); - }, 1500); - } catch (error) { - setStatus('error'); - - // 改进错误信息显示 - let errorMsg = String(error); - - // 如果是 refresh_token 缺失错误,显示完整信息(包含解决方案) - if (errorMsg.includes('Refresh Token') || errorMsg.includes('refresh_token')) { - setMessage(errorMsg); - } else if (errorMsg.includes('Tauri') || errorMsg.toLowerCase().includes('environment') || errorMsg.includes('环境')) { - // 环境错误 - setMessage(t('common.environment_error', { error: errorMsg })); - } else { - // 其他错误 - setMessage(`${actionName} ${t('common.error')}: ${errorMsg}`); - } - } - }; - - const handleSubmit = async () => { - if (!refreshToken) { - setStatus('error'); - setMessage(t('accounts.add.token.error_token')); - return; - } - - setStatus('loading'); - - // 1. 
尝试解析输入 - let tokens: string[] = []; - const input = refreshToken.trim(); - - try { - // 尝试解析为 JSON - if (input.startsWith('[') && input.endsWith(']')) { - const parsed = JSON.parse(input); - if (Array.isArray(parsed)) { - tokens = parsed - .map((item: any) => item.refresh_token) - .filter((t: any) => typeof t === 'string' && t.startsWith('1//')); - } - } - } catch (e) { - // JSON 解析失败,忽略 - console.debug('JSON parse failed, falling back to regex', e); - } - - // 2. 如果 JSON 解析没有结果,尝试正则提取 (或者输入不是 JSON) - if (tokens.length === 0) { - const regex = /1\/\/[a-zA-Z0-9_\-]+/g; - const matches = input.match(regex); - if (matches) { - tokens = matches; - } - } - - // 去重 - tokens = [...new Set(tokens)]; - - if (tokens.length === 0) { - setStatus('error'); - setMessage(t('accounts.add.token.error_token')); // 或者提示"未找到有效 Token" - return; - } - - // 3. 批量添加 - let successCount = 0; - let failCount = 0; - - for (let i = 0; i < tokens.length; i++) { - const currentToken = tokens[i]; - setMessage(t('accounts.add.token.batch_progress', { current: i + 1, total: tokens.length })); - - try { - await onAdd("", currentToken); - successCount++; - } catch (error) { - console.error(`Failed to add token ${i + 1}:`, error); - failCount++; - } - // 稍微延迟一下,避免太快 - await new Promise(r => setTimeout(r, 100)); - } - - // 4. 结果反馈 - if (successCount === tokens.length) { - setStatus('success'); - setMessage(t('accounts.add.token.batch_success', { count: successCount })); - setTimeout(() => { - setIsOpen(false); - resetState(); - }, 1500); - } else if (successCount > 0) { - // 部分成功 - setStatus('success'); // 还是用绿色,但提示部分失败 - setMessage(t('accounts.add.token.batch_partial', { success: successCount, fail: failCount })); - // 不自动关闭,让用户看到结果 - } else { - // 全部失败 - setStatus('error'); - setMessage(t('accounts.add.token.batch_fail')); - } - }; - - const handleOAuthWeb = async () => { - try { - setStatus('loading'); - setMessage(t('accounts.add.oauth.btn_start') + '...'); - - // 1. 
获取 URL (指向 /auth/callback) - const res = await invoke('prepare_oauth_url'); - const url = typeof res === 'string' ? res : res.url; - - if (!url) { - throw new Error(t('accounts.add.oauth.error_no_url', 'OAuth URLを取得できませんでした')); - } - - setOauthUrl(url); // 确保链接在 UI 中可见,方便用户手动复制 - - // 2. 打开新标签页 (响应用户反馈:Web 端直接使用新标签体验更好) - const popup = window.open(url, '_blank'); - - if (!popup) { - setStatus('error'); - setMessage(t('accounts.add.oauth.popup_blocked', 'ポップアップがブロックされました')); - return; - } - - // 3. 监听消息 - const handleMessage = async (event: MessageEvent) => { - // 安全检查: 如果定义了 ORIGIN 校验更好,这里暂时检查 data type - if (event.data?.type === 'oauth-success') { - popup.close(); - window.removeEventListener('message', handleMessage); - - // 4. 成功后刷新列表 - await fetchAccounts(); - - setStatus('success'); - setMessage(t('accounts.add.oauth_success') || t('common.success')); - - setTimeout(() => { - setIsOpen(false); - resetState(); - }, 1500); - } - }; - - window.addEventListener('message', handleMessage); - - // 5. 检测窗口关闭 (用户手动关闭) - const timer = setInterval(() => { - if (popup.closed) { - clearInterval(timer); - window.removeEventListener('message', handleMessage); - if (statusRef.current === 'loading') { // 如果还在 loading 状态就关闭了,说明取消了 - setStatus('idle'); - setMessage(''); - } - } - }, 1000); - - } catch (error) { - console.error('OAuth Web Error:', error); - setStatus('error'); - setMessage(`${t('common.error')}: ${error}`); - } - }; - - const handleOAuth = () => { - if (!isTauri()) { - handleOAuthWeb(); - return; - } - // Default flow: opens the default browser and completes automatically. - // (If user opened the URL manually, completion is also triggered by oauth-callback-received.) - handleAction(t('accounts.add.tabs.oauth'), startOAuthLogin, { clearOauthUrl: false }); - }; - - const handleCompleteOAuth = () => { - // Manual flow: user already authorized in their preferred browser, just finish the flow. 
- handleAction(t('accounts.add.tabs.oauth'), completeOAuthLogin, { clearOauthUrl: false }); - }; - - const handleCopyUrl = async () => { - if (oauthUrl) { - const success = await copyToClipboard(oauthUrl); - if (success) { - setOauthUrlCopied(true); - window.setTimeout(() => setOauthUrlCopied(false), 1500); - } - } - }; - - const handleManualSubmit = async () => { - if (!manualCode.trim()) return; - - setStatus('loading'); - setMessage(t('accounts.add.oauth.manual_submitting', '認可コードを送信中...')); - - try { - await invoke('submit_oauth_code', { code: manualCode.trim(), state: null }); - - // 提交成功反馈 - setStatus('success'); - setMessage(t('accounts.add.oauth.manual_submitted', '認可コードを送信しました。バックエンドで処理中です...')); - - setManualCode(''); - - // 对齐 Web 模式下的刷新逻辑 - if (!isTauri()) { - setTimeout(async () => { - await fetchAccounts(); - setIsOpen(false); - resetState(); - }, 2000); - } - } catch (error) { - let errStr = String(error); - if (errStr.includes("No active OAuth flow")) { - setMessage(t('accounts.add.oauth.error_no_flow')); - setStatus('error'); - } else { - setMessage(`${t('common.error')}: ${errStr}`); - setStatus('error'); - } - } - }; - - const handleImportDb = () => { - handleAction(t('accounts.add.tabs.import'), importFromDb); - }; - - const handleImportV1 = () => { - handleAction(t('accounts.add.import.btn_v1'), importV1Accounts); - }; - - const handleImportCustomDb = async () => { - try { - if (!isTauri()) { - alert(t('common.tauri_api_not_loaded') || 'Storage import only works in desktop app.'); - return; - } - const selected = await open({ - multiple: false, - filters: [{ - name: 'VSCode DB', - extensions: ['vscdb'] - }, { - name: 'All Files', - extensions: ['*'] - }] - }); - - if (selected && typeof selected === 'string') { - handleAction(t('accounts.add.import.btn_custom_db') || 'Import Custom DB', () => importFromCustomDb(selected)); - } - } catch (err) { - console.error('Failed to open dialog:', err); - } - }; - - // 状态提示组件 - const StatusAlert = () => { 
- if (status === 'idle' || !message) return null; - - const styles = { - loading: 'alert-info', - success: 'alert-success', - error: 'alert-error' - }; - - const icons = { - loading: , - success: , - error: - }; - - return ( -
- {icons[status]} - {message} -
- ); - }; - - return ( - <> - - - {isOpen && createPortal( -
- {/* Draggable Top Region */} -
- - {/* Click outside to close */} -
setIsOpen(false)} /> - -
-

{t('accounts.add.title')}

- - {/* Tab 导航 - 胶囊风格 */} - -
- - - -
- - {/* 添加 Web 模式提示 */} - {!isTauri() && ( -
- - {t('accounts.add.oauth.web_hint', '将在新窗口中打开 Google 登录页')} -
- )} - - {/* 状态提示区 */} - - -
- {/* OAuth 授权 */} - {activeTab === 'oauth' && ( -
-
-
- -
-
-

{t('accounts.add.oauth.recommend')}

-

- {t('accounts.add.oauth.desc')} -

-
-
-
- - - {oauthUrl && ( -
-
- {t('accounts.add.oauth.link_label')} -
- - - -
- )} - - {/* Manual Code Entry - Always enabled to rescue stuck flows */} -
-
- {t('accounts.add.oauth.manual_hint')} -
-
-
- setManualCode(e.target.value)} - /> -
- -
-
-
-
- )} - - {/* Refresh Token */} - {activeTab === 'token' && ( -
-
-
- {t('accounts.add.token.label')} -
-