From c2dcd867228aae3f2fa0855226244cc701ba4f43 Mon Sep 17 00:00:00 2001 From: Sline Date: Thu, 30 Oct 2025 17:29:15 +0800 Subject: [PATCH] refactor: profile switch (#5197) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor: proxy refresh * fix(proxy-store): properly hydrate and filter backend provider snapshots * fix(proxy-store): add monotonic fetch guard and event bridge cleanup * fix(proxy-store): tweak fetch sequencing guard to prevent snapshot invalidation from wiping fast responses * docs: UPDATELOG.md * fix(proxy-snapshot, proxy-groups): restore last-selected proxy and group info * fix(proxy): merge static and provider entries in snapshot; fix Virtuoso viewport height * fix(proxy-groups): restrict reduced-height viewport to chain-mode column * refactor(profiles): introduce a state machine * refactor: replace state machine with reducer * refactor: introduce a profile switch worker * refactor: hook up a backend-driven profile switch flow * refactor(profile-switch): serialize switches with async queue and enrich frontend events * feat(profiles): centralize profile switching with reducer/driver queue to fix stuck UI on rapid toggles * chore: translate comments and log messages to English to avoid encoding issues * refactor: migrate backend queue to SwitchDriver actor * fix(profile): unify error string types in validation helper * refactor(profile): make switch driver fully async and handle panics safely * refactor(cmd): move switch-validation helper into new profile_switch module * refactor(profile): modularize switch logic into profile_switch.rs * refactor(profile_switch): modularize switch handler - Break monolithic switch handler into proper module hierarchy - Move shared globals, constants, and SwitchScope guard to state.rs - Isolate queue orchestration and async task spawning in driver.rs - Consolidate switch pipeline and config patching in workflow.rs - Extract request pre-checks/YAML validation into validation.rs * refactor(profile_switch): centralize state management and add cancellation flow - Introduced SwitchManager in state.rs to unify mutex, sequencing, and SwitchScope handling. - Added SwitchCancellation and SwitchRequest wrappers to encapsulate cancel tokens and notifications. - Updated driver to allocate task IDs via SwitchManager, cancel old tokens, and queue next jobs in order. - Updated workflow to check cancellation and sequence at each phase, replacing global flags with manager APIs. * feat(profile_switch): integrate explicit state machine for profile switching - workflow.rs:24 now delegates each switch to SwitchStateMachine, passing an owned SwitchRequest. Queue cancellation and state-sequence checks are centralized inside the machine instead of scattered guards. - workflow.rs:176 replaces the old helper with `SwitchStateMachine::new(manager(), None, profiles).run().await`, ensuring manual profile patches follow the same workflow (locking, validation, rollback) as queued switches. - workflow.rs:180 & 275 expose `validate_profile_yaml` and `restore_previous_profile` for reuse inside the state machine. - workflow/state_machine.rs:1 introduces a dedicated state machine module. It manages global mutex acquisition, request/cancellation state, YAML validation, draft patching, `CoreManager::update_config`, failure rollback, and tray/notification side-effects. Transitions check for cancellations and stale sequences; completions release guards via `SwitchScope` drop.
* refactor(profile-switch): integrate stage-aware panic handling - src-tauri/src/cmd/profile_switch/workflow/state_machine.rs:1 Defines SwitchStage and SwitchPanicInfo as crate-visible, wraps each transition in with_stage(...) with catch_unwind, and propagates CmdResult to distinguish validation failures from panics while keeping cancellation semantics. - src-tauri/src/cmd/profile_switch/workflow.rs:25 Updates run_switch_job to return Result, routing timeout, validation, config, and stage panic cases separately. Reuses SwitchPanicInfo for logging/UI notifications; patch_profiles_config maps state-machine panics into user-facing error strings. - src-tauri/src/cmd/profile_switch/driver.rs:1 Adds SwitchJobOutcome to unify workflow results: normal completions carry bool, and panics propagate SwitchPanicInfo. The driver loop now logs panics explicitly and uses AssertUnwindSafe(...).catch_unwind() to guard setup-phase panics. * refactor(profile-switch): add watchdog, heartbeat, and async timeout guards - Introduce SwitchHeartbeat for stage tracking and timing; log stage transitions with elapsed durations. - Add watchdog in driver to cancel stalled switches (5s heartbeat timeout). - Wrap blocking ops (Config::apply, tray updates, profiles_save_file_safe, etc.) with time::timeout to prevent async stalls. - Improve logs for stage transitions and watchdog timeouts to clarify cancellation points. * refactor(profile-switch): async post-switch tasks, early lock release, and spawn_blocking for IO * feat(profile-switch): track cleanup and coordinate pipeline - Add explicit cleanup tracking in the driver (`cleanup_profiles` map + `CleanupDone` messages) to know when background post-switch work is still running before starting a new workflow. (driver.rs:29-50) - Update `handle_enqueue` to detect “cleanup in progress”: same-profile retries are short-circuited; other requests collapse the pending queue, cancelling old tokens so only the latest intent survives. (driver.rs:176-247) - Rework scheduling helpers: `start_next_job` refuses to start while cleanup is outstanding; discarded requests release cancellation tokens; cleanup completion explicitly restarts the pipeline. (driver.rs:258-442) * feat(profile-switch): unify post-switch cleanup handling - workflow.rs (25-427) returns `SwitchWorkflowResult` (success + CleanupHandle) or `SwitchWorkflowError`. All failure/timeout paths stash post-switch work into a single CleanupHandle. Cleanup helpers (`notify_profile_switch_finished` and `close_connections_after_switch`) run inside that task for proper lifetime handling. - driver.rs (29-439) propagates CleanupHandle through `SwitchJobOutcome`, spawns a bridge to wait for completion, and blocks `start_next_job` until done. Direct driver-side panics now schedule failure cleanup via the shared helper. * tmp * Revert "tmp" This reverts commit e582cf4a652231a67a7c951802cb19b385f6afd7. 
* refactor: queue frontend events through async dispatcher * refactor: queue frontend switch/proxy events and throttle notices * chore: frontend debug log * fix: re-enable only ProfileSwitchFinished events - keep others suppressed for crash isolation - Re-enabled only ProfileSwitchFinished events; RefreshClash, RefreshProxy, and ProfileChanged remain suppressed (they log suppression messages) - Allows frontend to receive task completion notifications for UI feedback while crash isolation continues - src-tauri/src/core/handle.rs now only suppresses notify_profile_changed - Serialized emitter, frontend logging bridge, and other diagnostics unchanged * refactor: refreshClashData * refactor(proxy): stabilize proxy switch pipeline and rendering - Add coalescing buffer in notification.rs to emit only the latest proxies-updated snapshot - Replace nextTick with queueMicrotask in asyncQueue.ts for same-frame hydration - Hide auto-generated GLOBAL snapshot and preserve optional metadata in proxy-snapshot.ts - Introduce stable proxy rendering state in AppDataProvider (proxyTargetProfileId, proxyDisplayProfileId, isProxyRefreshPending) - Update proxy page to fade content during refresh and overlay status banner instead of showing incomplete snapshot * refactor(profiles): move manual activating logic to reducer for deterministic queue tracking * refactor: replace proxy-data event bridge with pure polling and simplify proxy store - Replaced the proxy-data event bridge with pure polling: AppDataProvider now fetches the initial snapshot and drives refreshes from the polled switchStatus, removing verge://refresh-* listeners (src/providers/app-data-provider.tsx). - Simplified proxy-store by dropping the proxies-updated listener queue and unused payload/normalizer helpers; relies on SWR/provider fetch path + calcuProxies for live updates (src/stores/proxy-store.ts). - Trimmed layout-level event wiring to keep only notice/show/hide subscriptions, removing obsolete refresh listeners (src/pages/_layout/useLayoutEvents.ts). * refactor(proxy): streamline proxies-updated handling and store event flow - AppDataProvider now treats `proxies-updated` as the fast path: the listener calls `applyLiveProxyPayload` immediately and schedules only a single fallback `fetchLiveProxies` ~600 ms later (replacing the old 0/250/1000/2000 cascade). Expensive provider/rule refreshes run in parallel via `Promise.allSettled`, and the multi-stage queue on profile updates completion was removed (src/providers/app-data-provider.tsx). - Rebuilt proxy-store to support the event flow: restored `setLive`, provider normalization, and an animation-frame + async queue that applies payloads without blocking. Exposed `applyLiveProxyPayload` so providers can push events directly into the store (src/stores/proxy-store.ts). * refactor: switch delay * refactor(app-data-provider): trigger getProfileSwitchStatus revalidation on profile-switch-finished - AppDataProvider now listens to `profile-switch-finished` and calls `mutate("getProfileSwitchStatus")` to immediately update state and unlock buttons (src/providers/app-data-provider.tsx). - Retain existing detailed timing logs for monitoring other stages. - Frontend success notifications remain instant; background refreshes continue asynchronously. 
* fix(profiles): prevent duplicate toast on page remount * refactor(profile-switch): make active switches preemptible and prevent queue piling - Add notify mechanism to SwitchCancellation to await cancellation without busy-waiting (state.rs:82) - Collapse pending queue to a single entry in the driver; cancel in-flight task on newer request (driver.rs:232) - Update handle_update_core to watch cancel token and 30s timeout; release locks, discard draft, and exit early if canceled (state_machine.rs:301) - Providers revalidate status immediately on profile-switch-finished events (app-data-provider.tsx:208) * refactor(core): make core reload phase controllable, reduce 0xcfffffff risk - CoreManager::apply_config now calls `reload_config_with_retry`, each attempt waits up to 5s, retries 3 times; on failure, returns error with duration logged and triggers core restart if needed (src-tauri/src/core/manager/config.rs:175, 205) - `reload_config_with_retry` logs attempt info on timeout or error; if error is a Mihomo connection issue, fallback to original restart logic (src-tauri/src/core/manager/config.rs:211) - `reload_config_once` retains original Mihomo call for retry wrapper usage (src-tauri/src/core/manager/config.rs:247) * chore(frontend-logs): downgrade routine event logs from info to debug - Logs like `emit_via_app entering spawn_blocking`, `Async emit…`, `Buffered proxies…` are now debug-level (src-tauri/src/core/notification.rs:155, :265, :309…) - Genuine warnings/errors (failures/timeouts) remain at warn/error - Core stage logs remain info to keep backend tracking visible * refactor(frontend-emit): make emit_via_app fire-and-forget async - `emit_via_app` now a regular function; spawns with `tokio::spawn` and logs a warn if `emit_to` fails, caller returns immediately (src-tauri/src/core/notification.rs:269) - Removed `.await` at Async emit and flush_proxies calls; only record dispatch duration and warn on failure (src-tauri/src/core/notification.rs:211, :329) * refactor(ui): restructure profile switch for event-driven speed + polling stability - Backend - SwitchManager maintains a lightweight event queue: added `event_sequence`, `recent_events`, and `SwitchResultEvent`; provides `push_event` / `events_after` (state.rs) - `handle_completion` pushes events on success/failure and keeps `last_result` (driver.rs) for frontend incremental fetch - New Tauri command `get_profile_switch_events(after_sequence)` exposes `events_after` (profile_switch/mod.rs → profile.rs → lib.rs) - Notification system - `NotificationSystem::process_event` only logs debug, disables WebView `emit_to`, fixes 0xcfffffff - Related emit/buffer functions now safe no-op, removed unused structures and warnings (notification.rs) - Frontend - services/cmds.ts defines `SwitchResultEvent` and `getProfileSwitchEvents` - `AppDataProvider` holds `switchEventSeqRef`, polls incremental events every 0.25s (busy) / 1s (idle); each event triggers: - immediate `globalMutate("getProfiles")` to refresh current profile - background refresh of proxies/providers/rules via `Promise.allSettled` (failures logged, non-blocking) - forced `mutateSwitchStatus` to correct state - original switchStatus effect calls `handleSwitchResult` as fallback; other toast/activation logic handled in profiles.tsx - Commands / API cleanup - removed `pub use profile_switch::*;` in cmd::mod.rs to avoid conflicts; frontend uses new command polling * refactor(frontend): optimize profile switch with optimistic updates * refactor(profile-switch): switch to event-driven flow 
with Profile Store - SwitchManager pushes events; frontend polls get_profile_switch_events - Zustand store handles optimistic profiles; AppDataProvider applies updates and background-fetches - UI flicker removed * fix(app-data): re-hook profile store updates during switch hydration * fix(notification): restore frontend event dispatch and non-blocking emits * fix(app-data-provider): restore proxy refresh and seed snapshot after refactor * fix: ensure switch completion events are received and handle proxies-updated * fix(app-data-provider): dedupe switch results by taskId and fix stale profile state * fix(profile-switch): ensure patch_profiles_config_by_profile_index waits for real completion and handle join failures in apply_config_with_timeout * docs: UPDATELOG.md * chore: add necessary comments * fix(core): always dispatch async proxy snapshot after RefreshClash event * fix(proxy-store, provider): handle pending snapshots and proxy profiles - Added pending snapshot tracking in proxy-store so `lastAppliedFetchId` no longer jumps on seed. Profile adoption is deferred until a qualifying fetch completes. Exposed `clearPendingProfile` for rollback support. - Cleared pending snapshot state whenever live payloads apply or the store resets, preventing stale optimistic profile IDs after failures. - In provider integration, subscribed to the pending proxy profile and fed it into target-profile derivation. Cleared it on failed switch results so hydration can advance and UI status remains accurate. * fix(proxy): re-hook tray refresh events into proxy refresh queue - Reattached listen("verge://refresh-proxy-config", …) at src/providers/app-data-provider.tsx:402 and registered it for cleanup. - Added matching window fallback handler at src/providers/app-data-provider.tsx:430 so in-app dispatches share the same refresh path. * fix(proxy-snapshot/proxy-groups): address review findings on snapshot placeholders - src/utils/proxy-snapshot.ts:72-95 now derives snapshot group members solely from proxy-groups.proxies, so provider ids under `use` no longer generate placeholder proxy items. - src/components/proxy/proxy-groups.tsx:665-677 lets the hydration overlay capture pointer events (and shows a wait cursor) so users can’t interact with snapshot-only placeholders before live data is ready. * fix(profile-switch): preserve queued requests and avoid stale connection teardown - Keep earlier queued switches intact by dropping the blanket “collapse” call: after removing duplicates for the same profile, new requests are simply appended, leaving other profiles pending (driver.rs:376). Resolves queue-loss scenario. - Gate connection cleanup on real successes so cancelled/stale runs no longer tear down Mihomo connections; success handler now skips close_connections_after_switch when success == false (workflow.rs:419). * fix(profile-switch, layout): improve profile validation and restore backend refresh - Hardened profile validation using `tokio::fs` with a 5s timeout and offloading YAML parsing to `AsyncHandler::spawn_blocking`, preventing slow disks or malformed files from freezing the runtime (src-tauri/src/cmd/profile_switch/validation.rs:9, 71). - Restored backend-triggered refresh handling by listening for `verge://refresh-clash-config` / `verge://refresh-verge-config` and invoking shared refresh services so SWR caches stay in sync with core events (src/pages/_layout/useLayoutEvents.ts:6, 45, 55). 
* feat(profile-switch): handle cancellations for superseded requests - Added a `cancelled` flag and constructor so superseded requests publish an explicit cancellation instead of a failure (src-tauri/src/cmd/profile_switch/state.rs:249, src-tauri/src/cmd/profile_switch/driver.rs:482) - Updated the profile switch effect to log cancellations as info, retain the shared `mutate` call, and skip emitting error toasts while still refreshing follow-up work (src/pages/profiles.tsx:554, src/pages/profiles.tsx:581) - Exposed the new flag on the TypeScript contract to keep downstream consumers type-safe (src/services/cmds.ts:20) * fix(profiles): wrap logging payload for Tauri frontend_log * fix(profile-switch): add rollback and error propagation for failed persistence - Added rollback on apply failure so Mihomo restores to the previous profile before exiting the success path early (state_machine.rs:474). - Reworked persist_profiles_with_timeout to surface timeout/join/save errors, convert them into CmdResult failures, and trigger rollback + error propagation when persistence fails (state_machine.rs:703). * fix(profile-switch): prevent mid-finalize reentrancy and lingering tasks * fix(profile-switch): preserve pending queue and surface discarded switches * fix(profile-switch): avoid draining Mihomo sockets on failed/cancelled switches * fix(app-data-provider): restore backend-driven refresh and reattach fallbacks * fix(profile-switch): queue concurrent updates and add bounded wait/backoff * fix(proxy): trigger live refresh on app start for proxy snapshot * refactor(profile-switch): split flow into layers and centralize async cleanup - Introduced `SwitchDriver` to encapsulate queue and driver logic while keeping the public Tauri command API. - Added workflow/cleanup helpers for notification dispatch and Mihomo connection draining, re-exported for API consistency. - Replaced monolithic state machine with `core.rs`, `context.rs`, and `stages.rs`, plus a thin `mod.rs` re-export layer; stage methods are now individually testable. - Removed legacy `workflow/state_machine.rs` and adjusted visibility on re-exported types/constants to ensure compilation. 
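The driver bullets above describe collapsing the pending queue and cancelling the in-flight task when a newer request arrives. Below is a minimal sketch of that preemption idea, using `tokio_util::sync::CancellationToken` as a stand-in for the patch's `SwitchCancellation` wrapper; the real driver additionally keys tokens by profile id and, per the later "preserve queued requests" commit, keeps pending switches for other profiles instead of dropping them.

```rust
use tokio_util::sync::CancellationToken;

struct Request {
    profile_id: String,
    cancel: CancellationToken,
}

#[derive(Default)]
struct DriverState {
    active: Option<Request>,
    /// At most one queued intent survives; older pending requests are cancelled.
    pending: Option<Request>,
}

impl DriverState {
    /// Register a new switch intent, preempting whatever is running or queued.
    fn enqueue(&mut self, profile_id: String) -> CancellationToken {
        let request = Request {
            profile_id,
            cancel: CancellationToken::new(),
        };
        let token = request.cancel.clone();

        // Cancel the in-flight switch; the workflow checks the token at each
        // stage boundary and bails out early instead of finishing stale work.
        if let Some(active) = &self.active {
            active.cancel.cancel();
        }
        // Collapse the pending slot so only the latest intent remains queued.
        if let Some(superseded) = self.pending.replace(request) {
            superseded.cancel.cancel();
        }
        token
    }
}
```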
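For the hardened validation described above (reading the profile file via `tokio::fs` under a 5s timeout and offloading YAML parsing to a blocking thread), a simplified sketch follows. It returns plain error strings instead of routing them through `notice_message`, and uses `tokio::task::spawn_blocking` directly as a stand-in for `AsyncHandler::spawn_blocking`.

```rust
use std::{path::Path, time::Duration};
use tokio::{fs, task, time};

async fn validate_profile_yaml(path: &Path) -> Result<(), String> {
    // Bound the disk read so a slow or hung filesystem cannot stall the switch.
    let content = time::timeout(Duration::from_secs(5), fs::read_to_string(path))
        .await
        .map_err(|_| "reading profile file timed out (5s)".to_string())?
        .map_err(|err| format!("failed to read profile file: {err}"))?;

    // Parse on a blocking thread; only the success/failure outcome crosses back.
    task::spawn_blocking(move || serde_yaml_ng::from_str::<serde_yaml_ng::Value>(&content))
        .await
        .map_err(|join_err| format!("YAML parse task failed: {join_err}"))?
        .map_err(|err| format!("YAML syntax error: {err}"))?;

    Ok(())
}
```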
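The `reload_config_with_retry` bullet (each attempt bounded to 5s, up to 3 attempts, then fall back to restarting the core) boils down to a timeout-plus-retry loop. A hedged sketch, with `reload_once` standing in for the actual Mihomo reload call:

```rust
use std::time::Duration;
use tokio::time;

async fn reload_config_with_retry<F, Fut>(mut reload_once: F) -> Result<(), String>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<(), String>>,
{
    const ATTEMPTS: usize = 3;
    const PER_ATTEMPT: Duration = Duration::from_secs(5);

    for attempt in 1..=ATTEMPTS {
        match time::timeout(PER_ATTEMPT, reload_once()).await {
            Ok(Ok(())) => return Ok(()),
            Ok(Err(err)) => log::warn!("reload attempt {attempt} failed: {err}"),
            Err(_) => log::warn!("reload attempt {attempt} timed out after {PER_ATTEMPT:?}"),
        }
    }
    // The caller decides whether this warrants a full core restart.
    Err("config reload failed after retries; restart the core".into())
}
```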
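The watchdog/heartbeat bullet works roughly as follows: the workflow stamps a heartbeat at every stage transition, and a driver-side watchdog cancels the switch when no progress is observed within ~5 seconds. A simplified sketch; the `Heartbeat` type and constants here are illustrative, not the patch's exact API (which also records a stage code for logging):

```rust
use std::sync::Mutex;
use std::time::{Duration, Instant};
use tokio::time::{self, MissedTickBehavior};
use tokio_util::sync::CancellationToken;

const WATCHDOG_TIMEOUT: Duration = Duration::from_secs(5);
const WATCHDOG_TICK: Duration = Duration::from_millis(500);

struct Heartbeat(Mutex<Instant>);

impl Heartbeat {
    fn new() -> Self {
        Self(Mutex::new(Instant::now()))
    }
    /// Called by the workflow at every stage transition.
    fn touch(&self) {
        *self.0.lock().unwrap() = Instant::now();
    }
    fn elapsed(&self) -> Duration {
        self.0.lock().unwrap().elapsed()
    }
}

async fn watchdog(heartbeat: &Heartbeat, cancel: CancellationToken) {
    let mut tick = time::interval(WATCHDOG_TICK);
    tick.set_missed_tick_behavior(MissedTickBehavior::Skip);
    loop {
        tick.tick().await;
        if cancel.is_cancelled() {
            return; // switch already finished or was preempted
        }
        if heartbeat.elapsed() > WATCHDOG_TIMEOUT {
            // No stage transition within the window: assume the switch stalled
            // (blocking IO, wedged core call) and cancel it so the queue moves on.
            cancel.cancel();
            return;
        }
    }
}
```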
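Finally, the event-driven flow exposes `get_profile_switch_events(after_sequence)` backed by `push_event` / `events_after` on `SwitchManager`. A minimal sketch of that bounded, sequence-numbered buffer (field names are approximate; the real `SwitchResultEvent` also carries stage, error, and cancellation details):

```rust
use std::collections::VecDeque;
use std::sync::Mutex;

#[derive(Clone, Debug)]
pub struct SwitchResultEvent {
    pub sequence: u64,
    pub task_id: u64,
    pub profile_id: String,
    pub success: bool,
}

pub struct EventBuffer {
    /// (last assigned sequence number, most recent events)
    inner: Mutex<(u64, VecDeque<SwitchResultEvent>)>,
    capacity: usize,
}

impl EventBuffer {
    pub fn new(capacity: usize) -> Self {
        Self {
            inner: Mutex::new((0, VecDeque::new())),
            capacity,
        }
    }

    /// Assign the next sequence number and keep only the newest `capacity` events.
    pub fn push_event(&self, mut event: SwitchResultEvent) -> u64 {
        let mut guard = self.inner.lock().unwrap();
        guard.0 += 1;
        event.sequence = guard.0;
        guard.1.push_back(event);
        while guard.1.len() > self.capacity {
            guard.1.pop_front();
        }
        guard.0
    }

    /// Return every buffered event newer than `after_sequence` (what the frontend polls with).
    pub fn events_after(&self, after_sequence: u64) -> Vec<SwitchResultEvent> {
        let guard = self.inner.lock().unwrap();
        guard
            .1
            .iter()
            .filter(|event| event.sequence > after_sequence)
            .cloned()
            .collect()
    }
}
```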
--- UPDATELOG.md | 2 + src-tauri/src/cmd/frontend.rs | 48 + src-tauri/src/cmd/mod.rs | 3 + src-tauri/src/cmd/profile.rs | 720 ++++++--------- src-tauri/src/cmd/profile_switch/driver.rs | 683 ++++++++++++++ src-tauri/src/cmd/profile_switch/mod.rs | 34 + src-tauri/src/cmd/profile_switch/state.rs | 353 ++++++++ .../src/cmd/profile_switch/validation.rs | 113 +++ src-tauri/src/cmd/profile_switch/workflow.rs | 385 ++++++++ .../cmd/profile_switch/workflow/cleanup.rs | 65 ++ .../workflow/state_machine/context.rs | 178 ++++ .../workflow/state_machine/core.rs | 284 ++++++ .../workflow/state_machine/mod.rs | 11 + .../workflow/state_machine/stages.rs | 597 +++++++++++++ src-tauri/src/core/handle.rs | 131 ++- src-tauri/src/core/manager/config.rs | 239 ++++- src-tauri/src/core/notification.rs | 269 +++++- src-tauri/src/lib.rs | 26 + src-tauri/src/utils/draft.rs | 7 + src/components/home/current-proxy-card.tsx | 22 +- src/components/proxy/provider-button.tsx | 310 ++++--- src/components/proxy/proxy-groups.tsx | 122 ++- src/components/proxy/use-render-list.ts | 89 +- src/hooks/use-current-proxy.ts | 12 +- src/hooks/use-profiles.ts | 77 +- src/pages/_layout/useLayoutEvents.ts | 36 +- src/pages/profiles.tsx | 838 +++++++++++------- src/providers/app-data-context.ts | 9 +- src/providers/app-data-provider.tsx | 743 ++++++++++++---- src/services/cmds.ts | 146 ++- src/services/noticeService.ts | 18 +- src/services/refresh.ts | 24 + src/stores/profile-store.ts | 59 ++ src/stores/proxy-store.ts | 298 +++++++ src/utils/asyncQueue.ts | 31 + src/utils/proxy-snapshot.ts | 205 +++++ 36 files changed, 5912 insertions(+), 1275 deletions(-) create mode 100644 src-tauri/src/cmd/frontend.rs create mode 100644 src-tauri/src/cmd/profile_switch/driver.rs create mode 100644 src-tauri/src/cmd/profile_switch/mod.rs create mode 100644 src-tauri/src/cmd/profile_switch/state.rs create mode 100644 src-tauri/src/cmd/profile_switch/validation.rs create mode 100644 src-tauri/src/cmd/profile_switch/workflow.rs create mode 100644 src-tauri/src/cmd/profile_switch/workflow/cleanup.rs create mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs create mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs create mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs create mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs create mode 100644 src/services/refresh.ts create mode 100644 src/stores/profile-store.ts create mode 100644 src/stores/proxy-store.ts create mode 100644 src/utils/asyncQueue.ts create mode 100644 src/utils/proxy-snapshot.ts diff --git a/UPDATELOG.md b/UPDATELOG.md index 20900405e..51236c737 100644 --- a/UPDATELOG.md +++ b/UPDATELOG.md @@ -30,6 +30,7 @@ - 修复悬浮跳转导航失效 - 修复小键盘热键映射错误 - 修复前端无法及时刷新操作状态 +- 修复切换订阅卡死
✨ 新增功能 @@ -76,6 +77,7 @@ - 优化首页当前节点对MATCH规则的支持 - 允许在 `界面设置` 修改 `悬浮跳转导航延迟` - 添加热键绑定错误的提示信息 +- 重构订阅切换,保证代理页面的及时刷新 - 在 macOS 10.15 及更高版本默认包含 Mihomo-go122,以解决 Intel 架构 Mac 无法运行内核的问题
diff --git a/src-tauri/src/cmd/frontend.rs b/src-tauri/src/cmd/frontend.rs new file mode 100644 index 000000000..8559c5899 --- /dev/null +++ b/src-tauri/src/cmd/frontend.rs @@ -0,0 +1,48 @@ +use super::CmdResult; +use crate::{logging, utils::logging::Type}; +use serde::Deserialize; + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct FrontendLogPayload { + pub level: Option, + pub message: String, + pub context: Option, +} + +#[tauri::command] +pub fn frontend_log(payload: FrontendLogPayload) -> CmdResult<()> { + let level = payload.level.as_deref().unwrap_or("info"); + match level { + "trace" | "debug" => logging!( + debug, + Type::Frontend, + "[frontend] {}", + payload.message.as_str() + ), + "warn" => logging!( + warn, + Type::Frontend, + "[frontend] {}", + payload.message.as_str() + ), + "error" => logging!( + error, + Type::Frontend, + "[frontend] {}", + payload.message.as_str() + ), + _ => logging!( + info, + Type::Frontend, + "[frontend] {}", + payload.message.as_str() + ), + } + + if let Some(context) = payload.context { + logging!(info, Type::Frontend, "[frontend] context: {}", context); + } + + Ok(()) +} diff --git a/src-tauri/src/cmd/mod.rs b/src-tauri/src/cmd/mod.rs index 6c7486873..2cf768981 100644 --- a/src-tauri/src/cmd/mod.rs +++ b/src-tauri/src/cmd/mod.rs @@ -7,10 +7,12 @@ pub type CmdResult = Result; pub mod app; pub mod backup; pub mod clash; +pub mod frontend; pub mod lightweight; pub mod media_unlock_checker; pub mod network; pub mod profile; +mod profile_switch; pub mod proxy; pub mod runtime; pub mod save_profile; @@ -25,6 +27,7 @@ pub mod webdav; pub use app::*; pub use backup::*; pub use clash::*; +pub use frontend::*; pub use lightweight::*; pub use media_unlock_checker::*; pub use network::*; diff --git a/src-tauri/src/cmd/profile.rs b/src-tauri/src/cmd/profile.rs index 151779363..37cdd2e9f 100644 --- a/src-tauri/src/cmd/profile.rs +++ b/src-tauri/src/cmd/profile.rs @@ -1,5 +1,4 @@ -use super::CmdResult; -use super::StringifyErr; +use super::{CmdResult, StringifyErr, profile_switch}; use crate::{ config::{ Config, IProfiles, PrfItem, PrfOption, @@ -9,77 +8,191 @@ use crate::{ }, profiles_append_item_safe, }, - core::{CoreManager, handle, timer::Timer, tray::Tray}, - feat, logging, - process::AsyncHandler, - ret_err, + core::{CoreManager, handle, timer::Timer}, + feat, logging, ret_err, utils::{dirs, help, logging::Type}, }; +use once_cell::sync::Lazy; +use parking_lot::RwLock; use smartstring::alias::String; -use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; -use std::time::Duration; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; -// 全局请求序列号跟踪,用于避免队列化执行 -static CURRENT_REQUEST_SEQUENCE: AtomicU64 = AtomicU64::new(0); +use crate::cmd::profile_switch::{ProfileSwitchStatus, SwitchResultEvent}; -static CURRENT_SWITCHING_PROFILE: AtomicBool = AtomicBool::new(false); - -#[tauri::command] -pub async fn get_profiles() -> CmdResult { - // 策略1: 尝试快速获取latest数据 - let latest_result = tokio::time::timeout(Duration::from_millis(500), async { - let profiles = Config::profiles().await; - let latest = profiles.latest_ref(); - IProfiles { - current: latest.current.clone(), - items: latest.items.clone(), - } - }) - .await; - - match latest_result { - Ok(profiles) => { - logging!(info, Type::Cmd, "快速获取配置列表成功"); - return Ok(profiles); - } - Err(_) => { - logging!(warn, Type::Cmd, "快速获取配置超时(500ms)"); - } - } - - // 策略2: 如果快速获取失败,尝试获取data() - let data_result = 
tokio::time::timeout(Duration::from_secs(2), async { - let profiles = Config::profiles().await; - let data = profiles.latest_ref(); - IProfiles { - current: data.current.clone(), - items: data.items.clone(), - } - }) - .await; - - match data_result { - Ok(profiles) => { - logging!(info, Type::Cmd, "获取draft配置列表成功"); - return Ok(profiles); - } - Err(join_err) => { - logging!( - error, - Type::Cmd, - "获取draft配置任务失败或超时: {}", - join_err - ); - } - } - - // 策略3: fallback,尝试重新创建配置 - logging!(warn, Type::Cmd, "所有获取配置策略都失败,尝试fallback"); - - Ok(IProfiles::new().await) +#[derive(Clone)] +struct CachedProfiles { + snapshot: IProfiles, + captured_at: Instant, } -/// 增强配置文件 +static PROFILES_CACHE: Lazy>> = Lazy::new(|| RwLock::new(None)); + +#[derive(Default)] +struct SnapshotMetrics { + fast_hits: AtomicU64, + cache_hits: AtomicU64, + blocking_hits: AtomicU64, + refresh_scheduled: AtomicU64, + last_log_ms: AtomicU64, +} + +static SNAPSHOT_METRICS: Lazy = Lazy::new(SnapshotMetrics::default); + +/// Store the latest snapshot so cache consumers can reuse it without hitting the lock again. +fn update_profiles_cache(snapshot: &IProfiles) { + *PROFILES_CACHE.write() = Some(CachedProfiles { + snapshot: snapshot.clone(), + captured_at: Instant::now(), + }); +} + +/// Return the cached snapshot and how old it is, if present. +fn cached_profiles_snapshot() -> Option<(IProfiles, u128)> { + PROFILES_CACHE.read().as_ref().map(|entry| { + ( + entry.snapshot.clone(), + entry.captured_at.elapsed().as_millis(), + ) + }) +} + +/// Return the latest profiles snapshot, preferring cached data so UI requests never block. +#[tauri::command] +pub async fn get_profiles() -> CmdResult { + let started_at = Instant::now(); + + // Resolve snapshots in three tiers so UI reads never stall on a mutex: + // 1) try a non-blocking read, 2) fall back to the last cached copy while a + // writer holds the lock, 3) block and refresh the cache as a final resort. + if let Some(snapshot) = read_profiles_snapshot_nonblocking().await { + let item_count = snapshot + .items + .as_ref() + .map(|items| items.len()) + .unwrap_or(0); + update_profiles_cache(&snapshot); + SNAPSHOT_METRICS.fast_hits.fetch_add(1, Ordering::Relaxed); + logging!( + debug, + Type::Cmd, + "[Profiles] Snapshot served (path=fast, items={}, elapsed={}ms)", + item_count, + started_at.elapsed().as_millis() + ); + maybe_log_snapshot_metrics(); + return Ok(snapshot); + } + + if let Some((cached, age_ms)) = cached_profiles_snapshot() { + SNAPSHOT_METRICS.cache_hits.fetch_add(1, Ordering::Relaxed); + logging!( + debug, + Type::Cmd, + "[Profiles] Served cached snapshot while lock busy (age={}ms)", + age_ms + ); + schedule_profiles_snapshot_refresh(); + maybe_log_snapshot_metrics(); + return Ok(cached); + } + + let snapshot = read_profiles_snapshot_blocking().await; + let item_count = snapshot + .items + .as_ref() + .map(|items| items.len()) + .unwrap_or(0); + update_profiles_cache(&snapshot); + SNAPSHOT_METRICS + .blocking_hits + .fetch_add(1, Ordering::Relaxed); + logging!( + debug, + Type::Cmd, + "[Profiles] Snapshot served (path=blocking, items={}, elapsed={}ms)", + item_count, + started_at.elapsed().as_millis() + ); + maybe_log_snapshot_metrics(); + Ok(snapshot) +} + +/// Try to grab the latest profile data without waiting for the writer. +async fn read_profiles_snapshot_nonblocking() -> Option { + let profiles = Config::profiles().await; + profiles.try_latest_ref().map(|guard| (**guard).clone()) +} + +/// Fall back to a blocking read when we absolutely must have fresh data. 
+async fn read_profiles_snapshot_blocking() -> IProfiles { + let profiles = Config::profiles().await; + let guard = profiles.latest_ref(); + (**guard).clone() +} + +/// Schedule a background cache refresh once the exclusive lock becomes available again. +fn schedule_profiles_snapshot_refresh() { + crate::process::AsyncHandler::spawn(|| async { + // Once the lock is released we refresh the cached snapshot so the next + // request observes the latest data instead of the stale fallback. + SNAPSHOT_METRICS + .refresh_scheduled + .fetch_add(1, Ordering::Relaxed); + let snapshot = read_profiles_snapshot_blocking().await; + update_profiles_cache(&snapshot); + logging!( + debug, + Type::Cmd, + "[Profiles] Cache refreshed after busy snapshot" + ); + }); +} + +fn maybe_log_snapshot_metrics() { + const LOG_INTERVAL_MS: u64 = 5_000; + let now_ms = current_millis(); + let last_ms = SNAPSHOT_METRICS.last_log_ms.load(Ordering::Relaxed); + if now_ms.saturating_sub(last_ms) < LOG_INTERVAL_MS { + return; + } + + if SNAPSHOT_METRICS + .last_log_ms + .compare_exchange(last_ms, now_ms, Ordering::SeqCst, Ordering::Relaxed) + .is_err() + { + return; + } + + let fast = SNAPSHOT_METRICS.fast_hits.swap(0, Ordering::SeqCst); + let cache = SNAPSHOT_METRICS.cache_hits.swap(0, Ordering::SeqCst); + let blocking = SNAPSHOT_METRICS.blocking_hits.swap(0, Ordering::SeqCst); + let refresh = SNAPSHOT_METRICS.refresh_scheduled.swap(0, Ordering::SeqCst); + + if fast == 0 && cache == 0 && blocking == 0 && refresh == 0 { + return; + } + + logging!( + debug, + Type::Cmd, + "[Profiles][Metrics] 5s window => fast={}, cache={}, blocking={}, refresh_jobs={}", + fast, + cache, + blocking, + refresh + ); +} + +fn current_millis() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or(Duration::ZERO) + .as_millis() as u64 +} + +/// Run the optional enhancement pipeline and refresh Clash when it completes. #[tauri::command] pub async fn enhance_profiles() -> CmdResult { match feat::enhance_profiles().await { @@ -93,79 +206,106 @@ pub async fn enhance_profiles() -> CmdResult { Ok(()) } -/// 导入配置文件 +/// Download a profile from the given URL and persist it to the local catalog. 
#[tauri::command] pub async fn import_profile(url: std::string::String, option: Option) -> CmdResult { - logging!(info, Type::Cmd, "[导入订阅] 开始导入: {}", url); + logging!(info, Type::Cmd, "[Profile Import] Begin: {}", url); - // 直接依赖 PrfItem::from_url 自身的超时/重试逻辑,不再使用 tokio::time::timeout 包裹 + // Rely on PrfItem::from_url internal timeout/retry logic instead of wrapping with tokio::time::timeout let item = match PrfItem::from_url(&url, None, None, option).await { Ok(it) => { - logging!(info, Type::Cmd, "[导入订阅] 下载完成,开始保存配置"); + logging!( + info, + Type::Cmd, + "[Profile Import] Download complete; saving configuration" + ); it } Err(e) => { - logging!(error, Type::Cmd, "[导入订阅] 下载失败: {}", e); - return Err(format!("导入订阅失败: {}", e).into()); + logging!(error, Type::Cmd, "[Profile Import] Download failed: {}", e); + return Err(format!("Profile import failed: {}", e).into()); } }; match profiles_append_item_safe(item.clone()).await { Ok(_) => match profiles_save_file_safe().await { Ok(_) => { - logging!(info, Type::Cmd, "[导入订阅] 配置文件保存成功"); + logging!( + info, + Type::Cmd, + "[Profile Import] Configuration file saved successfully" + ); } Err(e) => { - logging!(error, Type::Cmd, "[导入订阅] 保存配置文件失败: {}", e); + logging!( + error, + Type::Cmd, + "[Profile Import] Failed to save configuration file: {}", + e + ); } }, Err(e) => { - logging!(error, Type::Cmd, "[导入订阅] 保存配置失败: {}", e); - return Err(format!("导入订阅失败: {}", e).into()); + logging!( + error, + Type::Cmd, + "[Profile Import] Failed to persist configuration: {}", + e + ); + return Err(format!("Profile import failed: {}", e).into()); } } - // 立即发送配置变更通知 + // Immediately emit a configuration change notification if let Some(uid) = &item.uid { - logging!(info, Type::Cmd, "[导入订阅] 发送配置变更通知: {}", uid); + logging!( + info, + Type::Cmd, + "[Profile Import] Emitting configuration change event: {}", + uid + ); handle::Handle::notify_profile_changed(uid.clone()); } - // 异步保存配置文件并发送全局通知 + // Save configuration asynchronously and emit a global notification let uid_clone = item.uid.clone(); if let Some(uid) = uid_clone { - // 延迟发送,确保文件已完全写入 + // Delay notification to ensure the file is fully written tokio::time::sleep(Duration::from_millis(100)).await; handle::Handle::notify_profile_changed(uid); } - logging!(info, Type::Cmd, "[导入订阅] 导入完成: {}", url); + logging!(info, Type::Cmd, "[Profile Import] Completed: {}", url); Ok(()) } -/// 调整profile的顺序 +/// Move a profile in the list relative to another entry. #[tauri::command] pub async fn reorder_profile(active_id: String, over_id: String) -> CmdResult { match profiles_reorder_safe(active_id, over_id).await { Ok(_) => { - log::info!(target: "app", "重新排序配置文件"); + log::info!(target: "app", "Reordered profiles"); Ok(()) } Err(err) => { - log::error!(target: "app", "重新排序配置文件失败: {}", err); - Err(format!("重新排序配置文件失败: {}", err).into()) + log::error!(target: "app", "Failed to reorder profiles: {}", err); + Err(format!("Failed to reorder profiles: {}", err).into()) } } } -/// 创建新的profile -/// 创建一个新的配置文件 +/// Create a new profile entry and optionally write its backing file. 
#[tauri::command] pub async fn create_profile(item: PrfItem, file_data: Option) -> CmdResult { match profiles_append_item_with_filedata_safe(item.clone(), file_data).await { Ok(_) => { - // 发送配置变更通知 + // Emit configuration change notification if let Some(uid) = &item.uid { - logging!(info, Type::Cmd, "[创建订阅] 发送配置变更通知: {}", uid); + logging!( + info, + Type::Cmd, + "[Profile Create] Emitting configuration change event: {}", + uid + ); handle::Handle::notify_profile_changed(uid.clone()); } Ok(()) @@ -177,7 +317,7 @@ pub async fn create_profile(item: PrfItem, file_data: Option) -> CmdResu } } -/// 更新配置文件 +/// Force-refresh a profile from its remote source, if available. #[tauri::command] pub async fn update_profile(index: String, option: Option) -> CmdResult { match feat::update_profile(index, option, Some(true)).await { @@ -189,11 +329,11 @@ pub async fn update_profile(index: String, option: Option) -> CmdResu } } -/// 删除配置文件 +/// Remove a profile and refresh the running configuration if necessary. #[tauri::command] pub async fn delete_profile(index: String) -> CmdResult { println!("delete_profile: {}", index); - // 使用Send-safe helper函数 + // Use send-safe helper function let should_update = profiles_delete_item_safe(index.clone()) .await .stringify_err()?; @@ -203,8 +343,13 @@ pub async fn delete_profile(index: String) -> CmdResult { match CoreManager::global().update_config().await { Ok(_) => { handle::Handle::refresh_clash(); - // 发送配置变更通知 - logging!(info, Type::Cmd, "[删除订阅] 发送配置变更通知: {}", index); + // Emit configuration change notification + logging!( + info, + Type::Cmd, + "[Profile Delete] Emitting configuration change event: {}", + index + ); handle::Handle::notify_profile_changed(index); } Err(e) => { @@ -216,361 +361,28 @@ pub async fn delete_profile(index: String) -> CmdResult { Ok(()) } -/// 验证新配置文件的语法 -async fn validate_new_profile(new_profile: &String) -> Result<(), ()> { - logging!(info, Type::Cmd, "正在切换到新配置: {}", new_profile); - - // 获取目标配置文件路径 - let config_file_result = { - let profiles_config = Config::profiles().await; - let profiles_data = profiles_config.latest_ref(); - match profiles_data.get_item(new_profile) { - Ok(item) => { - if let Some(file) = &item.file { - let path = dirs::app_profiles_dir().map(|dir| dir.join(file.as_str())); - path.ok() - } else { - None - } - } - Err(e) => { - logging!(error, Type::Cmd, "获取目标配置信息失败: {}", e); - None - } - } - }; - - // 如果获取到文件路径,检查YAML语法 - if let Some(file_path) = config_file_result { - if !file_path.exists() { - logging!( - error, - Type::Cmd, - "目标配置文件不存在: {}", - file_path.display() - ); - handle::Handle::notice_message( - "config_validate::file_not_found", - format!("{}", file_path.display()), - ); - return Err(()); - } - - // 超时保护 - let file_read_result = tokio::time::timeout( - Duration::from_secs(5), - tokio::fs::read_to_string(&file_path), - ) - .await; - - match file_read_result { - Ok(Ok(content)) => { - let yaml_parse_result = AsyncHandler::spawn_blocking(move || { - serde_yaml_ng::from_str::(&content) - }) - .await; - - match yaml_parse_result { - Ok(Ok(_)) => { - logging!(info, Type::Cmd, "目标配置文件语法正确"); - Ok(()) - } - Ok(Err(err)) => { - let error_msg = format!(" {err}"); - logging!( - error, - Type::Cmd, - "目标配置文件存在YAML语法错误:{}", - error_msg - ); - handle::Handle::notice_message( - "config_validate::yaml_syntax_error", - error_msg.clone(), - ); - Err(()) - } - Err(join_err) => { - let error_msg = format!("YAML解析任务失败: {join_err}"); - logging!(error, Type::Cmd, "{}", error_msg); - handle::Handle::notice_message( - 
"config_validate::yaml_parse_error", - error_msg.clone(), - ); - Err(()) - } - } - } - Ok(Err(err)) => { - let error_msg = format!("无法读取目标配置文件: {err}"); - logging!(error, Type::Cmd, "{}", error_msg); - handle::Handle::notice_message( - "config_validate::file_read_error", - error_msg.clone(), - ); - Err(()) - } - Err(_) => { - let error_msg = "读取配置文件超时(5秒)".to_string(); - logging!(error, Type::Cmd, "{}", error_msg); - handle::Handle::notice_message( - "config_validate::file_read_timeout", - error_msg.clone(), - ); - Err(()) - } - } - } else { - Ok(()) - } -} - -/// 执行配置更新并处理结果 -async fn restore_previous_profile(prev_profile: String) -> CmdResult<()> { - logging!(info, Type::Cmd, "尝试恢复到之前的配置: {}", prev_profile); - let restore_profiles = IProfiles { - current: Some(prev_profile), - items: None, - }; - Config::profiles() - .await - .draft_mut() - .patch_config(restore_profiles) - .stringify_err()?; - Config::profiles().await.apply(); - crate::process::AsyncHandler::spawn(|| async move { - if let Err(e) = profiles_save_file_safe().await { - log::warn!(target: "app", "异步保存恢复配置文件失败: {e}"); - } - }); - logging!(info, Type::Cmd, "成功恢复到之前的配置"); - Ok(()) -} - -async fn handle_success(current_sequence: u64, current_value: Option) -> CmdResult { - let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); - if current_sequence < latest_sequence { - logging!( - info, - Type::Cmd, - "内核操作后发现更新的请求 (序列号: {} < {}),忽略当前结果", - current_sequence, - latest_sequence - ); - Config::profiles().await.discard(); - return Ok(false); - } - - logging!( - info, - Type::Cmd, - "配置更新成功,序列号: {}", - current_sequence - ); - Config::profiles().await.apply(); - handle::Handle::refresh_clash(); - - if let Err(e) = Tray::global().update_tooltip().await { - log::warn!(target: "app", "异步更新托盘提示失败: {e}"); - } - - if let Err(e) = Tray::global().update_menu().await { - log::warn!(target: "app", "异步更新托盘菜单失败: {e}"); - } - - if let Err(e) = profiles_save_file_safe().await { - log::warn!(target: "app", "异步保存配置文件失败: {e}"); - } - - if let Some(current) = ¤t_value { - logging!( - info, - Type::Cmd, - "向前端发送配置变更事件: {}, 序列号: {}", - current, - current_sequence - ); - handle::Handle::notify_profile_changed(current.clone()); - } - - CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); - Ok(true) -} - -async fn handle_validation_failure( - error_msg: String, - current_profile: Option, -) -> CmdResult { - logging!(warn, Type::Cmd, "配置验证失败: {}", error_msg); - Config::profiles().await.discard(); - if let Some(prev_profile) = current_profile { - restore_previous_profile(prev_profile).await?; - } - handle::Handle::notice_message("config_validate::error", error_msg); - CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); - Ok(false) -} - -async fn handle_update_error(e: E, current_sequence: u64) -> CmdResult { - logging!( - warn, - Type::Cmd, - "更新过程发生错误: {}, 序列号: {}", - e, - current_sequence - ); - Config::profiles().await.discard(); - handle::Handle::notice_message("config_validate::boot_error", e.to_string()); - CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); - Ok(false) -} - -async fn handle_timeout(current_profile: Option, current_sequence: u64) -> CmdResult { - let timeout_msg = "配置更新超时(30秒),可能是配置验证或核心通信阻塞"; - logging!( - error, - Type::Cmd, - "{}, 序列号: {}", - timeout_msg, - current_sequence - ); - Config::profiles().await.discard(); - if let Some(prev_profile) = current_profile { - restore_previous_profile(prev_profile).await?; - } - handle::Handle::notice_message("config_validate::timeout", timeout_msg); - 
CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); - Ok(false) -} - -async fn perform_config_update( - current_sequence: u64, - current_value: Option, - current_profile: Option, -) -> CmdResult { - logging!( - info, - Type::Cmd, - "开始内核配置更新,序列号: {}", - current_sequence - ); - let update_result = tokio::time::timeout( - Duration::from_secs(30), - CoreManager::global().update_config(), - ) - .await; - - match update_result { - Ok(Ok((true, _))) => handle_success(current_sequence, current_value).await, - Ok(Ok((false, error_msg))) => handle_validation_failure(error_msg, current_profile).await, - Ok(Err(e)) => handle_update_error(e, current_sequence).await, - Err(_) => handle_timeout(current_profile, current_sequence).await, - } -} - -/// 修改profiles的配置 +/// Apply partial profile list updates through the switching workflow. #[tauri::command] pub async fn patch_profiles_config(profiles: IProfiles) -> CmdResult { - if CURRENT_SWITCHING_PROFILE.load(Ordering::SeqCst) { - logging!(info, Type::Cmd, "当前正在切换配置,放弃请求"); - return Ok(false); - } - CURRENT_SWITCHING_PROFILE.store(true, Ordering::SeqCst); - - // 为当前请求分配序列号 - let current_sequence = CURRENT_REQUEST_SEQUENCE.fetch_add(1, Ordering::SeqCst) + 1; - let target_profile = profiles.current.clone(); - - logging!( - info, - Type::Cmd, - "开始修改配置文件,请求序列号: {}, 目标profile: {:?}", - current_sequence, - target_profile - ); - - let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); - if current_sequence < latest_sequence { - logging!( - info, - Type::Cmd, - "获取锁后发现更新的请求 (序列号: {} < {}),放弃当前请求", - current_sequence, - latest_sequence - ); - return Ok(false); - } - - // 保存当前配置,以便在验证失败时恢复 - let current_profile = Config::profiles().await.latest_ref().current.clone(); - logging!(info, Type::Cmd, "当前配置: {:?}", current_profile); - - // 如果要切换配置,先检查目标配置文件是否有语法错误 - if let Some(new_profile) = profiles.current.as_ref() - && current_profile.as_ref() != Some(new_profile) - && validate_new_profile(new_profile).await.is_err() - { - CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); - return Ok(false); - } - - // 检查请求有效性 - let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); - if current_sequence < latest_sequence { - logging!( - info, - Type::Cmd, - "在核心操作前发现更新的请求 (序列号: {} < {}),放弃当前请求", - current_sequence, - latest_sequence - ); - return Ok(false); - } - - // 更新profiles配置 - logging!( - info, - Type::Cmd, - "正在更新配置草稿,序列号: {}", - current_sequence - ); - - let current_value = profiles.current.clone(); - - let _ = Config::profiles().await.draft_mut().patch_config(profiles); - - // 在调用内核前再次验证请求有效性 - let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); - if current_sequence < latest_sequence { - logging!( - info, - Type::Cmd, - "在内核交互前发现更新的请求 (序列号: {} < {}),放弃当前请求", - current_sequence, - latest_sequence - ); - Config::profiles().await.discard(); - return Ok(false); - } - - perform_config_update(current_sequence, current_value, current_profile).await + profile_switch::patch_profiles_config(profiles).await } -/// 根据profile name修改profiles +/// Switch to the provided profile index and wait for completion before returning. 
#[tauri::command] pub async fn patch_profiles_config_by_profile_index(profile_index: String) -> CmdResult { - logging!(info, Type::Cmd, "切换配置到: {}", profile_index); - - let profiles = IProfiles { - current: Some(profile_index), - items: None, - }; - patch_profiles_config(profiles).await + profile_switch::patch_profiles_config_by_profile_index(profile_index).await } -/// 修改某个profile item的 +/// Enqueue a profile switch request and optionally notify on success. +#[tauri::command] +pub async fn switch_profile(profile_index: String, notify_success: bool) -> CmdResult { + profile_switch::switch_profile(profile_index, notify_success).await +} + +/// Update a specific profile item and refresh timers if its schedule changed. #[tauri::command] pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult { - // 保存修改前检查是否有更新 update_interval + // Check for update_interval changes before saving let profiles = Config::profiles().await; let should_refresh_timer = if let Ok(old_profile) = profiles.latest_ref().get_item(&index) { let old_interval = old_profile.option.as_ref().and_then(|o| o.update_interval); @@ -589,15 +401,19 @@ pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult { .await .stringify_err()?; - // 如果更新间隔或允许自动更新变更,异步刷新定时器 + // If the interval or auto-update flag changes, refresh the timer asynchronously if should_refresh_timer { let index_clone = index.clone(); crate::process::AsyncHandler::spawn(move || async move { - logging!(info, Type::Timer, "定时器更新间隔已变更,正在刷新定时器..."); + logging!( + info, + Type::Timer, + "Timer interval changed; refreshing timer..." + ); if let Err(e) = crate::core::Timer::global().refresh().await { - logging!(error, Type::Timer, "刷新定时器失败: {}", e); + logging!(error, Type::Timer, "Failed to refresh timer: {}", e); } else { - // 刷新成功后发送自定义事件,不触发配置重载 + // After refreshing successfully, emit a custom event without triggering a reload crate::core::handle::Handle::notify_timer_updated(index_clone); } }); @@ -606,7 +422,7 @@ pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult { Ok(()) } -/// 查看配置文件 +/// Open the profile file in the system viewer. #[tauri::command] pub async fn view_profile(index: String) -> CmdResult { let profiles = Config::profiles().await; @@ -628,7 +444,7 @@ pub async fn view_profile(index: String) -> CmdResult { help::open_file(path).stringify_err() } -/// 读取配置文件内容 +/// Return the raw YAML contents for the given profile file. #[tauri::command] pub async fn read_profile_file(index: String) -> CmdResult { let profiles = Config::profiles().await; @@ -638,10 +454,22 @@ pub async fn read_profile_file(index: String) -> CmdResult { Ok(data) } -/// 获取下一次更新时间 +/// Report the scheduled refresh timestamp (if any) for the profile timer. #[tauri::command] pub async fn get_next_update_time(uid: String) -> CmdResult> { let timer = Timer::global(); let next_time = timer.get_next_update_time(&uid).await; Ok(next_time) } + +/// Return the latest driver snapshot describing active and queued switch tasks. +#[tauri::command] +pub async fn get_profile_switch_status() -> CmdResult { + profile_switch::get_switch_status() +} + +/// Fetch switch result events newer than the provided sequence number. 
+#[tauri::command] +pub async fn get_profile_switch_events(after_sequence: u64) -> CmdResult> { + profile_switch::get_switch_events(after_sequence) +} diff --git a/src-tauri/src/cmd/profile_switch/driver.rs b/src-tauri/src/cmd/profile_switch/driver.rs new file mode 100644 index 000000000..8815458e8 --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/driver.rs @@ -0,0 +1,683 @@ +use super::{ + CmdResult, + state::{ + ProfileSwitchStatus, SwitchCancellation, SwitchManager, SwitchRequest, SwitchResultStatus, + SwitchTaskStatus, current_millis, manager, + }, + workflow::{self, SwitchPanicInfo, SwitchStage}, +}; +use crate::{logging, utils::logging::Type}; +use futures::FutureExt; +use once_cell::sync::OnceCell; +use smartstring::alias::String as SmartString; +use std::{ + collections::{HashMap, VecDeque}, + panic::AssertUnwindSafe, + time::Duration, +}; +use tokio::{ + sync::{ + Mutex as AsyncMutex, + mpsc::{self, error::TrySendError}, + oneshot, + }, + time::{self, MissedTickBehavior}, +}; + +// Single shared queue so profile switches are executed sequentially and can +// collapse redundant requests for the same profile. +const SWITCH_QUEUE_CAPACITY: usize = 32; +static SWITCH_QUEUE: OnceCell> = OnceCell::new(); + +type CompletionRegistry = AsyncMutex>>; + +static SWITCH_COMPLETION_WAITERS: OnceCell = OnceCell::new(); + +/// Global map of task id -> completion channel sender used when callers await the result. +fn completion_waiters() -> &'static CompletionRegistry { + SWITCH_COMPLETION_WAITERS.get_or_init(|| AsyncMutex::new(HashMap::new())) +} + +/// Register a oneshot sender so `switch_profile_and_wait` can be notified when its task finishes. +async fn register_completion_waiter(task_id: u64) -> oneshot::Receiver { + let (sender, receiver) = oneshot::channel(); + let mut guard = completion_waiters().lock().await; + if guard.insert(task_id, sender).is_some() { + logging!( + warn, + Type::Cmd, + "Replacing existing completion waiter for task {}", + task_id + ); + } + receiver +} + +/// Remove an outstanding completion waiter; used when enqueue fails or succeeds immediately. +async fn remove_completion_waiter(task_id: u64) -> Option> { + completion_waiters().lock().await.remove(&task_id) +} + +/// Fire-and-forget notify helper so we do not block the driver loop. +fn notify_completion_waiter(task_id: u64, result: SwitchResultStatus) { + tokio::spawn(async move { + let sender = completion_waiters().lock().await.remove(&task_id); + if let Some(sender) = sender { + let _ = sender.send(result); + } + }); +} + +const WATCHDOG_TIMEOUT: Duration = Duration::from_secs(5); +const WATCHDOG_TICK: Duration = Duration::from_millis(500); + +// Mutable snapshot of the driver's world; all mutations happen on the driver task. +#[derive(Debug, Default)] +struct SwitchDriverState { + active: Option, + queue: VecDeque, + latest_tokens: HashMap, + cleanup_profiles: HashMap>, + last_result: Option, +} + +// Messages passed through SWITCH_QUEUE so the driver can react to events in order. 
+#[derive(Debug)] +enum SwitchDriverMessage { + Request { + request: SwitchRequest, + respond_to: oneshot::Sender, + }, + Completion { + request: SwitchRequest, + outcome: SwitchJobOutcome, + }, + CleanupDone { + profile: SmartString, + }, +} + +#[derive(Debug)] +enum SwitchJobOutcome { + Completed { + success: bool, + cleanup: workflow::CleanupHandle, + }, + Panicked { + info: SwitchPanicInfo, + cleanup: workflow::CleanupHandle, + }, +} + +pub(super) async fn switch_profile( + profile_index: impl Into, + notify_success: bool, +) -> CmdResult { + switch_profile_impl(profile_index.into(), notify_success, false).await +} + +pub(super) async fn switch_profile_and_wait( + profile_index: impl Into, + notify_success: bool, +) -> CmdResult { + switch_profile_impl(profile_index.into(), notify_success, true).await +} + +async fn switch_profile_impl( + profile_index: SmartString, + notify_success: bool, + wait_for_completion: bool, +) -> CmdResult { + // wait_for_completion is used by CLI flows that must block until the switch finishes. + let manager = manager(); + let sender = switch_driver_sender(); + + let request = SwitchRequest::new( + manager.next_task_id(), + profile_index.clone(), + notify_success, + ); + + logging!( + info, + Type::Cmd, + "Queue profile switch task {} -> {} (notify={})", + request.task_id(), + profile_index, + notify_success + ); + + let task_id = request.task_id(); + let mut completion_rx = if wait_for_completion { + Some(register_completion_waiter(task_id).await) + } else { + None + }; + + let (tx, rx) = oneshot::channel(); + + let enqueue_result = match sender.try_send(SwitchDriverMessage::Request { + request, + respond_to: tx, + }) { + Ok(_) => match rx.await { + Ok(result) => Ok(result), + Err(err) => { + logging!( + error, + Type::Cmd, + "Failed to receive enqueue result for profile {}: {}", + profile_index, + err + ); + Err("switch profile queue unavailable".into()) + } + }, + Err(TrySendError::Full(msg)) => { + logging!( + warn, + Type::Cmd, + "Profile switch queue is full; waiting for space: {}", + profile_index + ); + match sender.send(msg).await { + Ok(_) => match rx.await { + Ok(result) => Ok(result), + Err(err) => { + logging!( + error, + Type::Cmd, + "Failed to receive enqueue result after wait for {}: {}", + profile_index, + err + ); + Err("switch profile queue unavailable".into()) + } + }, + Err(err) => { + logging!( + error, + Type::Cmd, + "Profile switch queue closed while waiting ({}): {}", + profile_index, + err + ); + Err("switch profile queue unavailable".into()) + } + } + } + Err(TrySendError::Closed(_)) => { + logging!( + error, + Type::Cmd, + "Profile switch queue is closed, cannot enqueue: {}", + profile_index + ); + Err("switch profile queue unavailable".into()) + } + }; + + let accepted = match enqueue_result { + Ok(result) => result, + Err(err) => { + if completion_rx.is_some() { + remove_completion_waiter(task_id).await; + } + return Err(err); + } + }; + + if !accepted { + if completion_rx.is_some() { + remove_completion_waiter(task_id).await; + } + return Ok(false); + } + + if let Some(rx_completion) = completion_rx.take() { + match rx_completion.await { + Ok(status) => Ok(status.success), + Err(err) => { + logging!( + error, + Type::Cmd, + "Switch task {} completion channel dropped: {}", + task_id, + err + ); + Err("profile switch completion unavailable".into()) + } + } + } else { + Ok(true) + } +} + +fn switch_driver_sender() -> &'static mpsc::Sender { + SWITCH_QUEUE.get_or_init(|| { + let (tx, rx) = mpsc::channel::(SWITCH_QUEUE_CAPACITY); + 
let driver_tx = tx.clone(); + tokio::spawn(async move { + let manager = manager(); + let driver = SwitchDriver::new(manager, driver_tx); + driver.run(rx).await; + }); + tx + }) +} + +struct SwitchDriver { + manager: &'static SwitchManager, + sender: mpsc::Sender, + state: SwitchDriverState, +} + +impl SwitchDriver { + fn new(manager: &'static SwitchManager, sender: mpsc::Sender) -> Self { + let state = SwitchDriverState::default(); + manager.set_status(state.snapshot(manager)); + Self { + manager, + sender, + state, + } + } + + async fn run(mut self, mut rx: mpsc::Receiver) { + while let Some(message) = rx.recv().await { + match message { + SwitchDriverMessage::Request { + request, + respond_to, + } => { + self.handle_enqueue(request, respond_to); + } + SwitchDriverMessage::Completion { request, outcome } => { + self.handle_completion(request, outcome); + } + SwitchDriverMessage::CleanupDone { profile } => { + self.handle_cleanup_done(profile); + } + } + } + } + + fn handle_enqueue(&mut self, request: SwitchRequest, respond_to: oneshot::Sender) { + // Each new request supersedes older ones for the same profile to avoid thrashing the core. + let mut responder = Some(respond_to); + let accepted = true; + let profile_key = request.profile_id().clone(); + let cleanup_pending = + self.state.active.is_none() && !self.state.cleanup_profiles.is_empty(); + + if cleanup_pending && self.state.cleanup_profiles.contains_key(&profile_key) { + logging!( + debug, + Type::Cmd, + "Cleanup running for {}; queueing switch task {} -> {} to run afterwards", + profile_key, + request.task_id(), + profile_key + ); + if let Some(previous) = self + .state + .latest_tokens + .insert(profile_key.clone(), request.cancel_token().clone()) + { + previous.cancel(); + } + self.state + .queue + .retain(|queued| queued.profile_id() != &profile_key); + self.state.queue.push_back(request); + if let Some(sender) = responder.take() { + let _ = sender.send(accepted); + } + self.publish_status(); + return; + } + + if cleanup_pending { + logging!( + debug, + Type::Cmd, + "Cleanup running for {} profile(s); queueing task {} -> {} to run after cleanup without clearing existing requests", + self.state.cleanup_profiles.len(), + request.task_id(), + profile_key + ); + } + + if let Some(previous) = self + .state + .latest_tokens + .insert(profile_key.clone(), request.cancel_token().clone()) + { + previous.cancel(); + } + + if let Some(active) = self.state.active.as_mut() + && active.profile_id() == &profile_key + { + active.cancel_token().cancel(); + active.merge_notify(request.notify()); + self.state + .queue + .retain(|queued| queued.profile_id() != &profile_key); + self.state.queue.push_front(request.clone()); + if let Some(sender) = responder.take() { + let _ = sender.send(accepted); + } + self.publish_status(); + return; + } + + if let Some(active) = self.state.active.as_ref() { + logging!( + debug, + Type::Cmd, + "Cancelling active switch task {} (profile={}) in favour of task {} -> {}", + active.task_id(), + active.profile_id(), + request.task_id(), + profile_key + ); + active.cancel_token().cancel(); + } + + self.state + .queue + .retain(|queued| queued.profile_id() != &profile_key); + + self.state.queue.push_back(request.clone()); + if let Some(sender) = responder.take() { + let _ = sender.send(accepted); + } + + self.start_next_job(); + self.publish_status(); + } + + fn handle_completion(&mut self, request: SwitchRequest, outcome: SwitchJobOutcome) { + // Translate the workflow result into an event the frontend can understand. 
+ let result_record = match &outcome { + SwitchJobOutcome::Completed { success, .. } => { + logging!( + info, + Type::Cmd, + "Switch task {} completed (success={})", + request.task_id(), + success + ); + if *success { + SwitchResultStatus::success(request.task_id(), request.profile_id()) + } else { + SwitchResultStatus::failed(request.task_id(), request.profile_id(), None, None) + } + } + SwitchJobOutcome::Panicked { info, .. } => { + logging!( + error, + Type::Cmd, + "Switch task {} panicked at stage {:?}: {}", + request.task_id(), + info.stage, + info.detail + ); + SwitchResultStatus::failed( + request.task_id(), + request.profile_id(), + Some(format!("{:?}", info.stage)), + Some(info.detail.clone()), + ) + } + }; + + if let Some(active) = self.state.active.as_ref() + && active.task_id() == request.task_id() + { + self.state.active = None; + } + + if let Some(latest) = self.state.latest_tokens.get(request.profile_id()) + && latest.same_token(request.cancel_token()) + { + self.state.latest_tokens.remove(request.profile_id()); + } + + let cleanup = match outcome { + SwitchJobOutcome::Completed { cleanup, .. } => cleanup, + SwitchJobOutcome::Panicked { cleanup, .. } => cleanup, + }; + + self.track_cleanup(request.profile_id().clone(), cleanup); + + let event_record = result_record.clone(); + self.state.last_result = Some(result_record); + notify_completion_waiter(request.task_id(), event_record.clone()); + self.manager.push_event(event_record); + self.start_next_job(); + self.publish_status(); + } + + fn handle_cleanup_done(&mut self, profile: SmartString) { + if let Some(handle) = self.state.cleanup_profiles.remove(&profile) { + handle.abort(); + } + self.start_next_job(); + self.publish_status(); + } + + fn start_next_job(&mut self) { + if self.state.active.is_some() || !self.state.cleanup_profiles.is_empty() { + self.publish_status(); + return; + } + + while let Some(request) = self.state.queue.pop_front() { + if request.cancel_token().is_cancelled() { + self.discard_request(request); + continue; + } + + self.state.active = Some(request.clone()); + self.start_switch_job(request); + break; + } + + self.publish_status(); + } + + fn track_cleanup(&mut self, profile: SmartString, cleanup: workflow::CleanupHandle) { + if let Some(existing) = self.state.cleanup_profiles.remove(&profile) { + existing.abort(); + } + + let driver_tx = self.sender.clone(); + let profile_clone = profile.clone(); + let handle = tokio::spawn(async move { + let profile_label = profile_clone.clone(); + if let Err(err) = cleanup.await { + logging!( + warn, + Type::Cmd, + "Cleanup task for profile {} failed: {}", + profile_label.as_str(), + err + ); + } + if let Err(err) = driver_tx + .send(SwitchDriverMessage::CleanupDone { + profile: profile_clone, + }) + .await + { + logging!( + error, + Type::Cmd, + "Failed to push cleanup completion for profile {}: {}", + profile_label.as_str(), + err + ); + } + }); + self.state.cleanup_profiles.insert(profile, handle); + } + + fn start_switch_job(&self, request: SwitchRequest) { + // Run the workflow in a background task while the driver keeps processing messages. 
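Before the actual spawn below, a stripped-down sketch of the `select!` shape it uses: race the workflow future against a periodic watchdog tick and give up when the job stalls. The helper name and its fixed wall-clock budget are placeholders; the real task below also consults the `SwitchHeartbeat` stage and elapsed time, cancels via the request's token, and reports a `SwitchJobOutcome` back to the driver.

```rust
use std::time::{Duration, Instant};
use tokio::time::{self, MissedTickBehavior};

async fn run_with_watchdog<F>(job: F) -> Option<bool>
where
    F: std::future::Future<Output = bool>,
{
    const TICK: Duration = Duration::from_secs(1);
    const BUDGET: Duration = Duration::from_secs(5);

    let mut watchdog = time::interval(TICK);
    watchdog.set_missed_tick_behavior(MissedTickBehavior::Skip);
    let started = Instant::now();

    tokio::pin!(job);
    loop {
        tokio::select! {
            // The workflow finished first: hand its result back.
            result = job.as_mut() => return Some(result),
            // Watchdog tick: the sketch only checks a wall-clock budget;
            // the real driver compares the heartbeat's elapsed time instead.
            _ = watchdog.tick() => {
                if started.elapsed() > BUDGET {
                    return None;
                }
            }
        }
    }
}
```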
+ let driver_tx = self.sender.clone(); + let manager = self.manager; + + let completion_request = request.clone(); + let heartbeat = request.heartbeat().clone(); + let cancel_token = request.cancel_token().clone(); + let task_id = request.task_id(); + let profile_label = request.profile_id().clone(); + + tokio::spawn(async move { + let mut watchdog_interval = time::interval(WATCHDOG_TICK); + watchdog_interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + + let workflow_fut = + AssertUnwindSafe(workflow::run_switch_job(manager, request)).catch_unwind(); + tokio::pin!(workflow_fut); + + let job_result = loop { + tokio::select! { + res = workflow_fut.as_mut() => { + break match res { + Ok(Ok(result)) => SwitchJobOutcome::Completed { + success: result.success, + cleanup: result.cleanup, + }, + Ok(Err(error)) => SwitchJobOutcome::Panicked { + info: error.info, + cleanup: error.cleanup, + }, + Err(payload) => { + let info = SwitchPanicInfo::driver_task( + workflow::describe_panic_payload(payload.as_ref()), + ); + let cleanup = workflow::schedule_post_switch_failure( + profile_label.clone(), + completion_request.notify(), + completion_request.task_id(), + ); + SwitchJobOutcome::Panicked { info, cleanup } + } + }; + } + _ = watchdog_interval.tick() => { + if cancel_token.is_cancelled() { + continue; + } + let elapsed = heartbeat.elapsed(); + if elapsed > WATCHDOG_TIMEOUT { + let stage = SwitchStage::from_code(heartbeat.stage_code()) + .unwrap_or(SwitchStage::Workflow); + logging!( + warn, + Type::Cmd, + "Switch task {} watchdog timeout (profile={} stage={:?}, elapsed={:?}); cancelling", + task_id, + profile_label.as_str(), + stage, + elapsed + ); + cancel_token.cancel(); + } + } + } + }; + + let request_for_error = completion_request.clone(); + + if let Err(err) = driver_tx + .send(SwitchDriverMessage::Completion { + request: completion_request, + outcome: job_result, + }) + .await + { + logging!( + error, + Type::Cmd, + "Failed to push switch completion to driver: {}", + err + ); + notify_completion_waiter( + request_for_error.task_id(), + SwitchResultStatus::failed( + request_for_error.task_id(), + request_for_error.profile_id(), + Some("driver".to_string()), + Some(format!("completion dispatch failed: {}", err)), + ), + ); + } + }); + } + + /// Mark a request as failed because a newer request superseded it. + fn discard_request(&mut self, request: SwitchRequest) { + let key = request.profile_id().clone(); + let should_remove = self + .state + .latest_tokens + .get(&key) + .map(|latest| latest.same_token(request.cancel_token())) + .unwrap_or(false); + + if should_remove { + self.state.latest_tokens.remove(&key); + } + + if !request.cancel_token().is_cancelled() { + request.cancel_token().cancel(); + } + + let event = SwitchResultStatus::cancelled( + request.task_id(), + request.profile_id(), + Some("request superseded".to_string()), + ); + + self.state.last_result = Some(event.clone()); + notify_completion_waiter(request.task_id(), event.clone()); + self.manager.push_event(event); + } + + fn publish_status(&self) { + self.manager.set_status(self.state.snapshot(self.manager)); + } +} + +impl SwitchDriverState { + /// Lightweight struct suitable for sharing across the command boundary. 
+ fn snapshot(&self, manager: &SwitchManager) -> ProfileSwitchStatus { + let active = self + .active + .as_ref() + .map(|req| SwitchTaskStatus::from_request(req, false)); + let queue = self + .queue + .iter() + .map(|req| SwitchTaskStatus::from_request(req, true)) + .collect::>(); + let cleanup_profiles = self + .cleanup_profiles + .keys() + .map(|key| key.to_string()) + .collect::>(); + + ProfileSwitchStatus { + is_switching: manager.is_switching(), + active, + queue, + cleanup_profiles, + last_result: self.last_result.clone(), + last_updated: current_millis(), + } + } +} diff --git a/src-tauri/src/cmd/profile_switch/mod.rs b/src-tauri/src/cmd/profile_switch/mod.rs new file mode 100644 index 000000000..0729c68d1 --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/mod.rs @@ -0,0 +1,34 @@ +// Profile switch orchestration: plumbing between the public tauri commands, +// the async driver queue, validation helpers, and the state machine workflow. +mod driver; +mod state; +mod validation; +mod workflow; + +pub use state::{ProfileSwitchStatus, SwitchResultEvent}; + +use smartstring::alias::String; + +use super::CmdResult; + +pub(super) async fn patch_profiles_config(profiles: crate::config::IProfiles) -> CmdResult { + workflow::patch_profiles_config(profiles).await +} + +pub(super) async fn patch_profiles_config_by_profile_index( + profile_index: String, +) -> CmdResult { + driver::switch_profile_and_wait(profile_index, false).await +} + +pub(super) async fn switch_profile(profile_index: String, notify_success: bool) -> CmdResult { + driver::switch_profile(profile_index, notify_success).await +} + +pub(super) fn get_switch_status() -> CmdResult { + Ok(state::manager().status_snapshot()) +} + +pub(super) fn get_switch_events(after_sequence: u64) -> CmdResult> { + Ok(state::manager().events_after(after_sequence)) +} diff --git a/src-tauri/src/cmd/profile_switch/state.rs b/src-tauri/src/cmd/profile_switch/state.rs new file mode 100644 index 000000000..1bb52d6b2 --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/state.rs @@ -0,0 +1,353 @@ +use once_cell::sync::OnceCell; +use parking_lot::RwLock; +use serde::Serialize; +use smartstring::alias::String as SmartString; +use std::collections::VecDeque; +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use tokio::sync::{Mutex, Notify}; + +pub(super) const SWITCH_JOB_TIMEOUT: Duration = Duration::from_secs(30); +pub(super) const SWITCH_CLEANUP_TIMEOUT: Duration = Duration::from_secs(5); + +static SWITCH_MANAGER: OnceCell = OnceCell::new(); + +pub(super) fn manager() -> &'static SwitchManager { + SWITCH_MANAGER.get_or_init(SwitchManager::default) +} + +#[derive(Debug)] +// Central coordination point shared between the driver and workflow state machine. 
+pub(super) struct SwitchManager { + core_mutex: Mutex<()>, + request_sequence: AtomicU64, + switching: AtomicBool, + task_sequence: AtomicU64, + status: RwLock, + event_sequence: AtomicU64, + recent_events: RwLock>, +} + +impl Default for SwitchManager { + fn default() -> Self { + Self { + core_mutex: Mutex::new(()), + request_sequence: AtomicU64::new(0), + switching: AtomicBool::new(false), + task_sequence: AtomicU64::new(0), + status: RwLock::new(ProfileSwitchStatus::default()), + event_sequence: AtomicU64::new(0), + recent_events: RwLock::new(VecDeque::with_capacity(32)), + } + } +} + +impl SwitchManager { + pub(super) fn core_mutex(&self) -> &Mutex<()> { + &self.core_mutex + } + + // Monotonic identifiers so logs can correlate enqueue/finish pairs. + pub(super) fn next_task_id(&self) -> u64 { + self.task_sequence.fetch_add(1, Ordering::SeqCst) + 1 + } + + /// Sequence id assigned to each enqueue request so we can spot stale work. + pub(super) fn next_request_sequence(&self) -> u64 { + self.request_sequence.fetch_add(1, Ordering::SeqCst) + 1 + } + + pub(super) fn latest_request_sequence(&self) -> u64 { + self.request_sequence.load(Ordering::SeqCst) + } + + pub(super) fn begin_switch(&'static self) -> SwitchScope<'static> { + self.switching.store(true, Ordering::SeqCst); + SwitchScope { manager: self } + } + + pub(super) fn is_switching(&self) -> bool { + self.switching.load(Ordering::SeqCst) + } + + pub(super) fn set_status(&self, status: ProfileSwitchStatus) { + *self.status.write() = status; + } + + pub(super) fn status_snapshot(&self) -> ProfileSwitchStatus { + self.status.read().clone() + } + pub(super) fn push_event(&self, result: SwitchResultStatus) { + const MAX_EVENTS: usize = 64; + let sequence = self.event_sequence.fetch_add(1, Ordering::SeqCst) + 1; + let mut guard = self.recent_events.write(); + if guard.len() == MAX_EVENTS { + guard.pop_front(); + } + guard.push_back(SwitchResultEvent { sequence, result }); + } + + pub(super) fn events_after(&self, sequence: u64) -> Vec { + self.recent_events + .read() + .iter() + .filter(|event| event.sequence > sequence) + .cloned() + .collect() + } +} + +pub(super) struct SwitchScope<'a> { + manager: &'a SwitchManager, +} + +impl Drop for SwitchScope<'_> { + fn drop(&mut self) { + self.manager.switching.store(false, Ordering::SeqCst); + } +} + +#[derive(Debug, Clone)] +pub(super) struct SwitchCancellation { + flag: Arc, + notify: Arc, +} + +impl SwitchCancellation { + pub(super) fn new() -> Self { + Self { + flag: Arc::new(AtomicBool::new(false)), + notify: Arc::new(Notify::new()), + } + } + + pub(super) fn cancel(&self) { + self.flag.store(true, Ordering::SeqCst); + self.notify.notify_waiters(); + } + + /// True if another request already cancelled this job. + pub(super) fn is_cancelled(&self) -> bool { + self.flag.load(Ordering::SeqCst) + } + + pub(super) fn same_token(&self, other: &SwitchCancellation) -> bool { + Arc::ptr_eq(&self.flag, &other.flag) + } + + pub(super) async fn cancelled_future(&self) { + // Used by async blocks that want to pause until a newer request pre-empts them. 
+ if self.is_cancelled() { + return; + } + self.notify.notified().await; + } +} + +#[derive(Debug, Clone)] +pub(super) struct SwitchRequest { + task_id: u64, + profile_id: SmartString, + notify: bool, + cancel_token: SwitchCancellation, + heartbeat: SwitchHeartbeat, +} + +impl SwitchRequest { + pub(super) fn new(task_id: u64, profile_id: SmartString, notify: bool) -> Self { + Self { + task_id, + profile_id, + notify, + cancel_token: SwitchCancellation::new(), + heartbeat: SwitchHeartbeat::new(), + } + } + + pub(super) fn task_id(&self) -> u64 { + self.task_id + } + + pub(super) fn profile_id(&self) -> &SmartString { + &self.profile_id + } + + pub(super) fn notify(&self) -> bool { + self.notify + } + + pub(super) fn merge_notify(&mut self, notify: bool) { + // When a new request wants a toast, remember it even if an older request did not. + if notify { + self.notify = true; + } + } + + pub(super) fn cancel_token(&self) -> &SwitchCancellation { + &self.cancel_token + } + + pub(super) fn heartbeat(&self) -> &SwitchHeartbeat { + &self.heartbeat + } +} + +#[derive(Debug, Clone)] +pub(super) struct SwitchHeartbeat { + last_tick_millis: Arc, + stage_code: Arc, +} + +fn now_millis() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or(Duration::ZERO) + .as_millis() as u64 +} + +#[derive(Debug, Clone, Serialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct ProfileSwitchStatus { + pub is_switching: bool, + pub active: Option, + pub queue: Vec, + pub cleanup_profiles: Vec, + pub last_result: Option, + pub last_updated: u64, +} + +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct SwitchTaskStatus { + pub task_id: u64, + pub profile_id: String, + pub notify: bool, + pub stage: Option, + pub queued: bool, +} + +impl SwitchTaskStatus { + pub(super) fn from_request(request: &SwitchRequest, queued: bool) -> Self { + Self { + task_id: request.task_id(), + profile_id: request.profile_id().to_string(), + notify: request.notify(), + stage: if queued { + None + } else { + Some(request.heartbeat().stage_code()) + }, + queued, + } + } +} + +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct SwitchResultStatus { + pub task_id: u64, + pub profile_id: String, + pub success: bool, + pub cancelled: bool, + pub finished_at: u64, + pub error_stage: Option, + pub error_detail: Option, +} + +impl SwitchResultStatus { + pub(super) fn success(task_id: u64, profile_id: &SmartString) -> Self { + Self { + task_id, + profile_id: profile_id.to_string(), + success: true, + cancelled: false, + finished_at: now_millis(), + error_stage: None, + error_detail: None, + } + } + + pub(super) fn failed( + task_id: u64, + profile_id: &SmartString, + stage: Option, + detail: Option, + ) -> Self { + Self { + task_id, + profile_id: profile_id.to_string(), + success: false, + cancelled: false, + finished_at: now_millis(), + error_stage: stage, + error_detail: detail, + } + } + + pub(super) fn cancelled( + task_id: u64, + profile_id: &SmartString, + detail: Option, + ) -> Self { + Self { + task_id, + profile_id: profile_id.to_string(), + success: false, + cancelled: true, + finished_at: now_millis(), + error_stage: Some("cancelled".to_string()), + error_detail: detail, + } + } +} + +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct SwitchResultEvent { + pub sequence: u64, + pub result: SwitchResultStatus, +} + +pub(super) fn current_millis() -> u64 { + now_millis() +} + +impl SwitchHeartbeat { + fn now_millis() -> 
u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or(Duration::ZERO) + .as_millis() as u64 + } + + pub(super) fn new() -> Self { + let heartbeat = Self { + last_tick_millis: Arc::new(AtomicU64::new(Self::now_millis())), + stage_code: Arc::new(AtomicU32::new(0)), + }; + heartbeat.touch(); + heartbeat + } + + pub(super) fn touch(&self) { + self.last_tick_millis + .store(Self::now_millis(), Ordering::SeqCst); + } + + /// Update the internal timer to reflect the amount of time since the last heartbeat. + pub(super) fn elapsed(&self) -> Duration { + let last = self.last_tick_millis.load(Ordering::SeqCst); + let now = Self::now_millis(); + Duration::from_millis(now.saturating_sub(last)) + } + + pub(super) fn set_stage(&self, stage: u32) { + self.stage_code.store(stage, Ordering::SeqCst); + self.touch(); + } + + pub(super) fn stage_code(&self) -> u32 { + self.stage_code.load(Ordering::SeqCst) + } +} diff --git a/src-tauri/src/cmd/profile_switch/validation.rs b/src-tauri/src/cmd/profile_switch/validation.rs new file mode 100644 index 000000000..b15806cfa --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/validation.rs @@ -0,0 +1,113 @@ +use crate::{ + config::Config, + logging, + process::AsyncHandler, + utils::{dirs, logging::Type}, +}; +use serde_yaml_ng as serde_yaml; +use smartstring::alias::String; +use std::time::Duration; +use tokio::{fs as tokio_fs, time}; + +const YAML_READ_TIMEOUT: Duration = Duration::from_secs(5); + +/// Verify that the requested profile exists locally and is well-formed before switching. +pub(super) async fn validate_switch_request(task_id: u64, profile_id: &str) -> Result<(), String> { + logging!( + info, + Type::Cmd, + "Validating profile switch task {} -> {}", + task_id, + profile_id + ); + + let profile_key: String = profile_id.into(); + let (file_path, profile_type, is_current, remote_url) = { + let profiles_guard = Config::profiles().await; + let latest = profiles_guard.latest_ref(); + let item = latest.get_item(&profile_key).map_err(|err| -> String { + format!("Target profile {} not found: {}", profile_id, err).into() + })?; + ( + item.file.clone().map(|f| f.to_string()), + item.itype.clone().map(|t| t.to_string()), + latest + .current + .as_ref() + .map(|current| current.as_str() == profile_id) + .unwrap_or(false), + item.url.clone().map(|u| u.to_string()), + ) + }; + + if is_current { + logging!( + info, + Type::Cmd, + "Switch task {} is targeting the current profile {}; skipping validation", + task_id, + profile_id + ); + return Ok(()); + } + + if matches!(profile_type.as_deref(), Some("remote")) { + // Remote profiles must retain a URL so the subsequent refresh job knows where to download. 
+ let has_url = remote_url.as_ref().map(|u| !u.is_empty()).unwrap_or(false); + if !has_url { + return Err({ + let msg = format!("Remote profile {} is missing a download URL", profile_id); + msg.into() + }); + } + } + + if let Some(file) = file_path { + let profiles_dir = dirs::app_profiles_dir().map_err(|err| -> String { + format!("Failed to resolve profiles directory: {}", err).into() + })?; + let path = profiles_dir.join(&file); + + let contents = match time::timeout(YAML_READ_TIMEOUT, tokio_fs::read_to_string(&path)).await + { + Ok(Ok(contents)) => contents, + Ok(Err(err)) => { + return Err( + format!("Failed to read profile file {}: {}", path.display(), err).into(), + ); + } + Err(_) => { + return Err(format!( + "Timed out reading profile file {} after {:?}", + path.display(), + YAML_READ_TIMEOUT + ) + .into()); + } + }; + + let parse_result = AsyncHandler::spawn_blocking(move || { + serde_yaml::from_str::(&contents) + }) + .await; + + match parse_result { + Ok(Ok(_)) => {} + Ok(Err(err)) => { + return Err( + format!("Profile YAML parse failed for {}: {}", path.display(), err).into(), + ); + } + Err(join_err) => { + return Err(format!( + "Profile YAML parse task panicked for {}: {}", + path.display(), + join_err + ) + .into()); + } + } + } + + Ok(()) +} diff --git a/src-tauri/src/cmd/profile_switch/workflow.rs b/src-tauri/src/cmd/profile_switch/workflow.rs new file mode 100644 index 000000000..27d162696 --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/workflow.rs @@ -0,0 +1,385 @@ +use super::{ + CmdResult, + state::{SWITCH_JOB_TIMEOUT, SwitchManager, SwitchRequest, manager}, + validation::validate_switch_request, +}; +use crate::cmd::StringifyErr; +use crate::{ + config::{Config, IProfiles, profiles::profiles_save_file_safe}, + core::handle, + logging, + process::AsyncHandler, + utils::{dirs, logging::Type}, +}; +use futures::FutureExt; +use serde_yaml_ng as serde_yaml; +use smartstring::alias::String as SmartString; +use std::{any::Any, panic::AssertUnwindSafe, time::Duration}; +use tokio::{fs as tokio_fs, time}; + +mod cleanup; +mod state_machine; +pub(super) use cleanup::{ + CleanupHandle, schedule_post_switch_failure, schedule_post_switch_success, +}; + +use state_machine::{CONFIG_APPLY_TIMEOUT, SAVE_PROFILES_TIMEOUT, SwitchStateMachine}; +pub(super) use state_machine::{SwitchPanicInfo, SwitchStage}; + +pub(super) struct SwitchWorkflowResult { + pub success: bool, + pub cleanup: CleanupHandle, +} + +pub(super) struct SwitchWorkflowError { + pub info: SwitchPanicInfo, + pub cleanup: CleanupHandle, +} + +pub(super) async fn run_switch_job( + manager: &'static SwitchManager, + request: SwitchRequest, +) -> Result { + // Short-circuit cancelled jobs before we allocate resources or emit events. 
+ if request.cancel_token().is_cancelled() { + logging!( + info, + Type::Cmd, + "Switch task {} cancelled before validation", + request.task_id() + ); + let cleanup = schedule_post_switch_failure( + request.profile_id().clone(), + request.notify(), + request.task_id(), + ); + return Ok(SwitchWorkflowResult { + success: false, + cleanup, + }); + } + + let profile_id = request.profile_id().clone(); + let task_id = request.task_id(); + let notify = request.notify(); + + if let Err(err) = validate_switch_request(task_id, profile_id.as_str()).await { + logging!( + warn, + Type::Cmd, + "Validation failed for switch task {} -> {}: {}", + task_id, + profile_id, + err + ); + handle::Handle::notice_message("config_validate::error", err.clone()); + let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id); + return Ok(SwitchWorkflowResult { + success: false, + cleanup, + }); + } + + logging!( + info, + Type::Cmd, + "Starting switch task {} for profile {} (notify={})", + task_id, + profile_id, + notify + ); + + let pipeline_request = request; + // The state machine owns the heavy lifting. We wrap it with timeout/panic guards so the driver never hangs. + let pipeline = async move { + let target_profile = pipeline_request.profile_id().clone(); + SwitchStateMachine::new( + manager, + Some(pipeline_request), + IProfiles { + current: Some(target_profile), + items: None, + }, + ) + .run() + .await + }; + + match time::timeout( + SWITCH_JOB_TIMEOUT, + AssertUnwindSafe(pipeline).catch_unwind(), + ) + .await + { + Err(_) => { + logging!( + error, + Type::Cmd, + "Profile switch task {} timed out after {:?}", + task_id, + SWITCH_JOB_TIMEOUT + ); + handle::Handle::notice_message( + "config_validate::error", + format!("profile switch timed out: {}", profile_id), + ); + let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id); + Ok(SwitchWorkflowResult { + success: false, + cleanup, + }) + } + Ok(Err(panic_payload)) => { + let panic_message = describe_panic_payload(panic_payload.as_ref()); + logging!( + error, + Type::Cmd, + "Panic captured during profile switch task {} ({}): {}", + task_id, + profile_id, + panic_message + ); + handle::Handle::notice_message( + "config_validate::panic", + format!("profile switch panic: {}", profile_id), + ); + let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id); + Err(SwitchWorkflowError { + info: SwitchPanicInfo::workflow_root(panic_message), + cleanup, + }) + } + Ok(Ok(machine_result)) => match machine_result { + Ok(cmd_result) => match cmd_result { + Ok(success) => { + let cleanup = + schedule_post_switch_success(profile_id.clone(), success, notify, task_id); + Ok(SwitchWorkflowResult { success, cleanup }) + } + Err(err) => { + logging!( + error, + Type::Cmd, + "Profile switch failed ({}): {}", + profile_id, + err + ); + handle::Handle::notice_message("config_validate::error", err.clone()); + let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id); + Ok(SwitchWorkflowResult { + success: false, + cleanup, + }) + } + }, + Err(panic_info) => { + logging!( + error, + Type::Cmd, + "State machine panic during profile switch task {} ({} {:?}): {}", + task_id, + profile_id, + panic_info.stage, + panic_info.detail + ); + handle::Handle::notice_message( + "config_validate::panic", + format!("profile switch panic: {}", profile_id), + ); + let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id); + Err(SwitchWorkflowError { + info: panic_info, + cleanup, + }) + } + }, + } 
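Conceptually, `run_switch_job` layers a wall-clock timeout outside a panic guard so the caller can tell apart "timed out", "panicked", and "ran to completion". A free-standing sketch of that layering with a simplified result type (illustrative only; `Guarded` and `guard` are not names from this patch):

```rust
use futures::FutureExt;
use std::panic::AssertUnwindSafe;
use std::time::Duration;
use tokio::time;

enum Guarded<T> {
    Finished(T),
    Panicked(String),
    TimedOut,
}

async fn guard<F, T>(fut: F, limit: Duration) -> Guarded<T>
where
    F: std::future::Future<Output = T>,
{
    match time::timeout(limit, AssertUnwindSafe(fut).catch_unwind()).await {
        // The outer timeout fired before the inner future settled.
        Err(_) => Guarded::TimedOut,
        // The inner catch_unwind converted a panic into a payload we can log.
        Ok(Err(payload)) => Guarded::Panicked(
            payload
                .downcast_ref::<&str>()
                .map(|s| s.to_string())
                .unwrap_or_else(|| "unknown panic".to_string()),
        ),
        Ok(Ok(value)) => Guarded::Finished(value),
    }
}
```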
+} + +/// Allow patch operations (no driver request) to use the same state machine pipeline. +pub(super) async fn patch_profiles_config(profiles: IProfiles) -> CmdResult { + match SwitchStateMachine::new(manager(), None, profiles) + .run() + .await + { + Ok(result) => result, + Err(panic_info) => Err(format!( + "profile switch panic ({:?}): {}", + panic_info.stage, panic_info.detail + ) + .into()), + } +} + +/// Parse the target profile YAML on a background thread to catch syntax errors early. +pub(super) async fn validate_profile_yaml(profile: &SmartString) -> CmdResult { + let file_path = { + let profiles_guard = Config::profiles().await; + let profiles_data = profiles_guard.latest_ref(); + match profiles_data.get_item(profile) { + Ok(item) => item.file.as_ref().and_then(|file| { + dirs::app_profiles_dir() + .ok() + .map(|dir| dir.join(file.as_str())) + }), + Err(e) => { + logging!( + error, + Type::Cmd, + "Failed to load target profile metadata: {}", + e + ); + return Ok(false); + } + } + }; + + let Some(path) = file_path else { + return Ok(true); + }; + + if !path.exists() { + logging!( + error, + Type::Cmd, + "Target profile file does not exist: {}", + path.display() + ); + handle::Handle::notice_message( + "config_validate::file_not_found", + format!("{}", path.display()), + ); + return Ok(false); + } + + let file_read_result = + time::timeout(Duration::from_secs(5), tokio_fs::read_to_string(&path)).await; + + match file_read_result { + Ok(Ok(content)) => { + let yaml_parse_result = AsyncHandler::spawn_blocking(move || { + serde_yaml::from_str::(&content) + }) + .await; + + match yaml_parse_result { + Ok(Ok(_)) => { + logging!(info, Type::Cmd, "Target profile YAML syntax is valid"); + Ok(true) + } + Ok(Err(err)) => { + let error_msg = format!(" {err}"); + logging!( + error, + Type::Cmd, + "Target profile contains YAML syntax errors: {}", + error_msg + ); + handle::Handle::notice_message( + "config_validate::yaml_syntax_error", + error_msg.clone(), + ); + Ok(false) + } + Err(join_err) => { + let error_msg = format!("YAML parsing task failed: {join_err}"); + logging!(error, Type::Cmd, "{}", error_msg); + handle::Handle::notice_message( + "config_validate::yaml_parse_error", + error_msg.clone(), + ); + Ok(false) + } + } + } + Ok(Err(err)) => { + let error_msg = format!("Failed to read target profile file: {err}"); + logging!(error, Type::Cmd, "{}", error_msg); + handle::Handle::notice_message("config_validate::file_read_error", error_msg.clone()); + Ok(false) + } + Err(_) => { + let error_msg = "Timed out reading profile file (5s)".to_string(); + logging!(error, Type::Cmd, "{}", error_msg); + handle::Handle::notice_message("config_validate::file_read_timeout", error_msg.clone()); + Err(error_msg.into()) + } + } +} + +/// Best-effort rollback invoked when a switch fails midway through the pipeline. 
+pub(super) async fn restore_previous_profile(previous: Option) -> CmdResult<()> { + if let Some(prev_profile) = previous { + logging!( + info, + Type::Cmd, + "Attempting to restore previous configuration: {}", + prev_profile + ); + let restore_profiles = IProfiles { + current: Some(prev_profile), + items: None, + }; + Config::profiles() + .await + .draft_mut() + .patch_config(restore_profiles) + .stringify_err()?; + if time::timeout(CONFIG_APPLY_TIMEOUT, async { + Config::profiles().await.apply(); + }) + .await + .is_err() + { + logging!( + warn, + Type::Cmd, + "Restoring previous configuration timed out after {:?}", + CONFIG_APPLY_TIMEOUT + ); + return Ok(()); + } + + AsyncHandler::spawn(|| async move { + let save_future = AsyncHandler::spawn_blocking(|| { + futures::executor::block_on(async { profiles_save_file_safe().await }) + }); + match time::timeout(SAVE_PROFILES_TIMEOUT, save_future).await { + Ok(join_res) => match join_res { + Ok(Ok(())) => {} + Ok(Err(err)) => { + logging!( + warn, + Type::Cmd, + "Failed to persist restored configuration asynchronously: {}", + err + ); + } + Err(join_err) => { + logging!(warn, Type::Cmd, "Blocking save task failed: {}", join_err); + } + }, + Err(_) => { + logging!( + warn, + Type::Cmd, + "Persisting restored configuration timed out after {:?}", + SAVE_PROFILES_TIMEOUT + ); + } + } + }); + } + + Ok(()) +} + +pub(super) fn describe_panic_payload(payload: &(dyn Any + Send)) -> String { + if let Some(message) = payload.downcast_ref::<&str>() { + (*message).to_string() + } else if let Some(message) = payload.downcast_ref::() { + message.clone() + } else { + "unknown panic".into() + } +} diff --git a/src-tauri/src/cmd/profile_switch/workflow/cleanup.rs b/src-tauri/src/cmd/profile_switch/workflow/cleanup.rs new file mode 100644 index 000000000..2f7e1aaca --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/workflow/cleanup.rs @@ -0,0 +1,65 @@ +use super::super::state::SWITCH_CLEANUP_TIMEOUT; +use crate::{core::handle, logging, process::AsyncHandler, utils::logging::Type}; +use smartstring::alias::String as SmartString; +use tokio::time; + +pub(crate) type CleanupHandle = tauri::async_runtime::JoinHandle<()>; + +pub(crate) fn schedule_post_switch_success( + profile_id: SmartString, + success: bool, + notify: bool, + task_id: u64, +) -> CleanupHandle { + // Post-success cleanup runs detached from the driver so the queue keeps moving. + AsyncHandler::spawn(move || async move { + handle::Handle::notify_profile_switch_finished( + profile_id.clone(), + success, + notify, + task_id, + ); + if success { + close_connections_after_switch(profile_id).await; + } + }) +} + +pub(crate) fn schedule_post_switch_failure( + profile_id: SmartString, + notify: bool, + task_id: u64, +) -> CleanupHandle { + // Failures or cancellations do not alter the active profile, so skip draining live connections. 
+ AsyncHandler::spawn(move || async move { + handle::Handle::notify_profile_switch_finished(profile_id.clone(), false, notify, task_id); + }) +} + +async fn close_connections_after_switch(profile_id: SmartString) { + match time::timeout(SWITCH_CLEANUP_TIMEOUT, async { + handle::Handle::mihomo().await.close_all_connections().await + }) + .await + { + Ok(Ok(())) => {} + Ok(Err(err)) => { + logging!( + warn, + Type::Cmd, + "Failed to close connections after profile switch ({}): {}", + profile_id, + err + ); + } + Err(_) => { + logging!( + warn, + Type::Cmd, + "Closing connections after profile switch ({}) timed out after {:?}", + profile_id, + SWITCH_CLEANUP_TIMEOUT + ); + } + } +} diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs new file mode 100644 index 000000000..9de753dbc --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs @@ -0,0 +1,178 @@ +use super::{CmdResult, core::SwitchStage}; +use crate::{ + cmd::profile_switch::state::{ + SwitchCancellation, SwitchHeartbeat, SwitchManager, SwitchRequest, SwitchScope, + }, + config::IProfiles, + logging, + utils::logging::Type, +}; +use smartstring::alias::String as SmartString; +use tokio::sync::MutexGuard; + +pub(super) struct SwitchContext { + pub(super) manager: &'static SwitchManager, + pub(super) request: Option, + pub(super) profiles_patch: Option, + pub(super) sequence: Option, + pub(super) target_profile: Option, + pub(super) previous_profile: Option, + pub(super) new_profile_for_event: Option, + pub(super) switch_scope: Option>, + pub(super) core_guard: Option>, + pub(super) heartbeat: SwitchHeartbeat, + pub(super) task_id: Option, + pub(super) profile_label: SmartString, + pub(super) active_stage: SwitchStage, +} + +impl SwitchContext { + // Captures all mutable data required across states (locks, profile ids, etc). + pub(super) fn new( + manager: &'static SwitchManager, + request: Option, + profiles: IProfiles, + heartbeat: SwitchHeartbeat, + ) -> Self { + let task_id = request.as_ref().map(|req| req.task_id()); + let profile_label = request + .as_ref() + .map(|req| req.profile_id().clone()) + .or_else(|| profiles.current.clone()) + .unwrap_or_else(|| SmartString::from("unknown")); + heartbeat.touch(); + Self { + manager, + request, + profiles_patch: Some(profiles), + sequence: None, + target_profile: None, + previous_profile: None, + new_profile_for_event: None, + switch_scope: None, + core_guard: None, + heartbeat, + task_id, + profile_label, + active_stage: SwitchStage::Start, + } + } + + pub(super) fn ensure_target_profile(&mut self) { + // Lazily determine which profile we're switching to so shared paths (patch vs. driver) behave the same. 
+ if let Some(patch) = self.profiles_patch.as_mut() { + if patch.current.is_none() + && let Some(request) = self.request.as_ref() + { + patch.current = Some(request.profile_id().clone()); + } + self.target_profile = patch.current.clone(); + } + } + + pub(super) fn take_profiles_patch(&mut self) -> CmdResult { + self.profiles_patch + .take() + .ok_or_else(|| "profiles patch already consumed".into()) + } + + pub(super) fn cancel_token(&self) -> Option { + self.request.as_ref().map(|req| req.cancel_token().clone()) + } + + pub(super) fn cancelled(&self) -> bool { + self.request + .as_ref() + .map(|req| req.cancel_token().is_cancelled()) + .unwrap_or(false) + } + + pub(super) fn log_cancelled(&self, stage: &str) { + if let Some(request) = self.request.as_ref() { + logging!( + info, + Type::Cmd, + "Switch task {} cancelled {}; profile={}", + request.task_id(), + stage, + request.profile_id() + ); + } else { + logging!(info, Type::Cmd, "Profile switch cancelled {}", stage); + } + } + + pub(super) fn should_validate_target(&self) -> bool { + match (&self.target_profile, &self.previous_profile) { + (Some(target), Some(current)) => current != target, + (Some(_), None) => true, + _ => false, + } + } + + pub(super) fn stale(&self) -> bool { + self.sequence + .map(|seq| seq < self.manager.latest_request_sequence()) + .unwrap_or(false) + } + + pub(super) fn sequence(&self) -> u64 { + self.sequence.unwrap_or_else(|| { + logging!( + warn, + Type::Cmd, + "Sequence unexpectedly missing in switch context; defaulting to 0" + ); + 0 + }) + } + + pub(super) fn record_stage(&mut self, stage: SwitchStage) { + let since_last = self.heartbeat.elapsed(); + let previous = self.active_stage; + self.active_stage = stage; + self.heartbeat.set_stage(stage.as_code()); + + match self.task_id { + Some(task_id) => logging!( + debug, + Type::Cmd, + "Switch task {} (profile={}) transitioned {:?} -> {:?} after {:?}", + task_id, + self.profile_label, + previous, + stage, + since_last + ), + None => logging!( + debug, + Type::Cmd, + "Profile patch {} transitioned {:?} -> {:?} after {:?}", + self.profile_label, + previous, + stage, + since_last + ), + } + } + + pub(super) fn release_core_guard(&mut self) { + self.core_guard = None; + } + + pub(super) fn release_switch_scope(&mut self) { + self.switch_scope = None; + } + + pub(super) fn release_locks(&mut self) { + self.release_core_guard(); + self.release_switch_scope(); + } +} + +impl Drop for SwitchContext { + fn drop(&mut self) { + self.core_guard.take(); + self.switch_scope.take(); + } +} diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs new file mode 100644 index 000000000..1c4e32ab2 --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs @@ -0,0 +1,284 @@ +use super::{CmdResult, context::SwitchContext, describe_panic_payload}; +use crate::{ + cmd::profile_switch::state::{SwitchHeartbeat, SwitchManager, SwitchRequest}, + config::IProfiles, + logging, + utils::logging::Type, +}; +use futures::FutureExt; +use std::{ + mem, + panic::AssertUnwindSafe, + time::{Duration, Instant}, +}; +pub(crate) const CONFIG_APPLY_TIMEOUT: Duration = Duration::from_secs(5); +pub(crate) const TRAY_UPDATE_TIMEOUT: Duration = Duration::from_secs(3); +pub(crate) const REFRESH_TIMEOUT: Duration = Duration::from_secs(3); +pub(crate) const SAVE_PROFILES_TIMEOUT: Duration = Duration::from_secs(5); +pub(crate) const SWITCH_IDLE_WAIT_TIMEOUT: Duration = Duration::from_secs(30); 
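The `SWITCH_IDLE_WAIT_*` constants declared here and just below feed the capped exponential backoff that `handle_acquire_core` runs while an earlier switch is still winding down. A hypothetical free function showing that wait loop in isolation (same numbers, placeholder name, no cancellation check):

```rust
use std::time::{Duration, Instant};
use tokio::time;

async fn wait_for_idle(mut is_switching: impl FnMut() -> bool) -> Result<(), String> {
    const TIMEOUT: Duration = Duration::from_secs(30);
    const POLL: Duration = Duration::from_millis(25);
    const MAX_BACKOFF: Duration = Duration::from_millis(250);

    let started = Instant::now();
    let mut backoff = POLL;
    while is_switching() {
        if started.elapsed() >= TIMEOUT {
            return Err(format!(
                "timed out after {:?} waiting for the active switch",
                TIMEOUT
            ));
        }
        time::sleep(backoff).await;
        // Double the sleep each round, but never beyond the cap.
        backoff = backoff.saturating_mul(2).min(MAX_BACKOFF);
    }
    Ok(())
}
```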
+pub(crate) const SWITCH_IDLE_WAIT_POLL: Duration = Duration::from_millis(25); +pub(crate) const SWITCH_IDLE_WAIT_MAX_BACKOFF: Duration = Duration::from_millis(250); + +/// Explicit state machine for profile switching so we can reason about +/// cancellation, stale requests, and side effects at each stage. +pub(crate) struct SwitchStateMachine { + pub(super) ctx: SwitchContext, + state: SwitchState, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum SwitchStage { + Start, + AcquireCore, + Prepare, + ValidateTarget, + PatchDraft, + UpdateCore, + Finalize, + Workflow, + DriverTask, +} + +impl SwitchStage { + pub(crate) fn as_code(self) -> u32 { + match self { + SwitchStage::Start => 0, + SwitchStage::AcquireCore => 1, + SwitchStage::Prepare => 2, + SwitchStage::ValidateTarget => 3, + SwitchStage::PatchDraft => 4, + SwitchStage::UpdateCore => 5, + SwitchStage::Finalize => 6, + SwitchStage::Workflow => 7, + SwitchStage::DriverTask => 8, + } + } + + pub(crate) fn from_code(code: u32) -> Option { + Some(match code { + 0 => SwitchStage::Start, + 1 => SwitchStage::AcquireCore, + 2 => SwitchStage::Prepare, + 3 => SwitchStage::ValidateTarget, + 4 => SwitchStage::PatchDraft, + 5 => SwitchStage::UpdateCore, + 6 => SwitchStage::Finalize, + 7 => SwitchStage::Workflow, + 8 => SwitchStage::DriverTask, + _ => return None, + }) + } +} + +#[derive(Debug, Clone)] +pub(crate) struct SwitchPanicInfo { + pub(crate) stage: SwitchStage, + pub(crate) detail: String, +} + +impl SwitchPanicInfo { + pub(crate) fn new(stage: SwitchStage, detail: String) -> Self { + Self { stage, detail } + } + + pub(crate) fn workflow_root(detail: String) -> Self { + Self::new(SwitchStage::Workflow, detail) + } + + pub(crate) fn driver_task(detail: String) -> Self { + Self::new(SwitchStage::DriverTask, detail) + } +} + +/// High-level state machine nodes executed in strict sequence. +pub(crate) enum SwitchState { + Start, + AcquireCore, + Prepare, + ValidateTarget, + PatchDraft, + UpdateCore, + Finalize(CoreUpdateOutcome), + Complete(bool), +} + +/// Result of trying to apply the draft configuration to the core. +pub(crate) enum CoreUpdateOutcome { + Success, + ValidationFailed { message: String }, + CoreError { message: String }, + Timeout, +} + +/// Indicates where a stale request was detected so logs stay descriptive. 
+pub(crate) enum StaleStage { + AfterLock, + BeforeCoreOperation, + BeforeCoreInteraction, + AfterCoreOperation, +} + +impl StaleStage { + pub(super) fn log(&self, ctx: &SwitchContext) { + let sequence = ctx.sequence(); + let latest = ctx.manager.latest_request_sequence(); + match self { + StaleStage::AfterLock => logging!( + info, + Type::Cmd, + "Detected a newer request after acquiring the lock (sequence: {} < {}), abandoning current request", + sequence, + latest + ), + StaleStage::BeforeCoreOperation => logging!( + info, + Type::Cmd, + "Detected a newer request before core operation (sequence: {} < {}), abandoning current request", + sequence, + latest + ), + StaleStage::BeforeCoreInteraction => logging!( + info, + Type::Cmd, + "Detected a newer request before core interaction (sequence: {} < {}), abandoning current request", + sequence, + latest + ), + StaleStage::AfterCoreOperation => logging!( + info, + Type::Cmd, + "Detected a newer request after core operation (sequence: {} < {}), ignoring current result", + sequence, + latest + ), + } + } +} + +impl SwitchStateMachine { + pub(crate) fn new( + manager: &'static SwitchManager, + request: Option, + profiles: IProfiles, + ) -> Self { + let heartbeat = request + .as_ref() + .map(|req| req.heartbeat().clone()) + .unwrap_or_else(SwitchHeartbeat::new); + + Self { + ctx: SwitchContext::new(manager, request, profiles, heartbeat), + state: SwitchState::Start, + } + } + + pub(crate) async fn run(mut self) -> Result, SwitchPanicInfo> { + // Drive the state machine until we either complete successfully or bubble up a panic. + loop { + let current_state = mem::replace(&mut self.state, SwitchState::Complete(false)); + match current_state { + SwitchState::Complete(result) => return Ok(Ok(result)), + _ => match self.run_state(current_state).await? { + Ok(state) => self.state = state, + Err(err) => return Ok(Err(err)), + }, + } + } + } + + async fn run_state( + &mut self, + current: SwitchState, + ) -> Result, SwitchPanicInfo> { + match current { + SwitchState::Start => { + self.with_stage( + SwitchStage::Start, + |this| async move { this.handle_start() }, + ) + .await + } + SwitchState::AcquireCore => { + self.with_stage(SwitchStage::AcquireCore, |this| async move { + this.handle_acquire_core().await + }) + .await + } + SwitchState::Prepare => { + self.with_stage(SwitchStage::Prepare, |this| async move { + this.handle_prepare().await + }) + .await + } + SwitchState::ValidateTarget => { + self.with_stage(SwitchStage::ValidateTarget, |this| async move { + this.handle_validate_target().await + }) + .await + } + SwitchState::PatchDraft => { + self.with_stage(SwitchStage::PatchDraft, |this| async move { + this.handle_patch_draft().await + }) + .await + } + SwitchState::UpdateCore => { + self.with_stage(SwitchStage::UpdateCore, |this| async move { + this.handle_update_core().await + }) + .await + } + SwitchState::Finalize(outcome) => { + self.with_stage(SwitchStage::Finalize, |this| async move { + this.handle_finalize(outcome).await + }) + .await + } + SwitchState::Complete(result) => Ok(Ok(SwitchState::Complete(result))), + } + } + + /// Helper that wraps each stage with consistent logging and panic reporting. 
+ async fn with_stage<'a, F, Fut>( + &'a mut self, + stage: SwitchStage, + f: F, + ) -> Result, SwitchPanicInfo> + where + F: FnOnce(&'a mut Self) -> Fut, + Fut: std::future::Future> + 'a, + { + let sequence = self.ctx.sequence(); + let task = self.ctx.task_id; + let profile = self.ctx.profile_label.clone(); + logging!( + info, + Type::Cmd, + "Enter {:?} (sequence={}, task={:?}, profile={})", + stage, + sequence, + task, + profile + ); + let stage_start = Instant::now(); + self.ctx.record_stage(stage); + AssertUnwindSafe(f(self)) + .catch_unwind() + .await + .map_err(|payload| { + SwitchPanicInfo::new(stage, describe_panic_payload(payload.as_ref())) + }) + .inspect(|_| { + logging!( + info, + Type::Cmd, + "Exit {:?} (sequence={}, task={:?}, profile={}, elapsed={}ms)", + stage, + sequence, + task, + profile, + stage_start.elapsed().as_millis() + ); + }) + } +} diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs new file mode 100644 index 000000000..84ee0f491 --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs @@ -0,0 +1,11 @@ +mod context; +mod core; +mod stages; + +pub(crate) use core::{ + CONFIG_APPLY_TIMEOUT, SAVE_PROFILES_TIMEOUT, SwitchPanicInfo, SwitchStage, SwitchStateMachine, +}; + +pub(super) use super::{ + CmdResult, describe_panic_payload, restore_previous_profile, validate_profile_yaml, +}; diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs new file mode 100644 index 000000000..78c313d42 --- /dev/null +++ b/src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs @@ -0,0 +1,597 @@ +use super::{ + CmdResult, + core::{ + CONFIG_APPLY_TIMEOUT, CoreUpdateOutcome, REFRESH_TIMEOUT, SAVE_PROFILES_TIMEOUT, + SWITCH_IDLE_WAIT_MAX_BACKOFF, SWITCH_IDLE_WAIT_POLL, SWITCH_IDLE_WAIT_TIMEOUT, StaleStage, + SwitchState, SwitchStateMachine, TRAY_UPDATE_TIMEOUT, + }, + restore_previous_profile, validate_profile_yaml, +}; +use crate::{ + config::{Config, profiles::profiles_save_file_safe}, + core::{CoreManager, handle, tray::Tray}, + logging, + process::AsyncHandler, + utils::logging::Type, +}; +use anyhow::Error; +use futures::future; +use smartstring::alias::String as SmartString; +use std::{ + pin::Pin, + time::{Duration, Instant}, +}; +use tokio::time; + +impl SwitchStateMachine { + pub(super) fn handle_start(&mut self) -> CmdResult { + if self.ctx.manager.is_switching() { + logging!( + info, + Type::Cmd, + "Profile switch already in progress; queuing request for task={:?}, profile={}", + self.ctx.task_id, + self.ctx.profile_label + ); + } + Ok(SwitchState::AcquireCore) + } + + /// Grab the core lock, mark the manager as switching, and compute the target profile. 
+ pub(super) async fn handle_acquire_core(&mut self) -> CmdResult { + let manager = self.ctx.manager; + let core_guard = manager.core_mutex().lock().await; + + if manager.is_switching() { + logging!( + info, + Type::Cmd, + "Active profile switch detected; waiting before acquiring scope" + ); + let wait_start = Instant::now(); + let mut backoff = SWITCH_IDLE_WAIT_POLL; + while manager.is_switching() { + if self.ctx.cancelled() { + self.ctx + .log_cancelled("while waiting for active switch to finish"); + return Ok(SwitchState::Complete(false)); + } + if wait_start.elapsed() >= SWITCH_IDLE_WAIT_TIMEOUT { + let message = format!( + "Timed out after {:?} waiting for active profile switch to finish", + SWITCH_IDLE_WAIT_TIMEOUT + ); + logging!(error, Type::Cmd, "{}", message); + return Err(message.into()); + } + + time::sleep(backoff).await; + backoff = backoff.saturating_mul(2).min(SWITCH_IDLE_WAIT_MAX_BACKOFF); + } + let waited = wait_start.elapsed().as_millis(); + if waited > 0 { + logging!( + info, + Type::Cmd, + "Waited {}ms for active switch to finish before acquiring scope", + waited + ); + } + } + + self.ctx.core_guard = Some(core_guard); + self.ctx.switch_scope = Some(manager.begin_switch()); + self.ctx.sequence = Some(manager.next_request_sequence()); + self.ctx.ensure_target_profile(); + + logging!( + info, + Type::Cmd, + "Begin modifying configuration; sequence: {}, target profile: {:?}", + self.ctx.sequence(), + self.ctx.target_profile + ); + + if self.ctx.cancelled() { + self.ctx.log_cancelled("after acquiring core lock"); + return Ok(SwitchState::Complete(false)); + } + + if self.ctx.stale() { + StaleStage::AfterLock.log(&self.ctx); + return Ok(SwitchState::Complete(false)); + } + + Ok(SwitchState::Prepare) + } + + pub(super) async fn handle_prepare(&mut self) -> CmdResult { + let current_profile = { + let profiles_guard = Config::profiles().await; + profiles_guard.latest_ref().current.clone() + }; + + logging!(info, Type::Cmd, "Current profile: {:?}", current_profile); + self.ctx.previous_profile = current_profile; + Ok(SwitchState::ValidateTarget) + } + + pub(super) async fn handle_validate_target(&mut self) -> CmdResult { + if self.ctx.cancelled() { + self.ctx.log_cancelled("before validation"); + return Ok(SwitchState::Complete(false)); + } + + if self.ctx.should_validate_target() { + let Some(target) = self.ctx.target_profile.clone() else { + logging!( + error, + Type::Cmd, + "Missing target profile while validation was requested; aborting switch" + ); + return Err("missing target profile at validation".into()); + }; + if !validate_profile_yaml(&target).await? 
{ + return Ok(SwitchState::Complete(false)); + } + } + + if self.ctx.stale() { + StaleStage::BeforeCoreOperation.log(&self.ctx); + return Ok(SwitchState::Complete(false)); + } + + Ok(SwitchState::PatchDraft) + } + + pub(super) async fn handle_patch_draft(&mut self) -> CmdResult { + if self.ctx.cancelled() { + self.ctx.log_cancelled("before patching configuration"); + return Ok(SwitchState::Complete(false)); + } + + logging!( + info, + Type::Cmd, + "Updating configuration draft, sequence: {}", + self.ctx.sequence() + ); + + let patch = self.ctx.take_profiles_patch()?; + self.ctx.new_profile_for_event = patch.current.clone(); + let _ = Config::profiles().await.draft_mut().patch_config(patch); + + if self.ctx.stale() { + StaleStage::BeforeCoreInteraction.log(&self.ctx); + Config::profiles().await.discard(); + return Ok(SwitchState::Complete(false)); + } + + Ok(SwitchState::UpdateCore) + } + + pub(super) async fn handle_update_core(&mut self) -> CmdResult { + let sequence = self.ctx.sequence(); + let task_id = self.ctx.task_id; + let profile = self.ctx.profile_label.clone(); + logging!( + info, + Type::Cmd, + "Starting core configuration update, sequence: {}, task={:?}, profile={}", + sequence, + task_id, + profile + ); + + let heartbeat = self.ctx.heartbeat.clone(); + let start = Instant::now(); + let mut ticker = time::interval(Duration::from_secs(1)); + ticker.set_missed_tick_behavior(time::MissedTickBehavior::Delay); + + let update_future = CoreManager::global().update_config(); + tokio::pin!(update_future); + + let timeout = time::sleep(Duration::from_secs(30)); + tokio::pin!(timeout); + + let cancel_token = self.ctx.cancel_token(); + let mut cancel_notifier: Pin + Send>> = + match cancel_token { + Some(token) => Box::pin(async move { + token.cancelled_future().await; + }), + None => Box::pin(future::pending()), + }; + + enum UpdateOutcome { + Finished(Result<(bool, SmartString), Error>), + Timeout, + Cancelled, + } + + let update_outcome = loop { + tokio::select! 
{ + res = &mut update_future => break UpdateOutcome::Finished(res), + _ = &mut timeout => break UpdateOutcome::Timeout, + _ = &mut cancel_notifier => break UpdateOutcome::Cancelled, + _ = ticker.tick() => { + let elapsed_ms = start.elapsed().as_millis(); + heartbeat.touch(); + match task_id { + Some(id) => logging!( + debug, + Type::Cmd, + "Switch task {} (profile={}) UpdateCore still running (elapsed={}ms)", + id, + profile, + elapsed_ms + ), + None => logging!( + debug, + Type::Cmd, + "Profile patch {} UpdateCore still running (elapsed={}ms)", + profile, + elapsed_ms + ), + } + } + } + }; + + let elapsed_ms = start.elapsed().as_millis(); + + let outcome = match update_outcome { + UpdateOutcome::Finished(Ok((true, _))) => { + logging!( + info, + Type::Cmd, + "Core configuration update succeeded in {}ms", + elapsed_ms + ); + CoreUpdateOutcome::Success + } + UpdateOutcome::Finished(Ok((false, msg))) => { + logging!( + warn, + Type::Cmd, + "Core configuration update validation failed in {}ms: {}", + elapsed_ms, + msg + ); + CoreUpdateOutcome::ValidationFailed { + message: msg.to_string(), + } + } + UpdateOutcome::Finished(Err(err)) => { + logging!( + error, + Type::Cmd, + "Core configuration update errored in {}ms: {}", + elapsed_ms, + err + ); + CoreUpdateOutcome::CoreError { + message: err.to_string(), + } + } + UpdateOutcome::Timeout => { + logging!( + error, + Type::Cmd, + "Core configuration update timed out after {}ms", + elapsed_ms + ); + CoreUpdateOutcome::Timeout + } + UpdateOutcome::Cancelled => { + self.ctx.log_cancelled("during core update"); + logging!( + info, + Type::Cmd, + "Core configuration update cancelled after {}ms", + elapsed_ms + ); + self.ctx.release_locks(); + Config::profiles().await.discard(); + return Ok(SwitchState::Complete(false)); + } + }; + + self.ctx.release_core_guard(); + + Ok(SwitchState::Finalize(outcome)) + } + + pub(super) async fn handle_finalize( + &mut self, + outcome: CoreUpdateOutcome, + ) -> CmdResult { + let next_state = match outcome { + CoreUpdateOutcome::Success => self.finalize_success().await, + CoreUpdateOutcome::ValidationFailed { message } => { + self.finalize_validation_failed(message).await + } + CoreUpdateOutcome::CoreError { message } => self.finalize_core_error(message).await, + CoreUpdateOutcome::Timeout => self.finalize_timeout().await, + }; + + if next_state.is_err() || matches!(next_state, Ok(SwitchState::Complete(_))) { + self.ctx.release_switch_scope(); + } + + next_state + } + + pub(super) async fn finalize_success(&mut self) -> CmdResult { + if self.abort_if_stale_post_core().await? { + return Ok(SwitchState::Complete(false)); + } + + self.log_successful_update(); + + if !self.apply_config_with_timeout().await? 
{ + logging!( + warn, + Type::Cmd, + "Apply step failed; attempting to restore previous profile before completing" + ); + restore_previous_profile(self.ctx.previous_profile.clone()).await?; + return Ok(SwitchState::Complete(false)); + } + + self.refresh_clash_with_timeout().await; + self.update_tray_tooltip_with_timeout().await; + self.update_tray_menu_with_timeout().await; + if let Err(err) = self.persist_profiles_with_timeout().await { + logging!( + error, + Type::Cmd, + "Persisting new profile configuration failed; attempting to restore previous profile: {}", + err + ); + restore_previous_profile(self.ctx.previous_profile.clone()).await?; + return Err(err); + } + self.emit_profile_change_event(); + logging!( + debug, + Type::Cmd, + "Finalize success pipeline completed for sequence {}", + self.ctx.sequence() + ); + + Ok(SwitchState::Complete(true)) + } + + pub(super) async fn finalize_validation_failed( + &mut self, + message: String, + ) -> CmdResult { + logging!( + warn, + Type::Cmd, + "Configuration validation failed: {}", + message + ); + Config::profiles().await.discard(); + restore_previous_profile(self.ctx.previous_profile.clone()).await?; + handle::Handle::notice_message("config_validate::error", message); + Ok(SwitchState::Complete(false)) + } + + pub(super) async fn finalize_core_error(&mut self, message: String) -> CmdResult { + logging!( + warn, + Type::Cmd, + "Error occurred during update: {}, sequence: {}", + message, + self.ctx.sequence() + ); + Config::profiles().await.discard(); + handle::Handle::notice_message("config_validate::boot_error", message); + Ok(SwitchState::Complete(false)) + } + + pub(super) async fn finalize_timeout(&mut self) -> CmdResult { + let timeout_msg = + "Configuration update timed out (30s); possible validation or core communication stall"; + logging!( + error, + Type::Cmd, + "{}, sequence: {}", + timeout_msg, + self.ctx.sequence() + ); + Config::profiles().await.discard(); + restore_previous_profile(self.ctx.previous_profile.clone()).await?; + handle::Handle::notice_message("config_validate::timeout", timeout_msg); + Ok(SwitchState::Complete(false)) + } + + pub(super) async fn abort_if_stale_post_core(&mut self) -> CmdResult { + if self.ctx.stale() { + StaleStage::AfterCoreOperation.log(&self.ctx); + Config::profiles().await.discard(); + return Ok(true); + } + + Ok(false) + } + + pub(super) fn log_successful_update(&self) { + logging!( + info, + Type::Cmd, + "Configuration update succeeded, sequence: {}", + self.ctx.sequence() + ); + } + + pub(super) async fn apply_config_with_timeout(&mut self) -> CmdResult { + let apply_result = time::timeout(CONFIG_APPLY_TIMEOUT, async { + Config::profiles().await.apply() + }) + .await; + + if apply_result.is_ok() { + Ok(true) + } else { + logging!( + warn, + Type::Cmd, + "Applying profile configuration timed out after {:?}", + CONFIG_APPLY_TIMEOUT + ); + Config::profiles().await.discard(); + Ok(false) + } + } + + pub(super) async fn refresh_clash_with_timeout(&self) { + let start = Instant::now(); + let result = time::timeout(REFRESH_TIMEOUT, async { + handle::Handle::refresh_clash(); + }) + .await; + + let elapsed = start.elapsed().as_millis(); + match result { + Ok(_) => logging!( + debug, + Type::Cmd, + "refresh_clash_with_timeout completed in {}ms", + elapsed + ), + Err(_) => logging!( + warn, + Type::Cmd, + "Refreshing Clash state timed out after {:?} (elapsed={}ms)", + REFRESH_TIMEOUT, + elapsed + ), + } + } + + pub(super) async fn update_tray_tooltip_with_timeout(&self) { + let start = Instant::now(); 
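The tray and persistence helpers here all repeat the best-effort shape already seen in `refresh_clash_with_timeout`: bound the side effect with a timeout, record the elapsed time, and never let a stalled call block the switch. A hypothetical generic wrapper capturing that shape (not in the patch, which logs through the `logging!` macro rather than stdout):

```rust
use std::time::{Duration, Instant};
use tokio::time;

async fn best_effort<F>(label: &str, limit: Duration, op: F)
where
    F: std::future::Future<Output = Result<(), String>>,
{
    let started = Instant::now();
    match time::timeout(limit, op).await {
        Ok(Ok(())) => println!("{label} completed in {}ms", started.elapsed().as_millis()),
        // The operation ran but reported an error: log and move on.
        Ok(Err(err)) => eprintln!("{label} failed: {err}"),
        // The operation never settled within the budget: give up instead of blocking the switch.
        Err(_) => eprintln!("{label} timed out after {limit:?}"),
    }
}
```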
+ let update_tooltip = time::timeout(TRAY_UPDATE_TIMEOUT, async { + Tray::global().update_tooltip().await + }) + .await; + let elapsed = start.elapsed().as_millis(); + + if update_tooltip.is_err() { + logging!( + warn, + Type::Cmd, + "Updating tray tooltip timed out after {:?} (elapsed={}ms)", + TRAY_UPDATE_TIMEOUT, + elapsed + ); + } else if let Ok(Err(err)) = update_tooltip { + logging!( + warn, + Type::Cmd, + "Failed to update tray tooltip asynchronously: {}", + err + ); + } else { + logging!( + debug, + Type::Cmd, + "update_tray_tooltip_with_timeout completed in {}ms", + elapsed + ); + } + } + + pub(super) async fn update_tray_menu_with_timeout(&self) { + let start = Instant::now(); + let update_menu = time::timeout(TRAY_UPDATE_TIMEOUT, async { + Tray::global().update_menu().await + }) + .await; + let elapsed = start.elapsed().as_millis(); + + if update_menu.is_err() { + logging!( + warn, + Type::Cmd, + "Updating tray menu timed out after {:?} (elapsed={}ms)", + TRAY_UPDATE_TIMEOUT, + elapsed + ); + } else if let Ok(Err(err)) = update_menu { + logging!( + warn, + Type::Cmd, + "Failed to update tray menu asynchronously: {}", + err + ); + } else { + logging!( + debug, + Type::Cmd, + "update_tray_menu_with_timeout completed in {}ms", + elapsed + ); + } + } + + pub(super) async fn persist_profiles_with_timeout(&self) -> CmdResult<()> { + let start = Instant::now(); + let save_future = AsyncHandler::spawn_blocking(|| { + futures::executor::block_on(async { profiles_save_file_safe().await }) + }); + + let elapsed = start.elapsed().as_millis(); + match time::timeout(SAVE_PROFILES_TIMEOUT, save_future).await { + Err(_) => { + let message = format!( + "Persisting configuration file timed out after {:?} (elapsed={}ms)", + SAVE_PROFILES_TIMEOUT, elapsed + ); + logging!(warn, Type::Cmd, "{}", message); + Err(message.into()) + } + Ok(join_result) => match join_result { + Err(join_err) => { + let message = format!( + "Persisting configuration file failed: blocking task join error: {join_err}" + ); + logging!(error, Type::Cmd, "{}", message); + Err(message.into()) + } + Ok(save_result) => match save_result { + Ok(()) => { + logging!( + debug, + Type::Cmd, + "persist_profiles_with_timeout completed in {}ms", + elapsed + ); + Ok(()) + } + Err(err) => { + let message = format!("Persisting configuration file failed: {}", err); + logging!(error, Type::Cmd, "{}", message); + Err(message.into()) + } + }, + }, + } + } + + pub(super) fn emit_profile_change_event(&self) { + if let Some(current) = self.ctx.new_profile_for_event.clone() { + logging!( + info, + Type::Cmd, + "Emitting configuration change event to frontend: {}, sequence: {}", + current, + self.ctx.sequence() + ); + handle::Handle::notify_profile_changed(current); + } + } +} diff --git a/src-tauri/src/core/handle.rs b/src-tauri/src/core/handle.rs index ef868f59d..e3735d461 100644 --- a/src-tauri/src/core/handle.rs +++ b/src-tauri/src/core/handle.rs @@ -1,7 +1,14 @@ -use crate::{APP_HANDLE, constants::timing, singleton}; +use crate::{ + APP_HANDLE, config::Config, constants::timing, logging, singleton, utils::logging::Type, +}; use parking_lot::RwLock; +use serde_json::{Value, json}; use smartstring::alias::String; -use std::{sync::Arc, thread}; +use std::{ + sync::Arc, + thread, + time::{SystemTime, UNIX_EPOCH}, +}; use tauri::{AppHandle, Manager, WebviewWindow}; use tauri_plugin_mihomo::{Mihomo, MihomoExt}; use tokio::sync::RwLockReadGuard; @@ -66,10 +73,14 @@ impl Handle { return; } - let system_opt = handle.notification_system.read(); - if let 
Some(system) = system_opt.as_ref() { - system.send_event(FrontendEvent::RefreshClash); + { + let system_opt = handle.notification_system.read(); + if let Some(system) = system_opt.as_ref() { + system.send_event(FrontendEvent::RefreshClash); + } } + + Self::spawn_proxy_snapshot(); } pub fn refresh_verge() { @@ -85,11 +96,37 @@ impl Handle { } pub fn notify_profile_changed(profile_id: String) { - Self::send_event(FrontendEvent::ProfileChanged { - current_profile_id: profile_id, + let handle = Self::global(); + if handle.is_exiting() { + return; + } + + let system_opt = handle.notification_system.read(); + if let Some(system) = system_opt.as_ref() { + system.send_event(FrontendEvent::ProfileChanged { + current_profile_id: profile_id, + }); + } + } + + pub fn notify_profile_switch_finished( + profile_id: String, + success: bool, + notify: bool, + task_id: u64, + ) { + Self::send_event(FrontendEvent::ProfileSwitchFinished { + profile_id, + success, + notify, + task_id, }); } + pub fn notify_rust_panic(message: String, location: String) { + Self::send_event(FrontendEvent::RustPanic { message, location }); + } + pub fn notify_timer_updated(profile_index: String) { Self::send_event(FrontendEvent::TimerUpdated { profile_index }); } @@ -100,6 +137,86 @@ impl Handle { pub fn notify_profile_update_completed(uid: String) { Self::send_event(FrontendEvent::ProfileUpdateCompleted { uid }); + Self::spawn_proxy_snapshot(); + } + + pub fn notify_proxies_updated(payload: Value) { + Self::send_event(FrontendEvent::ProxiesUpdated { payload }); + } + + pub async fn build_proxy_snapshot() -> Option { + let mihomo_guard = Self::mihomo().await; + let proxies = match mihomo_guard.get_proxies().await { + Ok(data) => match serde_json::to_value(&data) { + Ok(value) => value, + Err(error) => { + logging!( + warn, + Type::Frontend, + "Failed to serialize proxies snapshot: {error}" + ); + return None; + } + }, + Err(error) => { + logging!( + warn, + Type::Frontend, + "Failed to fetch proxies for snapshot: {error}" + ); + return None; + } + }; + + drop(mihomo_guard); + + let providers_guard = Self::mihomo().await; + let providers_value = match providers_guard.get_proxy_providers().await { + Ok(data) => serde_json::to_value(&data).unwrap_or_else(|error| { + logging!( + warn, + Type::Frontend, + "Failed to serialize proxy providers for snapshot: {error}" + ); + Value::Null + }), + Err(error) => { + logging!( + warn, + Type::Frontend, + "Failed to fetch proxy providers for snapshot: {error}" + ); + Value::Null + } + }; + + drop(providers_guard); + + let profile_guard = Config::profiles().await; + let profile_id = profile_guard.latest_ref().current.clone(); + drop(profile_guard); + + let emitted_at = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|duration| duration.as_millis() as i64) + .unwrap_or(0); + + let payload = json!({ + "proxies": proxies, + "providers": providers_value, + "profileId": profile_id, + "emittedAt": emitted_at, + }); + + Some(payload) + } + + fn spawn_proxy_snapshot() { + tauri::async_runtime::spawn(async { + if let Some(payload) = Handle::build_proxy_snapshot().await { + Handle::notify_proxies_updated(payload); + } + }); } pub fn notice_message, M: Into>(status: S, msg: M) { diff --git a/src-tauri/src/core/manager/config.rs b/src-tauri/src/core/manager/config.rs index 263ddb4b0..e93d5244a 100644 --- a/src-tauri/src/core/manager/config.rs +++ b/src-tauri/src/core/manager/config.rs @@ -10,7 +10,10 @@ use anyhow::{Result, anyhow}; use smartstring::alias::String; use std::{path::PathBuf, 
time::Instant}; use tauri_plugin_mihomo::Error as MihomoError; -use tokio::time::sleep; +use tokio::time::{sleep, timeout}; + +const RELOAD_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(5); +const MAX_RELOAD_ATTEMPTS: usize = 3; impl CoreManager { pub async fn use_default_config(&self, error_key: &str, error_msg: &str) -> Result<()> { @@ -39,12 +42,38 @@ impl CoreManager { return Ok((true, String::new())); } + let start = Instant::now(); + let _permit = self .update_semaphore .try_acquire() .map_err(|_| anyhow!("Config update already in progress"))?; - self.perform_config_update().await + let result = self.perform_config_update().await; + + match &result { + Ok((success, msg)) => { + logging!( + info, + Type::Core, + "[ConfigUpdate] Finished (success={}, elapsed={}ms, msg={})", + success, + start.elapsed().as_millis(), + msg + ); + } + Err(err) => { + logging!( + error, + Type::Core, + "[ConfigUpdate] Failed after {}ms: {}", + start.elapsed().as_millis(), + err + ); + } + } + + result } fn should_update_config(&self) -> Result { @@ -62,20 +91,73 @@ impl CoreManager { } async fn perform_config_update(&self) -> Result<(bool, String)> { - Config::generate().await?; + logging!(debug, Type::Core, "[ConfigUpdate] Pipeline start"); + let total_start = Instant::now(); - match CoreConfigValidator::global().validate_config().await { + let mut stage_timer = Instant::now(); + Config::generate().await?; + logging!( + debug, + Type::Core, + "[ConfigUpdate] Generation completed in {}ms", + stage_timer.elapsed().as_millis() + ); + + stage_timer = Instant::now(); + let validation_result = CoreConfigValidator::global().validate_config().await; + logging!( + debug, + Type::Core, + "[ConfigUpdate] Validation completed in {}ms", + stage_timer.elapsed().as_millis() + ); + + match validation_result { Ok((true, _)) => { + stage_timer = Instant::now(); let run_path = Config::generate_file(ConfigType::Run).await?; + logging!( + debug, + Type::Core, + "[ConfigUpdate] Runtime file generated in {}ms", + stage_timer.elapsed().as_millis() + ); + stage_timer = Instant::now(); self.apply_config(run_path).await?; + logging!( + debug, + Type::Core, + "[ConfigUpdate] Core apply completed in {}ms", + stage_timer.elapsed().as_millis() + ); + logging!( + debug, + Type::Core, + "[ConfigUpdate] Pipeline succeeded in {}ms", + total_start.elapsed().as_millis() + ); Ok((true, String::new())) } Ok((false, error_msg)) => { Config::runtime().await.discard(); + logging!( + warn, + Type::Core, + "[ConfigUpdate] Validation reported failure after {}ms: {}", + total_start.elapsed().as_millis(), + error_msg + ); Ok((false, error_msg)) } Err(e) => { Config::runtime().await.discard(); + logging!( + error, + Type::Core, + "[ConfigUpdate] Validation errored after {}ms: {}", + total_start.elapsed().as_millis(), + e + ); Err(e) } } @@ -88,17 +170,49 @@ impl CoreManager { pub(super) async fn apply_config(&self, path: PathBuf) -> Result<()> { let path_str = dirs::path_to_str(&path)?; - match self.reload_config(path_str).await { + let reload_start = Instant::now(); + match self.reload_config_with_retry(path_str).await { Ok(_) => { Config::runtime().await.apply(); - logging!(info, Type::Core, "Configuration applied"); + logging!( + debug, + Type::Core, + "Configuration applied (reload={}ms)", + reload_start.elapsed().as_millis() + ); Ok(()) } - Err(err) if Self::should_restart_on_error(&err) => { - self.retry_with_restart(path_str).await - } Err(err) => { + if Self::should_restart_for_anyhow(&err) { + logging!( + warn, + Type::Core, + 
"Reload failed after {}ms with retryable/timeout error; attempting restart: {}", + reload_start.elapsed().as_millis(), + err + ); + match self.retry_with_restart(path_str).await { + Ok(_) => return Ok(()), + Err(retry_err) => { + logging!( + error, + Type::Core, + "Reload retry with restart failed: {}", + retry_err + ); + Config::runtime().await.discard(); + return Err(retry_err); + } + } + } Config::runtime().await.discard(); + logging!( + error, + Type::Core, + "Failed to apply config after {}ms: {}", + reload_start.elapsed().as_millis(), + err + ); Err(anyhow!("Failed to apply config: {}", err)) } } @@ -113,17 +227,116 @@ impl CoreManager { self.restart_core().await?; sleep(timing::CONFIG_RELOAD_DELAY).await; - self.reload_config(config_path).await?; + self.reload_config_with_retry(config_path).await?; Config::runtime().await.apply(); logging!(info, Type::Core, "Configuration applied after restart"); Ok(()) } - async fn reload_config(&self, path: &str) -> Result<(), MihomoError> { - handle::Handle::mihomo() + async fn reload_config_with_retry(&self, path: &str) -> Result<()> { + for attempt in 1..=MAX_RELOAD_ATTEMPTS { + let attempt_start = Instant::now(); + let reload_future = self.reload_config_once(path); + match timeout(RELOAD_TIMEOUT, reload_future).await { + Ok(Ok(())) => { + logging!( + debug, + Type::Core, + "reload_config attempt {}/{} succeeded in {}ms", + attempt, + MAX_RELOAD_ATTEMPTS, + attempt_start.elapsed().as_millis() + ); + return Ok(()); + } + Ok(Err(err)) => { + logging!( + warn, + Type::Core, + "reload_config attempt {}/{} failed after {}ms: {}", + attempt, + MAX_RELOAD_ATTEMPTS, + attempt_start.elapsed().as_millis(), + err + ); + if attempt == MAX_RELOAD_ATTEMPTS { + return Err(anyhow!( + "Failed to reload config after {} attempts: {}", + attempt, + err + )); + } + } + Err(_) => { + logging!( + warn, + Type::Core, + "reload_config attempt {}/{} timed out after {:?}", + attempt, + MAX_RELOAD_ATTEMPTS, + RELOAD_TIMEOUT + ); + if attempt == MAX_RELOAD_ATTEMPTS { + return Err(anyhow!( + "Config reload timed out after {:?} ({} attempts)", + RELOAD_TIMEOUT, + MAX_RELOAD_ATTEMPTS + )); + } + } + } + } + + Err(anyhow!( + "Config reload retry loop exited unexpectedly ({} attempts)", + MAX_RELOAD_ATTEMPTS + )) + } + + async fn reload_config_once(&self, path: &str) -> Result<(), MihomoError> { + logging!( + info, + Type::Core, + "[ConfigUpdate] reload_config_once begin path={} ", + path + ); + let start = Instant::now(); + let result = handle::Handle::mihomo() .await .reload_config(true, path) - .await + .await; + let elapsed = start.elapsed().as_millis(); + match result { + Ok(()) => { + logging!( + info, + Type::Core, + "[ConfigUpdate] reload_config_once succeeded (elapsed={}ms)", + elapsed + ); + Ok(()) + } + Err(err) => { + logging!( + warn, + Type::Core, + "[ConfigUpdate] reload_config_once failed (elapsed={}ms, err={})", + elapsed, + err + ); + Err(err) + } + } + } + + fn should_restart_for_anyhow(err: &anyhow::Error) -> bool { + if let Some(mihomo_err) = err.downcast_ref::() { + return Self::should_restart_on_error(mihomo_err); + } + let msg = err.to_string(); + msg.contains("timed out") + || msg.contains("reload") + || msg.contains("Failed to apply config") } fn should_restart_on_error(err: &MihomoError) -> bool { diff --git a/src-tauri/src/core/notification.rs b/src-tauri/src/core/notification.rs index 071bcedb9..5754fecb2 100644 --- a/src-tauri/src/core/notification.rs +++ b/src-tauri/src/core/notification.rs @@ -1,38 +1,71 @@ -use crate::{ - constants::{retry, 
timing}, - logging, - utils::logging::Type, -}; +use crate::{constants::retry, logging, utils::logging::Type}; +use once_cell::sync::Lazy; use parking_lot::RwLock; use smartstring::alias::String; use std::{ sync::{ - atomic::{AtomicU64, Ordering}, + Arc, + atomic::{AtomicBool, AtomicU64, Ordering}, mpsc, }, thread, time::Instant, }; -use tauri::{Emitter, WebviewWindow}; +use tauri::Emitter; +use tauri::async_runtime; +#[allow(dead_code)] // Temporarily suppress warnings while diagnostics disable certain events #[derive(Debug, Clone)] pub enum FrontendEvent { RefreshClash, RefreshVerge, - NoticeMessage { status: String, message: String }, - ProfileChanged { current_profile_id: String }, - TimerUpdated { profile_index: String }, - ProfileUpdateStarted { uid: String }, - ProfileUpdateCompleted { uid: String }, + RefreshProxy, + ProxiesUpdated { + payload: serde_json::Value, + }, + NoticeMessage { + status: String, + message: String, + }, + ProfileChanged { + current_profile_id: String, + }, + ProfileSwitchFinished { + profile_id: String, + success: bool, + notify: bool, + task_id: u64, + }, + TimerUpdated { + profile_index: String, + }, + ProfileUpdateStarted { + uid: String, + }, + ProfileUpdateCompleted { + uid: String, + }, + RustPanic { + message: String, + location: String, + }, } +static EMIT_SERIALIZER: Lazy> = Lazy::new(|| tokio::sync::Mutex::new(())); + #[derive(Debug, Default)] struct EventStats { - total_sent: AtomicU64, total_errors: AtomicU64, last_error_time: RwLock>, } +#[derive(Debug, Default)] +#[allow(dead_code)] +struct BufferedProxies { + pending: parking_lot::Mutex>, + in_flight: AtomicBool, +} + #[derive(Debug, Clone)] pub struct ErrorMessage { pub status: String, @@ -47,6 +80,7 @@ pub struct NotificationSystem { pub(super) is_running: bool, stats: EventStats, emergency_mode: RwLock, + proxies_buffer: Arc, } impl Default for NotificationSystem { @@ -63,6 +97,7 @@ impl NotificationSystem { is_running: false, stats: EventStats::default(), emergency_mode: RwLock::new(false), + proxies_buffer: Arc::new(BufferedProxies::default()), } } @@ -117,13 +152,78 @@ impl NotificationSystem { return; }; - if system.should_skip_event(&event) { - return; + let event_label = Self::describe_event(&event); + + match event { + FrontendEvent::ProxiesUpdated { payload } => { + logging!( + debug, + Type::Frontend, + "Queueing proxies-updated event for buffered emit: {}", + event_label + ); + system.enqueue_proxies_updated(payload); + } + other => { + logging!( + debug, + Type::Frontend, + "Queueing event for async emit: {}", + event_label + ); + + let (event_name, payload_result) = system.serialize_event(other); + let payload = match payload_result { + Ok(value) => value, + Err(err) => { + logging!( + warn, + Type::Frontend, + "Failed to serialize event {}: {}", + event_name, + err + ); + return; + } + }; + + logging!( + debug, + Type::Frontend, + "Dispatching async emit: {}", + event_name + ); + let _ = Self::emit_via_app(event_name, payload); + } + } + } + + fn enqueue_proxies_updated(&self, payload: serde_json::Value) { + let replaced = { + let mut slot = self.proxies_buffer.pending.lock(); + let had_pending = slot.is_some(); + *slot = Some(payload); + had_pending + }; + + if replaced { + logging!( + debug, + Type::Frontend, + "Replaced pending proxies-updated payload with latest snapshot" + ); } - if let Some(window) = super::handle::Handle::get_window() { - system.emit_to_window(&window, event); - thread::sleep(timing::EVENT_EMIT_DELAY); + if self + .proxies_buffer + .in_flight + 
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) + .is_ok() + { + let buffer = Arc::clone(&self.proxies_buffer); + async_runtime::spawn(async move { + Self::flush_proxies(buffer).await; + }); } } @@ -135,25 +235,95 @@ impl NotificationSystem { ) } - fn emit_to_window(&self, window: &WebviewWindow, event: FrontendEvent) { - let (event_name, payload) = self.serialize_event(event); - - let Ok(payload) = payload else { - self.stats.total_errors.fetch_add(1, Ordering::Relaxed); - return; - }; - - match window.emit(event_name, payload) { - Ok(_) => { - self.stats.total_sent.fetch_add(1, Ordering::Relaxed); + fn emit_via_app(event_name: &'static str, payload: serde_json::Value) -> Result<(), String> { + let app_handle = super::handle::Handle::app_handle().clone(); + let event_name = event_name.to_string(); + async_runtime::spawn(async move { + if let Err(err) = app_handle.emit_to("main", event_name.as_str(), payload) { + logging!( + warn, + Type::Frontend, + "emit_to failed for {}: {}", + event_name, + err + ); } - Err(e) => { - logging!(warn, Type::Frontend, "Event emit failed: {}", e); - self.handle_emit_error(); + }); + Ok(()) + } + + async fn flush_proxies(buffer: Arc) { + const EVENT_NAME: &str = "proxies-updated"; + + loop { + let payload_opt = { + let mut guard = buffer.pending.lock(); + guard.take() + }; + + let Some(payload) = payload_opt else { + buffer.in_flight.store(false, Ordering::Release); + + if buffer.pending.lock().is_some() + && buffer + .in_flight + .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) + .is_ok() + { + continue; + } + + break; + }; + + logging!(debug, Type::Frontend, "Dispatching buffered proxies emit"); + let _guard = EMIT_SERIALIZER.lock().await; + if let Err(err) = Self::emit_via_app(EVENT_NAME, payload) { + logging!( + warn, + Type::Frontend, + "Buffered proxies emit failed: {}", + err + ); } } } + fn describe_event(event: &FrontendEvent) -> String { + match event { + FrontendEvent::RefreshClash => "RefreshClash".into(), + FrontendEvent::RefreshVerge => "RefreshVerge".into(), + FrontendEvent::RefreshProxy => "RefreshProxy".into(), + FrontendEvent::ProxiesUpdated { .. } => "ProxiesUpdated".into(), + FrontendEvent::NoticeMessage { status, .. } => { + format!("NoticeMessage({})", status).into() + } + FrontendEvent::ProfileChanged { current_profile_id } => { + format!("ProfileChanged({})", current_profile_id).into() + } + FrontendEvent::ProfileSwitchFinished { + profile_id, + task_id, + .. + } => format!( + "ProfileSwitchFinished(profile={}, task={})", + profile_id, task_id + ) + .into(), + FrontendEvent::TimerUpdated { profile_index } => { + format!("TimerUpdated({})", profile_index).into() + } + FrontendEvent::ProfileUpdateStarted { uid } => { + format!("ProfileUpdateStarted({})", uid).into() + } + FrontendEvent::ProfileUpdateCompleted { uid } => { + format!("ProfileUpdateCompleted({})", uid).into() + } + FrontendEvent::RustPanic { message, .. 
} => format!("RustPanic({})", message).into(), + } + } + + #[allow(dead_code)] fn serialize_event( &self, event: FrontendEvent, @@ -167,9 +337,25 @@ impl NotificationSystem { "verge://notice-message", serde_json::to_value((status, message)), ), + FrontendEvent::RefreshProxy => ("verge://refresh-proxy-config", Ok(json!("yes"))), + FrontendEvent::ProxiesUpdated { payload } => ("proxies-updated", Ok(payload)), FrontendEvent::ProfileChanged { current_profile_id } => { ("profile-changed", Ok(json!(current_profile_id))) } + FrontendEvent::ProfileSwitchFinished { + profile_id, + success, + notify, + task_id, + } => ( + "profile-switch-finished", + Ok(json!({ + "profileId": profile_id, + "success": success, + "notify": notify, + "taskId": task_id + })), + ), FrontendEvent::TimerUpdated { profile_index } => { ("verge://timer-updated", Ok(json!(profile_index))) } @@ -179,6 +365,10 @@ impl NotificationSystem { FrontendEvent::ProfileUpdateCompleted { uid } => { ("profile-update-completed", Ok(json!({ "uid": uid }))) } + FrontendEvent::RustPanic { message, location } => ( + "rust-panic", + Ok(json!({ "message": message, "location": location })), + ), } } @@ -204,10 +394,19 @@ impl NotificationSystem { } if let Some(sender) = &self.sender { - sender.send(event).is_ok() - } else { - false + if sender.send(event).is_err() { + logging!( + warn, + Type::Frontend, + "Failed to send event to worker thread" + ); + self.handle_emit_error(); + return false; + } + return true; } + + false } pub fn shutdown(&mut self) { diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 5942ac155..6f3130fe8 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -192,6 +192,7 @@ mod app_init { cmd::get_profiles, cmd::enhance_profiles, cmd::patch_profiles_config, + cmd::switch_profile, cmd::view_profile, cmd::patch_profile, cmd::create_profile, @@ -202,6 +203,8 @@ mod app_init { cmd::read_profile_file, cmd::save_profile_file, cmd::get_next_update_time, + cmd::get_profile_switch_status, + cmd::get_profile_switch_events, cmd::script_validate_notice, cmd::validate_script_file, cmd::create_local_backup, @@ -218,6 +221,7 @@ mod app_init { cmd::get_system_info, cmd::get_unlock_items, cmd::check_media_unlock, + cmd::frontend_log, ] } } @@ -356,6 +360,28 @@ pub fn run() { } } + std::panic::set_hook(Box::new(|info| { + let payload = info + .payload() + .downcast_ref::<&'static str>() + .map(|s| (*s).to_string()) + .or_else(|| info.payload().downcast_ref::().cloned()) + .unwrap_or_else(|| "Unknown panic".to_string()); + let location = info + .location() + .map(|loc| format!("{}:{}", loc.file(), loc.line())) + .unwrap_or_else(|| "unknown location".to_string()); + + logging!( + error, + Type::System, + "Rust panic captured: {} @ {}", + payload, + location + ); + handle::Handle::notify_rust_panic(payload.into(), location.into()); + })); + #[cfg(feature = "clippy")] let context = tauri::test::mock_context(tauri::test::noop_assets()); #[cfg(feature = "clippy")] diff --git a/src-tauri/src/utils/draft.rs b/src-tauri/src/utils/draft.rs index 044f6f1fa..cc72f9c32 100644 --- a/src-tauri/src/utils/draft.rs +++ b/src-tauri/src/utils/draft.rs @@ -68,6 +68,13 @@ impl Draft> { }) } + /// 尝试获取最新只读视图,若当前持有写锁则返回 `None` + pub fn try_latest_ref(&self) -> Option>> { + self.inner + .try_read() + .map(|guard| RwLockReadGuard::map(guard, |inner| inner.1.as_ref().unwrap_or(&inner.0))) + } + /// 提交草稿,返回旧正式数据 pub fn apply(&self) -> Option> { let mut inner = self.inner.write(); diff --git a/src/components/home/current-proxy-card.tsx 
b/src/components/home/current-proxy-card.tsx index ceea82d78..ed74def85 100644 --- a/src/components/home/current-proxy-card.tsx +++ b/src/components/home/current-proxy-card.tsx @@ -100,10 +100,12 @@ export const CurrentProxyCard = () => { const { t } = useTranslation(); const navigate = useNavigate(); const theme = useTheme(); - const { proxies, clashConfig, refreshProxy, rules } = useAppData(); + const { proxies, proxyHydration, clashConfig, refreshProxy, rules } = + useAppData(); const { verge } = useVerge(); const { current: currentProfile } = useProfiles(); const autoDelayEnabled = verge?.enable_auto_delay_detection ?? false; + const isLiveHydration = proxyHydration === "live"; const currentProfileId = currentProfile?.uid || null; const getProfileStorageKey = useCallback( @@ -715,7 +717,6 @@ export const CurrentProxyCard = () => { ); } } - refreshProxy(); if (sortType === 1) { setDelaySortRefresh((prev) => prev + 1); @@ -840,13 +841,24 @@ export const CurrentProxyCard = () => { iconColor={currentProxy ? "primary" : undefined} action={ + {!isLiveHydration && ( + + )} @@ -960,7 +972,7 @@ export const CurrentProxyCard = () => { value={state.selection.group} onChange={handleGroupChange} label={t("Group")} - disabled={isGlobalMode || isDirectMode} + disabled={isGlobalMode || isDirectMode || !isLiveHydration} > {state.proxyData.groups.map((group) => ( @@ -978,7 +990,7 @@ export const CurrentProxyCard = () => { value={state.selection.proxy} onChange={handleProxyChange} label={t("Proxy")} - disabled={isDirectMode} + disabled={isDirectMode || !isLiveHydration} renderValue={renderProxyValue} MenuProps={{ PaperProps: { diff --git a/src/components/proxy/provider-button.tsx b/src/components/proxy/provider-button.tsx index e22b856e0..4d0f2c397 100644 --- a/src/components/proxy/provider-button.tsx +++ b/src/components/proxy/provider-button.tsx @@ -1,6 +1,7 @@ import { RefreshRounded, StorageOutlined } from "@mui/icons-material"; import { Box, + Chip, Button, Dialog, DialogActions, @@ -18,7 +19,7 @@ import { } from "@mui/material"; import { useLockFn } from "ahooks"; import dayjs from "dayjs"; -import { useState } from "react"; +import { useMemo, useState } from "react"; import { useTranslation } from "react-i18next"; import { updateProxyProvider } from "tauri-plugin-mihomo-api"; @@ -48,29 +49,61 @@ const parseExpire = (expire?: number) => { export const ProviderButton = () => { const { t } = useTranslation(); const [open, setOpen] = useState(false); - const { proxyProviders, refreshProxy, refreshProxyProviders } = useAppData(); + const { + proxyProviders, + proxyHydration, + refreshProxy, + refreshProxyProviders, + } = useAppData(); + + const isHydrating = proxyHydration !== "live"; const [updating, setUpdating] = useState>({}); // 检查是否有提供者 const hasProviders = Object.keys(proxyProviders || {}).length > 0; + // Hydration hint badge keeps users aware of sync state + const hydrationChip = useMemo(() => { + if (proxyHydration === "live") return null; + + return ( + + ); + }, [proxyHydration, t]); + // 更新单个代理提供者 const updateProvider = useLockFn(async (name: string) => { + if (isHydrating) { + showNotice("info", t("Proxy data is syncing, please wait")); + return; + } + try { // 设置更新状态 setUpdating((prev) => ({ ...prev, [name]: true })); - await updateProxyProvider(name); - - // 刷新数据 - await refreshProxy(); await refreshProxyProviders(); - - showNotice("success", `${name} 更新成功`); + await refreshProxy(); + showNotice( + "success", + t("Provider {{name}} updated successfully", { name }), + ); } catch (err: 
any) { showNotice( "error", - `${name} 更新失败: ${err?.message || err.toString()}`, + t("Provider {{name}} update failed: {{message}}", { + name, + message: err?.message || err.toString(), + }), ); } finally { // 清除更新状态 @@ -80,11 +113,16 @@ export const ProviderButton = () => { // 更新所有代理提供者 const updateAllProviders = useLockFn(async () => { + if (isHydrating) { + showNotice("info", t("Proxy data is syncing, please wait")); + return; + } + try { // 获取所有provider的名称 const allProviders = Object.keys(proxyProviders || {}); if (allProviders.length === 0) { - showNotice("info", "没有可更新的代理提供者"); + showNotice("info", t("No providers to update")); return; } @@ -110,54 +148,67 @@ export const ProviderButton = () => { } } - // 刷新数据 - await refreshProxy(); await refreshProxyProviders(); - - showNotice("success", "全部代理提供者更新成功"); + await refreshProxy(); + showNotice("success", t("All providers updated successfully")); } catch (err: any) { - showNotice("error", `更新失败: ${err?.message || err.toString()}`); + showNotice( + "error", + t("Failed to update providers: {{message}}", { + message: err?.message || err.toString(), + }), + ); } finally { // 清除所有更新状态 setUpdating({}); } }); - const handleClose = () => { - setOpen(false); - }; + const handleClose = () => setOpen(false); if (!hasProviders) return null; return ( <> - + + + {hydrationChip} + {t("Proxy Provider")} - - - + @@ -166,54 +217,63 @@ export const ProviderButton = () => { {Object.entries(proxyProviders || {}) .sort() .map(([key, item]) => { - const provider = item; - const time = dayjs(provider.updatedAt); + if (!item) return null; + + const time = dayjs(item.updatedAt); const isUpdating = updating[key]; - - // 订阅信息 - const sub = provider.subscriptionInfo; - const hasSubInfo = !!sub; - const upload = sub?.Upload || 0; - const download = sub?.Download || 0; - const total = sub?.Total || 0; - const expire = sub?.Expire || 0; - - // 流量使用进度 + const sub = item.subscriptionInfo; + const hasSubInfo = Boolean(sub); + const upload = sub?.Upload ?? 0; + const download = sub?.Download ?? 0; + const total = sub?.Total ?? 0; + const expire = sub?.Expire ?? 0; const progress = total > 0 ? Math.min( - Math.round(((download + upload) * 100) / total) + 1, 100, + Math.max(0, ((upload + download) / total) * 100), ) : 0; return ( { - const bgcolor = - mode === "light" ? "#ffffff" : "#24252f"; - const hoverColor = - mode === "light" - ? alpha(primary.main, 0.1) - : alpha(primary.main, 0.2); - - return { - backgroundColor: bgcolor, - "&:hover": { - backgroundColor: hoverColor, - }, - }; - }, - ]} + secondaryAction={ + + updateProvider(key)} + disabled={isUpdating || isHydrating} + sx={{ + animation: isUpdating + ? "spin 1s linear infinite" + : "none", + "@keyframes spin": { + "0%": { transform: "rotate(0deg)" }, + "100%": { transform: "rotate(360deg)" }, + }, + }} + title={t("Update Provider") as string} + > + + + + } + sx={{ + mb: 1, + borderRadius: 1, + border: "1px solid", + borderColor: alpha("#ccc", 0.4), + backgroundColor: alpha("#fff", 0.02), + }} > { display: "flex", justifyContent: "space-between", alignItems: "center", + gap: 1, }} > { title={key} sx={{ display: "flex", alignItems: "center" }} > - {key} + {key} - {provider.proxies.length} + {item.proxies.length} - {provider.vehicleType} + {item.vehicleType} @@ -252,72 +313,39 @@ export const ProviderButton = () => { } secondary={ - <> - {/* 订阅信息 */} - {hasSubInfo && ( - <> - - - {parseTraffic(upload + download)} /{" "} - {parseTraffic(total)} - - - {parseExpire(expire)} - - + hasSubInfo ? 
( + <> + + + {parseTraffic(upload + download)} /{" "} + {parseTraffic(total)} + + + {parseExpire(expire)} + + - {/* 进度条 */} - 0 ? 1 : 0, - }} - /> - - )} - + 0 ? 1 : 0, + }} + /> + + ) : null } /> - - { - updateProvider(key); - }} - disabled={isUpdating} - sx={{ - animation: isUpdating - ? "spin 1s linear infinite" - : "none", - "@keyframes spin": { - "0%": { transform: "rotate(0deg)" }, - "100%": { transform: "rotate(360deg)" }, - }, - }} - title={t("Update Provider") as string} - > - - - ); })} diff --git a/src/components/proxy/proxy-groups.tsx b/src/components/proxy/proxy-groups.tsx index eec3f2e88..1a82f2c83 100644 --- a/src/components/proxy/proxy-groups.tsx +++ b/src/components/proxy/proxy-groups.tsx @@ -61,10 +61,17 @@ export const ProxyGroups = (props: Props) => { }>({ open: false, message: "" }); const { verge } = useVerge(); - const { proxies: proxiesData } = useAppData(); + const { + proxies: proxiesData, + proxyHydration, + proxyTargetProfileId, + proxyDisplayProfileId, + isProxyRefreshPending, + } = useAppData(); const groups = proxiesData?.groups; const availableGroups = useMemo(() => groups ?? [], [groups]); - + const showHydrationOverlay = isProxyRefreshPending; + const pendingProfileSwitch = proxyTargetProfileId !== proxyDisplayProfileId; const defaultRuleGroup = useMemo(() => { if (isChainMode && mode === "rule" && availableGroups.length > 0) { return availableGroups[0].name; @@ -76,6 +83,35 @@ export const ProxyGroups = (props: Props) => { () => selectedGroup ?? defaultRuleGroup, [selectedGroup, defaultRuleGroup], ); + const hydrationChip = useMemo(() => { + if (proxyHydration === "live") return null; + + const label = + proxyHydration === "snapshot" ? t("Snapshot data") : t("Syncing..."); + + return ( + + ); + }, [proxyHydration, t]); + + const overlayMessage = useMemo(() => { + if (!showHydrationOverlay) return null; + + if (pendingProfileSwitch) { + return t("Loading proxy data for the selected profile..."); + } + + if (proxyHydration === "snapshot") { + return t("Preparing proxy snapshot..."); + } + + return t("Syncing proxy data..."); + }, [showHydrationOverlay, pendingProfileSwitch, proxyHydration, t]); const { renderList, onProxies, onHeadState } = useRenderList( mode, @@ -93,7 +129,7 @@ export const ProxyGroups = (props: Props) => { [renderList], ); - // 统代理选择 + // 系统代理选择 const { handleProxyGroupChange } = useProxySelection({ onSuccess: () => { onProxies(); @@ -306,12 +342,7 @@ export const ProxyGroups = (props: Props) => { try { await Promise.race([ delayManager.checkListDelay(names, groupName, timeout), - delayGroup(groupName, url, timeout).then((result) => { - console.log( - `[ProxyGroups] getGroupProxyDelays返回结果数量:`, - Object.keys(result || {}).length, - ); - }), // 查询group delays 将清除fixed(不关注调用结果) + delayGroup(groupName, url, timeout), ]); console.log(`[ProxyGroups] 延迟测试完成,组: ${groupName}`); } catch (error) { @@ -376,6 +407,11 @@ export const ProxyGroups = (props: Props) => { } if (isChainMode) { + const chainVirtuosoHeight = + mode === "rule" && proxyGroupNames.length > 0 + ? "calc(100% - 80px)" + : "calc(100% - 14px)"; + // 获取所有代理组 const proxyGroups = proxiesData?.groups || []; @@ -454,10 +490,7 @@ export const ProxyGroups = (props: Props) => { 0 - ? 
"calc(100% - 80px)" // 只有标题的高度 - : "calc(100% - 14px)", + height: chainVirtuosoHeight, }} totalCount={renderList.length} increaseViewportBy={{ top: 200, bottom: 200 }} @@ -548,7 +581,9 @@ export const ProxyGroups = (props: Props) => { {group.name} - {group.type} · {group.all.length} 节点 + {`${t("Group Type")}: ${group.type} · ${t("Proxy Count")}: ${ + Array.isArray(group.all) ? group.all.length : 0 + }`} @@ -556,7 +591,7 @@ export const ProxyGroups = (props: Props) => { {availableGroups.length === 0 && ( - 暂无可用代理组 + {t("Empty")} )} @@ -567,9 +602,29 @@ export const ProxyGroups = (props: Props) => { return (
- {/* 代理组导航栏 */} + {hydrationChip && ( + + {hydrationChip} + + )} {mode === "rule" && ( { )} /> + {showHydrationOverlay && overlayMessage && ( + + + + {overlayMessage} + + + + )}
); }; diff --git a/src/components/proxy/use-render-list.ts b/src/components/proxy/use-render-list.ts index 7a5949ae3..1e6e0fd6a 100644 --- a/src/components/proxy/use-render-list.ts +++ b/src/components/proxy/use-render-list.ts @@ -14,50 +14,13 @@ import { } from "./use-head-state"; import { useWindowWidth } from "./use-window-width"; -// 定义代理项接口 -interface IProxyItem { - name: string; - type: string; - udp: boolean; - xudp: boolean; - tfo: boolean; - mptcp: boolean; - smux: boolean; - history: { - time: string; - delay: number; - }[]; - provider?: string; - testUrl?: string; - [key: string]: any; // 添加索引签名以适应其他可能的属性 -} - -// 代理组类型 -type ProxyGroup = { - name: string; - type: string; - udp: boolean; - xudp: boolean; - tfo: boolean; - mptcp: boolean; - smux: boolean; - history: { - time: string; - delay: number; - }[]; - now: string; - all: IProxyItem[]; - hidden?: boolean; - icon?: string; - testUrl?: string; - provider?: string; -}; +type RenderGroup = IProxyGroupItem; export interface IRenderItem { // 组 | head | item | empty | item col type: 0 | 1 | 2 | 3 | 4; key: string; - group: ProxyGroup; + group: RenderGroup; proxy?: IProxyItem; col?: number; proxyCol?: IProxyItem[]; @@ -99,7 +62,7 @@ export const useRenderList = ( selectedGroup?: string | null, ) => { // 使用全局数据提供者 - const { proxies: proxiesData, refreshProxy } = useAppData(); + const { proxies: proxiesData, proxyHydration, refreshProxy } = useAppData(); const { verge } = useVerge(); const { width } = useWindowWidth(); const [headStates, setHeadState] = useHeadStateNew(); @@ -123,17 +86,29 @@ export const useRenderList = ( // 确保代理数据加载 useEffect(() => { - if (!proxiesData) return; + if (!proxiesData || proxyHydration !== "live") return; const { groups, proxies } = proxiesData; if ( (mode === "rule" && !groups.length) || (mode === "global" && proxies.length < 2) ) { - const handle = setTimeout(() => refreshProxy(), 500); + const handle = setTimeout(() => { + void refreshProxy().catch(() => {}); + }, 500); return () => clearTimeout(handle); } - }, [proxiesData, mode, refreshProxy]); + }, [proxiesData, proxyHydration, mode, refreshProxy]); + + useEffect(() => { + if (proxyHydration !== "snapshot") return; + + const handle = setTimeout(() => { + void refreshProxy().catch(() => {}); + }, 1800); + + return () => clearTimeout(handle); + }, [proxyHydration, refreshProxy]); // 链式代理模式节点自动计算延迟 useEffect(() => { @@ -147,7 +122,7 @@ export const useRenderList = ( // 设置组监听器,当有延迟更新时自动刷新 const groupListener = () => { console.log("[ChainMode] 延迟更新,刷新UI"); - refreshProxy(); + void refreshProxy().catch(() => {}); }; delayManager.setGroupListener("chain-mode", groupListener); @@ -188,9 +163,12 @@ export const useRenderList = ( // 链式代理模式下,显示代理组和其节点 if (isChainMode && runtimeConfig && mode === "rule") { // 使用正常的规则模式代理组 - const allGroups = proxiesData.groups.length - ? proxiesData.groups - : [proxiesData.global!]; + const chainGroups = proxiesData.groups ?? []; + const allGroups = chainGroups.length + ? chainGroups + : proxiesData.global + ? 
[proxiesData.global] + : []; // 如果选择了特定代理组,只显示该组的节点 if (selectedGroup) { @@ -282,7 +260,7 @@ export const useRenderList = ( }); // 创建一个虚拟的组来容纳所有节点 - const virtualGroup: ProxyGroup = { + const virtualGroup: RenderGroup = { name: "All Proxies", type: "Selector", udp: false, @@ -340,7 +318,7 @@ export const useRenderList = ( }); // 创建一个虚拟的组来容纳所有节点 - const virtualGroup: ProxyGroup = { + const virtualGroup: RenderGroup = { name: "All Proxies", type: "Selector", udp: false, @@ -380,12 +358,15 @@ export const useRenderList = ( // 正常模式的渲染逻辑 const useRule = mode === "rule" || mode === "script"; - const renderGroups = - useRule && proxiesData.groups.length - ? proxiesData.groups - : [proxiesData.global!]; + const renderGroups = (() => { + const groups = proxiesData.groups ?? []; + if (useRule && groups.length) { + return groups; + } + return proxiesData.global ? [proxiesData.global] : groups; + })(); - const retList = renderGroups.flatMap((group: ProxyGroup) => { + const retList = renderGroups.flatMap((group: RenderGroup) => { const headState = headStates[group.name] || DEFAULT_STATE; const ret: IRenderItem[] = [ { diff --git a/src/hooks/use-current-proxy.ts b/src/hooks/use-current-proxy.ts index 7d3523269..0c7108ffb 100644 --- a/src/hooks/use-current-proxy.ts +++ b/src/hooks/use-current-proxy.ts @@ -2,12 +2,6 @@ import { useMemo } from "react"; import { useAppData } from "@/providers/app-data-context"; -// 定义代理组类型 -interface ProxyGroup { - name: string; - now: string; -} - // 获取当前代理节点信息的自定义Hook export const useCurrentProxy = () => { // 从AppDataProvider获取数据 @@ -37,15 +31,15 @@ export const useCurrentProxy = () => { "自动选择", ]; const primaryGroup = - groups.find((group: ProxyGroup) => + groups.find((group) => primaryKeywords.some((keyword) => group.name.toLowerCase().includes(keyword.toLowerCase()), ), - ) || groups.filter((g: ProxyGroup) => g.name !== "GLOBAL")[0]; + ) || groups.find((group) => group.name !== "GLOBAL"); if (primaryGroup) { primaryGroupName = primaryGroup.name; - currentName = primaryGroup.now; + currentName = primaryGroup.now ?? 
currentName; } } diff --git a/src/hooks/use-profiles.ts b/src/hooks/use-profiles.ts index fdb734627..c412ec307 100644 --- a/src/hooks/use-profiles.ts +++ b/src/hooks/use-profiles.ts @@ -5,33 +5,54 @@ import { getProfiles, patchProfile, patchProfilesConfig, + calcuProxies, } from "@/services/cmds"; -import { calcuProxies } from "@/services/cmds"; +import { + useProfileStore, + selectEffectiveProfiles, + selectIsHydrating, + selectLastResult, +} from "@/stores/profile-store"; export const useProfiles = () => { + const profilesFromStore = useProfileStore(selectEffectiveProfiles); + const storeHydrating = useProfileStore(selectIsHydrating); + const lastResult = useProfileStore(selectLastResult); + const commitProfileSnapshot = useProfileStore( + (state) => state.commitHydrated, + ); + const { - data: profiles, + data: swrProfiles, mutate: mutateProfiles, error, isValidating, } = useSWR("getProfiles", getProfiles, { revalidateOnFocus: false, revalidateOnReconnect: false, - dedupingInterval: 500, // 减少去重时间,提高响应性 + dedupingInterval: 500, errorRetryCount: 3, errorRetryInterval: 1000, - refreshInterval: 0, // 完全由手动控制 - onError: (error) => { - console.error("[useProfiles] SWR错误:", error); + refreshInterval: 0, + onError: (err) => { + console.error("[useProfiles] SWR错误:", err); }, onSuccess: (data) => { + commitProfileSnapshot(data); console.log( - "[useProfiles] 配置数据更新成功,配置数量:", + "[useProfiles] 配置数据更新成功,配置数量", data?.items?.length || 0, ); }, }); + const rawProfiles = profilesFromStore ?? swrProfiles; + const profiles = (rawProfiles ?? { + current: null, + items: [], + }) as IProfilesConfig; + const hasProfiles = rawProfiles != null; + const patchProfiles = async ( value: Partial, signal?: AbortSignal, @@ -49,32 +70,30 @@ export const useProfiles = () => { await mutateProfiles(); return success; - } catch (error) { - if (error instanceof DOMException && error.name === "AbortError") { - throw error; + } catch (err) { + if (err instanceof DOMException && err.name === "AbortError") { + throw err; } await mutateProfiles(); - throw error; + throw err; } }; const patchCurrent = async (value: Partial) => { - if (profiles?.current) { - await patchProfile(profiles.current, value); - mutateProfiles(); + if (!hasProfiles || !profiles.current) { + return; } + await patchProfile(profiles.current, value); + mutateProfiles(); }; - // 根据selected的节点选择 const activateSelected = async () => { try { console.log("[ActivateSelected] 开始处理代理选择"); - const [proxiesData, profileData] = await Promise.all([ - calcuProxies(), - getProfiles(), - ]); + const proxiesData = await calcuProxies(); + const profileData = hasProfiles ? 
profiles : null; if (!profileData || !proxiesData) { console.log("[ActivateSelected] 代理或配置数据不可用,跳过处理"); @@ -90,7 +109,6 @@ export const useProfiles = () => { return; } - // 检查是否有saved的代理选择 const { selected = [] } = current; if (selected.length === 0) { console.log("[ActivateSelected] 当前profile无保存的代理选择,跳过"); @@ -98,7 +116,7 @@ export const useProfiles = () => { } console.log( - `[ActivateSelected] 当前profile有 ${selected.length} 个代理选择配置`, + `[ActivateSelected] 当前profile有${selected.length} 个代理选择配置`, ); const selectedMap = Object.fromEntries( @@ -115,7 +133,6 @@ export const useProfiles = () => { "LoadBalance", ]); - // 处理所有代理组 [global, ...groups].forEach((group) => { if (!group) { return; @@ -150,7 +167,7 @@ export const useProfiles = () => { if (!existsInGroup) { console.warn( - `[ActivateSelected] 保存的代理 ${savedProxy} 不存在于代理组 ${name}`, + `[ActivateSelected] 保存的代理${savedProxy} 不存在于代理组${name}`, ); hasChange = true; newSelected.push({ name, now: now ?? savedProxy }); @@ -173,7 +190,7 @@ export const useProfiles = () => { return; } - console.log(`[ActivateSelected] 完成代理切换,保存新的选择配置`); + console.log("[ActivateSelected] 完成代理切换,保存新的选择配置"); try { await patchProfile(profileData.current!, { selected: newSelected }); @@ -195,14 +212,18 @@ export const useProfiles = () => { return { profiles, - current: profiles?.items?.find((p) => p && p.uid === profiles.current), + hasProfiles, + current: hasProfiles + ? (profiles.items?.find((p) => p && p.uid === profiles.current) ?? null) + : null, activateSelected, patchProfiles, patchCurrent, mutateProfiles, - // 新增故障检测状态 - isLoading: isValidating, + isLoading: isValidating || storeHydrating, + isHydrating: storeHydrating, + lastResult, error, - isStale: !profiles && !error && !isValidating, // 检测是否处于异常状态 + isStale: !hasProfiles && !error && !isValidating, }; }; diff --git a/src/pages/_layout/useLayoutEvents.ts b/src/pages/_layout/useLayoutEvents.ts index c26084a3f..e3c9ecbc1 100644 --- a/src/pages/_layout/useLayoutEvents.ts +++ b/src/pages/_layout/useLayoutEvents.ts @@ -1,11 +1,9 @@ import { listen } from "@tauri-apps/api/event"; import { getCurrentWebviewWindow } from "@tauri-apps/api/webviewWindow"; import { useEffect } from "react"; -import { mutate } from "swr"; import { useListen } from "@/hooks/use-listen"; -import { getAxios } from "@/services/api"; - +import { refreshClashData, refreshVergeData } from "@/services/refresh"; export const useLayoutEvents = ( handleNotice: (payload: [string, string]) => void, ) => { @@ -37,32 +35,32 @@ export const useLayoutEvents = ( .catch((error) => console.error("[事件监听] 注册失败", error)); }; + register( + addListener("verge://notice-message", ({ payload }) => + handleNotice(payload as [string, string]), + ), + ); + register( addListener("verge://refresh-clash-config", async () => { - await getAxios(true); - mutate("getProxies"); - mutate("getVersion"); - mutate("getClashConfig"); - mutate("getProxyProviders"); + try { + await refreshClashData(); + } catch (error) { + console.error("[事件监听] 刷新 Clash 配置失败", error); + } }), ); register( addListener("verge://refresh-verge-config", () => { - mutate("getVergeConfig"); - mutate("getSystemProxy"); - mutate("getAutotemProxy"); - mutate("getRunningMode"); - mutate("isServiceAvailable"); + try { + refreshVergeData(); + } catch (error) { + console.error("[事件监听] 刷新 Verge 配置失败", error); + } }), ); - register( - addListener("verge://notice-message", ({ payload }) => - handleNotice(payload as [string, string]), - ), - ); - const appWindow = getCurrentWebviewWindow(); register( (async () => { 
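The listeners above now route through `refreshClashData` / `refreshVergeData` from `@/services/refresh` instead of revalidating each SWR key inline. That module is not included in this patch; below is a minimal sketch of what it could look like, reconstructed from the keys the removed handlers touched — treat the exact shape as an assumption, not the project's actual API.

import { mutate } from "swr";

import { getAxios } from "@/services/api";

// Rebuild the axios instance first (the controller port or secret may have changed), then
// revalidate the Clash-related SWR caches that the previous inline handler touched.
export async function refreshClashData(): Promise<void> {
  await getAxios(true);
  await Promise.all([
    mutate("getProxies"),
    mutate("getVersion"),
    mutate("getClashConfig"),
    mutate("getProxyProviders"),
  ]);
}

// Revalidate the Verge-side caches; fire-and-forget is enough here because callers only
// need the stores to converge, not to await fresh data.
export function refreshVergeData(): void {
  void mutate("getVergeConfig");
  void mutate("getSystemProxy");
  void mutate("getAutotemProxy");
  void mutate("getRunningMode");
  void mutate("isServiceAvailable");
}

Centralizing the keys in one helper keeps the event listeners thin and gives later call sites (for example the post-switch cleanup in profiles.tsx below) a single revalidation entry point.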
diff --git a/src/pages/profiles.tsx b/src/pages/profiles.tsx index 597ec4ec6..0d29fe34f 100644 --- a/src/pages/profiles.tsx +++ b/src/pages/profiles.tsx @@ -25,16 +25,23 @@ import { } from "@mui/icons-material"; import { LoadingButton } from "@mui/lab"; import { Box, Button, Divider, Grid, IconButton, Stack } from "@mui/material"; +import { invoke } from "@tauri-apps/api/core"; import { listen, TauriEvent } from "@tauri-apps/api/event"; import { readText } from "@tauri-apps/plugin-clipboard-manager"; import { readTextFile } from "@tauri-apps/plugin-fs"; import { useLockFn } from "ahooks"; import { throttle } from "lodash-es"; -import { useCallback, useEffect, useMemo, useRef, useState } from "react"; +import { + useCallback, + useEffect, + useMemo, + useReducer, + useRef, + useState, +} from "react"; import { useTranslation } from "react-i18next"; import { useLocation } from "react-router"; import useSWR, { mutate } from "swr"; -import { closeAllConnections } from "tauri-plugin-mihomo-api"; import { BasePage, DialogRef } from "@/components/base"; import { BaseStyledTextField } from "@/components/base/base-styled-text-field"; @@ -47,6 +54,7 @@ import { import { ConfigViewer } from "@/components/setting/mods/config-viewer"; import { useListen } from "@/hooks/use-listen"; import { useProfiles } from "@/hooks/use-profiles"; +import { useAppData } from "@/providers/app-data-context"; import { createProfile, deleteProfile, @@ -57,11 +65,16 @@ import { importProfile, reorderProfile, updateProfile, + switchProfileCommand, + type ProfileSwitchStatus, + type SwitchTaskStatus, } from "@/services/cmds"; import { showNotice } from "@/services/noticeService"; +import { refreshClashData } from "@/services/refresh"; import { useSetLoadingCache, useThemeMode } from "@/services/states"; +import { AsyncEventQueue, afterPaint } from "@/utils/asyncQueue"; -// 记录profile切换状态 +// Record profile switch state const debugProfileSwitch = (action: string, profile: string, extra?: any) => { const timestamp = new Date().toISOString().substring(11, 23); console.log( @@ -70,33 +83,80 @@ const debugProfileSwitch = (action: string, profile: string, extra?: any) => { ); }; -// 检查请求是否已过期 -const isRequestOutdated = ( - currentSequence: number, - requestSequenceRef: any, - profile: string, -) => { - if (currentSequence !== requestSequenceRef.current) { - debugProfileSwitch( - "REQUEST_OUTDATED", - profile, - `当前序列号: ${currentSequence}, 最新序列号: ${requestSequenceRef.current}`, - ); - return true; - } - return false; +type RustPanicPayload = { + message: string; + location: string; }; -// 检查是否被中断 -const isOperationAborted = ( - abortController: AbortController, - profile: string, -) => { - if (abortController.signal.aborted) { - debugProfileSwitch("OPERATION_ABORTED", profile); - return true; +type SwitchTaskMeta = { profileId: string; notify: boolean }; + +const collectSwitchingProfileIds = ( + status: ProfileSwitchStatus | null, +): string[] => { + if (!status) return []; + const ids = new Set(); + if (status.active) { + ids.add(status.active.profileId); + } + status.queue.forEach((task) => ids.add(task.profileId)); + return Array.from(ids); +}; + +type ManualActivatingAction = + | { type: "reset" } + | { type: "set"; value: string[] } + | { type: "add"; ids: string[] } + | { type: "remove"; id: string } + | { type: "filterAllowed"; allowed: Set }; + +const manualActivatingReducer = ( + state: string[], + action: ManualActivatingAction, +): string[] => { + switch (action.type) { + case "reset": + return state.length > 0 ? 
[] : state; + case "set": { + const unique = Array.from( + new Set(action.value.filter((id) => typeof id === "string" && id)), + ); + if ( + unique.length === state.length && + unique.every((id, index) => id === state[index]) + ) { + return state; + } + return unique; + } + case "add": { + const incoming = action.ids.filter((id) => typeof id === "string" && id); + if (incoming.length === 0) { + return state; + } + const next = new Set(state); + let changed = false; + incoming.forEach((id) => { + const before = next.size; + next.add(id); + if (next.size !== before) { + changed = true; + } + }); + return changed ? Array.from(next) : state; + } + case "remove": { + if (!state.includes(action.id)) { + return state; + } + return state.filter((id) => id !== action.id); + } + case "filterAllowed": { + const next = state.filter((id) => action.allowed.has(id)); + return next.length === state.length ? state : next; + } + default: + return state; } - return false; }; const normalizeProfileUrl = (value?: string) => { @@ -117,7 +177,7 @@ const normalizeProfileUrl = (value?: string) => { } catch { const schemeNormalized = trimmed.replace( /^([a-z]+):\/\//i, - (match, scheme: string) => `${scheme.toLowerCase()}://`, + (_match, scheme: string) => `${scheme.toLowerCase()}://`, ); return schemeNormalized.replace(/\/+$/, ""); } @@ -159,7 +219,7 @@ const createImportLandingVerifier = ( if (currentCount > baselineCount) { console.log( - `[导入验证] 配置数量已增加: ${baselineCount} -> ${currentCount}`, + `[Import Verify] Configuration count increased: ${baselineCount} -> ${currentCount}`, ); return true; } @@ -177,7 +237,9 @@ const createImportLandingVerifier = ( } if (!hadBaselineProfile) { - console.log("[导入验证] 检测到新的订阅记录,判定为导入成功"); + console.log( + "[Import Verify] Detected new profile record; treating as success", + ); return true; } @@ -186,13 +248,15 @@ const createImportLandingVerifier = ( if (currentUpdated > baselineUpdated) { console.log( - `[导入验证] 订阅更新时间已更新 ${baselineUpdated} -> ${currentUpdated}`, + `[Import Verify] Profile timestamp updated ${baselineUpdated} -> ${currentUpdated}`, ); return true; } if (currentSignature !== baselineSignature) { - console.log("[导入验证] 订阅详情发生变化,判定为导入成功"); + console.log( + "[Import Verify] Profile details changed; treating as success", + ); return true; } @@ -205,14 +269,110 @@ const createImportLandingVerifier = ( }; }; +const isDev = import.meta.env.DEV; + const ProfilePage = () => { + // Serialize profile switch events so state transitions stay deterministic. + const switchEventQueue = useMemo(() => new AsyncEventQueue(), []); + // Stage follow-up effects (hydration, refresh) to run sequentially after switch completion. 
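// Both `AsyncEventQueue` instances come from `@/utils/asyncQueue`, which this hunk does not
// include. A minimal sketch of the contract the page relies on — strictly FIFO, one task in
// flight at a time, `clear()` turning not-yet-started tasks into no-ops — written as an
// assumption about the shape rather than the actual implementation:
class AsyncEventQueueSketch {
  private chain: Promise<void> = Promise.resolve();
  private generation = 0;

  // Append a task; it starts only after every previously enqueued task has settled.
  enqueue(task: () => void | Promise<void>): void {
    const scheduledGeneration = this.generation;
    this.chain = this.chain.then(async () => {
      // Tasks queued before the last clear() observe a newer generation and become no-ops.
      if (scheduledGeneration !== this.generation) return;
      try {
        await task();
      } catch (error) {
        console.error("[AsyncEventQueueSketch] task failed", error);
      }
    });
  }

  // Invalidate everything that has not started yet; the currently running task is untouched.
  clear(): void {
    this.generation += 1;
  }
}

// Resolve after the next frame has painted, so success notices don't race the re-render.
const afterPaintSketch = (): Promise<void> =>
  new Promise((resolve) => {
    requestAnimationFrame(() => requestAnimationFrame(() => resolve()));
  });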
+ const postSwitchEffectQueue = useMemo(() => new AsyncEventQueue(), []); + const mountedRef = useRef(false); + const { t } = useTranslation(); const location = useLocation(); + const logToBackend = useCallback( + ( + level: "debug" | "info" | "warn" | "error", + message: string, + context?: Record, + ) => { + const payload: Record = { + level, + message, + }; + if (context !== undefined) { + payload.context = context; + } + invoke("frontend_log", { payload }).catch(() => {}); + }, + [], + ); const { addListener } = useListen(); + const { switchStatus } = useAppData(); const [url, setUrl] = useState(""); const [disabled, setDisabled] = useState(false); - const [activatings, setActivatings] = useState([]); + const [manualActivatings, dispatchManualActivatings] = useReducer( + manualActivatingReducer, + [], + ); + const taskMetaRef = useRef>(new Map()); + const lastResultAtRef = useRef(0); + const initialLastResultSyncRef = useRef(true); + + useEffect(() => { + mountedRef.current = true; + return () => { + mountedRef.current = false; + switchEventQueue.clear(); + postSwitchEffectQueue.clear(); + if (isDev) { + console.debug("[ProfileSwitch] component unmounted, queues cleared"); + } + }; + }, [postSwitchEffectQueue, switchEventQueue]); + useEffect(() => { + const handleError = (event: ErrorEvent) => { + logToBackend("error", "[ProfileSwitch] window error captured", { + message: event.message, + filename: event.filename, + lineno: event.lineno, + colno: event.colno, + stack: event.error?.stack, + }); + console.error( + "[ProfileSwitch] window error captured", + event.message, + event.error, + ); + }; + const handleRejection = (event: PromiseRejectionEvent) => { + let reasonSummary: string; + if (typeof event.reason === "object") { + try { + reasonSummary = JSON.stringify(event.reason); + } catch (error) { + reasonSummary = `[unserializable reason: ${String(error)}]`; + } + } else { + reasonSummary = String(event.reason); + } + logToBackend("error", "[ProfileSwitch] unhandled rejection captured", { + reason: reasonSummary, + }); + console.error( + "[ProfileSwitch] unhandled rejection captured", + event.reason, + ); + }; + window.addEventListener("error", handleError); + window.addEventListener("unhandledrejection", handleRejection); + return () => { + window.removeEventListener("error", handleError); + window.removeEventListener("unhandledrejection", handleRejection); + }; + }, [logToBackend]); const [loading, setLoading] = useState(false); + const postSwitchGenerationRef = useRef(0); + const switchingProfileId = switchStatus?.active?.profileId ?? null; + const switchActivatingIds = useMemo( + () => collectSwitchingProfileIds(switchStatus ?? 
null), + [switchStatus], + ); + const activatings = useMemo(() => { + const merged = new Set(manualActivatings); + switchActivatingIds.forEach((id) => merged.add(id)); + return Array.from(merged); + }, [manualActivatings, switchActivatingIds]); // Batch selection states const [batchMode, setBatchMode] = useState(false); @@ -220,57 +380,6 @@ const ProfilePage = () => { () => new Set(), ); - // 防止重复切换 - const switchingProfileRef = useRef(null); - - // 支持中断当前切换操作 - const abortControllerRef = useRef(null); - - // 只处理最新的切换请求 - const requestSequenceRef = useRef(0); - - // 待处理请求跟踪,取消排队的请求 - const pendingRequestRef = useRef | null>(null); - - // 处理profile切换中断 - const handleProfileInterrupt = useCallback( - (previousSwitching: string, newProfile: string) => { - debugProfileSwitch( - "INTERRUPT_PREVIOUS", - previousSwitching, - `被 ${newProfile} 中断`, - ); - - if (abortControllerRef.current) { - abortControllerRef.current.abort(); - debugProfileSwitch("ABORT_CONTROLLER_TRIGGERED", previousSwitching); - } - - if (pendingRequestRef.current) { - debugProfileSwitch("CANCEL_PENDING_REQUEST", previousSwitching); - } - - setActivatings((prev) => prev.filter((id) => id !== previousSwitching)); - showNotice( - "info", - `${t("Profile switch interrupted by new selection")}: ${previousSwitching} → ${newProfile}`, - 3000, - ); - }, - [t], - ); - - // 清理切换状态 - const cleanupSwitchState = useCallback( - (profile: string, sequence: number) => { - setActivatings((prev) => prev.filter((id) => id !== profile)); - switchingProfileRef.current = null; - abortControllerRef.current = null; - pendingRequestRef.current = null; - debugProfileSwitch("SWITCH_END", profile, `序列号: ${sequence}`); - }, - [], - ); const sensors = useSensors( useSensor(PointerSensor), useSensor(KeyboardSensor, { @@ -282,11 +391,32 @@ const ProfilePage = () => { const { profiles = {}, activateSelected, - patchProfiles, mutateProfiles, error, isStale, } = useProfiles(); + const activateSelectedRef = useRef(activateSelected); + const mutateProfilesRef = useRef(mutateProfiles); + const profileMutateScheduledRef = useRef(false); + const mutateLogsRef = useRef<(() => Promise | void) | null>(null); + const tRef = useRef(t); + const showNoticeRef = useRef(showNotice); + const refreshClashDataRef = useRef(refreshClashData); + + useEffect(() => { + activateSelectedRef.current = activateSelected; + }, [activateSelected]); + + useEffect(() => { + mutateProfilesRef.current = mutateProfiles; + }, [mutateProfiles]); + + useEffect(() => { + tRef.current = t; + }, [t]); + + showNoticeRef.current = showNotice; + refreshClashDataRef.current = refreshClashData; useEffect(() => { const handleFileDrop = async () => { @@ -327,28 +457,28 @@ const ProfilePage = () => { }; }, [addListener, mutateProfiles, t]); - // 添加紧急恢复功能 + // Add emergency recovery capability const onEmergencyRefresh = useLockFn(async () => { - console.log("[紧急刷新] 开始强制刷新所有数据"); + console.log("[Emergency Refresh] Starting forced refresh of all data"); try { - // 清除所有SWR缓存 + // Clear all SWR caches await mutate(() => true, undefined, { revalidate: false }); - // 强制重新获取配置数据 + // Force fetching profile data await mutateProfiles(undefined, { revalidate: true, rollbackOnError: false, }); - // 等待状态稳定后增强配置 + // Wait for state to stabilize before enhancing the profile await new Promise((resolve) => setTimeout(resolve, 500)); await onEnhance(false); - showNotice("success", "数据已强制刷新", 2000); + showNotice("success", "Data forcibly refreshed", 2000); } catch (error: any) { - console.error("[紧急刷新] 失败:", error); - 
showNotice("error", `紧急刷新失败: ${error.message}`, 4000); + console.error("[Emergency Refresh] Failed:", error); + showNotice("error", `Emergency refresh failed: ${error.message}`, 4000); } }); @@ -356,6 +486,156 @@ const ProfilePage = () => { "getRuntimeLogs", getRuntimeLogs, ); + useEffect(() => { + mutateLogsRef.current = mutateLogs; + }, [mutateLogs]); + + useEffect(() => { + activateSelectedRef.current = activateSelected; + }, [activateSelected]); + + useEffect(() => { + mutateProfilesRef.current = mutateProfiles; + }, [mutateProfiles]); + + const scheduleProfileMutate = useCallback(() => { + if (profileMutateScheduledRef.current) return; + if (!mountedRef.current) return; + profileMutateScheduledRef.current = true; + requestAnimationFrame(() => { + profileMutateScheduledRef.current = false; + const mutateProfilesFn = mutateProfilesRef.current; + if (mutateProfilesFn) { + void mutateProfilesFn(); + if (isDev) { + console.debug( + "[ProfileSwitch] mutateProfiles executed from schedule", + ); + } + } + }); + }, []); + + useEffect(() => { + if (!switchStatus) { + taskMetaRef.current.clear(); + dispatchManualActivatings({ type: "reset" }); + return; + } + + const trackedProfiles = new Set(); + const registerTask = (task: SwitchTaskStatus | null | undefined) => { + if (!task) return; + taskMetaRef.current.set(task.taskId, { + profileId: task.profileId, + notify: task.notify, + }); + trackedProfiles.add(task.profileId); + }; + + registerTask(switchStatus.active ?? null); + switchStatus.queue.forEach((task) => registerTask(task)); + + dispatchManualActivatings({ + type: "filterAllowed", + allowed: trackedProfiles, + }); + + const lastResult = switchStatus.lastResult ?? null; + if (initialLastResultSyncRef.current) { + initialLastResultSyncRef.current = false; + if (lastResult) { + lastResultAtRef.current = lastResult.finishedAt; + } + } + + if (lastResult && lastResult.finishedAt !== lastResultAtRef.current) { + lastResultAtRef.current = lastResult.finishedAt; + const { profileId, success, finishedAt, errorDetail, cancelled } = + lastResult; + const isCancelled = Boolean(cancelled); + const meta = taskMetaRef.current.get(lastResult.taskId); + const notifySuccess = meta?.notify ?? true; + taskMetaRef.current.delete(lastResult.taskId); + + debugProfileSwitch("STATUS_RESULT", profileId, { + success, + finishedAt, + notifySuccess, + cancelled: isCancelled, + }); + + switchEventQueue.enqueue(() => { + if (!mountedRef.current) return; + + dispatchManualActivatings({ type: "remove", id: profileId }); + + const eventGeneration = postSwitchGenerationRef.current; + + postSwitchEffectQueue.enqueue(async () => { + if (!mountedRef.current) return; + if (postSwitchGenerationRef.current !== eventGeneration) { + return; + } + + logToBackend( + success || isCancelled ? 
"info" : "warn", + "[ProfileSwitch] status result received", + { + profileId, + success, + cancelled: isCancelled, + finishedAt, + }, + ); + + scheduleProfileMutate(); + + if (success) { + if (notifySuccess) { + await afterPaint(); + showNoticeRef.current?.( + "success", + tRef.current("Profile Switched"), + 1000, + ); + } + + const operations: Promise[] = []; + const mutateLogs = mutateLogsRef.current; + if (mutateLogs) { + operations.push(Promise.resolve(mutateLogs())); + } + const activateSelected = activateSelectedRef.current; + if (activateSelected) { + operations.push(Promise.resolve(activateSelected())); + } + const refreshFn = refreshClashDataRef.current; + if (refreshFn) { + operations.push(Promise.resolve(refreshFn())); + } + + if (operations.length > 0) { + void Promise.resolve().then(() => Promise.allSettled(operations)); + } + } else if (!isCancelled) { + await afterPaint(); + showNoticeRef.current?.( + "error", + errorDetail ?? tRef.current("Profile switch failed"), + ); + } + }); + }); + } + }, [ + dispatchManualActivatings, + logToBackend, + postSwitchEffectQueue, + scheduleProfileMutate, + switchEventQueue, + switchStatus, + ]); const viewerRef = useRef(null); const configRef = useRef(null); @@ -375,7 +655,7 @@ const ProfilePage = () => { const onImport = async () => { if (!url) return; - // 校验url是否为http/https + // Validate that the URL uses http/https if (!/^https?:\/\//i.test(url)) { showNotice("error", t("Invalid Profile URL")); return; @@ -405,7 +685,10 @@ const ProfilePage = () => { ); } } catch (verifyErr) { - console.warn("[导入验证] 获取配置状态失败:", verifyErr); + console.warn( + "[Import Verify] Failed to fetch profile state:", + verifyErr, + ); break; } } @@ -414,33 +697,33 @@ const ProfilePage = () => { }; try { - // 尝试正常导入 + // Attempt standard import await importProfile(url); await handleImportSuccess("Profile Imported Successfully"); return; } catch (initialErr) { - console.warn("[订阅导入] 首次导入失败:", initialErr); + console.warn("[Profile Import] Initial import failed:", initialErr); const alreadyImported = await waitForImportLanding(); if (alreadyImported) { console.warn( - "[订阅导入] 接口返回失败,但检测到订阅已导入,跳过回退导入流程", + "[Profile Import] API reported failure, but profile already imported; skipping rollback", ); await handleImportSuccess("Profile Imported Successfully"); return; } - // 首次导入失败且未检测到数据变更,尝试使用自身代理 + // Initial import failed without data change; try built-in proxy showNotice("info", t("Import failed, retrying with Clash proxy...")); try { - // 使用自身代理尝试导入 + // Attempt import using built-in proxy await importProfile(url, { with_proxy: false, self_proxy: true, }); await handleImportSuccess("Profile Imported with Clash proxy"); } catch (retryErr: any) { - // 回退导入也失败 + // Rollback import also failed const retryErrmsg = retryErr?.message || retryErr.toString(); showNotice( "error", @@ -453,7 +736,9 @@ const ProfilePage = () => { } }; - // 强化的刷新策略 + const currentProfileId = profiles.current ?? 
null; + + // Enhanced refresh strategy const performRobustRefresh = async ( importVerifier: ImportLandingVerifier, ) => { @@ -464,43 +749,50 @@ const ProfilePage = () => { while (retryCount < maxRetries) { try { - console.log(`[导入刷新] 第${retryCount + 1}次尝试刷新配置数据`); + console.log( + `[Import Refresh] Attempt ${retryCount + 1} to refresh profile data`, + ); - // 强制刷新,绕过所有缓存 + // Force refresh and bypass caches await mutateProfiles(undefined, { revalidate: true, rollbackOnError: false, }); - // 等待状态稳定 + // Wait for state to stabilize await new Promise((resolve) => setTimeout(resolve, baseDelay * (retryCount + 1)), ); - // 验证刷新是否成功 + // Verify whether refresh succeeded const currentProfiles = await getProfiles(); const currentCount = currentProfiles?.items?.length || 0; if (currentCount > baselineCount) { console.log( - `[导入刷新] 配置刷新成功,配置数量 ${baselineCount} -> ${currentCount}`, + `[Import Refresh] Profile refresh succeeded; count ${baselineCount} -> ${currentCount}`, ); await onEnhance(false); return; } if (hasLanding(currentProfiles)) { - console.log("[导入刷新] 检测到订阅内容更新,判定刷新成功"); + console.log( + "[Import Refresh] Detected profile update; treating as success", + ); await onEnhance(false); return; } console.warn( - `[导入刷新] 配置数量未增加 (${currentCount}), 继续重试...`, + `[Import Refresh] Profile count unchanged (${currentCount}), retrying...`, ); retryCount++; } catch (error) { - console.error(`[导入刷新] 第${retryCount + 1}次刷新失败:`, error); + console.error( + `[Import Refresh] Attempt ${retryCount + 1} failed:`, + error, + ); retryCount++; await new Promise((resolve) => setTimeout(resolve, baseDelay * retryCount), @@ -508,10 +800,12 @@ const ProfilePage = () => { } } - // 所有重试失败后的最后尝试 - console.warn(`[导入刷新] 常规刷新失败,尝试清除缓存重新获取`); + // Final attempt after all retries fail + console.warn( + `[Import Refresh] Regular refresh failed; clearing cache and retrying`, + ); try { - // 清除SWR缓存并重新获取 + // Clear SWR cache and refetch await mutate("getProfiles", getProfiles(), { revalidate: true }); await onEnhance(false); showNotice( @@ -520,7 +814,10 @@ const ProfilePage = () => { 3000, ); } catch (finalError) { - console.error(`[导入刷新] 最终刷新尝试失败:`, finalError); + console.error( + `[Import Refresh] Final refresh attempt failed:`, + finalError, + ); showNotice( "error", t("Profile imported successfully, please restart if not visible"), @@ -531,209 +828,108 @@ const ProfilePage = () => { const onDragEnd = async (event: DragEndEvent) => { const { active, over } = event; - if (over) { - if (active.id !== over.id) { - await reorderProfile(active.id.toString(), over.id.toString()); - mutateProfiles(); - } + if (over && active.id !== over.id) { + await reorderProfile(active.id.toString(), over.id.toString()); + mutateProfiles(); } }; - const executeBackgroundTasks = useCallback( - async ( - profile: string, - sequence: number, - abortController: AbortController, - ) => { - try { - if ( - sequence === requestSequenceRef.current && - switchingProfileRef.current === profile && - !abortController.signal.aborted - ) { - await activateSelected(); - console.log(`[Profile] 后台处理完成,序列号: ${sequence}`); - } else { - debugProfileSwitch( - "BACKGROUND_TASK_SKIPPED", - profile, - `序列号过期或被中断: ${sequence} vs ${requestSequenceRef.current}`, - ); - } - } catch (err: any) { - console.warn("Failed to activate selected proxies:", err); - } - }, - [activateSelected], - ); + const requestSwitch = useCallback( + (targetProfile: string, notifySuccess: boolean) => { + const nextGeneration = postSwitchGenerationRef.current + 1; + postSwitchGenerationRef.current 
= nextGeneration; + postSwitchEffectQueue.clear(); - const activateProfile = useCallback( - async (profile: string, notifySuccess: boolean) => { - if (profiles.current === profile && !notifySuccess) { - console.log( - `[Profile] 目标profile ${profile} 已经是当前配置,跳过切换`, - ); - return; - } - - const currentSequence = ++requestSequenceRef.current; - debugProfileSwitch("NEW_REQUEST", profile, `序列号: ${currentSequence}`); - - // 处理中断逻辑 - const previousSwitching = switchingProfileRef.current; - if (previousSwitching && previousSwitching !== profile) { - handleProfileInterrupt(previousSwitching, profile); - } - - // 防止重复切换同一个profile - if (switchingProfileRef.current === profile) { - debugProfileSwitch("DUPLICATE_SWITCH_BLOCKED", profile); - return; - } - - // 初始化切换状态 - switchingProfileRef.current = profile; - debugProfileSwitch("SWITCH_START", profile, `序列号: ${currentSequence}`); - - const currentAbortController = new AbortController(); - abortControllerRef.current = currentAbortController; - - setActivatings((prev) => { - if (prev.includes(profile)) return prev; - return [...prev, profile]; + debugProfileSwitch("REQUEST_SWITCH", targetProfile, { + notifySuccess, + generation: nextGeneration, }); - try { - console.log( - `[Profile] 开始切换到: ${profile},序列号: ${currentSequence}`, - ); + logToBackend("info", "[ProfileSwitch] request switch", { + targetProfile, + notifySuccess, + generation: nextGeneration, + }); - // 检查请求有效性 - if ( - isRequestOutdated(currentSequence, requestSequenceRef, profile) || - isOperationAborted(currentAbortController, profile) - ) { - return; - } + dispatchManualActivatings({ type: "add", ids: [targetProfile] }); - // 执行切换请求 - const requestPromise = patchProfiles( - { current: profile }, - currentAbortController.signal, - ); - pendingRequestRef.current = requestPromise; - - const success = await requestPromise; - - if (pendingRequestRef.current === requestPromise) { - pendingRequestRef.current = null; - } - - // 再次检查有效性 - if ( - isRequestOutdated(currentSequence, requestSequenceRef, profile) || - isOperationAborted(currentAbortController, profile) - ) { - return; - } - - // 完成切换 - await mutateLogs(); - closeAllConnections(); - - if (notifySuccess && success) { - showNotice("success", t("Profile Switched"), 1000); - } - - console.log( - `[Profile] 切换到 ${profile} 完成,序列号: ${currentSequence},开始后台处理`, - ); - - // 延迟执行后台任务 - setTimeout( - () => - executeBackgroundTasks( - profile, - currentSequence, - currentAbortController, - ), - 50, - ); - } catch (err: any) { - if (pendingRequestRef.current) { - pendingRequestRef.current = null; - } - - // 检查是否因为中断或过期而出错 - if ( - isOperationAborted(currentAbortController, profile) || - isRequestOutdated(currentSequence, requestSequenceRef, profile) - ) { - return; - } - - console.error(`[Profile] 切换失败:`, err); - showNotice("error", err?.message || err.toString(), 4000); - } finally { - // 只有当前profile仍然是正在切换的profile且序列号匹配时才清理状态 - if ( - switchingProfileRef.current === profile && - currentSequence === requestSequenceRef.current - ) { - cleanupSwitchState(profile, currentSequence); - } else { - debugProfileSwitch( - "CLEANUP_SKIPPED", - profile, - `序列号不匹配或已被接管: ${currentSequence} vs ${requestSequenceRef.current}`, + void (async () => { + try { + const accepted = await switchProfileCommand( + targetProfile, + notifySuccess, ); + if (!accepted) { + throw new Error(tRef.current("Profile switch failed")); + } + } catch (error: any) { + const message = + error?.message || error?.toString?.() || String(error); + logToBackend("error", "[ProfileSwitch] switch 
command failed", { + profileId: targetProfile, + message, + }); + dispatchManualActivatings({ type: "remove", id: targetProfile }); + scheduleProfileMutate(); + await afterPaint(); + showNoticeRef.current?.("error", message); } - } + })(); }, [ - profiles, - patchProfiles, - mutateLogs, - t, - executeBackgroundTasks, - handleProfileInterrupt, - cleanupSwitchState, + dispatchManualActivatings, + logToBackend, + postSwitchEffectQueue, + scheduleProfileMutate, ], ); - const onSelect = async (current: string, force: boolean) => { - // 阻止重复点击或已激活的profile - if (switchingProfileRef.current === current) { - debugProfileSwitch("DUPLICATE_CLICK_IGNORED", current); - return; - } - if (!force && current === profiles.current) { - debugProfileSwitch("ALREADY_CURRENT_IGNORED", current); - return; - } - - await activateProfile(current, true); - }; + const onSelect = useCallback( + (targetProfile: string, force: boolean) => { + if (!force && targetProfile === currentProfileId) { + debugProfileSwitch("ALREADY_CURRENT_IGNORED", targetProfile); + return; + } + requestSwitch(targetProfile, true); + }, + [currentProfileId, requestSwitch], + ); useEffect(() => { - (async () => { - if (current) { - mutateProfiles(); - await activateProfile(current, false); - } - })(); - }, [current, activateProfile, mutateProfiles]); + if (!current) return; + if (current === currentProfileId) return; + if (switchActivatingIds.includes(current)) return; + requestSwitch(current, false); + }, [current, currentProfileId, requestSwitch, switchActivatingIds]); + + useEffect(() => { + let mounted = true; + const panicListener = listen("rust-panic", (event) => { + if (!mounted) return; + const payload = event.payload; + if (!payload) return; + showNotice( + "error", + `Rust panic: ${payload.message} @ ${payload.location}`, + ); + console.error("Rust panic reported from backend:", payload); + }); + return () => { + mounted = false; + panicListener.then((unlisten) => unlisten()).catch(() => {}); + }; + }, [t]); const onEnhance = useLockFn(async (notifySuccess: boolean) => { - if (switchingProfileRef.current) { + if (switchingProfileId) { console.log( - `[Profile] 有profile正在切换中(${switchingProfileRef.current}),跳过enhance操作`, + `[Profile] A profile is currently switching (${switchingProfileId}); skipping enhance operation`, ); return; } const currentProfiles = currentActivatings(); - setActivatings((prev) => [...new Set([...prev, ...currentProfiles])]); + dispatchManualActivatings({ type: "add", ids: currentProfiles }); try { await enhanceProfiles(); @@ -744,17 +940,17 @@ const ProfilePage = () => { } catch (err: any) { showNotice("error", err.message || err.toString(), 3000); } finally { - // 保留正在切换的profile,清除其他状态 - setActivatings((prev) => - prev.filter((id) => id === switchingProfileRef.current), - ); + dispatchManualActivatings({ type: "reset" }); } }); const onDelete = useLockFn(async (uid: string) => { const current = profiles.current === uid; try { - setActivatings([...(current ? currentActivatings() : []), uid]); + dispatchManualActivatings({ + type: "set", + value: [...new Set([...(current ? 
currentActivatings() : []), uid])], + }); await deleteProfile(uid); mutateProfiles(); mutateLogs(); @@ -764,11 +960,11 @@ const ProfilePage = () => { } catch (err: any) { showNotice("error", err?.message || err.toString()); } finally { - setActivatings([]); + dispatchManualActivatings({ type: "reset" }); } }); - // 更新所有订阅 + // Update all profiles const setLoadingCache = useSetLoadingCache(); const onUpdateAll = useLockFn(async () => { const throttleMutate = throttle(mutateProfiles, 2000, { @@ -779,7 +975,7 @@ const ProfilePage = () => { await updateProfile(uid); throttleMutate(); } catch (err: any) { - console.error(`更新订阅 ${uid} 失败:`, err); + console.error(`Failed to update profile ${uid}:`, err); } finally { setLoadingCache((cache) => ({ ...cache, [uid]: false })); } @@ -787,7 +983,7 @@ const ProfilePage = () => { return new Promise((resolve) => { setLoadingCache((cache) => { - // 获取没有正在更新的订阅 + // Gather profiles that are not updating const items = profileItems.filter( (e) => e.type === "remote" && !cache[e.uid], ); @@ -841,11 +1037,11 @@ const ProfilePage = () => { const getSelectionState = () => { if (selectedProfiles.size === 0) { - return "none"; // 无选择 + return "none"; // no selection } else if (selectedProfiles.size === profileItems.length) { - return "all"; // 全选 + return "all"; // all selected } else { - return "partial"; // 部分选择 + return "partial"; // partially selected } }; @@ -859,7 +1055,7 @@ const ProfilePage = () => { ? [profiles.current] : []; - setActivatings((prev) => [...new Set([...prev, ...currentActivating])]); + dispatchManualActivatings({ type: "add", ids: currentActivating }); // Delete all selected profiles for (const uid of selectedProfiles) { @@ -882,17 +1078,17 @@ const ProfilePage = () => { } catch (err: any) { showNotice("error", err?.message || err.toString()); } finally { - setActivatings([]); + dispatchManualActivatings({ type: "reset" }); } }); const mode = useThemeMode(); - const islight = mode === "light" ? true : false; + const islight = mode === "light"; const dividercolor = islight ? 
"rgba(0, 0, 0, 0.06)" : "rgba(255, 255, 255, 0.06)"; - // 监听后端配置变更 + // Observe configuration changes from backend useEffect(() => { let unlistenPromise: Promise<() => void> | undefined; let lastProfileId: string | null = null; @@ -906,29 +1102,29 @@ const ProfilePage = () => { const newProfileId = event.payload; const now = Date.now(); - console.log(`[Profile] 收到配置变更事件: ${newProfileId}`); + console.log(`[Profile] Received profile-change event: ${newProfileId}`); if ( lastProfileId === newProfileId && now - lastUpdateTime < debounceDelay ) { - console.log(`[Profile] 重复事件被防抖,跳过`); + console.log(`[Profile] Duplicate event throttled; skipping`); return; } lastProfileId = newProfileId; lastUpdateTime = now; - console.log(`[Profile] 执行配置数据刷新`); + console.log(`[Profile] Performing profile data refresh`); if (refreshTimer !== null) { window.clearTimeout(refreshTimer); } - // 使用异步调度避免阻塞事件处理 + // Use async scheduling to avoid blocking event handling refreshTimer = window.setTimeout(() => { mutateProfiles().catch((error) => { - console.error("[Profile] 配置数据刷新失败:", error); + console.error("[Profile] Profile data refresh failed:", error); }); refreshTimer = null; }, 0); @@ -945,16 +1141,6 @@ const ProfilePage = () => { }; }, [mutateProfiles]); - // 组件卸载时清理中断控制器 - useEffect(() => { - return () => { - if (abortControllerRef.current) { - abortControllerRef.current.abort(); - debugProfileSwitch("COMPONENT_UNMOUNT_CLEANUP", "all"); - } - }; - }, []); - return ( { - {/* 故障检测和紧急恢复按钮 */} + {/* Fault detection and emergency recovery button */} {(error || isStale) && ( { ref={viewerRef} onChange={async (isActivating) => { mutateProfiles(); - // 只有更改当前激活的配置时才触发全局重新加载 + // Only trigger global reload when the active profile changes if (isActivating) { await onEnhance(false); } diff --git a/src/providers/app-data-context.ts b/src/providers/app-data-context.ts index 7b7244aba..6bf02313f 100644 --- a/src/providers/app-data-context.ts +++ b/src/providers/app-data-context.ts @@ -6,8 +6,15 @@ import { RuleProvider, } from "tauri-plugin-mihomo-api"; +import { ProxiesView, type ProfileSwitchStatus } from "@/services/cmds"; + export interface AppDataContextType { - proxies: any; + proxies: ProxiesView | null; + proxyHydration: "none" | "snapshot" | "live"; + proxyTargetProfileId: string | null; + proxyDisplayProfileId: string | null; + isProxyRefreshPending: boolean; + switchStatus: ProfileSwitchStatus | null; clashConfig: BaseConfig; rules: Rule[]; sysproxy: any; diff --git a/src/providers/app-data-provider.tsx b/src/providers/app-data-provider.tsx index c71528c40..9c97c61f3 100644 --- a/src/providers/app-data-provider.tsx +++ b/src/providers/app-data-provider.tsx @@ -1,6 +1,6 @@ import { listen } from "@tauri-apps/api/event"; -import React, { useCallback, useEffect, useMemo } from "react"; -import useSWR from "swr"; +import React, { useCallback, useEffect, useMemo, useRef } from "react"; +import useSWR, { mutate as globalMutate } from "swr"; import { getBaseConfig, getRuleProviders, @@ -9,31 +9,53 @@ import { import { useVerge } from "@/hooks/use-verge"; import { - calcuProxies, calcuProxyProviders, getAppUptime, + getProfileSwitchStatus, + getProfileSwitchEvents, + getProfiles as fetchProfilesConfig, getRunningMode, + readProfileFile, getSystemProxy, + type ProxiesView, + type ProfileSwitchStatus, + type SwitchResultStatus, } from "@/services/cmds"; -import { SWR_DEFAULTS, SWR_REALTIME, SWR_SLOW_POLL } from "@/services/config"; +import { SWR_DEFAULTS, SWR_SLOW_POLL } from "@/services/config"; +import { useProfileStore } 
from "@/stores/profile-store"; +import { + applyLiveProxyPayload, + fetchLiveProxies, + type ProxiesUpdatedPayload, + useProxyStore, +} from "@/stores/proxy-store"; +import { createProxySnapshotFromProfile } from "@/utils/proxy-snapshot"; import { AppDataContext, AppDataContextType } from "./app-data-context"; -// 全局数据提供者组件 +// Global app data provider export const AppDataProvider = ({ children, }: { children: React.ReactNode; }) => { const { verge } = useVerge(); - - const { data: proxiesData, mutate: refreshProxy } = useSWR( - "getProxies", - calcuProxies, - { - ...SWR_REALTIME, - onError: (err) => console.warn("[DataProvider] Proxy fetch failed:", err), - }, + const applyProfileSwitchResult = useProfileStore( + (state) => state.applySwitchResult, + ); + const commitProfileSnapshot = useProfileStore( + (state) => state.commitHydrated, + ); + const setSwitchEventSeq = useProfileStore((state) => state.setLastEventSeq); + const proxyView = useProxyStore((state) => state.data); + const proxyHydration = useProxyStore((state) => state.hydration); + const proxyProfileId = useProxyStore((state) => state.lastProfileId); + const pendingProxyProfileId = useProxyStore( + (state) => state.pendingProfileId, + ); + const setProxySnapshot = useProxyStore((state) => state.setSnapshot); + const clearPendingProxyProfile = useProxyStore( + (state) => state.clearPendingProfile, ); const { data: clashConfig, mutate: refreshClashConfig } = useSWR( @@ -60,25 +82,259 @@ export const AppDataProvider = ({ SWR_DEFAULTS, ); - useEffect(() => { - let lastProfileId: string | null = null; - let lastUpdateTime = 0; - const refreshThrottle = 800; + const { data: switchStatus, mutate: mutateSwitchStatus } = + useSWR( + "getProfileSwitchStatus", + getProfileSwitchStatus, + { + refreshInterval: (status) => + status && (status.isSwitching || (status.queue?.length ?? 0) > 0) + ? 400 + : 4000, + dedupingInterval: 200, + }, + ); - let isUnmounted = false; - const scheduledTimeouts = new Set(); + const isUnmountedRef = useRef(false); + // Keep track of pending timers so we can cancel them on unmount and avoid stray updates. + const scheduledTimeoutsRef = useRef>(new Set()); + // Shared metadata to dedupe switch events coming from both polling and subscriptions. + const switchMetaRef = useRef<{ + pendingProfileId: string | null; + lastResultTaskId: number | null; + }>({ + pendingProfileId: null, + lastResultTaskId: null, + }); + const switchEventSeqRef = useRef(0); + const profileChangeMetaRef = useRef<{ + lastProfileId: string | null; + lastEventTs: number; + }>({ + lastProfileId: null, + lastEventTs: 0, + }); + const lastClashRefreshAtRef = useRef(0); + const PROFILE_EVENT_DEDUP_MS = 400; + const CLASH_REFRESH_DEDUP_MS = 300; + + // Thin wrapper around setTimeout that no-ops once the provider unmounts. + const scheduleTimeout = useCallback( + (callback: () => void | Promise, delay: number) => { + if (isUnmountedRef.current) return -1; + + const timeoutId = window.setTimeout(() => { + scheduledTimeoutsRef.current.delete(timeoutId); + if (!isUnmountedRef.current) { + void callback(); + } + }, delay); + + scheduledTimeoutsRef.current.add(timeoutId); + return timeoutId; + }, + [], + ); + + const clearAllTimeouts = useCallback(() => { + scheduledTimeoutsRef.current.forEach((timeoutId) => + clearTimeout(timeoutId), + ); + scheduledTimeoutsRef.current.clear(); + }, []); + + // Delay live proxy refreshes slightly so we don't hammer Mihomo while a switch is still applying. 
+ const queueProxyRefresh = useCallback( + (reason: string, delay = 1500) => { + scheduleTimeout(() => { + fetchLiveProxies().catch((error) => + console.warn( + `[DataProvider] Proxy refresh failed (${reason}, fallback):`, + error, + ), + ); + }, delay); + }, + [scheduleTimeout], + ); + // Prime the proxy store with the static selections from the profile YAML before live data arrives. + const seedProxySnapshot = useCallback( + async (profileId: string) => { + if (!profileId) return; + + try { + const yamlContent = await readProfileFile(profileId); + const snapshot = createProxySnapshotFromProfile(yamlContent); + if (!snapshot) return; + + setProxySnapshot(snapshot, profileId); + } catch (error) { + console.warn( + "[DataProvider] Failed to seed proxy snapshot from profile:", + error, + ); + } + }, + [setProxySnapshot], + ); + + const handleSwitchResult = useCallback( + (result: SwitchResultStatus) => { + // Ignore duplicate notifications for the same switch execution. + const meta = switchMetaRef.current; + if (result.taskId === meta.lastResultTaskId) { + return; + } + meta.lastResultTaskId = result.taskId; + + // Optimistically update the SWR cache so the UI shows the new profile immediately. + void globalMutate( + "getProfiles", + (current?: IProfilesConfig | null) => { + if (!current || !result.success) { + return current; + } + if (current.current === result.profileId) { + return current; + } + return { + ...current, + current: result.profileId, + }; + }, + false, + ); + + applyProfileSwitchResult(result); + if (!result.success) { + clearPendingProxyProfile(); + } + + if (result.success && result.cancelled !== true) { + // Once the backend settles, refresh all dependent data in the background. + scheduleTimeout(() => { + void Promise.allSettled([ + fetchProfilesConfig().then((data) => { + commitProfileSnapshot(data); + globalMutate("getProfiles", data, false); + }), + fetchLiveProxies(), + refreshProxyProviders(), + refreshRules(), + refreshRuleProviders(), + ]).catch((error) => { + console.warn( + "[DataProvider] Background refresh after profile switch failed:", + error, + ); + }); + }, 100); + } + + void mutateSwitchStatus((current) => { + if (!current) { + return current; + } + const filteredQueue = current.queue.filter( + (task) => task.taskId !== result.taskId, + ); + const active = + current.active && current.active.taskId === result.taskId + ? null + : current.active; + const isSwitching = filteredQueue.length > 0; + return { + ...current, + active, + queue: filteredQueue, + isSwitching, + lastResult: result, + }; + }, false); + }, + [ + scheduleTimeout, + refreshProxyProviders, + refreshRules, + refreshRuleProviders, + mutateSwitchStatus, + applyProfileSwitchResult, + commitProfileSnapshot, + clearPendingProxyProfile, + ], + ); + + useEffect(() => { + isUnmountedRef.current = false; + return () => { + isUnmountedRef.current = true; + clearAllTimeouts(); + }; + }, [clearAllTimeouts]); + + useEffect(() => { + if (!switchStatus) { + return; + } + + const meta = switchMetaRef.current; + const nextTarget = + switchStatus.active?.profileId ?? + (switchStatus.queue.length > 0 ? switchStatus.queue[0].profileId : null); + + if (nextTarget && nextTarget !== meta.pendingProfileId) { + meta.pendingProfileId = nextTarget; + void seedProxySnapshot(nextTarget); + } else if (!nextTarget) { + meta.pendingProfileId = null; + } + + const lastResult = switchStatus.lastResult ?? 
null; + if (lastResult) { + handleSwitchResult(lastResult); + } + }, [switchStatus, seedProxySnapshot, handleSwitchResult]); + + useEffect(() => { + let disposed = false; + + const pollEvents = async () => { + if (disposed) { + return; + } + try { + const events = await getProfileSwitchEvents(switchEventSeqRef.current); + if (events.length > 0) { + switchEventSeqRef.current = events[events.length - 1].sequence; + setSwitchEventSeq(switchEventSeqRef.current); + events.forEach((event) => handleSwitchResult(event.result)); + } + } catch (error) { + console.warn("[DataProvider] Failed to poll switch events:", error); + } finally { + if (!disposed) { + const nextDelay = + switchStatus && + (switchStatus.isSwitching || (switchStatus.queue?.length ?? 0) > 0) + ? 250 + : 1000; + scheduleTimeout(pollEvents, nextDelay); + } + } + }; + + scheduleTimeout(pollEvents, 0); + + return () => { + disposed = true; + }; + }, [scheduleTimeout, handleSwitchResult, switchStatus, setSwitchEventSeq]); + + useEffect(() => { const cleanupFns: Array<() => void> = []; const registerCleanup = (fn: () => void) => { - if (isUnmounted) { - try { - fn(); - } catch (error) { - console.error("[DataProvider] Immediate cleanup failed:", error); - } - } else { - cleanupFns.push(fn); - } + cleanupFns.push(fn); }; const addWindowListener = (eventName: string, handler: EventListener) => { @@ -87,140 +343,319 @@ export const AppDataProvider = ({ return () => window.removeEventListener(eventName, handler); }; - const scheduleTimeout = ( - callback: () => void | Promise, - delay: number, + const runProfileChangedPipeline = ( + profileId: string | null, + source: "tauri" | "window", ) => { - if (isUnmounted) return -1; - - const timeoutId = window.setTimeout(() => { - scheduledTimeouts.delete(timeoutId); - if (!isUnmounted) { - void callback(); - } - }, delay); - - scheduledTimeouts.add(timeoutId); - return timeoutId; - }; - - const clearAllTimeouts = () => { - scheduledTimeouts.forEach((timeoutId) => clearTimeout(timeoutId)); - scheduledTimeouts.clear(); - }; - - const handleProfileChanged = (event: { payload: string }) => { - const newProfileId = event.payload; const now = Date.now(); + const meta = profileChangeMetaRef.current; if ( - lastProfileId === newProfileId && - now - lastUpdateTime < refreshThrottle + meta.lastProfileId === profileId && + now - meta.lastEventTs < PROFILE_EVENT_DEDUP_MS ) { return; } - lastProfileId = newProfileId; - lastUpdateTime = now; + meta.lastProfileId = profileId; + meta.lastEventTs = now; - scheduleTimeout(() => { - refreshRules().catch((error) => - console.warn("[DataProvider] Rules refresh failed:", error), - ); - refreshRuleProviders().catch((error) => - console.warn("[DataProvider] Rule providers refresh failed:", error), - ); - }, 200); - }; - - const handleRefreshClash = () => { - const now = Date.now(); - if (now - lastUpdateTime <= refreshThrottle) return; - - lastUpdateTime = now; - scheduleTimeout(() => { - refreshProxy().catch((error) => - console.error("[DataProvider] Proxy refresh failed:", error), - ); - }, 200); - }; - - const handleRefreshProxy = () => { - const now = Date.now(); - if (now - lastUpdateTime <= refreshThrottle) return; - - lastUpdateTime = now; - scheduleTimeout(() => { - refreshProxy().catch((error) => - console.warn("[DataProvider] Proxy refresh failed:", error), - ); - }, 200); - }; - - const initializeListeners = async () => { - try { - const unlistenProfile = await listen( - "profile-changed", - handleProfileChanged, - ); - registerCleanup(unlistenProfile); - } 
catch (error) { - console.error("[AppDataProvider] 监听 Profile 事件失败:", error); + if (profileId) { + void seedProxySnapshot(profileId); } - try { - const unlistenClash = await listen( - "verge://refresh-clash-config", - handleRefreshClash, + queueProxyRefresh(`profile-changed-${source}`, 500); + + scheduleTimeout(() => { + void fetchProfilesConfig() + .then((data) => { + commitProfileSnapshot(data); + globalMutate("getProfiles", data, false); + }) + .catch((error) => + console.warn( + "[AppDataProvider] Failed to refresh profiles after profile change:", + error, + ), + ); + void refreshProxyProviders().catch((error) => + console.warn( + "[AppDataProvider] Proxy providers refresh failed after profile change:", + error, + ), ); - const unlistenProxy = await listen( - "verge://refresh-proxy-config", - handleRefreshProxy, + void refreshRules().catch((error) => + console.warn( + "[AppDataProvider] Rules refresh failed after profile change:", + error, + ), ); + void refreshRuleProviders().catch((error) => + console.warn( + "[AppDataProvider] Rule providers refresh failed after profile change:", + error, + ), + ); + }, 200); + }; - registerCleanup(() => { - unlistenClash(); - unlistenProxy(); - }); - } catch (error) { - console.warn("[AppDataProvider] 设置 Tauri 事件监听器失败:", error); + const handleProfileChanged = (event: { payload: string }) => { + runProfileChangedPipeline(event.payload ?? null, "tauri"); + }; - const fallbackHandlers: Array<[string, EventListener]> = [ - ["verge://refresh-clash-config", handleRefreshClash], - ["verge://refresh-proxy-config", handleRefreshProxy], - ]; + const runRefreshClashPipeline = (source: "tauri" | "window") => { + const now = Date.now(); + if (now - lastClashRefreshAtRef.current < CLASH_REFRESH_DEDUP_MS) { + return; + } - fallbackHandlers.forEach(([eventName, handler]) => { - registerCleanup(addWindowListener(eventName, handler)); - }); + lastClashRefreshAtRef.current = now; + + scheduleTimeout(() => { + void refreshClashConfig().catch((error) => + console.warn( + "[AppDataProvider] Clash config refresh failed after backend update:", + error, + ), + ); + void refreshRules().catch((error) => + console.warn( + "[AppDataProvider] Rules refresh failed after backend update:", + error, + ), + ); + void refreshRuleProviders().catch((error) => + console.warn( + "[AppDataProvider] Rule providers refresh failed after backend update:", + error, + ), + ); + void refreshProxyProviders().catch((error) => + console.warn( + "[AppDataProvider] Proxy providers refresh failed after backend update:", + error, + ), + ); + }, 0); + + queueProxyRefresh(`refresh-clash-config-${source}`, 400); + }; + + const handleProfileUpdateCompleted = (_: { payload: { uid: string } }) => { + queueProxyRefresh("profile-update-completed", 3000); + if (!isUnmountedRef.current) { + scheduleTimeout(() => { + void refreshProxyProviders().catch((error) => + console.warn( + "[DataProvider] Proxy providers refresh failed after profile update completed:", + error, + ), + ); + }, 0); } }; - void initializeListeners(); + const isProxiesPayload = ( + value: unknown, + ): value is ProxiesUpdatedPayload => { + if (!value || typeof value !== "object") { + return false; + } + const candidate = value as Partial; + return candidate.proxies !== undefined && candidate.proxies !== null; + }; + + const handleProxiesUpdatedPayload = ( + rawPayload: unknown, + source: "tauri" | "window", + ) => { + if (!isProxiesPayload(rawPayload)) { + console.warn( + `[AppDataProvider] Ignored ${source} proxies-updated payload`, + 
rawPayload, + ); + queueProxyRefresh(`proxies-updated-${source}-invalid`, 500); + return; + } + + try { + applyLiveProxyPayload(rawPayload); + } catch (error) { + console.warn( + `[AppDataProvider] Failed to apply ${source} proxies-updated payload`, + error, + ); + queueProxyRefresh(`proxies-updated-${source}-apply-failed`, 500); + } + }; + + listen<{ uid: string }>( + "profile-update-completed", + handleProfileUpdateCompleted, + ) + .then(registerCleanup) + .catch((error) => + console.error( + "[AppDataProvider] failed to attach profile update listeners:", + error, + ), + ); + + listen("profile-changed", handleProfileChanged) + .then(registerCleanup) + .catch((error) => + console.error( + "[AppDataProvider] failed to attach profile-changed listener:", + error, + ), + ); + + listen("proxies-updated", (event) => { + handleProxiesUpdatedPayload(event.payload, "tauri"); + }) + .then(registerCleanup) + .catch((error) => + console.error( + "[AppDataProvider] failed to attach proxies-updated listener:", + error, + ), + ); + + listen("verge://refresh-clash-config", () => { + runRefreshClashPipeline("tauri"); + }) + .then(registerCleanup) + .catch((error) => + console.error( + "[AppDataProvider] failed to attach refresh-clash-config listener:", + error, + ), + ); + + listen("verge://refresh-proxy-config", () => { + queueProxyRefresh("refresh-proxy-config-tauri", 500); + }) + .then(registerCleanup) + .catch((error) => + console.error( + "[AppDataProvider] failed to attach refresh-proxy-config listener:", + error, + ), + ); + + const fallbackHandlers: Array<[string, EventListener]> = [ + [ + "profile-update-completed", + ((event: Event) => { + const payload = (event as CustomEvent<{ uid: string }>).detail ?? { + uid: "", + }; + handleProfileUpdateCompleted({ payload }); + }) as EventListener, + ], + [ + "profile-changed", + ((event: Event) => { + const payload = (event as CustomEvent).detail ?? null; + runProfileChangedPipeline(payload, "window"); + }) as EventListener, + ], + [ + "proxies-updated", + ((event: Event) => { + const payload = (event as CustomEvent).detail; + handleProxiesUpdatedPayload(payload, "window"); + }) as EventListener, + ], + [ + "verge://refresh-clash-config", + (() => { + runRefreshClashPipeline("window"); + }) as EventListener, + ], + [ + "verge://refresh-proxy-config", + (() => { + queueProxyRefresh("refresh-proxy-config-window", 500); + }) as EventListener, + ], + ]; + + fallbackHandlers.forEach(([eventName, handler]) => { + registerCleanup(addWindowListener(eventName, handler)); + }); return () => { - isUnmounted = true; - clearAllTimeouts(); - - const errors: Error[] = []; - cleanupFns.splice(0).forEach((fn) => { + cleanupFns.forEach((fn) => { try { fn(); } catch (error) { - errors.push( - error instanceof Error ? error : new Error(String(error)), - ); + console.error("[AppDataProvider] cleanup error:", error); } }); - - if (errors.length > 0) { - console.error( - `[DataProvider] ${errors.length} errors during cleanup:`, - errors, - ); - } }; - }, [refreshProxy, refreshRules, refreshRuleProviders]); + }, [ + commitProfileSnapshot, + queueProxyRefresh, + refreshClashConfig, + refreshProxyProviders, + refreshRuleProviders, + refreshRules, + scheduleTimeout, + seedProxySnapshot, + ]); + + const switchTargetProfileId = + switchStatus?.active?.profileId ?? + (switchStatus && switchStatus.queue.length > 0 + ? switchStatus.queue[0].profileId + : null); + + const proxyTargetProfileId = + switchTargetProfileId ?? pendingProxyProfileId ?? proxyProfileId ?? 
null; + const displayProxyStateRef = useRef<{ + view: ProxiesView | null; + profileId: string | null; + }>({ + view: proxyView, + profileId: proxyTargetProfileId, + }); + + const currentDisplay = displayProxyStateRef.current; + + if (!proxyView) { + if ( + currentDisplay.view !== null || + currentDisplay.profileId !== proxyTargetProfileId + ) { + displayProxyStateRef.current = { + view: null, + profileId: proxyTargetProfileId, + }; + } + } else if (proxyHydration === "live") { + if ( + currentDisplay.view !== proxyView || + currentDisplay.profileId !== proxyTargetProfileId + ) { + displayProxyStateRef.current = { + view: proxyView, + profileId: proxyTargetProfileId, + }; + } + } else if (!currentDisplay.view) { + displayProxyStateRef.current = { + view: proxyView, + profileId: proxyTargetProfileId, + }; + } + const displayProxyState = displayProxyStateRef.current; + const proxyDisplayProfileId = displayProxyState.profileId; + const proxiesForRender = displayProxyState.view ?? proxyView; + const isProxyRefreshPending = + (switchStatus?.isSwitching ?? false) || + proxyHydration !== "live" || + proxyTargetProfileId !== proxyDisplayProfileId; const { data: sysproxy, mutate: refreshSysproxy } = useSWR( "getSystemProxy", @@ -240,10 +675,10 @@ export const AppDataProvider = ({ errorRetryCount: 1, }); - // 提供统一的刷新方法 + // Provide unified refresh method const refreshAll = useCallback(async () => { await Promise.all([ - refreshProxy(), + fetchLiveProxies(), refreshClashConfig(), refreshRules(), refreshSysproxy(), @@ -251,7 +686,6 @@ export const AppDataProvider = ({ refreshRuleProviders(), ]); }, [ - refreshProxy, refreshClashConfig, refreshRules, refreshSysproxy, @@ -259,22 +693,22 @@ export const AppDataProvider = ({ refreshRuleProviders, ]); - // 聚合所有数据 + // Aggregate data into context value const value = useMemo(() => { - // 计算系统代理地址 + // Compute the system proxy address const calculateSystemProxyAddress = () => { if (!verge || !clashConfig) return "-"; const isPacMode = verge.proxy_auto_config ?? false; if (isPacMode) { - // PAC模式:显示我们期望设置的代理地址 + // PAC mode: display the desired proxy address const proxyHost = verge.proxy_host || "127.0.0.1"; const proxyPort = verge.verge_mixed_port || clashConfig.mixedPort || 7897; return `${proxyHost}:${proxyPort}`; } else { - // HTTP代理模式:优先使用系统地址,但如果格式不正确则使用期望地址 + // HTTP proxy mode: prefer system address, fallback to desired address if invalid const systemServer = sysproxy?.server; if ( systemServer && @@ -283,7 +717,7 @@ export const AppDataProvider = ({ ) { return systemServer; } else { - // 系统地址无效,返回期望的代理地址 + // System address invalid: fallback to desired proxy address const proxyHost = verge.proxy_host || "127.0.0.1"; const proxyPort = verge.verge_mixed_port || clashConfig.mixedPort || 7897; @@ -293,22 +727,27 @@ export const AppDataProvider = ({ }; return { - // 数据 - proxies: proxiesData, + // Data + proxies: proxiesForRender, + proxyHydration, + proxyTargetProfileId, + proxyDisplayProfileId, + isProxyRefreshPending, + switchStatus: switchStatus ?? 
null, clashConfig, rules: rulesData?.rules || [], sysproxy, runningMode, uptime: uptimeData || 0, - // 提供者数据 + // Provider data proxyProviders: proxyProviders || {}, ruleProviders: ruleProviders?.providers || {}, systemProxyAddress: calculateSystemProxyAddress(), - // 刷新方法 - refreshProxy, + // Refresh helpers + refreshProxy: fetchLiveProxies, refreshClashConfig, refreshRules, refreshSysproxy, @@ -317,7 +756,12 @@ export const AppDataProvider = ({ refreshAll, } as AppDataContextType; }, [ - proxiesData, + proxiesForRender, + proxyHydration, + proxyTargetProfileId, + proxyDisplayProfileId, + isProxyRefreshPending, + switchStatus, clashConfig, rulesData, sysproxy, @@ -326,7 +770,6 @@ export const AppDataProvider = ({ proxyProviders, ruleProviders, verge, - refreshProxy, refreshClashConfig, refreshRules, refreshSysproxy, diff --git a/src/services/cmds.ts b/src/services/cmds.ts index e1e686bd3..098944181 100644 --- a/src/services/cmds.ts +++ b/src/services/cmds.ts @@ -4,6 +4,52 @@ import { getProxies, getProxyProviders } from "tauri-plugin-mihomo-api"; import { showNotice } from "@/services/noticeService"; +export type ProxyProviderRecord = Record< + string, + IProxyProviderItem | undefined +>; + +export interface SwitchTaskStatus { + taskId: number; + profileId: string; + notify: boolean; + stage?: number | null; + queued: boolean; +} + +export interface SwitchResultStatus { + taskId: number; + profileId: string; + success: boolean; + cancelled?: boolean; + finishedAt: number; + errorStage?: string | null; + errorDetail?: string | null; +} + +export interface ProfileSwitchStatus { + isSwitching: boolean; + active?: SwitchTaskStatus | null; + queue: SwitchTaskStatus[]; + cleanupProfiles: string[]; + lastResult?: SwitchResultStatus | null; + lastUpdated: number; +} + +export interface SwitchResultEvent { + sequence: number; + result: SwitchResultStatus; +} + +// Persist the last proxy provider payload so UI can render while waiting on Mihomo. +let cachedProxyProviders: ProxyProviderRecord | null = null; + +export const getCachedProxyProviders = () => cachedProxyProviders; + +export const setCachedProxyProviders = (record: ProxyProviderRecord | null) => { + cachedProxyProviders = record; +}; + export async function copyClashEnv() { return invoke("copy_clash_env"); } @@ -20,6 +66,14 @@ export async function patchProfilesConfig(profiles: IProfilesConfig) { return invoke("patch_profiles_config", { profiles }); } +// Triggers the async state-machine driven switch flow on the backend. 
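+// Resolves to the backend's accept flag: the profiles page treats a `false`
+// result as a failed switch, while progress and completion are reported
+// separately through get_profile_switch_status / get_profile_switch_events.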
+export async function switchProfileCommand(
+  profileIndex: string,
+  notifySuccess: boolean,
+) {
+  return invoke("switch_profile", { profileIndex, notifySuccess });
+}
+
 export async function createProfile(
   item: Partial<IProfileItem>,
   fileData?: string | null,
@@ -113,27 +167,29 @@ export async function syncTrayProxySelection() {
   return invoke("sync_tray_proxy_selection");
 }
 
-export async function calcuProxies(): Promise<{
+export interface ProxiesView {
   global: IProxyGroupItem;
   direct: IProxyItem;
   groups: IProxyGroupItem[];
   records: Record<string, IProxyItem>;
   proxies: IProxyItem[];
-}> {
-  const [proxyResponse, providerResponse] = await Promise.all([
-    getProxies(),
-    calcuProxyProviders(),
-  ]);
+}
 
+export function buildProxyView(
+  proxyResponse: Awaited<ReturnType<typeof getProxies>>,
+  providerRecord?: ProxyProviderRecord | null,
+): ProxiesView {
   const proxyRecord = proxyResponse.proxies;
-  const providerRecord = providerResponse;
 
   // provider name map
-  const providerMap = Object.fromEntries(
-    Object.entries(providerRecord).flatMap(([provider, item]) =>
-      item!.proxies.map((p) => [p.name, { ...p, provider }]),
-    ),
-  );
+  const providerMap = providerRecord
+    ? Object.fromEntries(
+        Object.entries(providerRecord).flatMap(([provider, item]) => {
+          if (!item) return [];
+          return item.proxies.map((p) => [p.name, { ...p, provider }]);
+        }),
+      )
+    : {};
 
   // compatible with proxy-providers
   const generateItem = (name: string) => {
@@ -207,16 +263,56 @@
   };
 }
 
+export async function calcuProxies(): Promise<ProxiesView> {
+  const proxyResponse = await getProxies();
+
+  let providerRecord = cachedProxyProviders;
+  if (!providerRecord) {
+    try {
+      providerRecord = await calcuProxyProviders();
+    } catch (error) {
+      console.warn("[calcuProxies] Failed to load proxy providers:", error);
+    }
+  }
+
+  return buildProxyView(proxyResponse, providerRecord);
+}
+
 export async function calcuProxyProviders() {
   const providers = await getProxyProviders();
-  return Object.fromEntries(
-    Object.entries(providers.providers)
-      .sort()
-      .filter(
-        ([_, item]) =>
-          item?.vehicleType === "HTTP" || item?.vehicleType === "File",
-      ),
-  );
+  const mappedEntries = Object.entries(providers.providers)
+    .sort()
+    .filter(
+      ([, item]) =>
+        item?.vehicleType === "HTTP" || item?.vehicleType === "File",
+    )
+    .map(([name, item]) => {
+      if (!item) return [name, undefined] as const;
+
+      const subscriptionInfo =
+        item.subscriptionInfo && typeof item.subscriptionInfo === "object"
+          ? {
+              Upload: item.subscriptionInfo.Upload ?? 0,
+              Download: item.subscriptionInfo.Download ?? 0,
+              Total: item.subscriptionInfo.Total ?? 0,
+              Expire: item.subscriptionInfo.Expire ?? 0,
+            }
+          : undefined;
+
+      const normalized: IProxyProviderItem = {
+        name: item.name,
+        type: item.type,
+        proxies: item.proxies ?? [],
+        updatedAt: item.updatedAt ?? "",
+        vehicleType: item.vehicleType ??
"", + subscriptionInfo, + }; + return [name, normalized] as const; + }); + + const mapped = Object.fromEntries(mappedEntries) as ProxyProviderRecord; + cachedProxyProviders = mapped; + return mapped; } export async function getClashLogs() { @@ -555,3 +651,13 @@ export const isAdmin = async () => { export async function getNextUpdateTime(uid: string) { return invoke("get_next_update_time", { uid }); } + +export async function getProfileSwitchStatus() { + return invoke("get_profile_switch_status"); +} + +export async function getProfileSwitchEvents(afterSequence: number) { + return invoke("get_profile_switch_events", { + afterSequence, + }); +} diff --git a/src/services/noticeService.ts b/src/services/noticeService.ts index 0a3505dac..7275dd458 100644 --- a/src/services/noticeService.ts +++ b/src/services/noticeService.ts @@ -14,10 +14,20 @@ let nextId = 0; let notices: NoticeItem[] = []; const listeners: Set = new Set(); -function notifyListeners() { +function flushListeners() { listeners.forEach((listener) => listener([...notices])); // Pass a copy } +let notifyScheduled = false; +function scheduleNotify() { + if (notifyScheduled) return; + notifyScheduled = true; + requestAnimationFrame(() => { + notifyScheduled = false; + flushListeners(); + }); +} + // Shows a notification. export function showNotice( @@ -44,7 +54,7 @@ export function showNotice( } notices = [...notices, newNotice]; - notifyListeners(); + scheduleNotify(); return id; } @@ -56,7 +66,7 @@ export function hideNotice(id: number) { clearTimeout(notice.timerId); // Clear timeout if manually closed } notices = notices.filter((n) => n.id !== id); - notifyListeners(); + scheduleNotify(); } // Subscribes a listener function to notice state changes. @@ -77,5 +87,5 @@ export function clearAllNotices() { if (n.timerId) clearTimeout(n.timerId); }); notices = []; - notifyListeners(); + scheduleNotify(); } diff --git a/src/services/refresh.ts b/src/services/refresh.ts new file mode 100644 index 000000000..6150680da --- /dev/null +++ b/src/services/refresh.ts @@ -0,0 +1,24 @@ +import { mutate } from "swr"; + +import { getAxios } from "@/services/api"; + +export const refreshClashData = async () => { + try { + await getAxios(true); + } catch (error) { + console.warn("[Refresh] getAxios failed during clash refresh:", error); + } + + mutate("getProxies"); + mutate("getVersion"); + mutate("getClashConfig"); + mutate("getProxyProviders"); +}; + +export const refreshVergeData = () => { + mutate("getVergeConfig"); + mutate("getSystemProxy"); + mutate("getAutotemProxy"); + mutate("getRunningMode"); + mutate("isServiceAvailable"); +}; diff --git a/src/stores/profile-store.ts b/src/stores/profile-store.ts new file mode 100644 index 000000000..446b62267 --- /dev/null +++ b/src/stores/profile-store.ts @@ -0,0 +1,59 @@ +import { create } from "zustand"; + +import type { SwitchResultStatus } from "@/services/cmds"; + +interface ProfileStoreState { + data: IProfilesConfig | null; + optimisticCurrent: string | null; + isHydrating: boolean; + lastEventSeq: number; + lastResult: SwitchResultStatus | null; + applySwitchResult: (result: SwitchResultStatus) => void; + commitHydrated: (data: IProfilesConfig) => void; + setLastEventSeq: (sequence: number) => void; +} + +export const useProfileStore = create((set) => ({ + data: null, + optimisticCurrent: null, + isHydrating: false, + lastEventSeq: 0, + lastResult: null, + applySwitchResult(result) { + // Record the optimistic switch outcome so the UI reflects the desired profile immediately. 
+ set((state) => ({ + lastResult: result, + optimisticCurrent: result.success ? result.profileId : null, + isHydrating: result.success ? true : state.isHydrating, + })); + }, + commitHydrated(data) { + set({ + data, + optimisticCurrent: null, + isHydrating: false, + }); + }, + setLastEventSeq(sequence) { + set({ lastEventSeq: sequence }); + }, +})); + +export const selectEffectiveProfiles = (state: ProfileStoreState) => { + if (!state.data) { + return null; + } + // Prefer the optimistic selection while hydration is pending. + const current = state.optimisticCurrent ?? state.data.current; + if ( + state.optimisticCurrent && + state.optimisticCurrent !== state.data.current + ) { + return { ...state.data, current } as IProfilesConfig; + } + return state.data; +}; + +export const selectIsHydrating = (state: ProfileStoreState) => + state.isHydrating; +export const selectLastResult = (state: ProfileStoreState) => state.lastResult; diff --git a/src/stores/proxy-store.ts b/src/stores/proxy-store.ts new file mode 100644 index 000000000..b8c87ef80 --- /dev/null +++ b/src/stores/proxy-store.ts @@ -0,0 +1,298 @@ +import type { getProxies } from "tauri-plugin-mihomo-api"; +import { create } from "zustand"; + +import { + ProxiesView, + ProxyProviderRecord, + buildProxyView, + calcuProxies, + getCachedProxyProviders, + setCachedProxyProviders, +} from "@/services/cmds"; +import { AsyncEventQueue, nextTick } from "@/utils/asyncQueue"; + +type ProxyHydration = "none" | "snapshot" | "live"; +type RawProxiesResponse = Awaited>; + +export interface ProxiesUpdatedPayload { + proxies: RawProxiesResponse; + providers?: Record | null; + emittedAt?: number; + profileId?: string | null; +} + +interface ProxyStoreState { + data: ProxiesView | null; + hydration: ProxyHydration; + lastUpdated: number | null; + lastProfileId: string | null; + liveFetchRequestId: number; + lastAppliedFetchId: number; + pendingProfileId: string | null; + pendingSnapshotFetchId: number | null; + setSnapshot: (snapshot: ProxiesView, profileId: string) => void; + setLive: (payload: ProxiesUpdatedPayload) => void; + startLiveFetch: () => number; + completeLiveFetch: (requestId: number, view: ProxiesView) => void; + clearPendingProfile: () => void; + reset: () => void; +} + +const normalizeProviderPayload = ( + raw: ProxiesUpdatedPayload["providers"], +): ProxyProviderRecord | null => { + if (!raw || typeof raw !== "object") return null; + + const rawRecord = raw as Record; + const source = + rawRecord.providers && typeof rawRecord.providers === "object" + ? (rawRecord.providers as Record) + : rawRecord; + + const entries = Object.entries(source) + .sort(([a], [b]) => a.localeCompare(b)) + .filter(([, value]) => { + if (!value || typeof value !== "object") { + return false; + } + const vehicleType = value.vehicleType; + return vehicleType === "HTTP" || vehicleType === "File"; + }) + .map(([name, value]) => { + const normalized: IProxyProviderItem = { + name: value.name ?? name, + type: value.type ?? "", + proxies: Array.isArray(value.proxies) ? value.proxies : [], + updatedAt: value.updatedAt ?? "", + vehicleType: value.vehicleType ?? "", + subscriptionInfo: + value.subscriptionInfo && typeof value.subscriptionInfo === "object" + ? { + Upload: Number(value.subscriptionInfo.Upload ?? 0), + Download: Number(value.subscriptionInfo.Download ?? 0), + Total: Number(value.subscriptionInfo.Total ?? 0), + Expire: Number(value.subscriptionInfo.Expire ?? 
0), + } + : undefined, + }; + + return [name, normalized] as const; + }); + + return Object.fromEntries(entries) as ProxyProviderRecord; +}; + +export const useProxyStore = create((set, get) => ({ + data: null, + hydration: "none", + lastUpdated: null, + lastProfileId: null, + liveFetchRequestId: 0, + lastAppliedFetchId: 0, + pendingProfileId: null, + pendingSnapshotFetchId: null, + setSnapshot(snapshot, profileId) { + const stateBefore = get(); + + set((state) => ({ + data: snapshot, + hydration: "snapshot", + lastUpdated: null, + pendingProfileId: profileId, + pendingSnapshotFetchId: state.liveFetchRequestId, + })); + + const hasLiveHydration = + stateBefore.hydration === "live" && + stateBefore.lastProfileId === profileId; + + if (profileId && !hasLiveHydration) { + void fetchLiveProxies().catch((error) => { + console.warn( + "[ProxyStore] Failed to bootstrap live proxies from snapshot:", + error, + ); + scheduleBootstrapLiveFetch(800); + }); + } + }, + setLive(payload) { + const state = get(); + const emittedAt = payload.emittedAt ?? Date.now(); + + if ( + state.hydration === "live" && + state.lastUpdated !== null && + emittedAt <= state.lastUpdated + ) { + return; + } + + const providersRecord = + normalizeProviderPayload(payload.providers) ?? getCachedProxyProviders(); + + if (providersRecord) { + setCachedProxyProviders(providersRecord); + } + + const view = buildProxyView(payload.proxies, providersRecord); + const nextProfileId = payload.profileId ?? state.lastProfileId; + + set((current) => ({ + data: view, + hydration: "live", + lastUpdated: emittedAt, + lastProfileId: nextProfileId ?? null, + lastAppliedFetchId: current.liveFetchRequestId, + pendingProfileId: null, + pendingSnapshotFetchId: null, + })); + }, + startLiveFetch() { + let nextRequestId = 0; + set((state) => { + nextRequestId = state.liveFetchRequestId + 1; + return { + liveFetchRequestId: nextRequestId, + }; + }); + return nextRequestId; + }, + completeLiveFetch(requestId, view) { + const state = get(); + if (requestId <= state.lastAppliedFetchId) { + return; + } + + const shouldAdoptPending = + state.pendingProfileId !== null && + requestId >= (state.pendingSnapshotFetchId ?? 0); + + set({ + data: view, + hydration: "live", + lastUpdated: Date.now(), + lastProfileId: shouldAdoptPending + ? state.pendingProfileId + : state.lastProfileId, + lastAppliedFetchId: requestId, + pendingProfileId: shouldAdoptPending ? null : state.pendingProfileId, + pendingSnapshotFetchId: shouldAdoptPending + ? 
null + : state.pendingSnapshotFetchId, + }); + }, + clearPendingProfile() { + set({ + pendingProfileId: null, + pendingSnapshotFetchId: null, + }); + }, + reset() { + set({ + data: null, + hydration: "none", + lastUpdated: null, + lastProfileId: null, + liveFetchRequestId: 0, + lastAppliedFetchId: 0, + pendingProfileId: null, + pendingSnapshotFetchId: null, + }); + scheduleBootstrapLiveFetch(200); + }, +})); + +const liveApplyQueue = new AsyncEventQueue(); +let pendingLivePayload: ProxiesUpdatedPayload | null = null; +let liveApplyScheduled = false; + +const scheduleLiveApply = () => { + if (liveApplyScheduled) return; + liveApplyScheduled = true; + + const dispatch = () => { + liveApplyScheduled = false; + const payload = pendingLivePayload; + pendingLivePayload = null; + if (!payload) return; + + liveApplyQueue.enqueue(async () => { + await nextTick(); + useProxyStore.getState().setLive(payload); + }); + }; + + if ( + typeof window !== "undefined" && + typeof window.requestAnimationFrame === "function" + ) { + window.requestAnimationFrame(dispatch); + } else { + setTimeout(dispatch, 16); + } +}; + +export const applyLiveProxyPayload = (payload: ProxiesUpdatedPayload) => { + pendingLivePayload = payload; + scheduleLiveApply(); +}; + +export const fetchLiveProxies = async () => { + const requestId = useProxyStore.getState().startLiveFetch(); + const view = await calcuProxies(); + useProxyStore.getState().completeLiveFetch(requestId, view); +}; + +const MAX_BOOTSTRAP_ATTEMPTS = 5; +const BOOTSTRAP_BASE_DELAY_MS = 600; +let bootstrapAttempts = 0; +let bootstrapTimer: number | null = null; + +function attemptBootstrapLiveFetch() { + const state = useProxyStore.getState(); + if (state.hydration === "live") { + bootstrapAttempts = 0; + return; + } + + if (bootstrapAttempts >= MAX_BOOTSTRAP_ATTEMPTS) { + return; + } + + const attemptNumber = ++bootstrapAttempts; + + void fetchLiveProxies() + .then(() => { + bootstrapAttempts = 0; + }) + .catch((error) => { + console.warn( + `[ProxyStore] Bootstrap live fetch attempt ${attemptNumber} failed:`, + error, + ); + if (attemptNumber < MAX_BOOTSTRAP_ATTEMPTS) { + scheduleBootstrapLiveFetch(BOOTSTRAP_BASE_DELAY_MS * attemptNumber); + } + }); +} + +function scheduleBootstrapLiveFetch(delay = 0) { + if (typeof window === "undefined") { + return; + } + + if (bootstrapTimer !== null) { + window.clearTimeout(bootstrapTimer); + bootstrapTimer = null; + } + + bootstrapTimer = window.setTimeout(() => { + bootstrapTimer = null; + attemptBootstrapLiveFetch(); + }, delay); +} + +if (typeof window !== "undefined") { + void nextTick().then(() => scheduleBootstrapLiveFetch(0)); +} diff --git a/src/utils/asyncQueue.ts b/src/utils/asyncQueue.ts new file mode 100644 index 000000000..927faa837 --- /dev/null +++ b/src/utils/asyncQueue.ts @@ -0,0 +1,31 @@ +export class AsyncEventQueue { + private tail: Promise = Promise.resolve(); + + enqueue(task: () => Promise | void) { + this.tail = this.tail + .then(async () => { + await task(); + }) + .catch((error) => { + console.error("AsyncEventQueue task failed", error); + }); + } + + clear() { + this.tail = Promise.resolve(); + } +} + +export const nextTick = () => + new Promise((resolve) => { + if (typeof queueMicrotask === "function") { + queueMicrotask(resolve); + } else { + Promise.resolve().then(() => resolve()); + } + }); + +export const afterPaint = () => + new Promise((resolve) => { + requestAnimationFrame(() => resolve()); + }); diff --git a/src/utils/proxy-snapshot.ts b/src/utils/proxy-snapshot.ts new file mode 100644 
index 000000000..2e451db2f
--- /dev/null
+++ b/src/utils/proxy-snapshot.ts
@@ -0,0 +1,205 @@
+import yaml from "js-yaml";
+
+const createProxyItem = (
+  name: string,
+  partial: Partial<IProxyItem> = {},
+): IProxyItem => ({
+  name,
+  type: partial.type ?? "unknown",
+  udp: partial.udp ?? false,
+  xudp: partial.xudp ?? false,
+  tfo: partial.tfo ?? false,
+  mptcp: partial.mptcp ?? false,
+  smux: partial.smux ?? false,
+  history: [],
+  provider: partial.provider,
+  testUrl: partial.testUrl,
+  hidden: partial.hidden,
+  icon: partial.icon,
+  fixed: partial.fixed,
+});
+
+const createGroupItem = (
+  name: string,
+  all: IProxyItem[],
+  partial: Partial<IProxyGroupItem> = {},
+): IProxyGroupItem => {
+  const rest = { ...partial } as Partial<IProxyItem>;
+  delete (rest as Partial<IProxyGroupItem>).all;
+  const base = createProxyItem(name, rest);
+  return {
+    ...base,
+    all,
+    now: partial.now ?? base.now,
+  };
+};
+
+const ensureProxyItem = (
+  map: Map<string, IProxyItem>,
+  name: string,
+  source?: Partial<IProxyItem>,
+) => {
+  const key = String(name);
+  if (map.has(key)) return map.get(key)!;
+  const item = createProxyItem(key, source);
+  map.set(key, item);
+  return item;
+};
+
+const parseProxyEntry = (entry: any): IProxyItem | null => {
+  if (!entry || typeof entry !== "object") return null;
+  const name = entry.name || entry.uid || entry.id;
+  if (!name) return null;
+  return createProxyItem(String(name), {
+    type: entry.type ? String(entry.type) : undefined,
+    udp: Boolean(entry.udp),
+    xudp: Boolean(entry.xudp),
+    tfo: Boolean(entry.tfo),
+    mptcp: Boolean(entry.mptcp),
+    smux: Boolean(entry.smux),
+    testUrl: entry.test_url || entry.testUrl,
+  });
+};
+
+const isNonEmptyString = (value: unknown): value is string =>
+  typeof value === "string" && value.trim().length > 0;
+
+const parseProxyGroup = (
+  entry: any,
+  proxyMap: Map<string, IProxyItem>,
+): IProxyGroupItem | null => {
+  if (!entry || typeof entry !== "object") return null;
+  const name = entry.name;
+  if (!name) return null;
+
+  const rawProxies: unknown[] = Array.isArray(entry.proxies)
+    ? entry.proxies
+    : [];
+
+  const proxyRefs: string[] = rawProxies
+    .filter(isNonEmptyString)
+    .map((item) => item.trim());
+
+  const uniqueNames: string[] = Array.from(new Set(proxyRefs));
+
+  const all = uniqueNames.map((proxyName) =>
+    ensureProxyItem(proxyMap, proxyName),
+  );
+
+  return createGroupItem(String(name), all, {
+    type: entry.type ? String(entry.type) : "Selector",
+    provider: entry.provider,
+    testUrl: entry.testUrl || entry.test_url,
+    now: typeof entry.now === "string" ? entry.now : undefined,
+  });
+};
+
+const mapRecords = (
+  proxies: Map<string, IProxyItem>,
+  groups: IProxyGroupItem[],
+  extra: IProxyItem[] = [],
+): Record<string, IProxyItem> => {
+  const result: Record<string, IProxyItem> = {};
+  proxies.forEach((item, key) => {
+    result[key] = item;
+  });
+  groups.forEach((group) => {
+    result[group.name] = group as unknown as IProxyItem;
+  });
+  extra.forEach((item) => {
+    result[item.name] = item;
+  });
+  return result;
+};
+
+export const createProxySnapshotFromProfile = (
+  yamlContent: string,
+): {
+  global: IProxyGroupItem;
+  direct: IProxyItem;
+  groups: IProxyGroupItem[];
+  records: Record<string, IProxyItem>;
+  proxies: IProxyItem[];
+} | null => {
+  let parsed: any;
+  try {
+    parsed = yaml.load(yamlContent);
+  } catch (error) {
+    console.warn("[ProxySnapshot] Failed to parse YAML:", error);
+    return null;
+  }
+
+  if (!parsed || typeof parsed !== "object") {
+    return null;
+  }
+
+  const proxyMap = new Map<string, IProxyItem>();
+
+  if (Array.isArray((parsed as any).proxies)) {
+    for (const entry of (parsed as any).proxies) {
+      const item = parseProxyEntry(entry);
+      if (item) {
+        proxyMap.set(item.name, item);
+      }
+    }
+  }
+
+  const proxyProviders = (parsed as any)["proxy-providers"];
+  if (proxyProviders && typeof proxyProviders === "object") {
+    for (const key of Object.keys(proxyProviders)) {
+      const provider = proxyProviders[key];
+      if (provider && Array.isArray(provider.proxies)) {
+        provider.proxies
+          .filter(
+            (proxyName: unknown): proxyName is string =>
+              typeof proxyName === "string",
+          )
+          .forEach((proxyName: string) => ensureProxyItem(proxyMap, proxyName));
+      }
+    }
+  }
+
+  const groups: IProxyGroupItem[] = [];
+  if (Array.isArray((parsed as any)["proxy-groups"])) {
+    for (const entry of (parsed as any)["proxy-groups"]) {
+      const groupItem = parseProxyGroup(entry, proxyMap);
+      if (groupItem) {
+        groups.push(groupItem);
+      }
+    }
+  }
+
+  const direct = createProxyItem("DIRECT", { type: "Direct" });
+  const reject = createProxyItem("REJECT", { type: "Reject" });
+
+  ensureProxyItem(proxyMap, direct.name, direct);
+  ensureProxyItem(proxyMap, reject.name, reject);
+
+  let global = groups.find((group) => group.name === "GLOBAL");
+  if (!global) {
+    const globalRefs = groups.flatMap((group) =>
+      group.all.map((proxy) => proxy.name),
+    );
+    const unique = Array.from(new Set(globalRefs));
+    const all = unique.map((name) => ensureProxyItem(proxyMap, name));
+    global = createGroupItem("GLOBAL", all, {
+      type: "Selector",
+      hidden: true,
+    });
+    groups.unshift(global);
+  }
+
+  const proxies = Array.from(proxyMap.values()).filter(
+    (item) => !groups.some((group) => group.name === item.name),
+  );
+
+  const records = mapRecords(proxyMap, groups, [direct, reject]);
+
+  return {
+    global,
+    direct,
+    groups,
+    records,
+    proxies,
+  };
+};
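
Reviewer note (illustration only, not part of the diff): a minimal sketch of how the new `AsyncEventQueue` helper is expected to behave. Tasks run strictly in enqueue order even when scheduled from synchronous code, and a rejected task is logged without blocking later tasks. The `delay` helper and the `@/utils/asyncQueue` import path are assumptions for this sketch.

```ts
import { AsyncEventQueue, nextTick } from "@/utils/asyncQueue";

// Hypothetical helper used only for this sketch.
const delay = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

const queue = new AsyncEventQueue();
const order: string[] = [];

// Enqueued synchronously, but executed one after another on the queue's tail promise.
queue.enqueue(async () => {
  await delay(50);
  order.push("first");
});
queue.enqueue(() => {
  order.push("second"); // runs only after "first" has settled
});
queue.enqueue(async () => {
  throw new Error("boom"); // caught and logged by the queue; later tasks still run
});
queue.enqueue(async () => {
  await nextTick();
  order.push("third"); // order is ["first", "second", "third"]
});
```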
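
Similarly, a hedged sketch of what `createProxySnapshotFromProfile` returns for a small profile, following the parsing rules above: proxies referenced by a group but never declared are materialized with type "unknown", DIRECT/REJECT placeholders are injected, and a synthetic GLOBAL selector is prepended when the profile has none. The import path is assumed.

```ts
import { createProxySnapshotFromProfile } from "@/utils/proxy-snapshot";

const profileYaml = `
proxies:
  - { name: node-a, type: ss, udp: true }
proxy-groups:
  - { name: Auto, type: url-test, proxies: [node-a, node-b] }
`;

const snapshot = createProxySnapshotFromProfile(profileYaml);
if (snapshot) {
  // "node-b" is referenced by the group but not declared, so it becomes a
  // placeholder item with type "unknown".
  console.log(snapshot.groups.map((g) => g.name)); // ["GLOBAL", "Auto"]
  console.log(snapshot.records["node-b"].type); // "unknown"
  console.log(snapshot.records["DIRECT"].type); // "Direct"
}
```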