From a869dbb441de8d1fb537e20772aee18c58ff5eff Mon Sep 17 00:00:00 2001 From: Tunglies <77394545+Tunglies@users.noreply.github.com> Date: Thu, 30 Oct 2025 18:11:04 +0800 Subject: [PATCH] Revert "refactor: profile switch (#5197)" This reverts commit c2dcd867228aae3f2fa0855226244cc701ba4f43. --- UPDATELOG.md | 2 - src-tauri/src/cmd/frontend.rs | 48 - src-tauri/src/cmd/mod.rs | 3 - src-tauri/src/cmd/profile.rs | 704 +++++++++------ src-tauri/src/cmd/profile_switch/driver.rs | 683 -------------- src-tauri/src/cmd/profile_switch/mod.rs | 34 - src-tauri/src/cmd/profile_switch/state.rs | 353 -------- .../src/cmd/profile_switch/validation.rs | 113 --- src-tauri/src/cmd/profile_switch/workflow.rs | 385 -------- .../cmd/profile_switch/workflow/cleanup.rs | 65 -- .../workflow/state_machine/context.rs | 178 ---- .../workflow/state_machine/core.rs | 284 ------ .../workflow/state_machine/mod.rs | 11 - .../workflow/state_machine/stages.rs | 597 ------------- src-tauri/src/core/handle.rs | 131 +-- src-tauri/src/core/manager/config.rs | 237 +---- src-tauri/src/core/notification.rs | 269 +----- src-tauri/src/lib.rs | 26 - src-tauri/src/utils/draft.rs | 7 - src/components/home/current-proxy-card.tsx | 22 +- src/components/proxy/provider-button.tsx | 310 +++---- src/components/proxy/proxy-groups.tsx | 122 +-- src/components/proxy/use-render-list.ts | 89 +- src/hooks/use-current-proxy.ts | 12 +- src/hooks/use-profiles.ts | 77 +- src/pages/_layout/useLayoutEvents.ts | 36 +- src/pages/profiles.tsx | 844 +++++++----------- src/providers/app-data-context.ts | 9 +- src/providers/app-data-provider.tsx | 719 +++------------ src/services/cmds.ts | 146 +-- src/services/noticeService.ts | 18 +- src/services/refresh.ts | 24 - src/stores/profile-store.ts | 59 -- src/stores/proxy-store.ts | 298 ------- src/utils/asyncQueue.ts | 31 - src/utils/proxy-snapshot.ts | 205 ----- 36 files changed, 1257 insertions(+), 5894 deletions(-) delete mode 100644 src-tauri/src/cmd/frontend.rs delete mode 100644 src-tauri/src/cmd/profile_switch/driver.rs delete mode 100644 src-tauri/src/cmd/profile_switch/mod.rs delete mode 100644 src-tauri/src/cmd/profile_switch/state.rs delete mode 100644 src-tauri/src/cmd/profile_switch/validation.rs delete mode 100644 src-tauri/src/cmd/profile_switch/workflow.rs delete mode 100644 src-tauri/src/cmd/profile_switch/workflow/cleanup.rs delete mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs delete mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs delete mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs delete mode 100644 src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs delete mode 100644 src/services/refresh.ts delete mode 100644 src/stores/profile-store.ts delete mode 100644 src/stores/proxy-store.ts delete mode 100644 src/utils/asyncQueue.ts delete mode 100644 src/utils/proxy-snapshot.ts diff --git a/UPDATELOG.md b/UPDATELOG.md index b37eca6e0..071d8ac15 100644 --- a/UPDATELOG.md +++ b/UPDATELOG.md @@ -30,7 +30,6 @@ - 修复悬浮跳转导航失效 - 修复小键盘热键映射错误 - 修复前端无法及时刷新操作状态 -- 修复切换订阅卡死
✨ 新增功能 @@ -78,7 +77,6 @@ - 优化首页当前节点对MATCH规则的支持 - 允许在 `界面设置` 修改 `悬浮跳转导航延迟` - 添加热键绑定错误的提示信息 -- 重构订阅切换,保证代理页面的及时刷新 - 在 macOS 10.15 及更高版本默认包含 Mihomo-go122,以解决 Intel 架构 Mac 无法运行内核的问题
diff --git a/src-tauri/src/cmd/frontend.rs b/src-tauri/src/cmd/frontend.rs deleted file mode 100644 index 8559c5899..000000000 --- a/src-tauri/src/cmd/frontend.rs +++ /dev/null @@ -1,48 +0,0 @@ -use super::CmdResult; -use crate::{logging, utils::logging::Type}; -use serde::Deserialize; - -#[derive(Debug, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct FrontendLogPayload { - pub level: Option, - pub message: String, - pub context: Option, -} - -#[tauri::command] -pub fn frontend_log(payload: FrontendLogPayload) -> CmdResult<()> { - let level = payload.level.as_deref().unwrap_or("info"); - match level { - "trace" | "debug" => logging!( - debug, - Type::Frontend, - "[frontend] {}", - payload.message.as_str() - ), - "warn" => logging!( - warn, - Type::Frontend, - "[frontend] {}", - payload.message.as_str() - ), - "error" => logging!( - error, - Type::Frontend, - "[frontend] {}", - payload.message.as_str() - ), - _ => logging!( - info, - Type::Frontend, - "[frontend] {}", - payload.message.as_str() - ), - } - - if let Some(context) = payload.context { - logging!(info, Type::Frontend, "[frontend] context: {}", context); - } - - Ok(()) -} diff --git a/src-tauri/src/cmd/mod.rs b/src-tauri/src/cmd/mod.rs index 2cf768981..6c7486873 100644 --- a/src-tauri/src/cmd/mod.rs +++ b/src-tauri/src/cmd/mod.rs @@ -7,12 +7,10 @@ pub type CmdResult = Result; pub mod app; pub mod backup; pub mod clash; -pub mod frontend; pub mod lightweight; pub mod media_unlock_checker; pub mod network; pub mod profile; -mod profile_switch; pub mod proxy; pub mod runtime; pub mod save_profile; @@ -27,7 +25,6 @@ pub mod webdav; pub use app::*; pub use backup::*; pub use clash::*; -pub use frontend::*; pub use lightweight::*; pub use media_unlock_checker::*; pub use network::*; diff --git a/src-tauri/src/cmd/profile.rs b/src-tauri/src/cmd/profile.rs index 37cdd2e9f..151779363 100644 --- a/src-tauri/src/cmd/profile.rs +++ b/src-tauri/src/cmd/profile.rs @@ -1,4 +1,5 @@ -use super::{CmdResult, StringifyErr, profile_switch}; +use super::CmdResult; +use super::StringifyErr; use crate::{ config::{ Config, IProfiles, PrfItem, PrfOption, @@ -8,191 +9,77 @@ use crate::{ }, profiles_append_item_safe, }, - core::{CoreManager, handle, timer::Timer}, - feat, logging, ret_err, + core::{CoreManager, handle, timer::Timer, tray::Tray}, + feat, logging, + process::AsyncHandler, + ret_err, utils::{dirs, help, logging::Type}, }; -use once_cell::sync::Lazy; -use parking_lot::RwLock; use smartstring::alias::String; -use std::sync::atomic::{AtomicU64, Ordering}; -use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; +use std::time::Duration; -use crate::cmd::profile_switch::{ProfileSwitchStatus, SwitchResultEvent}; +// 全局请求序列号跟踪,用于避免队列化执行 +static CURRENT_REQUEST_SEQUENCE: AtomicU64 = AtomicU64::new(0); -#[derive(Clone)] -struct CachedProfiles { - snapshot: IProfiles, - captured_at: Instant, -} +static CURRENT_SWITCHING_PROFILE: AtomicBool = AtomicBool::new(false); -static PROFILES_CACHE: Lazy>> = Lazy::new(|| RwLock::new(None)); - -#[derive(Default)] -struct SnapshotMetrics { - fast_hits: AtomicU64, - cache_hits: AtomicU64, - blocking_hits: AtomicU64, - refresh_scheduled: AtomicU64, - last_log_ms: AtomicU64, -} - -static SNAPSHOT_METRICS: Lazy = Lazy::new(SnapshotMetrics::default); - -/// Store the latest snapshot so cache consumers can reuse it without hitting the lock again. 
-fn update_profiles_cache(snapshot: &IProfiles) { - *PROFILES_CACHE.write() = Some(CachedProfiles { - snapshot: snapshot.clone(), - captured_at: Instant::now(), - }); -} - -/// Return the cached snapshot and how old it is, if present. -fn cached_profiles_snapshot() -> Option<(IProfiles, u128)> { - PROFILES_CACHE.read().as_ref().map(|entry| { - ( - entry.snapshot.clone(), - entry.captured_at.elapsed().as_millis(), - ) - }) -} - -/// Return the latest profiles snapshot, preferring cached data so UI requests never block. #[tauri::command] pub async fn get_profiles() -> CmdResult { - let started_at = Instant::now(); + // 策略1: 尝试快速获取latest数据 + let latest_result = tokio::time::timeout(Duration::from_millis(500), async { + let profiles = Config::profiles().await; + let latest = profiles.latest_ref(); + IProfiles { + current: latest.current.clone(), + items: latest.items.clone(), + } + }) + .await; - // Resolve snapshots in three tiers so UI reads never stall on a mutex: - // 1) try a non-blocking read, 2) fall back to the last cached copy while a - // writer holds the lock, 3) block and refresh the cache as a final resort. - if let Some(snapshot) = read_profiles_snapshot_nonblocking().await { - let item_count = snapshot - .items - .as_ref() - .map(|items| items.len()) - .unwrap_or(0); - update_profiles_cache(&snapshot); - SNAPSHOT_METRICS.fast_hits.fetch_add(1, Ordering::Relaxed); - logging!( - debug, - Type::Cmd, - "[Profiles] Snapshot served (path=fast, items={}, elapsed={}ms)", - item_count, - started_at.elapsed().as_millis() - ); - maybe_log_snapshot_metrics(); - return Ok(snapshot); + match latest_result { + Ok(profiles) => { + logging!(info, Type::Cmd, "快速获取配置列表成功"); + return Ok(profiles); + } + Err(_) => { + logging!(warn, Type::Cmd, "快速获取配置超时(500ms)"); + } } - if let Some((cached, age_ms)) = cached_profiles_snapshot() { - SNAPSHOT_METRICS.cache_hits.fetch_add(1, Ordering::Relaxed); - logging!( - debug, - Type::Cmd, - "[Profiles] Served cached snapshot while lock busy (age={}ms)", - age_ms - ); - schedule_profiles_snapshot_refresh(); - maybe_log_snapshot_metrics(); - return Ok(cached); + // 策略2: 如果快速获取失败,尝试获取data() + let data_result = tokio::time::timeout(Duration::from_secs(2), async { + let profiles = Config::profiles().await; + let data = profiles.latest_ref(); + IProfiles { + current: data.current.clone(), + items: data.items.clone(), + } + }) + .await; + + match data_result { + Ok(profiles) => { + logging!(info, Type::Cmd, "获取draft配置列表成功"); + return Ok(profiles); + } + Err(join_err) => { + logging!( + error, + Type::Cmd, + "获取draft配置任务失败或超时: {}", + join_err + ); + } } - let snapshot = read_profiles_snapshot_blocking().await; - let item_count = snapshot - .items - .as_ref() - .map(|items| items.len()) - .unwrap_or(0); - update_profiles_cache(&snapshot); - SNAPSHOT_METRICS - .blocking_hits - .fetch_add(1, Ordering::Relaxed); - logging!( - debug, - Type::Cmd, - "[Profiles] Snapshot served (path=blocking, items={}, elapsed={}ms)", - item_count, - started_at.elapsed().as_millis() - ); - maybe_log_snapshot_metrics(); - Ok(snapshot) + // 策略3: fallback,尝试重新创建配置 + logging!(warn, Type::Cmd, "所有获取配置策略都失败,尝试fallback"); + + Ok(IProfiles::new().await) } -/// Try to grab the latest profile data without waiting for the writer. -async fn read_profiles_snapshot_nonblocking() -> Option { - let profiles = Config::profiles().await; - profiles.try_latest_ref().map(|guard| (**guard).clone()) -} - -/// Fall back to a blocking read when we absolutely must have fresh data. 
-async fn read_profiles_snapshot_blocking() -> IProfiles { - let profiles = Config::profiles().await; - let guard = profiles.latest_ref(); - (**guard).clone() -} - -/// Schedule a background cache refresh once the exclusive lock becomes available again. -fn schedule_profiles_snapshot_refresh() { - crate::process::AsyncHandler::spawn(|| async { - // Once the lock is released we refresh the cached snapshot so the next - // request observes the latest data instead of the stale fallback. - SNAPSHOT_METRICS - .refresh_scheduled - .fetch_add(1, Ordering::Relaxed); - let snapshot = read_profiles_snapshot_blocking().await; - update_profiles_cache(&snapshot); - logging!( - debug, - Type::Cmd, - "[Profiles] Cache refreshed after busy snapshot" - ); - }); -} - -fn maybe_log_snapshot_metrics() { - const LOG_INTERVAL_MS: u64 = 5_000; - let now_ms = current_millis(); - let last_ms = SNAPSHOT_METRICS.last_log_ms.load(Ordering::Relaxed); - if now_ms.saturating_sub(last_ms) < LOG_INTERVAL_MS { - return; - } - - if SNAPSHOT_METRICS - .last_log_ms - .compare_exchange(last_ms, now_ms, Ordering::SeqCst, Ordering::Relaxed) - .is_err() - { - return; - } - - let fast = SNAPSHOT_METRICS.fast_hits.swap(0, Ordering::SeqCst); - let cache = SNAPSHOT_METRICS.cache_hits.swap(0, Ordering::SeqCst); - let blocking = SNAPSHOT_METRICS.blocking_hits.swap(0, Ordering::SeqCst); - let refresh = SNAPSHOT_METRICS.refresh_scheduled.swap(0, Ordering::SeqCst); - - if fast == 0 && cache == 0 && blocking == 0 && refresh == 0 { - return; - } - - logging!( - debug, - Type::Cmd, - "[Profiles][Metrics] 5s window => fast={}, cache={}, blocking={}, refresh_jobs={}", - fast, - cache, - blocking, - refresh - ); -} - -fn current_millis() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or(Duration::ZERO) - .as_millis() as u64 -} - -/// Run the optional enhancement pipeline and refresh Clash when it completes. +/// 增强配置文件 #[tauri::command] pub async fn enhance_profiles() -> CmdResult { match feat::enhance_profiles().await { @@ -206,106 +93,79 @@ pub async fn enhance_profiles() -> CmdResult { Ok(()) } -/// Download a profile from the given URL and persist it to the local catalog. 
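Reviewer sketch — the deleted `get_profiles` above resolves snapshots in three tiers (non-blocking read, cached fallback, blocking refresh) so UI reads never stall on the config lock. A minimal standalone illustration of that tiering, assuming `parking_lot` and `once_cell`; `Snapshot`, `SOURCE`, `CACHE`, and `get_snapshot` are hypothetical names, and a `String` stands in for the real profiles state:

```rust
use once_cell::sync::Lazy;
use parking_lot::RwLock;
use std::time::Instant;

#[derive(Clone)]
struct Snapshot {
    data: String, // stand-in for the cloned profiles state
    captured_at: Instant,
}

static SOURCE: Lazy<RwLock<String>> = Lazy::new(|| RwLock::new("profiles".into()));
static CACHE: Lazy<RwLock<Option<Snapshot>>> = Lazy::new(|| RwLock::new(None));

fn refresh_cache(data: String) -> Snapshot {
    let snap = Snapshot { data, captured_at: Instant::now() };
    *CACHE.write() = Some(snap.clone());
    snap
}

fn get_snapshot() -> Snapshot {
    // Tier 1: non-blocking read succeeds unless a writer holds the lock.
    if let Some(guard) = SOURCE.try_read() {
        return refresh_cache(guard.clone());
    }
    // Tier 2: serve the last cached copy while the lock is busy.
    if let Some(cached) = CACHE.read().clone() {
        return cached;
    }
    // Tier 3: nothing cached yet, so pay the blocking cost once.
    let data = SOURCE.read().clone();
    refresh_cache(data)
}
```

The deleted code additionally schedules an async cache refresh after a tier-2 hit, so the next caller observes fresh data instead of the stale fallback.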
+/// 导入配置文件 #[tauri::command] pub async fn import_profile(url: std::string::String, option: Option) -> CmdResult { - logging!(info, Type::Cmd, "[Profile Import] Begin: {}", url); + logging!(info, Type::Cmd, "[导入订阅] 开始导入: {}", url); - // Rely on PrfItem::from_url internal timeout/retry logic instead of wrapping with tokio::time::timeout + // 直接依赖 PrfItem::from_url 自身的超时/重试逻辑,不再使用 tokio::time::timeout 包裹 let item = match PrfItem::from_url(&url, None, None, option).await { Ok(it) => { - logging!( - info, - Type::Cmd, - "[Profile Import] Download complete; saving configuration" - ); + logging!(info, Type::Cmd, "[导入订阅] 下载完成,开始保存配置"); it } Err(e) => { - logging!(error, Type::Cmd, "[Profile Import] Download failed: {}", e); - return Err(format!("Profile import failed: {}", e).into()); + logging!(error, Type::Cmd, "[导入订阅] 下载失败: {}", e); + return Err(format!("导入订阅失败: {}", e).into()); } }; match profiles_append_item_safe(item.clone()).await { Ok(_) => match profiles_save_file_safe().await { Ok(_) => { - logging!( - info, - Type::Cmd, - "[Profile Import] Configuration file saved successfully" - ); + logging!(info, Type::Cmd, "[导入订阅] 配置文件保存成功"); } Err(e) => { - logging!( - error, - Type::Cmd, - "[Profile Import] Failed to save configuration file: {}", - e - ); + logging!(error, Type::Cmd, "[导入订阅] 保存配置文件失败: {}", e); } }, Err(e) => { - logging!( - error, - Type::Cmd, - "[Profile Import] Failed to persist configuration: {}", - e - ); - return Err(format!("Profile import failed: {}", e).into()); + logging!(error, Type::Cmd, "[导入订阅] 保存配置失败: {}", e); + return Err(format!("导入订阅失败: {}", e).into()); } } - // Immediately emit a configuration change notification + // 立即发送配置变更通知 if let Some(uid) = &item.uid { - logging!( - info, - Type::Cmd, - "[Profile Import] Emitting configuration change event: {}", - uid - ); + logging!(info, Type::Cmd, "[导入订阅] 发送配置变更通知: {}", uid); handle::Handle::notify_profile_changed(uid.clone()); } - // Save configuration asynchronously and emit a global notification + // 异步保存配置文件并发送全局通知 let uid_clone = item.uid.clone(); if let Some(uid) = uid_clone { - // Delay notification to ensure the file is fully written + // 延迟发送,确保文件已完全写入 tokio::time::sleep(Duration::from_millis(100)).await; handle::Handle::notify_profile_changed(uid); } - logging!(info, Type::Cmd, "[Profile Import] Completed: {}", url); + logging!(info, Type::Cmd, "[导入订阅] 导入完成: {}", url); Ok(()) } -/// Move a profile in the list relative to another entry. +/// 调整profile的顺序 #[tauri::command] pub async fn reorder_profile(active_id: String, over_id: String) -> CmdResult { match profiles_reorder_safe(active_id, over_id).await { Ok(_) => { - log::info!(target: "app", "Reordered profiles"); + log::info!(target: "app", "重新排序配置文件"); Ok(()) } Err(err) => { - log::error!(target: "app", "Failed to reorder profiles: {}", err); - Err(format!("Failed to reorder profiles: {}", err).into()) + log::error!(target: "app", "重新排序配置文件失败: {}", err); + Err(format!("重新排序配置文件失败: {}", err).into()) } } } -/// Create a new profile entry and optionally write its backing file. 
+/// 创建新的profile +/// 创建一个新的配置文件 #[tauri::command] pub async fn create_profile(item: PrfItem, file_data: Option) -> CmdResult { match profiles_append_item_with_filedata_safe(item.clone(), file_data).await { Ok(_) => { - // Emit configuration change notification + // 发送配置变更通知 if let Some(uid) = &item.uid { - logging!( - info, - Type::Cmd, - "[Profile Create] Emitting configuration change event: {}", - uid - ); + logging!(info, Type::Cmd, "[创建订阅] 发送配置变更通知: {}", uid); handle::Handle::notify_profile_changed(uid.clone()); } Ok(()) @@ -317,7 +177,7 @@ pub async fn create_profile(item: PrfItem, file_data: Option) -> CmdResu } } -/// Force-refresh a profile from its remote source, if available. +/// 更新配置文件 #[tauri::command] pub async fn update_profile(index: String, option: Option) -> CmdResult { match feat::update_profile(index, option, Some(true)).await { @@ -329,11 +189,11 @@ pub async fn update_profile(index: String, option: Option) -> CmdResu } } -/// Remove a profile and refresh the running configuration if necessary. +/// 删除配置文件 #[tauri::command] pub async fn delete_profile(index: String) -> CmdResult { println!("delete_profile: {}", index); - // Use send-safe helper function + // 使用Send-safe helper函数 let should_update = profiles_delete_item_safe(index.clone()) .await .stringify_err()?; @@ -343,13 +203,8 @@ pub async fn delete_profile(index: String) -> CmdResult { match CoreManager::global().update_config().await { Ok(_) => { handle::Handle::refresh_clash(); - // Emit configuration change notification - logging!( - info, - Type::Cmd, - "[Profile Delete] Emitting configuration change event: {}", - index - ); + // 发送配置变更通知 + logging!(info, Type::Cmd, "[删除订阅] 发送配置变更通知: {}", index); handle::Handle::notify_profile_changed(index); } Err(e) => { @@ -361,28 +216,361 @@ pub async fn delete_profile(index: String) -> CmdResult { Ok(()) } -/// Apply partial profile list updates through the switching workflow. 
+/// 验证新配置文件的语法 +async fn validate_new_profile(new_profile: &String) -> Result<(), ()> { + logging!(info, Type::Cmd, "正在切换到新配置: {}", new_profile); + + // 获取目标配置文件路径 + let config_file_result = { + let profiles_config = Config::profiles().await; + let profiles_data = profiles_config.latest_ref(); + match profiles_data.get_item(new_profile) { + Ok(item) => { + if let Some(file) = &item.file { + let path = dirs::app_profiles_dir().map(|dir| dir.join(file.as_str())); + path.ok() + } else { + None + } + } + Err(e) => { + logging!(error, Type::Cmd, "获取目标配置信息失败: {}", e); + None + } + } + }; + + // 如果获取到文件路径,检查YAML语法 + if let Some(file_path) = config_file_result { + if !file_path.exists() { + logging!( + error, + Type::Cmd, + "目标配置文件不存在: {}", + file_path.display() + ); + handle::Handle::notice_message( + "config_validate::file_not_found", + format!("{}", file_path.display()), + ); + return Err(()); + } + + // 超时保护 + let file_read_result = tokio::time::timeout( + Duration::from_secs(5), + tokio::fs::read_to_string(&file_path), + ) + .await; + + match file_read_result { + Ok(Ok(content)) => { + let yaml_parse_result = AsyncHandler::spawn_blocking(move || { + serde_yaml_ng::from_str::(&content) + }) + .await; + + match yaml_parse_result { + Ok(Ok(_)) => { + logging!(info, Type::Cmd, "目标配置文件语法正确"); + Ok(()) + } + Ok(Err(err)) => { + let error_msg = format!(" {err}"); + logging!( + error, + Type::Cmd, + "目标配置文件存在YAML语法错误:{}", + error_msg + ); + handle::Handle::notice_message( + "config_validate::yaml_syntax_error", + error_msg.clone(), + ); + Err(()) + } + Err(join_err) => { + let error_msg = format!("YAML解析任务失败: {join_err}"); + logging!(error, Type::Cmd, "{}", error_msg); + handle::Handle::notice_message( + "config_validate::yaml_parse_error", + error_msg.clone(), + ); + Err(()) + } + } + } + Ok(Err(err)) => { + let error_msg = format!("无法读取目标配置文件: {err}"); + logging!(error, Type::Cmd, "{}", error_msg); + handle::Handle::notice_message( + "config_validate::file_read_error", + error_msg.clone(), + ); + Err(()) + } + Err(_) => { + let error_msg = "读取配置文件超时(5秒)".to_string(); + logging!(error, Type::Cmd, "{}", error_msg); + handle::Handle::notice_message( + "config_validate::file_read_timeout", + error_msg.clone(), + ); + Err(()) + } + } + } else { + Ok(()) + } +} + +/// 执行配置更新并处理结果 +async fn restore_previous_profile(prev_profile: String) -> CmdResult<()> { + logging!(info, Type::Cmd, "尝试恢复到之前的配置: {}", prev_profile); + let restore_profiles = IProfiles { + current: Some(prev_profile), + items: None, + }; + Config::profiles() + .await + .draft_mut() + .patch_config(restore_profiles) + .stringify_err()?; + Config::profiles().await.apply(); + crate::process::AsyncHandler::spawn(|| async move { + if let Err(e) = profiles_save_file_safe().await { + log::warn!(target: "app", "异步保存恢复配置文件失败: {e}"); + } + }); + logging!(info, Type::Cmd, "成功恢复到之前的配置"); + Ok(()) +} + +async fn handle_success(current_sequence: u64, current_value: Option) -> CmdResult { + let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); + if current_sequence < latest_sequence { + logging!( + info, + Type::Cmd, + "内核操作后发现更新的请求 (序列号: {} < {}),忽略当前结果", + current_sequence, + latest_sequence + ); + Config::profiles().await.discard(); + return Ok(false); + } + + logging!( + info, + Type::Cmd, + "配置更新成功,序列号: {}", + current_sequence + ); + Config::profiles().await.apply(); + handle::Handle::refresh_clash(); + + if let Err(e) = Tray::global().update_tooltip().await { + log::warn!(target: "app", "异步更新托盘提示失败: {e}"); + } + + if let Err(e) = 
Tray::global().update_menu().await { + log::warn!(target: "app", "异步更新托盘菜单失败: {e}"); + } + + if let Err(e) = profiles_save_file_safe().await { + log::warn!(target: "app", "异步保存配置文件失败: {e}"); + } + + if let Some(current) = ¤t_value { + logging!( + info, + Type::Cmd, + "向前端发送配置变更事件: {}, 序列号: {}", + current, + current_sequence + ); + handle::Handle::notify_profile_changed(current.clone()); + } + + CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); + Ok(true) +} + +async fn handle_validation_failure( + error_msg: String, + current_profile: Option, +) -> CmdResult { + logging!(warn, Type::Cmd, "配置验证失败: {}", error_msg); + Config::profiles().await.discard(); + if let Some(prev_profile) = current_profile { + restore_previous_profile(prev_profile).await?; + } + handle::Handle::notice_message("config_validate::error", error_msg); + CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); + Ok(false) +} + +async fn handle_update_error(e: E, current_sequence: u64) -> CmdResult { + logging!( + warn, + Type::Cmd, + "更新过程发生错误: {}, 序列号: {}", + e, + current_sequence + ); + Config::profiles().await.discard(); + handle::Handle::notice_message("config_validate::boot_error", e.to_string()); + CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); + Ok(false) +} + +async fn handle_timeout(current_profile: Option, current_sequence: u64) -> CmdResult { + let timeout_msg = "配置更新超时(30秒),可能是配置验证或核心通信阻塞"; + logging!( + error, + Type::Cmd, + "{}, 序列号: {}", + timeout_msg, + current_sequence + ); + Config::profiles().await.discard(); + if let Some(prev_profile) = current_profile { + restore_previous_profile(prev_profile).await?; + } + handle::Handle::notice_message("config_validate::timeout", timeout_msg); + CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); + Ok(false) +} + +async fn perform_config_update( + current_sequence: u64, + current_value: Option, + current_profile: Option, +) -> CmdResult { + logging!( + info, + Type::Cmd, + "开始内核配置更新,序列号: {}", + current_sequence + ); + let update_result = tokio::time::timeout( + Duration::from_secs(30), + CoreManager::global().update_config(), + ) + .await; + + match update_result { + Ok(Ok((true, _))) => handle_success(current_sequence, current_value).await, + Ok(Ok((false, error_msg))) => handle_validation_failure(error_msg, current_profile).await, + Ok(Err(e)) => handle_update_error(e, current_sequence).await, + Err(_) => handle_timeout(current_profile, current_sequence).await, + } +} + +/// 修改profiles的配置 #[tauri::command] pub async fn patch_profiles_config(profiles: IProfiles) -> CmdResult { - profile_switch::patch_profiles_config(profiles).await + if CURRENT_SWITCHING_PROFILE.load(Ordering::SeqCst) { + logging!(info, Type::Cmd, "当前正在切换配置,放弃请求"); + return Ok(false); + } + CURRENT_SWITCHING_PROFILE.store(true, Ordering::SeqCst); + + // 为当前请求分配序列号 + let current_sequence = CURRENT_REQUEST_SEQUENCE.fetch_add(1, Ordering::SeqCst) + 1; + let target_profile = profiles.current.clone(); + + logging!( + info, + Type::Cmd, + "开始修改配置文件,请求序列号: {}, 目标profile: {:?}", + current_sequence, + target_profile + ); + + let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); + if current_sequence < latest_sequence { + logging!( + info, + Type::Cmd, + "获取锁后发现更新的请求 (序列号: {} < {}),放弃当前请求", + current_sequence, + latest_sequence + ); + return Ok(false); + } + + // 保存当前配置,以便在验证失败时恢复 + let current_profile = Config::profiles().await.latest_ref().current.clone(); + logging!(info, Type::Cmd, "当前配置: {:?}", current_profile); + + // 如果要切换配置,先检查目标配置文件是否有语法错误 + if let 
Some(new_profile) = profiles.current.as_ref() + && current_profile.as_ref() != Some(new_profile) + && validate_new_profile(new_profile).await.is_err() + { + CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst); + return Ok(false); + } + + // 检查请求有效性 + let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); + if current_sequence < latest_sequence { + logging!( + info, + Type::Cmd, + "在核心操作前发现更新的请求 (序列号: {} < {}),放弃当前请求", + current_sequence, + latest_sequence + ); + return Ok(false); + } + + // 更新profiles配置 + logging!( + info, + Type::Cmd, + "正在更新配置草稿,序列号: {}", + current_sequence + ); + + let current_value = profiles.current.clone(); + + let _ = Config::profiles().await.draft_mut().patch_config(profiles); + + // 在调用内核前再次验证请求有效性 + let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst); + if current_sequence < latest_sequence { + logging!( + info, + Type::Cmd, + "在内核交互前发现更新的请求 (序列号: {} < {}),放弃当前请求", + current_sequence, + latest_sequence + ); + Config::profiles().await.discard(); + return Ok(false); + } + + perform_config_update(current_sequence, current_value, current_profile).await } -/// Switch to the provided profile index and wait for completion before returning. +/// 根据profile name修改profiles #[tauri::command] pub async fn patch_profiles_config_by_profile_index(profile_index: String) -> CmdResult { - profile_switch::patch_profiles_config_by_profile_index(profile_index).await + logging!(info, Type::Cmd, "切换配置到: {}", profile_index); + + let profiles = IProfiles { + current: Some(profile_index), + items: None, + }; + patch_profiles_config(profiles).await } -/// Enqueue a profile switch request and optionally notify on success. -#[tauri::command] -pub async fn switch_profile(profile_index: String, notify_success: bool) -> CmdResult { - profile_switch::switch_profile(profile_index, notify_success).await -} - -/// Update a specific profile item and refresh timers if its schedule changed. +/// 修改某个profile item的 #[tauri::command] pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult { - // Check for update_interval changes before saving + // 保存修改前检查是否有更新 update_interval let profiles = Config::profiles().await; let should_refresh_timer = if let Ok(old_profile) = profiles.latest_ref().get_item(&index) { let old_interval = old_profile.option.as_ref().and_then(|o| o.update_interval); @@ -401,19 +589,15 @@ pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult { .await .stringify_err()?; - // If the interval or auto-update flag changes, refresh the timer asynchronously + // 如果更新间隔或允许自动更新变更,异步刷新定时器 if should_refresh_timer { let index_clone = index.clone(); crate::process::AsyncHandler::spawn(move || async move { - logging!( - info, - Type::Timer, - "Timer interval changed; refreshing timer..." - ); + logging!(info, Type::Timer, "定时器更新间隔已变更,正在刷新定时器..."); if let Err(e) = crate::core::Timer::global().refresh().await { - logging!(error, Type::Timer, "Failed to refresh timer: {}", e); + logging!(error, Type::Timer, "刷新定时器失败: {}", e); } else { - // After refreshing successfully, emit a custom event without triggering a reload + // 刷新成功后发送自定义事件,不触发配置重载 crate::core::handle::Handle::notify_timer_updated(index_clone); } }); @@ -422,7 +606,7 @@ pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult { Ok(()) } -/// Open the profile file in the system viewer. 
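Reviewer sketch — the restored `patch_profiles_config` above relies on a single `AtomicU64` sequence: each request claims `fetch_add(1) + 1`, then re-reads the counter before every expensive step and bails out if a newer request has arrived. A compact illustration of that checkpoint pattern; `REQUEST_SEQ`, `still_latest`, and `switch` are hypothetical names:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

static REQUEST_SEQ: AtomicU64 = AtomicU64::new(0);

/// True while no newer request has claimed a larger sequence number.
fn still_latest(mine: u64) -> bool {
    mine >= REQUEST_SEQ.load(Ordering::SeqCst)
}

async fn switch(_profile: &str) -> bool {
    // Claim a sequence number; any later request will observe a larger one.
    let mine = REQUEST_SEQ.fetch_add(1, Ordering::SeqCst) + 1;

    // ... validate the target profile here ...
    if !still_latest(mine) {
        return false; // superseded before touching the draft
    }

    // ... patch the draft config here ...
    if !still_latest(mine) {
        return false; // superseded before the kernel call: discard the draft
    }

    // ... ask the core to reload and apply on success ...
    true
}
```

Note that the checks only detect staleness at the checkpoints themselves; the deleted driver/queue design replaced them with explicit per-request cancellation tokens.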
+/// 查看配置文件 #[tauri::command] pub async fn view_profile(index: String) -> CmdResult { let profiles = Config::profiles().await; @@ -444,7 +628,7 @@ pub async fn view_profile(index: String) -> CmdResult { help::open_file(path).stringify_err() } -/// Return the raw YAML contents for the given profile file. +/// 读取配置文件内容 #[tauri::command] pub async fn read_profile_file(index: String) -> CmdResult { let profiles = Config::profiles().await; @@ -454,22 +638,10 @@ pub async fn read_profile_file(index: String) -> CmdResult { Ok(data) } -/// Report the scheduled refresh timestamp (if any) for the profile timer. +/// 获取下一次更新时间 #[tauri::command] pub async fn get_next_update_time(uid: String) -> CmdResult> { let timer = Timer::global(); let next_time = timer.get_next_update_time(&uid).await; Ok(next_time) } - -/// Return the latest driver snapshot describing active and queued switch tasks. -#[tauri::command] -pub async fn get_profile_switch_status() -> CmdResult { - profile_switch::get_switch_status() -} - -/// Fetch switch result events newer than the provided sequence number. -#[tauri::command] -pub async fn get_profile_switch_events(after_sequence: u64) -> CmdResult> { - profile_switch::get_switch_events(after_sequence) -} diff --git a/src-tauri/src/cmd/profile_switch/driver.rs b/src-tauri/src/cmd/profile_switch/driver.rs deleted file mode 100644 index 8815458e8..000000000 --- a/src-tauri/src/cmd/profile_switch/driver.rs +++ /dev/null @@ -1,683 +0,0 @@ -use super::{ - CmdResult, - state::{ - ProfileSwitchStatus, SwitchCancellation, SwitchManager, SwitchRequest, SwitchResultStatus, - SwitchTaskStatus, current_millis, manager, - }, - workflow::{self, SwitchPanicInfo, SwitchStage}, -}; -use crate::{logging, utils::logging::Type}; -use futures::FutureExt; -use once_cell::sync::OnceCell; -use smartstring::alias::String as SmartString; -use std::{ - collections::{HashMap, VecDeque}, - panic::AssertUnwindSafe, - time::Duration, -}; -use tokio::{ - sync::{ - Mutex as AsyncMutex, - mpsc::{self, error::TrySendError}, - oneshot, - }, - time::{self, MissedTickBehavior}, -}; - -// Single shared queue so profile switches are executed sequentially and can -// collapse redundant requests for the same profile. -const SWITCH_QUEUE_CAPACITY: usize = 32; -static SWITCH_QUEUE: OnceCell> = OnceCell::new(); - -type CompletionRegistry = AsyncMutex>>; - -static SWITCH_COMPLETION_WAITERS: OnceCell = OnceCell::new(); - -/// Global map of task id -> completion channel sender used when callers await the result. -fn completion_waiters() -> &'static CompletionRegistry { - SWITCH_COMPLETION_WAITERS.get_or_init(|| AsyncMutex::new(HashMap::new())) -} - -/// Register a oneshot sender so `switch_profile_and_wait` can be notified when its task finishes. -async fn register_completion_waiter(task_id: u64) -> oneshot::Receiver { - let (sender, receiver) = oneshot::channel(); - let mut guard = completion_waiters().lock().await; - if guard.insert(task_id, sender).is_some() { - logging!( - warn, - Type::Cmd, - "Replacing existing completion waiter for task {}", - task_id - ); - } - receiver -} - -/// Remove an outstanding completion waiter; used when enqueue fails or succeeds immediately. -async fn remove_completion_waiter(task_id: u64) -> Option> { - completion_waiters().lock().await.remove(&task_id) -} - -/// Fire-and-forget notify helper so we do not block the driver loop. 
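Reviewer sketch — the deleted driver above keeps a registry pairing each task id with a `oneshot::Sender` so `switch_profile_and_wait` can block on its result while the driver loop stays non-blocking. Condensed to its core, with hypothetical `register`/`notify` names and `bool` in place of the real `SwitchResultStatus`:

```rust
use once_cell::sync::Lazy;
use std::collections::HashMap;
use tokio::sync::{oneshot, Mutex};

// Task id -> sender fired when that switch task finishes.
static WAITERS: Lazy<Mutex<HashMap<u64, oneshot::Sender<bool>>>> =
    Lazy::new(|| Mutex::new(HashMap::new()));

/// Caller side: register before enqueueing, then await the receiver.
async fn register(task_id: u64) -> oneshot::Receiver<bool> {
    let (tx, rx) = oneshot::channel();
    WAITERS.lock().await.insert(task_id, tx);
    rx
}

/// Driver side: resolve the waiter (if any) from a spawned task so the
/// driver loop itself never awaits the registry lock.
fn notify(task_id: u64, success: bool) {
    tokio::spawn(async move {
        if let Some(tx) = WAITERS.lock().await.remove(&task_id) {
            let _ = tx.send(success); // receiver may already have been dropped
        }
    });
}
```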
-fn notify_completion_waiter(task_id: u64, result: SwitchResultStatus) { - tokio::spawn(async move { - let sender = completion_waiters().lock().await.remove(&task_id); - if let Some(sender) = sender { - let _ = sender.send(result); - } - }); -} - -const WATCHDOG_TIMEOUT: Duration = Duration::from_secs(5); -const WATCHDOG_TICK: Duration = Duration::from_millis(500); - -// Mutable snapshot of the driver's world; all mutations happen on the driver task. -#[derive(Debug, Default)] -struct SwitchDriverState { - active: Option, - queue: VecDeque, - latest_tokens: HashMap, - cleanup_profiles: HashMap>, - last_result: Option, -} - -// Messages passed through SWITCH_QUEUE so the driver can react to events in order. -#[derive(Debug)] -enum SwitchDriverMessage { - Request { - request: SwitchRequest, - respond_to: oneshot::Sender, - }, - Completion { - request: SwitchRequest, - outcome: SwitchJobOutcome, - }, - CleanupDone { - profile: SmartString, - }, -} - -#[derive(Debug)] -enum SwitchJobOutcome { - Completed { - success: bool, - cleanup: workflow::CleanupHandle, - }, - Panicked { - info: SwitchPanicInfo, - cleanup: workflow::CleanupHandle, - }, -} - -pub(super) async fn switch_profile( - profile_index: impl Into, - notify_success: bool, -) -> CmdResult { - switch_profile_impl(profile_index.into(), notify_success, false).await -} - -pub(super) async fn switch_profile_and_wait( - profile_index: impl Into, - notify_success: bool, -) -> CmdResult { - switch_profile_impl(profile_index.into(), notify_success, true).await -} - -async fn switch_profile_impl( - profile_index: SmartString, - notify_success: bool, - wait_for_completion: bool, -) -> CmdResult { - // wait_for_completion is used by CLI flows that must block until the switch finishes. - let manager = manager(); - let sender = switch_driver_sender(); - - let request = SwitchRequest::new( - manager.next_task_id(), - profile_index.clone(), - notify_success, - ); - - logging!( - info, - Type::Cmd, - "Queue profile switch task {} -> {} (notify={})", - request.task_id(), - profile_index, - notify_success - ); - - let task_id = request.task_id(); - let mut completion_rx = if wait_for_completion { - Some(register_completion_waiter(task_id).await) - } else { - None - }; - - let (tx, rx) = oneshot::channel(); - - let enqueue_result = match sender.try_send(SwitchDriverMessage::Request { - request, - respond_to: tx, - }) { - Ok(_) => match rx.await { - Ok(result) => Ok(result), - Err(err) => { - logging!( - error, - Type::Cmd, - "Failed to receive enqueue result for profile {}: {}", - profile_index, - err - ); - Err("switch profile queue unavailable".into()) - } - }, - Err(TrySendError::Full(msg)) => { - logging!( - warn, - Type::Cmd, - "Profile switch queue is full; waiting for space: {}", - profile_index - ); - match sender.send(msg).await { - Ok(_) => match rx.await { - Ok(result) => Ok(result), - Err(err) => { - logging!( - error, - Type::Cmd, - "Failed to receive enqueue result after wait for {}: {}", - profile_index, - err - ); - Err("switch profile queue unavailable".into()) - } - }, - Err(err) => { - logging!( - error, - Type::Cmd, - "Profile switch queue closed while waiting ({}): {}", - profile_index, - err - ); - Err("switch profile queue unavailable".into()) - } - } - } - Err(TrySendError::Closed(_)) => { - logging!( - error, - Type::Cmd, - "Profile switch queue is closed, cannot enqueue: {}", - profile_index - ); - Err("switch profile queue unavailable".into()) - } - }; - - let accepted = match enqueue_result { - Ok(result) => result, - 
Err(err) => { - if completion_rx.is_some() { - remove_completion_waiter(task_id).await; - } - return Err(err); - } - }; - - if !accepted { - if completion_rx.is_some() { - remove_completion_waiter(task_id).await; - } - return Ok(false); - } - - if let Some(rx_completion) = completion_rx.take() { - match rx_completion.await { - Ok(status) => Ok(status.success), - Err(err) => { - logging!( - error, - Type::Cmd, - "Switch task {} completion channel dropped: {}", - task_id, - err - ); - Err("profile switch completion unavailable".into()) - } - } - } else { - Ok(true) - } -} - -fn switch_driver_sender() -> &'static mpsc::Sender { - SWITCH_QUEUE.get_or_init(|| { - let (tx, rx) = mpsc::channel::(SWITCH_QUEUE_CAPACITY); - let driver_tx = tx.clone(); - tokio::spawn(async move { - let manager = manager(); - let driver = SwitchDriver::new(manager, driver_tx); - driver.run(rx).await; - }); - tx - }) -} - -struct SwitchDriver { - manager: &'static SwitchManager, - sender: mpsc::Sender, - state: SwitchDriverState, -} - -impl SwitchDriver { - fn new(manager: &'static SwitchManager, sender: mpsc::Sender) -> Self { - let state = SwitchDriverState::default(); - manager.set_status(state.snapshot(manager)); - Self { - manager, - sender, - state, - } - } - - async fn run(mut self, mut rx: mpsc::Receiver) { - while let Some(message) = rx.recv().await { - match message { - SwitchDriverMessage::Request { - request, - respond_to, - } => { - self.handle_enqueue(request, respond_to); - } - SwitchDriverMessage::Completion { request, outcome } => { - self.handle_completion(request, outcome); - } - SwitchDriverMessage::CleanupDone { profile } => { - self.handle_cleanup_done(profile); - } - } - } - } - - fn handle_enqueue(&mut self, request: SwitchRequest, respond_to: oneshot::Sender) { - // Each new request supersedes older ones for the same profile to avoid thrashing the core. 
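Reviewer sketch — the enqueue path above prefers `try_send` and only falls back to an awaited `send` when the bounded queue is momentarily full, so callers get backpressure instead of silent drops. The same shape as a generic helper (the `enqueue` name is hypothetical):

```rust
use tokio::sync::mpsc::{self, error::TrySendError};

/// Non-blocking fast path; wait for capacity only when the queue is full,
/// and surface a closed queue as an error instead of panicking.
async fn enqueue<T>(tx: &mpsc::Sender<T>, msg: T) -> Result<(), &'static str> {
    match tx.try_send(msg) {
        Ok(()) => Ok(()),
        Err(TrySendError::Full(msg)) => {
            // Queue momentarily full: block until space frees up.
            tx.send(msg).await.map_err(|_| "queue closed while waiting")
        }
        Err(TrySendError::Closed(_)) => Err("queue closed"),
    }
}
```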
- let mut responder = Some(respond_to); - let accepted = true; - let profile_key = request.profile_id().clone(); - let cleanup_pending = - self.state.active.is_none() && !self.state.cleanup_profiles.is_empty(); - - if cleanup_pending && self.state.cleanup_profiles.contains_key(&profile_key) { - logging!( - debug, - Type::Cmd, - "Cleanup running for {}; queueing switch task {} -> {} to run afterwards", - profile_key, - request.task_id(), - profile_key - ); - if let Some(previous) = self - .state - .latest_tokens - .insert(profile_key.clone(), request.cancel_token().clone()) - { - previous.cancel(); - } - self.state - .queue - .retain(|queued| queued.profile_id() != &profile_key); - self.state.queue.push_back(request); - if let Some(sender) = responder.take() { - let _ = sender.send(accepted); - } - self.publish_status(); - return; - } - - if cleanup_pending { - logging!( - debug, - Type::Cmd, - "Cleanup running for {} profile(s); queueing task {} -> {} to run after cleanup without clearing existing requests", - self.state.cleanup_profiles.len(), - request.task_id(), - profile_key - ); - } - - if let Some(previous) = self - .state - .latest_tokens - .insert(profile_key.clone(), request.cancel_token().clone()) - { - previous.cancel(); - } - - if let Some(active) = self.state.active.as_mut() - && active.profile_id() == &profile_key - { - active.cancel_token().cancel(); - active.merge_notify(request.notify()); - self.state - .queue - .retain(|queued| queued.profile_id() != &profile_key); - self.state.queue.push_front(request.clone()); - if let Some(sender) = responder.take() { - let _ = sender.send(accepted); - } - self.publish_status(); - return; - } - - if let Some(active) = self.state.active.as_ref() { - logging!( - debug, - Type::Cmd, - "Cancelling active switch task {} (profile={}) in favour of task {} -> {}", - active.task_id(), - active.profile_id(), - request.task_id(), - profile_key - ); - active.cancel_token().cancel(); - } - - self.state - .queue - .retain(|queued| queued.profile_id() != &profile_key); - - self.state.queue.push_back(request.clone()); - if let Some(sender) = responder.take() { - let _ = sender.send(accepted); - } - - self.start_next_job(); - self.publish_status(); - } - - fn handle_completion(&mut self, request: SwitchRequest, outcome: SwitchJobOutcome) { - // Translate the workflow result into an event the frontend can understand. - let result_record = match &outcome { - SwitchJobOutcome::Completed { success, .. } => { - logging!( - info, - Type::Cmd, - "Switch task {} completed (success={})", - request.task_id(), - success - ); - if *success { - SwitchResultStatus::success(request.task_id(), request.profile_id()) - } else { - SwitchResultStatus::failed(request.task_id(), request.profile_id(), None, None) - } - } - SwitchJobOutcome::Panicked { info, .. } => { - logging!( - error, - Type::Cmd, - "Switch task {} panicked at stage {:?}: {}", - request.task_id(), - info.stage, - info.detail - ); - SwitchResultStatus::failed( - request.task_id(), - request.profile_id(), - Some(format!("{:?}", info.stage)), - Some(info.detail.clone()), - ) - } - }; - - if let Some(active) = self.state.active.as_ref() - && active.task_id() == request.task_id() - { - self.state.active = None; - } - - if let Some(latest) = self.state.latest_tokens.get(request.profile_id()) - && latest.same_token(request.cancel_token()) - { - self.state.latest_tokens.remove(request.profile_id()); - } - - let cleanup = match outcome { - SwitchJobOutcome::Completed { cleanup, .. 
} => cleanup, - SwitchJobOutcome::Panicked { cleanup, .. } => cleanup, - }; - - self.track_cleanup(request.profile_id().clone(), cleanup); - - let event_record = result_record.clone(); - self.state.last_result = Some(result_record); - notify_completion_waiter(request.task_id(), event_record.clone()); - self.manager.push_event(event_record); - self.start_next_job(); - self.publish_status(); - } - - fn handle_cleanup_done(&mut self, profile: SmartString) { - if let Some(handle) = self.state.cleanup_profiles.remove(&profile) { - handle.abort(); - } - self.start_next_job(); - self.publish_status(); - } - - fn start_next_job(&mut self) { - if self.state.active.is_some() || !self.state.cleanup_profiles.is_empty() { - self.publish_status(); - return; - } - - while let Some(request) = self.state.queue.pop_front() { - if request.cancel_token().is_cancelled() { - self.discard_request(request); - continue; - } - - self.state.active = Some(request.clone()); - self.start_switch_job(request); - break; - } - - self.publish_status(); - } - - fn track_cleanup(&mut self, profile: SmartString, cleanup: workflow::CleanupHandle) { - if let Some(existing) = self.state.cleanup_profiles.remove(&profile) { - existing.abort(); - } - - let driver_tx = self.sender.clone(); - let profile_clone = profile.clone(); - let handle = tokio::spawn(async move { - let profile_label = profile_clone.clone(); - if let Err(err) = cleanup.await { - logging!( - warn, - Type::Cmd, - "Cleanup task for profile {} failed: {}", - profile_label.as_str(), - err - ); - } - if let Err(err) = driver_tx - .send(SwitchDriverMessage::CleanupDone { - profile: profile_clone, - }) - .await - { - logging!( - error, - Type::Cmd, - "Failed to push cleanup completion for profile {}: {}", - profile_label.as_str(), - err - ); - } - }); - self.state.cleanup_profiles.insert(profile, handle); - } - - fn start_switch_job(&self, request: SwitchRequest) { - // Run the workflow in a background task while the driver keeps processing messages. - let driver_tx = self.sender.clone(); - let manager = self.manager; - - let completion_request = request.clone(); - let heartbeat = request.heartbeat().clone(); - let cancel_token = request.cancel_token().clone(); - let task_id = request.task_id(); - let profile_label = request.profile_id().clone(); - - tokio::spawn(async move { - let mut watchdog_interval = time::interval(WATCHDOG_TICK); - watchdog_interval.set_missed_tick_behavior(MissedTickBehavior::Skip); - - let workflow_fut = - AssertUnwindSafe(workflow::run_switch_job(manager, request)).catch_unwind(); - tokio::pin!(workflow_fut); - - let job_result = loop { - tokio::select! 
{ - res = workflow_fut.as_mut() => { - break match res { - Ok(Ok(result)) => SwitchJobOutcome::Completed { - success: result.success, - cleanup: result.cleanup, - }, - Ok(Err(error)) => SwitchJobOutcome::Panicked { - info: error.info, - cleanup: error.cleanup, - }, - Err(payload) => { - let info = SwitchPanicInfo::driver_task( - workflow::describe_panic_payload(payload.as_ref()), - ); - let cleanup = workflow::schedule_post_switch_failure( - profile_label.clone(), - completion_request.notify(), - completion_request.task_id(), - ); - SwitchJobOutcome::Panicked { info, cleanup } - } - }; - } - _ = watchdog_interval.tick() => { - if cancel_token.is_cancelled() { - continue; - } - let elapsed = heartbeat.elapsed(); - if elapsed > WATCHDOG_TIMEOUT { - let stage = SwitchStage::from_code(heartbeat.stage_code()) - .unwrap_or(SwitchStage::Workflow); - logging!( - warn, - Type::Cmd, - "Switch task {} watchdog timeout (profile={} stage={:?}, elapsed={:?}); cancelling", - task_id, - profile_label.as_str(), - stage, - elapsed - ); - cancel_token.cancel(); - } - } - } - }; - - let request_for_error = completion_request.clone(); - - if let Err(err) = driver_tx - .send(SwitchDriverMessage::Completion { - request: completion_request, - outcome: job_result, - }) - .await - { - logging!( - error, - Type::Cmd, - "Failed to push switch completion to driver: {}", - err - ); - notify_completion_waiter( - request_for_error.task_id(), - SwitchResultStatus::failed( - request_for_error.task_id(), - request_for_error.profile_id(), - Some("driver".to_string()), - Some(format!("completion dispatch failed: {}", err)), - ), - ); - } - }); - } - - /// Mark a request as failed because a newer request superseded it. - fn discard_request(&mut self, request: SwitchRequest) { - let key = request.profile_id().clone(); - let should_remove = self - .state - .latest_tokens - .get(&key) - .map(|latest| latest.same_token(request.cancel_token())) - .unwrap_or(false); - - if should_remove { - self.state.latest_tokens.remove(&key); - } - - if !request.cancel_token().is_cancelled() { - request.cancel_token().cancel(); - } - - let event = SwitchResultStatus::cancelled( - request.task_id(), - request.profile_id(), - Some("request superseded".to_string()), - ); - - self.state.last_result = Some(event.clone()); - notify_completion_waiter(request.task_id(), event.clone()); - self.manager.push_event(event); - } - - fn publish_status(&self) { - self.manager.set_status(self.state.snapshot(self.manager)); - } -} - -impl SwitchDriverState { - /// Lightweight struct suitable for sharing across the command boundary. 
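Reviewer sketch — `start_switch_job` above races the workflow future against a 500 ms watchdog tick and cancels the job once its heartbeat goes stale for 5 s. The pattern in isolation, with a hypothetical `run_with_watchdog` name; the closures stand in for `SwitchHeartbeat` and the cancel token:

```rust
use std::time::Duration;
use tokio::time::{self, MissedTickBehavior};

/// Poll a job and a periodic tick together; if the job reports no heartbeat
/// progress for too long, request cancellation instead of hanging forever.
async fn run_with_watchdog<F>(
    job: F,
    heartbeat_elapsed: impl Fn() -> Duration,
    cancel: impl Fn(),
) where
    F: std::future::Future<Output = ()>,
{
    const TIMEOUT: Duration = Duration::from_secs(5);
    let mut ticker = time::interval(Duration::from_millis(500));
    ticker.set_missed_tick_behavior(MissedTickBehavior::Skip);

    tokio::pin!(job);
    loop {
        tokio::select! {
            _ = job.as_mut() => break, // job finished (or observed the cancel)
            _ = ticker.tick() => {
                if heartbeat_elapsed() > TIMEOUT {
                    cancel(); // stale heartbeat: ask the job to stop itself
                }
            }
        }
    }
}
```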
- fn snapshot(&self, manager: &SwitchManager) -> ProfileSwitchStatus { - let active = self - .active - .as_ref() - .map(|req| SwitchTaskStatus::from_request(req, false)); - let queue = self - .queue - .iter() - .map(|req| SwitchTaskStatus::from_request(req, true)) - .collect::>(); - let cleanup_profiles = self - .cleanup_profiles - .keys() - .map(|key| key.to_string()) - .collect::>(); - - ProfileSwitchStatus { - is_switching: manager.is_switching(), - active, - queue, - cleanup_profiles, - last_result: self.last_result.clone(), - last_updated: current_millis(), - } - } -} diff --git a/src-tauri/src/cmd/profile_switch/mod.rs b/src-tauri/src/cmd/profile_switch/mod.rs deleted file mode 100644 index 0729c68d1..000000000 --- a/src-tauri/src/cmd/profile_switch/mod.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Profile switch orchestration: plumbing between the public tauri commands, -// the async driver queue, validation helpers, and the state machine workflow. -mod driver; -mod state; -mod validation; -mod workflow; - -pub use state::{ProfileSwitchStatus, SwitchResultEvent}; - -use smartstring::alias::String; - -use super::CmdResult; - -pub(super) async fn patch_profiles_config(profiles: crate::config::IProfiles) -> CmdResult { - workflow::patch_profiles_config(profiles).await -} - -pub(super) async fn patch_profiles_config_by_profile_index( - profile_index: String, -) -> CmdResult { - driver::switch_profile_and_wait(profile_index, false).await -} - -pub(super) async fn switch_profile(profile_index: String, notify_success: bool) -> CmdResult { - driver::switch_profile(profile_index, notify_success).await -} - -pub(super) fn get_switch_status() -> CmdResult { - Ok(state::manager().status_snapshot()) -} - -pub(super) fn get_switch_events(after_sequence: u64) -> CmdResult> { - Ok(state::manager().events_after(after_sequence)) -} diff --git a/src-tauri/src/cmd/profile_switch/state.rs b/src-tauri/src/cmd/profile_switch/state.rs deleted file mode 100644 index 1bb52d6b2..000000000 --- a/src-tauri/src/cmd/profile_switch/state.rs +++ /dev/null @@ -1,353 +0,0 @@ -use once_cell::sync::OnceCell; -use parking_lot::RwLock; -use serde::Serialize; -use smartstring::alias::String as SmartString; -use std::collections::VecDeque; -use std::sync::Arc; -use std::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering}; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use tokio::sync::{Mutex, Notify}; - -pub(super) const SWITCH_JOB_TIMEOUT: Duration = Duration::from_secs(30); -pub(super) const SWITCH_CLEANUP_TIMEOUT: Duration = Duration::from_secs(5); - -static SWITCH_MANAGER: OnceCell = OnceCell::new(); - -pub(super) fn manager() -> &'static SwitchManager { - SWITCH_MANAGER.get_or_init(SwitchManager::default) -} - -#[derive(Debug)] -// Central coordination point shared between the driver and workflow state machine. 
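Reviewer sketch — `push_event`/`events_after` in the deleted `SwitchManager` above implement a bounded event log: a `VecDeque` window of the most recent results, each tagged with a monotonic sequence so pollers can ask for "everything newer than X". A generic version of that structure (the `EventLog` name is hypothetical):

```rust
use std::collections::VecDeque;

const MAX_EVENTS: usize = 64;

struct EventLog<T> {
    next_seq: u64,
    events: VecDeque<(u64, T)>,
}

impl<T: Clone> EventLog<T> {
    fn new() -> Self {
        Self { next_seq: 0, events: VecDeque::with_capacity(MAX_EVENTS) }
    }

    /// Append an event, evicting the oldest once the window is full.
    fn push(&mut self, event: T) {
        self.next_seq += 1;
        if self.events.len() == MAX_EVENTS {
            self.events.pop_front();
        }
        self.events.push_back((self.next_seq, event));
    }

    /// Pollers pass the last sequence they saw and receive only newer
    /// events, so a missed window is detectable as a gap rather than
    /// requiring unbounded buffering.
    fn after(&self, seq: u64) -> Vec<(u64, T)> {
        self.events.iter().filter(|(s, _)| *s > seq).cloned().collect()
    }
}
```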
-pub(super) struct SwitchManager { - core_mutex: Mutex<()>, - request_sequence: AtomicU64, - switching: AtomicBool, - task_sequence: AtomicU64, - status: RwLock, - event_sequence: AtomicU64, - recent_events: RwLock>, -} - -impl Default for SwitchManager { - fn default() -> Self { - Self { - core_mutex: Mutex::new(()), - request_sequence: AtomicU64::new(0), - switching: AtomicBool::new(false), - task_sequence: AtomicU64::new(0), - status: RwLock::new(ProfileSwitchStatus::default()), - event_sequence: AtomicU64::new(0), - recent_events: RwLock::new(VecDeque::with_capacity(32)), - } - } -} - -impl SwitchManager { - pub(super) fn core_mutex(&self) -> &Mutex<()> { - &self.core_mutex - } - - // Monotonic identifiers so logs can correlate enqueue/finish pairs. - pub(super) fn next_task_id(&self) -> u64 { - self.task_sequence.fetch_add(1, Ordering::SeqCst) + 1 - } - - /// Sequence id assigned to each enqueue request so we can spot stale work. - pub(super) fn next_request_sequence(&self) -> u64 { - self.request_sequence.fetch_add(1, Ordering::SeqCst) + 1 - } - - pub(super) fn latest_request_sequence(&self) -> u64 { - self.request_sequence.load(Ordering::SeqCst) - } - - pub(super) fn begin_switch(&'static self) -> SwitchScope<'static> { - self.switching.store(true, Ordering::SeqCst); - SwitchScope { manager: self } - } - - pub(super) fn is_switching(&self) -> bool { - self.switching.load(Ordering::SeqCst) - } - - pub(super) fn set_status(&self, status: ProfileSwitchStatus) { - *self.status.write() = status; - } - - pub(super) fn status_snapshot(&self) -> ProfileSwitchStatus { - self.status.read().clone() - } - pub(super) fn push_event(&self, result: SwitchResultStatus) { - const MAX_EVENTS: usize = 64; - let sequence = self.event_sequence.fetch_add(1, Ordering::SeqCst) + 1; - let mut guard = self.recent_events.write(); - if guard.len() == MAX_EVENTS { - guard.pop_front(); - } - guard.push_back(SwitchResultEvent { sequence, result }); - } - - pub(super) fn events_after(&self, sequence: u64) -> Vec { - self.recent_events - .read() - .iter() - .filter(|event| event.sequence > sequence) - .cloned() - .collect() - } -} - -pub(super) struct SwitchScope<'a> { - manager: &'a SwitchManager, -} - -impl Drop for SwitchScope<'_> { - fn drop(&mut self) { - self.manager.switching.store(false, Ordering::SeqCst); - } -} - -#[derive(Debug, Clone)] -pub(super) struct SwitchCancellation { - flag: Arc, - notify: Arc, -} - -impl SwitchCancellation { - pub(super) fn new() -> Self { - Self { - flag: Arc::new(AtomicBool::new(false)), - notify: Arc::new(Notify::new()), - } - } - - pub(super) fn cancel(&self) { - self.flag.store(true, Ordering::SeqCst); - self.notify.notify_waiters(); - } - - /// True if another request already cancelled this job. - pub(super) fn is_cancelled(&self) -> bool { - self.flag.load(Ordering::SeqCst) - } - - pub(super) fn same_token(&self, other: &SwitchCancellation) -> bool { - Arc::ptr_eq(&self.flag, &other.flag) - } - - pub(super) async fn cancelled_future(&self) { - // Used by async blocks that want to pause until a newer request pre-empts them. 
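Reviewer sketch — `SwitchScope` above is a drop guard: `begin_switch` sets the switching flag and `Drop` clears it on every exit path, including panics, which is exactly what the restored code gives up by storing `false` manually before each return. Minimal form of that guard, assuming a static flag in place of the manager field (names are illustrative):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

static SWITCHING: AtomicBool = AtomicBool::new(false);

/// RAII guard: the "switching" flag is cleared on every exit path,
/// including early returns and panics, because Drop always runs.
struct SwitchScope;

impl SwitchScope {
    fn begin() -> Self {
        SWITCHING.store(true, Ordering::SeqCst);
        SwitchScope
    }
}

impl Drop for SwitchScope {
    fn drop(&mut self) {
        SWITCHING.store(false, Ordering::SeqCst);
    }
}

fn do_switch() {
    let _scope = SwitchScope::begin();
    // ... perform the switch; the flag resets when `_scope` drops ...
}
```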
- if self.is_cancelled() { - return; - } - self.notify.notified().await; - } -} - -#[derive(Debug, Clone)] -pub(super) struct SwitchRequest { - task_id: u64, - profile_id: SmartString, - notify: bool, - cancel_token: SwitchCancellation, - heartbeat: SwitchHeartbeat, -} - -impl SwitchRequest { - pub(super) fn new(task_id: u64, profile_id: SmartString, notify: bool) -> Self { - Self { - task_id, - profile_id, - notify, - cancel_token: SwitchCancellation::new(), - heartbeat: SwitchHeartbeat::new(), - } - } - - pub(super) fn task_id(&self) -> u64 { - self.task_id - } - - pub(super) fn profile_id(&self) -> &SmartString { - &self.profile_id - } - - pub(super) fn notify(&self) -> bool { - self.notify - } - - pub(super) fn merge_notify(&mut self, notify: bool) { - // When a new request wants a toast, remember it even if an older request did not. - if notify { - self.notify = true; - } - } - - pub(super) fn cancel_token(&self) -> &SwitchCancellation { - &self.cancel_token - } - - pub(super) fn heartbeat(&self) -> &SwitchHeartbeat { - &self.heartbeat - } -} - -#[derive(Debug, Clone)] -pub(super) struct SwitchHeartbeat { - last_tick_millis: Arc, - stage_code: Arc, -} - -fn now_millis() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or(Duration::ZERO) - .as_millis() as u64 -} - -#[derive(Debug, Clone, Serialize, Default)] -#[serde(rename_all = "camelCase")] -pub struct ProfileSwitchStatus { - pub is_switching: bool, - pub active: Option, - pub queue: Vec, - pub cleanup_profiles: Vec, - pub last_result: Option, - pub last_updated: u64, -} - -#[derive(Debug, Clone, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct SwitchTaskStatus { - pub task_id: u64, - pub profile_id: String, - pub notify: bool, - pub stage: Option, - pub queued: bool, -} - -impl SwitchTaskStatus { - pub(super) fn from_request(request: &SwitchRequest, queued: bool) -> Self { - Self { - task_id: request.task_id(), - profile_id: request.profile_id().to_string(), - notify: request.notify(), - stage: if queued { - None - } else { - Some(request.heartbeat().stage_code()) - }, - queued, - } - } -} - -#[derive(Debug, Clone, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct SwitchResultStatus { - pub task_id: u64, - pub profile_id: String, - pub success: bool, - pub cancelled: bool, - pub finished_at: u64, - pub error_stage: Option, - pub error_detail: Option, -} - -impl SwitchResultStatus { - pub(super) fn success(task_id: u64, profile_id: &SmartString) -> Self { - Self { - task_id, - profile_id: profile_id.to_string(), - success: true, - cancelled: false, - finished_at: now_millis(), - error_stage: None, - error_detail: None, - } - } - - pub(super) fn failed( - task_id: u64, - profile_id: &SmartString, - stage: Option, - detail: Option, - ) -> Self { - Self { - task_id, - profile_id: profile_id.to_string(), - success: false, - cancelled: false, - finished_at: now_millis(), - error_stage: stage, - error_detail: detail, - } - } - - pub(super) fn cancelled( - task_id: u64, - profile_id: &SmartString, - detail: Option, - ) -> Self { - Self { - task_id, - profile_id: profile_id.to_string(), - success: false, - cancelled: true, - finished_at: now_millis(), - error_stage: Some("cancelled".to_string()), - error_detail: detail, - } - } -} - -#[derive(Debug, Clone, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct SwitchResultEvent { - pub sequence: u64, - pub result: SwitchResultStatus, -} - -pub(super) fn current_millis() -> u64 { - now_millis() -} - -impl SwitchHeartbeat { - fn now_millis() -> 
u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or(Duration::ZERO) - .as_millis() as u64 - } - - pub(super) fn new() -> Self { - let heartbeat = Self { - last_tick_millis: Arc::new(AtomicU64::new(Self::now_millis())), - stage_code: Arc::new(AtomicU32::new(0)), - }; - heartbeat.touch(); - heartbeat - } - - pub(super) fn touch(&self) { - self.last_tick_millis - .store(Self::now_millis(), Ordering::SeqCst); - } - - /// Update the internal timer to reflect the amount of time since the last heartbeat. - pub(super) fn elapsed(&self) -> Duration { - let last = self.last_tick_millis.load(Ordering::SeqCst); - let now = Self::now_millis(); - Duration::from_millis(now.saturating_sub(last)) - } - - pub(super) fn set_stage(&self, stage: u32) { - self.stage_code.store(stage, Ordering::SeqCst); - self.touch(); - } - - pub(super) fn stage_code(&self) -> u32 { - self.stage_code.load(Ordering::SeqCst) - } -} diff --git a/src-tauri/src/cmd/profile_switch/validation.rs b/src-tauri/src/cmd/profile_switch/validation.rs deleted file mode 100644 index b15806cfa..000000000 --- a/src-tauri/src/cmd/profile_switch/validation.rs +++ /dev/null @@ -1,113 +0,0 @@ -use crate::{ - config::Config, - logging, - process::AsyncHandler, - utils::{dirs, logging::Type}, -}; -use serde_yaml_ng as serde_yaml; -use smartstring::alias::String; -use std::time::Duration; -use tokio::{fs as tokio_fs, time}; - -const YAML_READ_TIMEOUT: Duration = Duration::from_secs(5); - -/// Verify that the requested profile exists locally and is well-formed before switching. -pub(super) async fn validate_switch_request(task_id: u64, profile_id: &str) -> Result<(), String> { - logging!( - info, - Type::Cmd, - "Validating profile switch task {} -> {}", - task_id, - profile_id - ); - - let profile_key: String = profile_id.into(); - let (file_path, profile_type, is_current, remote_url) = { - let profiles_guard = Config::profiles().await; - let latest = profiles_guard.latest_ref(); - let item = latest.get_item(&profile_key).map_err(|err| -> String { - format!("Target profile {} not found: {}", profile_id, err).into() - })?; - ( - item.file.clone().map(|f| f.to_string()), - item.itype.clone().map(|t| t.to_string()), - latest - .current - .as_ref() - .map(|current| current.as_str() == profile_id) - .unwrap_or(false), - item.url.clone().map(|u| u.to_string()), - ) - }; - - if is_current { - logging!( - info, - Type::Cmd, - "Switch task {} is targeting the current profile {}; skipping validation", - task_id, - profile_id - ); - return Ok(()); - } - - if matches!(profile_type.as_deref(), Some("remote")) { - // Remote profiles must retain a URL so the subsequent refresh job knows where to download. 
-        let has_url = remote_url.as_ref().map(|u| !u.is_empty()).unwrap_or(false);
-        if !has_url {
-            return Err({
-                let msg = format!("Remote profile {} is missing a download URL", profile_id);
-                msg.into()
-            });
-        }
-    }
-
-    if let Some(file) = file_path {
-        let profiles_dir = dirs::app_profiles_dir().map_err(|err| -> String {
-            format!("Failed to resolve profiles directory: {}", err).into()
-        })?;
-        let path = profiles_dir.join(&file);
-
-        let contents = match time::timeout(YAML_READ_TIMEOUT, tokio_fs::read_to_string(&path)).await
-        {
-            Ok(Ok(contents)) => contents,
-            Ok(Err(err)) => {
-                return Err(
-                    format!("Failed to read profile file {}: {}", path.display(), err).into(),
-                );
-            }
-            Err(_) => {
-                return Err(format!(
-                    "Timed out reading profile file {} after {:?}",
-                    path.display(),
-                    YAML_READ_TIMEOUT
-                )
-                .into());
-            }
-        };
-
-        let parse_result = AsyncHandler::spawn_blocking(move || {
-            serde_yaml::from_str::<serde_yaml::Value>(&contents)
-        })
-        .await;
-
-        match parse_result {
-            Ok(Ok(_)) => {}
-            Ok(Err(err)) => {
-                return Err(
-                    format!("Profile YAML parse failed for {}: {}", path.display(), err).into(),
-                );
-            }
-            Err(join_err) => {
-                return Err(format!(
-                    "Profile YAML parse task panicked for {}: {}",
-                    path.display(),
-                    join_err
-                )
-                .into());
-            }
-        }
-    }
-
-    Ok(())
-}
diff --git a/src-tauri/src/cmd/profile_switch/workflow.rs b/src-tauri/src/cmd/profile_switch/workflow.rs
deleted file mode 100644
index 27d162696..000000000
--- a/src-tauri/src/cmd/profile_switch/workflow.rs
+++ /dev/null
@@ -1,385 +0,0 @@
-use super::{
-    CmdResult,
-    state::{SWITCH_JOB_TIMEOUT, SwitchManager, SwitchRequest, manager},
-    validation::validate_switch_request,
-};
-use crate::cmd::StringifyErr;
-use crate::{
-    config::{Config, IProfiles, profiles::profiles_save_file_safe},
-    core::handle,
-    logging,
-    process::AsyncHandler,
-    utils::{dirs, logging::Type},
-};
-use futures::FutureExt;
-use serde_yaml_ng as serde_yaml;
-use smartstring::alias::String as SmartString;
-use std::{any::Any, panic::AssertUnwindSafe, time::Duration};
-use tokio::{fs as tokio_fs, time};
-
-mod cleanup;
-mod state_machine;
-pub(super) use cleanup::{
-    CleanupHandle, schedule_post_switch_failure, schedule_post_switch_success,
-};
-
-use state_machine::{CONFIG_APPLY_TIMEOUT, SAVE_PROFILES_TIMEOUT, SwitchStateMachine};
-pub(super) use state_machine::{SwitchPanicInfo, SwitchStage};
-
-pub(super) struct SwitchWorkflowResult {
-    pub success: bool,
-    pub cleanup: CleanupHandle,
-}
-
-pub(super) struct SwitchWorkflowError {
-    pub info: SwitchPanicInfo,
-    pub cleanup: CleanupHandle,
-}
-
-pub(super) async fn run_switch_job(
-    manager: &'static SwitchManager,
-    request: SwitchRequest,
-) -> Result<SwitchWorkflowResult, SwitchWorkflowError> {
-    // Short-circuit cancelled jobs before we allocate resources or emit events.
-    if request.cancel_token().is_cancelled() {
-        logging!(
-            info,
-            Type::Cmd,
-            "Switch task {} cancelled before validation",
-            request.task_id()
-        );
-        let cleanup = schedule_post_switch_failure(
-            request.profile_id().clone(),
-            request.notify(),
-            request.task_id(),
-        );
-        return Ok(SwitchWorkflowResult {
-            success: false,
-            cleanup,
-        });
-    }
-
-    let profile_id = request.profile_id().clone();
-    let task_id = request.task_id();
-    let notify = request.notify();
-
-    if let Err(err) = validate_switch_request(task_id, profile_id.as_str()).await {
-        logging!(
-            warn,
-            Type::Cmd,
-            "Validation failed for switch task {} -> {}: {}",
-            task_id,
-            profile_id,
-            err
-        );
-        handle::Handle::notice_message("config_validate::error", err.clone());
-        let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id);
-        return Ok(SwitchWorkflowResult {
-            success: false,
-            cleanup,
-        });
-    }
-
-    logging!(
-        info,
-        Type::Cmd,
-        "Starting switch task {} for profile {} (notify={})",
-        task_id,
-        profile_id,
-        notify
-    );
-
-    let pipeline_request = request;
-    // The state machine owns the heavy lifting. We wrap it with timeout/panic guards so the driver never hangs.
-    let pipeline = async move {
-        let target_profile = pipeline_request.profile_id().clone();
-        SwitchStateMachine::new(
-            manager,
-            Some(pipeline_request),
-            IProfiles {
-                current: Some(target_profile),
-                items: None,
-            },
-        )
-        .run()
-        .await
-    };
-
-    match time::timeout(
-        SWITCH_JOB_TIMEOUT,
-        AssertUnwindSafe(pipeline).catch_unwind(),
-    )
-    .await
-    {
-        Err(_) => {
-            logging!(
-                error,
-                Type::Cmd,
-                "Profile switch task {} timed out after {:?}",
-                task_id,
-                SWITCH_JOB_TIMEOUT
-            );
-            handle::Handle::notice_message(
-                "config_validate::error",
-                format!("profile switch timed out: {}", profile_id),
-            );
-            let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id);
-            Ok(SwitchWorkflowResult {
-                success: false,
-                cleanup,
-            })
-        }
-        Ok(Err(panic_payload)) => {
-            let panic_message = describe_panic_payload(panic_payload.as_ref());
-            logging!(
-                error,
-                Type::Cmd,
-                "Panic captured during profile switch task {} ({}): {}",
-                task_id,
-                profile_id,
-                panic_message
-            );
-            handle::Handle::notice_message(
-                "config_validate::panic",
-                format!("profile switch panic: {}", profile_id),
-            );
-            let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id);
-            Err(SwitchWorkflowError {
-                info: SwitchPanicInfo::workflow_root(panic_message),
-                cleanup,
-            })
-        }
-        Ok(Ok(machine_result)) => match machine_result {
-            Ok(cmd_result) => match cmd_result {
-                Ok(success) => {
-                    let cleanup =
-                        schedule_post_switch_success(profile_id.clone(), success, notify, task_id);
-                    Ok(SwitchWorkflowResult { success, cleanup })
-                }
-                Err(err) => {
-                    logging!(
-                        error,
-                        Type::Cmd,
-                        "Profile switch failed ({}): {}",
-                        profile_id,
-                        err
-                    );
-                    handle::Handle::notice_message("config_validate::error", err.clone());
-                    let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id);
-                    Ok(SwitchWorkflowResult {
-                        success: false,
-                        cleanup,
-                    })
-                }
-            },
-            Err(panic_info) => {
-                logging!(
-                    error,
-                    Type::Cmd,
-                    "State machine panic during profile switch task {} ({} {:?}): {}",
-                    task_id,
-                    profile_id,
-                    panic_info.stage,
-                    panic_info.detail
-                );
-                handle::Handle::notice_message(
-                    "config_validate::panic",
-                    format!("profile switch panic: {}", profile_id),
-                );
-                let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id);
-                Err(SwitchWorkflowError {
-                    info: panic_info,
-                    cleanup,
-                })
-            }
-        },
-    }
-}
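// ---- Editor's note -------------------------------------------------------
// run_switch_job's guard combines an outer timeout with catch_unwind so a
// hung or panicking pipeline can never wedge the driver. A self-contained
// sketch of that guard under the same futures/tokio crates (the helper name
// is hypothetical, not part of this codebase):
use futures::FutureExt;
use std::panic::AssertUnwindSafe;
use tokio::time::{Duration, timeout};

async fn guarded<F, T>(pipeline: F, limit: Duration) -> Result<T, String>
where
    F: std::future::Future<Output = T>,
{
    match timeout(limit, AssertUnwindSafe(pipeline).catch_unwind()).await {
        // The outer timer fired first: the pipeline is abandoned, not joined.
        Err(_) => Err("pipeline timed out".into()),
        // The pipeline panicked; catch_unwind turned it into an Err payload.
        Ok(Err(_panic)) => Err("pipeline panicked".into()),
        // Normal completion within the deadline.
        Ok(Ok(value)) => Ok(value),
    }
}
// ---------------------------------------------------------------------------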
-
-/// Allow patch operations (no driver request) to use the same state machine pipeline.
-pub(super) async fn patch_profiles_config(profiles: IProfiles) -> CmdResult<bool> {
-    match SwitchStateMachine::new(manager(), None, profiles)
-        .run()
-        .await
-    {
-        Ok(result) => result,
-        Err(panic_info) => Err(format!(
-            "profile switch panic ({:?}): {}",
-            panic_info.stage, panic_info.detail
-        )
-        .into()),
-    }
-}
-
-/// Parse the target profile YAML on a background thread to catch syntax errors early.
-pub(super) async fn validate_profile_yaml(profile: &SmartString) -> CmdResult<bool> {
-    let file_path = {
-        let profiles_guard = Config::profiles().await;
-        let profiles_data = profiles_guard.latest_ref();
-        match profiles_data.get_item(profile) {
-            Ok(item) => item.file.as_ref().and_then(|file| {
-                dirs::app_profiles_dir()
-                    .ok()
-                    .map(|dir| dir.join(file.as_str()))
-            }),
-            Err(e) => {
-                logging!(
-                    error,
-                    Type::Cmd,
-                    "Failed to load target profile metadata: {}",
-                    e
-                );
-                return Ok(false);
-            }
-        }
-    };
-
-    let Some(path) = file_path else {
-        return Ok(true);
-    };
-
-    if !path.exists() {
-        logging!(
-            error,
-            Type::Cmd,
-            "Target profile file does not exist: {}",
-            path.display()
-        );
-        handle::Handle::notice_message(
-            "config_validate::file_not_found",
-            format!("{}", path.display()),
-        );
-        return Ok(false);
-    }
-
-    let file_read_result =
-        time::timeout(Duration::from_secs(5), tokio_fs::read_to_string(&path)).await;
-
-    match file_read_result {
-        Ok(Ok(content)) => {
-            let yaml_parse_result = AsyncHandler::spawn_blocking(move || {
-                serde_yaml::from_str::<serde_yaml::Value>(&content)
-            })
-            .await;
-
-            match yaml_parse_result {
-                Ok(Ok(_)) => {
-                    logging!(info, Type::Cmd, "Target profile YAML syntax is valid");
-                    Ok(true)
-                }
-                Ok(Err(err)) => {
-                    let error_msg = format!(" {err}");
-                    logging!(
-                        error,
-                        Type::Cmd,
-                        "Target profile contains YAML syntax errors: {}",
-                        error_msg
-                    );
-                    handle::Handle::notice_message(
-                        "config_validate::yaml_syntax_error",
-                        error_msg.clone(),
-                    );
-                    Ok(false)
-                }
-                Err(join_err) => {
-                    let error_msg = format!("YAML parsing task failed: {join_err}");
-                    logging!(error, Type::Cmd, "{}", error_msg);
-                    handle::Handle::notice_message(
-                        "config_validate::yaml_parse_error",
-                        error_msg.clone(),
-                    );
-                    Ok(false)
-                }
-            }
-        }
-        Ok(Err(err)) => {
-            let error_msg = format!("Failed to read target profile file: {err}");
-            logging!(error, Type::Cmd, "{}", error_msg);
-            handle::Handle::notice_message("config_validate::file_read_error", error_msg.clone());
-            Ok(false)
-        }
-        Err(_) => {
-            let error_msg = "Timed out reading profile file (5s)".to_string();
-            logging!(error, Type::Cmd, "{}", error_msg);
-            handle::Handle::notice_message("config_validate::file_read_timeout", error_msg.clone());
-            Err(error_msg.into())
-        }
-    }
-}
-
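// ---- Editor's note -------------------------------------------------------
// validate_profile_yaml above splits the work: a bounded read on the async
// runtime, then YAML parsing on a blocking thread. That split generalizes to
// any untrusted file; a hedged sketch assuming tokio and a serde_yaml-style
// crate (the function name is hypothetical):
use std::{path::Path, time::Duration};
use tokio::{fs, task, time};

async fn yaml_is_valid(path: &Path) -> Result<bool, String> {
    // Bound the read so a stalled filesystem cannot hang the caller.
    let contents = time::timeout(Duration::from_secs(5), fs::read_to_string(path))
        .await
        .map_err(|_| "read timed out".to_string())?
        .map_err(|err| format!("read failed: {err}"))?;

    // Parsing can be CPU-heavy; keep it off the async worker threads.
    let parsed =
        task::spawn_blocking(move || serde_yaml::from_str::<serde_yaml::Value>(&contents))
            .await
            .map_err(|join_err| format!("parse task panicked: {join_err}"))?;

    Ok(parsed.is_ok())
}
// ---------------------------------------------------------------------------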
-/// Best-effort rollback invoked when a switch fails midway through the pipeline.
-pub(super) async fn restore_previous_profile(previous: Option<SmartString>) -> CmdResult<()> {
-    if let Some(prev_profile) = previous {
-        logging!(
-            info,
-            Type::Cmd,
-            "Attempting to restore previous configuration: {}",
-            prev_profile
-        );
-        let restore_profiles = IProfiles {
-            current: Some(prev_profile),
-            items: None,
-        };
-        Config::profiles()
-            .await
-            .draft_mut()
-            .patch_config(restore_profiles)
-            .stringify_err()?;
-        if time::timeout(CONFIG_APPLY_TIMEOUT, async {
-            Config::profiles().await.apply();
-        })
-        .await
-        .is_err()
-        {
-            logging!(
-                warn,
-                Type::Cmd,
-                "Restoring previous configuration timed out after {:?}",
-                CONFIG_APPLY_TIMEOUT
-            );
-            return Ok(());
-        }
-
-        AsyncHandler::spawn(|| async move {
-            let save_future = AsyncHandler::spawn_blocking(|| {
-                futures::executor::block_on(async { profiles_save_file_safe().await })
-            });
-            match time::timeout(SAVE_PROFILES_TIMEOUT, save_future).await {
-                Ok(join_res) => match join_res {
-                    Ok(Ok(())) => {}
-                    Ok(Err(err)) => {
-                        logging!(
-                            warn,
-                            Type::Cmd,
-                            "Failed to persist restored configuration asynchronously: {}",
-                            err
-                        );
-                    }
-                    Err(join_err) => {
-                        logging!(warn, Type::Cmd, "Blocking save task failed: {}", join_err);
-                    }
-                },
-                Err(_) => {
-                    logging!(
-                        warn,
-                        Type::Cmd,
-                        "Persisting restored configuration timed out after {:?}",
-                        SAVE_PROFILES_TIMEOUT
-                    );
-                }
-            }
-        });
-    }
-
-    Ok(())
-}
-
-pub(super) fn describe_panic_payload(payload: &(dyn Any + Send)) -> String {
-    if let Some(message) = payload.downcast_ref::<&str>() {
-        (*message).to_string()
-    } else if let Some(message) = payload.downcast_ref::<String>() {
-        message.clone()
-    } else {
-        "unknown panic".into()
-    }
-}
diff --git a/src-tauri/src/cmd/profile_switch/workflow/cleanup.rs b/src-tauri/src/cmd/profile_switch/workflow/cleanup.rs
deleted file mode 100644
index 2f7e1aaca..000000000
--- a/src-tauri/src/cmd/profile_switch/workflow/cleanup.rs
+++ /dev/null
@@ -1,65 +0,0 @@
-use super::super::state::SWITCH_CLEANUP_TIMEOUT;
-use crate::{core::handle, logging, process::AsyncHandler, utils::logging::Type};
-use smartstring::alias::String as SmartString;
-use tokio::time;
-
-pub(crate) type CleanupHandle = tauri::async_runtime::JoinHandle<()>;
-
-pub(crate) fn schedule_post_switch_success(
-    profile_id: SmartString,
-    success: bool,
-    notify: bool,
-    task_id: u64,
-) -> CleanupHandle {
-    // Post-success cleanup runs detached from the driver so the queue keeps moving.
-    AsyncHandler::spawn(move || async move {
-        handle::Handle::notify_profile_switch_finished(
-            profile_id.clone(),
-            success,
-            notify,
-            task_id,
-        );
-        if success {
-            close_connections_after_switch(profile_id).await;
-        }
-    })
-}
-
-pub(crate) fn schedule_post_switch_failure(
-    profile_id: SmartString,
-    notify: bool,
-    task_id: u64,
-) -> CleanupHandle {
-    // Failures or cancellations do not alter the active profile, so skip draining live connections.
-    AsyncHandler::spawn(move || async move {
-        handle::Handle::notify_profile_switch_finished(profile_id.clone(), false, notify, task_id);
-    })
-}
-
-async fn close_connections_after_switch(profile_id: SmartString) {
-    match time::timeout(SWITCH_CLEANUP_TIMEOUT, async {
-        handle::Handle::mihomo().await.close_all_connections().await
-    })
-    .await
-    {
-        Ok(Ok(())) => {}
-        Ok(Err(err)) => {
-            logging!(
-                warn,
-                Type::Cmd,
-                "Failed to close connections after profile switch ({}): {}",
-                profile_id,
-                err
-            );
-        }
-        Err(_) => {
-            logging!(
-                warn,
-                Type::Cmd,
-                "Closing connections after profile switch ({}) timed out after {:?}",
-                profile_id,
-                SWITCH_CLEANUP_TIMEOUT
-            );
-        }
-    }
-}
diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs
deleted file mode 100644
index 9de753dbc..000000000
--- a/src-tauri/src/cmd/profile_switch/workflow/state_machine/context.rs
+++ /dev/null
@@ -1,178 +0,0 @@
-use super::{CmdResult, core::SwitchStage};
-use crate::{
-    cmd::profile_switch::state::{
-        SwitchCancellation, SwitchHeartbeat, SwitchManager, SwitchRequest, SwitchScope,
-    },
-    config::IProfiles,
-    logging,
-    utils::logging::Type,
-};
-use smartstring::alias::String as SmartString;
-use tokio::sync::MutexGuard;
-
-pub(super) struct SwitchContext {
-    pub(super) manager: &'static SwitchManager,
-    pub(super) request: Option<SwitchRequest>,
-    pub(super) profiles_patch: Option<IProfiles>,
-    pub(super) sequence: Option<u64>,
-    pub(super) target_profile: Option<SmartString>,
-    pub(super) previous_profile: Option<SmartString>,
-    pub(super) new_profile_for_event: Option<SmartString>,
-    pub(super) switch_scope: Option<SwitchScope<'static>>,
-    pub(super) core_guard: Option<MutexGuard<'static, ()>>,
-    pub(super) heartbeat: SwitchHeartbeat,
-    pub(super) task_id: Option<u64>,
-    pub(super) profile_label: SmartString,
-    pub(super) active_stage: SwitchStage,
-}
-
-impl SwitchContext {
-    // Captures all mutable data required across states (locks, profile ids, etc).
-    pub(super) fn new(
-        manager: &'static SwitchManager,
-        request: Option<SwitchRequest>,
-        profiles: IProfiles,
-        heartbeat: SwitchHeartbeat,
-    ) -> Self {
-        let task_id = request.as_ref().map(|req| req.task_id());
-        let profile_label = request
-            .as_ref()
-            .map(|req| req.profile_id().clone())
-            .or_else(|| profiles.current.clone())
-            .unwrap_or_else(|| SmartString::from("unknown"));
-        heartbeat.touch();
-        Self {
-            manager,
-            request,
-            profiles_patch: Some(profiles),
-            sequence: None,
-            target_profile: None,
-            previous_profile: None,
-            new_profile_for_event: None,
-            switch_scope: None,
-            core_guard: None,
-            heartbeat,
-            task_id,
-            profile_label,
-            active_stage: SwitchStage::Start,
-        }
-    }
-
-    pub(super) fn ensure_target_profile(&mut self) {
-        // Lazily determine which profile we're switching to so shared paths (patch vs. driver) behave the same.
-        if let Some(patch) = self.profiles_patch.as_mut() {
-            if patch.current.is_none()
-                && let Some(request) = self.request.as_ref()
-            {
-                patch.current = Some(request.profile_id().clone());
-            }
-            self.target_profile = patch.current.clone();
-        }
-    }
-
-    pub(super) fn take_profiles_patch(&mut self) -> CmdResult<IProfiles> {
-        self.profiles_patch
-            .take()
-            .ok_or_else(|| "profiles patch already consumed".into())
-    }
-
-    pub(super) fn cancel_token(&self) -> Option<SwitchCancellation> {
-        self.request.as_ref().map(|req| req.cancel_token().clone())
-    }
-
-    pub(super) fn cancelled(&self) -> bool {
-        self.request
-            .as_ref()
-            .map(|req| req.cancel_token().is_cancelled())
-            .unwrap_or(false)
-    }
-
-    pub(super) fn log_cancelled(&self, stage: &str) {
-        if let Some(request) = self.request.as_ref() {
-            logging!(
-                info,
-                Type::Cmd,
-                "Switch task {} cancelled {}; profile={}",
-                request.task_id(),
-                stage,
-                request.profile_id()
-            );
-        } else {
-            logging!(info, Type::Cmd, "Profile switch cancelled {}", stage);
-        }
-    }
-
-    pub(super) fn should_validate_target(&self) -> bool {
-        match (&self.target_profile, &self.previous_profile) {
-            (Some(target), Some(current)) => current != target,
-            (Some(_), None) => true,
-            _ => false,
-        }
-    }
-
-    pub(super) fn stale(&self) -> bool {
-        self.sequence
-            .map(|seq| seq < self.manager.latest_request_sequence())
-            .unwrap_or(false)
-    }
-
-    pub(super) fn sequence(&self) -> u64 {
-        self.sequence.unwrap_or_else(|| {
-            logging!(
-                warn,
-                Type::Cmd,
-                "Sequence unexpectedly missing in switch context; defaulting to 0"
-            );
-            0
-        })
-    }
-
-    pub(super) fn record_stage(&mut self, stage: SwitchStage) {
-        let since_last = self.heartbeat.elapsed();
-        let previous = self.active_stage;
-        self.active_stage = stage;
-        self.heartbeat.set_stage(stage.as_code());
-
-        match self.task_id {
-            Some(task_id) => logging!(
-                debug,
-                Type::Cmd,
-                "Switch task {} (profile={}) transitioned {:?} -> {:?} after {:?}",
-                task_id,
-                self.profile_label,
-                previous,
-                stage,
-                since_last
-            ),
-            None => logging!(
-                debug,
-                Type::Cmd,
-                "Profile patch {} transitioned {:?} -> {:?} after {:?}",
-                self.profile_label,
-                previous,
-                stage,
-                since_last
-            ),
-        }
-    }
-
-    pub(super) fn release_core_guard(&mut self) {
-        self.core_guard = None;
-    }
-
-    pub(super) fn release_switch_scope(&mut self) {
-        self.switch_scope = None;
-    }
-
-    pub(super) fn release_locks(&mut self) {
-        self.release_core_guard();
-        self.release_switch_scope();
-    }
-}
-
-impl Drop for SwitchContext {
-    fn drop(&mut self) {
-        self.core_guard.take();
-        self.switch_scope.take();
-    }
-}
diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs
deleted file mode 100644
index 1c4e32ab2..000000000
--- a/src-tauri/src/cmd/profile_switch/workflow/state_machine/core.rs
+++ /dev/null
@@ -1,284 +0,0 @@
-use super::{CmdResult, context::SwitchContext, describe_panic_payload};
-use crate::{
-    cmd::profile_switch::state::{SwitchHeartbeat, SwitchManager, SwitchRequest},
-    config::IProfiles,
-    logging,
-    utils::logging::Type,
-};
-use futures::FutureExt;
-use std::{
-    mem,
-    panic::AssertUnwindSafe,
-    time::{Duration, Instant},
-};
-pub(crate) const CONFIG_APPLY_TIMEOUT: Duration = Duration::from_secs(5);
-pub(crate) const TRAY_UPDATE_TIMEOUT: Duration = Duration::from_secs(3);
-pub(crate) const REFRESH_TIMEOUT: Duration = Duration::from_secs(3);
-pub(crate) const SAVE_PROFILES_TIMEOUT: Duration = Duration::from_secs(5);
-pub(crate) const SWITCH_IDLE_WAIT_TIMEOUT: Duration = Duration::from_secs(30);
-pub(crate) const SWITCH_IDLE_WAIT_POLL: Duration = Duration::from_millis(25);
-pub(crate) const SWITCH_IDLE_WAIT_MAX_BACKOFF: Duration = Duration::from_millis(250);
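// ---- Editor's note -------------------------------------------------------
// SWITCH_IDLE_WAIT_POLL and SWITCH_IDLE_WAIT_MAX_BACKOFF above drive a capped
// exponential backoff while a previous switch drains (see handle_acquire_core
// in stages.rs). The growth step reduces to one line; a sketch with the same
// numbers (25ms, 50ms, 100ms, 200ms, then capped at 250ms):
use std::time::Duration;

fn next_backoff(current: Duration, cap: Duration) -> Duration {
    // Double the wait, saturating instead of overflowing, but never exceed the cap.
    current.saturating_mul(2).min(cap)
}
// ---------------------------------------------------------------------------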
-
-/// Explicit state machine for profile switching so we can reason about
-/// cancellation, stale requests, and side effects at each stage.
-pub(crate) struct SwitchStateMachine {
-    pub(super) ctx: SwitchContext,
-    state: SwitchState,
-}
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-pub(crate) enum SwitchStage {
-    Start,
-    AcquireCore,
-    Prepare,
-    ValidateTarget,
-    PatchDraft,
-    UpdateCore,
-    Finalize,
-    Workflow,
-    DriverTask,
-}
-
-impl SwitchStage {
-    pub(crate) fn as_code(self) -> u32 {
-        match self {
-            SwitchStage::Start => 0,
-            SwitchStage::AcquireCore => 1,
-            SwitchStage::Prepare => 2,
-            SwitchStage::ValidateTarget => 3,
-            SwitchStage::PatchDraft => 4,
-            SwitchStage::UpdateCore => 5,
-            SwitchStage::Finalize => 6,
-            SwitchStage::Workflow => 7,
-            SwitchStage::DriverTask => 8,
-        }
-    }
-
-    pub(crate) fn from_code(code: u32) -> Option<Self> {
-        Some(match code {
-            0 => SwitchStage::Start,
-            1 => SwitchStage::AcquireCore,
-            2 => SwitchStage::Prepare,
-            3 => SwitchStage::ValidateTarget,
-            4 => SwitchStage::PatchDraft,
-            5 => SwitchStage::UpdateCore,
-            6 => SwitchStage::Finalize,
-            7 => SwitchStage::Workflow,
-            8 => SwitchStage::DriverTask,
-            _ => return None,
-        })
-    }
-}
-
-#[derive(Debug, Clone)]
-pub(crate) struct SwitchPanicInfo {
-    pub(crate) stage: SwitchStage,
-    pub(crate) detail: String,
-}
-
-impl SwitchPanicInfo {
-    pub(crate) fn new(stage: SwitchStage, detail: String) -> Self {
-        Self { stage, detail }
-    }
-
-    pub(crate) fn workflow_root(detail: String) -> Self {
-        Self::new(SwitchStage::Workflow, detail)
-    }
-
-    pub(crate) fn driver_task(detail: String) -> Self {
-        Self::new(SwitchStage::DriverTask, detail)
-    }
-}
-
-/// High-level state machine nodes executed in strict sequence.
-pub(crate) enum SwitchState {
-    Start,
-    AcquireCore,
-    Prepare,
-    ValidateTarget,
-    PatchDraft,
-    UpdateCore,
-    Finalize(CoreUpdateOutcome),
-    Complete(bool),
-}
-
-/// Result of trying to apply the draft configuration to the core.
-pub(crate) enum CoreUpdateOutcome {
-    Success,
-    ValidationFailed { message: String },
-    CoreError { message: String },
-    Timeout,
-}
-
-/// Indicates where a stale request was detected so logs stay descriptive.
-pub(crate) enum StaleStage {
-    AfterLock,
-    BeforeCoreOperation,
-    BeforeCoreInteraction,
-    AfterCoreOperation,
-}
-
-impl StaleStage {
-    pub(super) fn log(&self, ctx: &SwitchContext) {
-        let sequence = ctx.sequence();
-        let latest = ctx.manager.latest_request_sequence();
-        match self {
-            StaleStage::AfterLock => logging!(
-                info,
-                Type::Cmd,
-                "Detected a newer request after acquiring the lock (sequence: {} < {}), abandoning current request",
-                sequence,
-                latest
-            ),
-            StaleStage::BeforeCoreOperation => logging!(
-                info,
-                Type::Cmd,
-                "Detected a newer request before core operation (sequence: {} < {}), abandoning current request",
-                sequence,
-                latest
-            ),
-            StaleStage::BeforeCoreInteraction => logging!(
-                info,
-                Type::Cmd,
-                "Detected a newer request before core interaction (sequence: {} < {}), abandoning current request",
-                sequence,
-                latest
-            ),
-            StaleStage::AfterCoreOperation => logging!(
-                info,
-                Type::Cmd,
-                "Detected a newer request after core operation (sequence: {} < {}), ignoring current result",
-                sequence,
-                latest
-            ),
-        }
-    }
-}
-
-impl SwitchStateMachine {
-    pub(crate) fn new(
-        manager: &'static SwitchManager,
-        request: Option<SwitchRequest>,
-        profiles: IProfiles,
-    ) -> Self {
-        let heartbeat = request
-            .as_ref()
-            .map(|req| req.heartbeat().clone())
-            .unwrap_or_else(SwitchHeartbeat::new);
-
-        Self {
-            ctx: SwitchContext::new(manager, request, profiles, heartbeat),
-            state: SwitchState::Start,
-        }
-    }
-
-    pub(crate) async fn run(mut self) -> Result<CmdResult<bool>, SwitchPanicInfo> {
-        // Drive the state machine until we either complete successfully or bubble up a panic.
-        loop {
-            let current_state = mem::replace(&mut self.state, SwitchState::Complete(false));
-            match current_state {
-                SwitchState::Complete(result) => return Ok(Ok(result)),
-                _ => match self.run_state(current_state).await? {
-                    Ok(state) => self.state = state,
-                    Err(err) => return Ok(Err(err)),
-                },
-            }
-        }
-    }
-
-    async fn run_state(
-        &mut self,
-        current: SwitchState,
-    ) -> Result<CmdResult<SwitchState>, SwitchPanicInfo> {
-        match current {
-            SwitchState::Start => {
-                self.with_stage(
-                    SwitchStage::Start,
-                    |this| async move { this.handle_start() },
-                )
-                .await
-            }
-            SwitchState::AcquireCore => {
-                self.with_stage(SwitchStage::AcquireCore, |this| async move {
-                    this.handle_acquire_core().await
-                })
-                .await
-            }
-            SwitchState::Prepare => {
-                self.with_stage(SwitchStage::Prepare, |this| async move {
-                    this.handle_prepare().await
-                })
-                .await
-            }
-            SwitchState::ValidateTarget => {
-                self.with_stage(SwitchStage::ValidateTarget, |this| async move {
-                    this.handle_validate_target().await
-                })
-                .await
-            }
-            SwitchState::PatchDraft => {
-                self.with_stage(SwitchStage::PatchDraft, |this| async move {
-                    this.handle_patch_draft().await
-                })
-                .await
-            }
-            SwitchState::UpdateCore => {
-                self.with_stage(SwitchStage::UpdateCore, |this| async move {
-                    this.handle_update_core().await
-                })
-                .await
-            }
-            SwitchState::Finalize(outcome) => {
-                self.with_stage(SwitchStage::Finalize, |this| async move {
-                    this.handle_finalize(outcome).await
-                })
-                .await
-            }
-            SwitchState::Complete(result) => Ok(Ok(SwitchState::Complete(result))),
-        }
-    }
-
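// ---- Editor's note -------------------------------------------------------
// run() above drives the machine by moving the current state out with
// mem::replace, which sidesteps borrow conflicts between reading the state
// and assigning the next one. The skeleton, reduced to its shape (enum and
// function names are hypothetical):
enum State {
    Working,
    Done(bool),
}

fn drive(mut state: State) -> bool {
    loop {
        // Take ownership of the state, leaving a cheap placeholder behind.
        match std::mem::replace(&mut state, State::Done(false)) {
            State::Done(result) => return result,
            State::Working => state = State::Done(true), // next transition
        }
    }
}
// ---------------------------------------------------------------------------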
-    /// Helper that wraps each stage with consistent logging and panic reporting.
-    async fn with_stage<'a, F, Fut>(
-        &'a mut self,
-        stage: SwitchStage,
-        f: F,
-    ) -> Result<CmdResult<SwitchState>, SwitchPanicInfo>
-    where
-        F: FnOnce(&'a mut Self) -> Fut,
-        Fut: std::future::Future<Output = CmdResult<SwitchState>> + 'a,
-    {
-        let sequence = self.ctx.sequence();
-        let task = self.ctx.task_id;
-        let profile = self.ctx.profile_label.clone();
-        logging!(
-            info,
-            Type::Cmd,
-            "Enter {:?} (sequence={}, task={:?}, profile={})",
-            stage,
-            sequence,
-            task,
-            profile
-        );
-        let stage_start = Instant::now();
-        self.ctx.record_stage(stage);
-        AssertUnwindSafe(f(self))
-            .catch_unwind()
-            .await
-            .map_err(|payload| {
-                SwitchPanicInfo::new(stage, describe_panic_payload(payload.as_ref()))
-            })
-            .inspect(|_| {
-                logging!(
-                    info,
-                    Type::Cmd,
-                    "Exit {:?} (sequence={}, task={:?}, profile={}, elapsed={}ms)",
-                    stage,
-                    sequence,
-                    task,
-                    profile,
-                    stage_start.elapsed().as_millis()
-                );
-            })
-    }
-}
diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs
deleted file mode 100644
index 84ee0f491..000000000
--- a/src-tauri/src/cmd/profile_switch/workflow/state_machine/mod.rs
+++ /dev/null
@@ -1,11 +0,0 @@
-mod context;
-mod core;
-mod stages;
-
-pub(crate) use core::{
-    CONFIG_APPLY_TIMEOUT, SAVE_PROFILES_TIMEOUT, SwitchPanicInfo, SwitchStage, SwitchStateMachine,
-};
-
-pub(super) use super::{
-    CmdResult, describe_panic_payload, restore_previous_profile, validate_profile_yaml,
-};
diff --git a/src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs b/src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs
deleted file mode 100644
index 78c313d42..000000000
--- a/src-tauri/src/cmd/profile_switch/workflow/state_machine/stages.rs
+++ /dev/null
@@ -1,597 +0,0 @@
-use super::{
-    CmdResult,
-    core::{
-        CONFIG_APPLY_TIMEOUT, CoreUpdateOutcome, REFRESH_TIMEOUT, SAVE_PROFILES_TIMEOUT,
-        SWITCH_IDLE_WAIT_MAX_BACKOFF, SWITCH_IDLE_WAIT_POLL, SWITCH_IDLE_WAIT_TIMEOUT, StaleStage,
-        SwitchState, SwitchStateMachine, TRAY_UPDATE_TIMEOUT,
-    },
-    restore_previous_profile, validate_profile_yaml,
-};
-use crate::{
-    config::{Config, profiles::profiles_save_file_safe},
-    core::{CoreManager, handle, tray::Tray},
-    logging,
-    process::AsyncHandler,
-    utils::logging::Type,
-};
-use anyhow::Error;
-use futures::future;
-use smartstring::alias::String as SmartString;
-use std::{
-    pin::Pin,
-    time::{Duration, Instant},
-};
-use tokio::time;
-
-impl SwitchStateMachine {
-    pub(super) fn handle_start(&mut self) -> CmdResult<SwitchState> {
-        if self.ctx.manager.is_switching() {
-            logging!(
-                info,
-                Type::Cmd,
-                "Profile switch already in progress; queuing request for task={:?}, profile={}",
-                self.ctx.task_id,
-                self.ctx.profile_label
-            );
-        }
-        Ok(SwitchState::AcquireCore)
-    }
-
-    /// Grab the core lock, mark the manager as switching, and compute the target profile.
-    pub(super) async fn handle_acquire_core(&mut self) -> CmdResult<SwitchState> {
-        let manager = self.ctx.manager;
-        let core_guard = manager.core_mutex().lock().await;
-
-        if manager.is_switching() {
-            logging!(
-                info,
-                Type::Cmd,
-                "Active profile switch detected; waiting before acquiring scope"
-            );
-            let wait_start = Instant::now();
-            let mut backoff = SWITCH_IDLE_WAIT_POLL;
-            while manager.is_switching() {
-                if self.ctx.cancelled() {
-                    self.ctx
-                        .log_cancelled("while waiting for active switch to finish");
-                    return Ok(SwitchState::Complete(false));
-                }
-                if wait_start.elapsed() >= SWITCH_IDLE_WAIT_TIMEOUT {
-                    let message = format!(
-                        "Timed out after {:?} waiting for active profile switch to finish",
-                        SWITCH_IDLE_WAIT_TIMEOUT
-                    );
-                    logging!(error, Type::Cmd, "{}", message);
-                    return Err(message.into());
-                }
-
-                time::sleep(backoff).await;
-                backoff = backoff.saturating_mul(2).min(SWITCH_IDLE_WAIT_MAX_BACKOFF);
-            }
-            let waited = wait_start.elapsed().as_millis();
-            if waited > 0 {
-                logging!(
-                    info,
-                    Type::Cmd,
-                    "Waited {}ms for active switch to finish before acquiring scope",
-                    waited
-                );
-            }
-        }
-
-        self.ctx.core_guard = Some(core_guard);
-        self.ctx.switch_scope = Some(manager.begin_switch());
-        self.ctx.sequence = Some(manager.next_request_sequence());
-        self.ctx.ensure_target_profile();
-
-        logging!(
-            info,
-            Type::Cmd,
-            "Begin modifying configuration; sequence: {}, target profile: {:?}",
-            self.ctx.sequence(),
-            self.ctx.target_profile
-        );
-
-        if self.ctx.cancelled() {
-            self.ctx.log_cancelled("after acquiring core lock");
-            return Ok(SwitchState::Complete(false));
-        }
-
-        if self.ctx.stale() {
-            StaleStage::AfterLock.log(&self.ctx);
-            return Ok(SwitchState::Complete(false));
-        }
-
-        Ok(SwitchState::Prepare)
-    }
-
-    pub(super) async fn handle_prepare(&mut self) -> CmdResult<SwitchState> {
-        let current_profile = {
-            let profiles_guard = Config::profiles().await;
-            profiles_guard.latest_ref().current.clone()
-        };
-
-        logging!(info, Type::Cmd, "Current profile: {:?}", current_profile);
-        self.ctx.previous_profile = current_profile;
-        Ok(SwitchState::ValidateTarget)
-    }
-
-    pub(super) async fn handle_validate_target(&mut self) -> CmdResult<SwitchState> {
-        if self.ctx.cancelled() {
-            self.ctx.log_cancelled("before validation");
-            return Ok(SwitchState::Complete(false));
-        }
-
-        if self.ctx.should_validate_target() {
-            let Some(target) = self.ctx.target_profile.clone() else {
-                logging!(
-                    error,
-                    Type::Cmd,
-                    "Missing target profile while validation was requested; aborting switch"
-                );
-                return Err("missing target profile at validation".into());
-            };
-            if !validate_profile_yaml(&target).await? {
-                return Ok(SwitchState::Complete(false));
-            }
-        }
-
-        if self.ctx.stale() {
-            StaleStage::BeforeCoreOperation.log(&self.ctx);
-            return Ok(SwitchState::Complete(false));
-        }
-
-        Ok(SwitchState::PatchDraft)
-    }
-
-    pub(super) async fn handle_patch_draft(&mut self) -> CmdResult<SwitchState> {
-        if self.ctx.cancelled() {
-            self.ctx.log_cancelled("before patching configuration");
-            return Ok(SwitchState::Complete(false));
-        }
-
-        logging!(
-            info,
-            Type::Cmd,
-            "Updating configuration draft, sequence: {}",
-            self.ctx.sequence()
-        );
-
-        let patch = self.ctx.take_profiles_patch()?;
-        self.ctx.new_profile_for_event = patch.current.clone();
-        let _ = Config::profiles().await.draft_mut().patch_config(patch);
-
-        if self.ctx.stale() {
-            StaleStage::BeforeCoreInteraction.log(&self.ctx);
-            Config::profiles().await.discard();
-            return Ok(SwitchState::Complete(false));
-        }
-
-        Ok(SwitchState::UpdateCore)
-    }
-
-    pub(super) async fn handle_update_core(&mut self) -> CmdResult<SwitchState> {
-        let sequence = self.ctx.sequence();
-        let task_id = self.ctx.task_id;
-        let profile = self.ctx.profile_label.clone();
-        logging!(
-            info,
-            Type::Cmd,
-            "Starting core configuration update, sequence: {}, task={:?}, profile={}",
-            sequence,
-            task_id,
-            profile
-        );
-
-        let heartbeat = self.ctx.heartbeat.clone();
-        let start = Instant::now();
-        let mut ticker = time::interval(Duration::from_secs(1));
-        ticker.set_missed_tick_behavior(time::MissedTickBehavior::Delay);
-
-        let update_future = CoreManager::global().update_config();
-        tokio::pin!(update_future);
-
-        let timeout = time::sleep(Duration::from_secs(30));
-        tokio::pin!(timeout);
-
-        let cancel_token = self.ctx.cancel_token();
-        let mut cancel_notifier: Pin<Box<dyn std::future::Future<Output = ()> + Send>> =
-            match cancel_token {
-                Some(token) => Box::pin(async move {
-                    token.cancelled_future().await;
-                }),
-                None => Box::pin(future::pending()),
-            };
-
-        enum UpdateOutcome {
-            Finished(Result<(bool, SmartString), Error>),
-            Timeout,
-            Cancelled,
-        }
-
-        let update_outcome = loop {
-            tokio::select! {
-                res = &mut update_future => break UpdateOutcome::Finished(res),
-                _ = &mut timeout => break UpdateOutcome::Timeout,
-                _ = &mut cancel_notifier => break UpdateOutcome::Cancelled,
-                _ = ticker.tick() => {
-                    let elapsed_ms = start.elapsed().as_millis();
-                    heartbeat.touch();
-                    match task_id {
-                        Some(id) => logging!(
-                            debug,
-                            Type::Cmd,
-                            "Switch task {} (profile={}) UpdateCore still running (elapsed={}ms)",
-                            id,
-                            profile,
-                            elapsed_ms
-                        ),
-                        None => logging!(
-                            debug,
-                            Type::Cmd,
-                            "Profile patch {} UpdateCore still running (elapsed={}ms)",
-                            profile,
-                            elapsed_ms
-                        ),
-                    }
-                }
-            }
-        };
-
-        let elapsed_ms = start.elapsed().as_millis();
-
-        let outcome = match update_outcome {
-            UpdateOutcome::Finished(Ok((true, _))) => {
-                logging!(
-                    info,
-                    Type::Cmd,
-                    "Core configuration update succeeded in {}ms",
-                    elapsed_ms
-                );
-                CoreUpdateOutcome::Success
-            }
-            UpdateOutcome::Finished(Ok((false, msg))) => {
-                logging!(
-                    warn,
-                    Type::Cmd,
-                    "Core configuration update validation failed in {}ms: {}",
-                    elapsed_ms,
-                    msg
-                );
-                CoreUpdateOutcome::ValidationFailed {
-                    message: msg.to_string(),
-                }
-            }
-            UpdateOutcome::Finished(Err(err)) => {
-                logging!(
-                    error,
-                    Type::Cmd,
-                    "Core configuration update errored in {}ms: {}",
-                    elapsed_ms,
-                    err
-                );
-                CoreUpdateOutcome::CoreError {
-                    message: err.to_string(),
-                }
-            }
-            UpdateOutcome::Timeout => {
-                logging!(
-                    error,
-                    Type::Cmd,
-                    "Core configuration update timed out after {}ms",
-                    elapsed_ms
-                );
-                CoreUpdateOutcome::Timeout
-            }
-            UpdateOutcome::Cancelled => {
-                self.ctx.log_cancelled("during core update");
-                logging!(
-                    info,
-                    Type::Cmd,
-                    "Core configuration update cancelled after {}ms",
-                    elapsed_ms
-                );
-                self.ctx.release_locks();
-                Config::profiles().await.discard();
-                return Ok(SwitchState::Complete(false));
-            }
-        };
-
-        self.ctx.release_core_guard();
-
-        Ok(SwitchState::Finalize(outcome))
-    }
-
-    pub(super) async fn handle_finalize(
-        &mut self,
-        outcome: CoreUpdateOutcome,
-    ) -> CmdResult<SwitchState> {
-        let next_state = match outcome {
-            CoreUpdateOutcome::Success => self.finalize_success().await,
-            CoreUpdateOutcome::ValidationFailed { message } => {
-                self.finalize_validation_failed(message).await
-            }
-            CoreUpdateOutcome::CoreError { message } => self.finalize_core_error(message).await,
-            CoreUpdateOutcome::Timeout => self.finalize_timeout().await,
-        };
-
-        if next_state.is_err() || matches!(next_state, Ok(SwitchState::Complete(_))) {
-            self.ctx.release_switch_scope();
-        }
-
-        next_state
-    }
-
-    pub(super) async fn finalize_success(&mut self) -> CmdResult<SwitchState> {
-        if self.abort_if_stale_post_core().await? {
-            return Ok(SwitchState::Complete(false));
-        }
-
-        self.log_successful_update();
-
-        if !self.apply_config_with_timeout().await? {
-            logging!(
-                warn,
-                Type::Cmd,
-                "Apply step failed; attempting to restore previous profile before completing"
-            );
-            restore_previous_profile(self.ctx.previous_profile.clone()).await?;
-            return Ok(SwitchState::Complete(false));
-        }
-
-        self.refresh_clash_with_timeout().await;
-        self.update_tray_tooltip_with_timeout().await;
-        self.update_tray_menu_with_timeout().await;
-        if let Err(err) = self.persist_profiles_with_timeout().await {
-            logging!(
-                error,
-                Type::Cmd,
-                "Persisting new profile configuration failed; attempting to restore previous profile: {}",
-                err
-            );
-            restore_previous_profile(self.ctx.previous_profile.clone()).await?;
-            return Err(err);
-        }
-        self.emit_profile_change_event();
-        logging!(
-            debug,
-            Type::Cmd,
-            "Finalize success pipeline completed for sequence {}",
-            self.ctx.sequence()
-        );
-
-        Ok(SwitchState::Complete(true))
-    }
-
-    pub(super) async fn finalize_validation_failed(
-        &mut self,
-        message: String,
-    ) -> CmdResult<SwitchState> {
-        logging!(
-            warn,
-            Type::Cmd,
-            "Configuration validation failed: {}",
-            message
-        );
-        Config::profiles().await.discard();
-        restore_previous_profile(self.ctx.previous_profile.clone()).await?;
-        handle::Handle::notice_message("config_validate::error", message);
-        Ok(SwitchState::Complete(false))
-    }
-
-    pub(super) async fn finalize_core_error(&mut self, message: String) -> CmdResult<SwitchState> {
-        logging!(
-            warn,
-            Type::Cmd,
-            "Error occurred during update: {}, sequence: {}",
-            message,
-            self.ctx.sequence()
-        );
-        Config::profiles().await.discard();
-        handle::Handle::notice_message("config_validate::boot_error", message);
-        Ok(SwitchState::Complete(false))
-    }
-
-    pub(super) async fn finalize_timeout(&mut self) -> CmdResult<SwitchState> {
-        let timeout_msg =
-            "Configuration update timed out (30s); possible validation or core communication stall";
-        logging!(
-            error,
-            Type::Cmd,
-            "{}, sequence: {}",
-            timeout_msg,
-            self.ctx.sequence()
-        );
-        Config::profiles().await.discard();
-        restore_previous_profile(self.ctx.previous_profile.clone()).await?;
-        handle::Handle::notice_message("config_validate::timeout", timeout_msg);
-        Ok(SwitchState::Complete(false))
-    }
-
-    pub(super) async fn abort_if_stale_post_core(&mut self) -> CmdResult<bool> {
-        if self.ctx.stale() {
-            StaleStage::AfterCoreOperation.log(&self.ctx);
-            Config::profiles().await.discard();
-            return Ok(true);
-        }
-
-        Ok(false)
-    }
-
-    pub(super) fn log_successful_update(&self) {
-        logging!(
-            info,
-            Type::Cmd,
-            "Configuration update succeeded, sequence: {}",
-            self.ctx.sequence()
-        );
-    }
-
-    pub(super) async fn apply_config_with_timeout(&mut self) -> CmdResult<bool> {
-        let apply_result = time::timeout(CONFIG_APPLY_TIMEOUT, async {
-            Config::profiles().await.apply()
-        })
-        .await;
-
-        if apply_result.is_ok() {
-            Ok(true)
-        } else {
-            logging!(
-                warn,
-                Type::Cmd,
-                "Applying profile configuration timed out after {:?}",
-                CONFIG_APPLY_TIMEOUT
-            );
-            Config::profiles().await.discard();
-            Ok(false)
-        }
-    }
-
-    pub(super) async fn refresh_clash_with_timeout(&self) {
-        let start = Instant::now();
-        let result = time::timeout(REFRESH_TIMEOUT, async {
-            handle::Handle::refresh_clash();
-        })
-        .await;
-
-        let elapsed = start.elapsed().as_millis();
-        match result {
-            Ok(_) => logging!(
-                debug,
-                Type::Cmd,
-                "refresh_clash_with_timeout completed in {}ms",
-                elapsed
-            ),
-            Err(_) => logging!(
-                warn,
-                Type::Cmd,
-                "Refreshing Clash state timed out after {:?} (elapsed={}ms)",
-                REFRESH_TIMEOUT,
-                elapsed
-            ),
-        }
-    }
-
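// ---- Editor's note -------------------------------------------------------
// The tray helpers below share one shape: tokio's timeout wraps a fallible
// call, so the caller must distinguish "deadline exceeded" from "completed
// with an error". A minimal sketch of that double-Result match (the inner
// operation is hypothetical):
use std::time::Duration;
use tokio::time;

async fn call_with_deadline() {
    async fn op() -> Result<(), String> {
        Ok(())
    }

    match time::timeout(Duration::from_secs(3), op()).await {
        Err(_elapsed) => { /* deadline exceeded; op may still be pending */ }
        Ok(Err(_err)) => { /* op finished within time, but reported a failure */ }
        Ok(Ok(())) => { /* success within the deadline */ }
    }
}
// ---------------------------------------------------------------------------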
-    pub(super) async fn update_tray_tooltip_with_timeout(&self) {
-        let start = Instant::now();
-        let update_tooltip = time::timeout(TRAY_UPDATE_TIMEOUT, async {
-            Tray::global().update_tooltip().await
-        })
-        .await;
-        let elapsed = start.elapsed().as_millis();
-
-        if update_tooltip.is_err() {
-            logging!(
-                warn,
-                Type::Cmd,
-                "Updating tray tooltip timed out after {:?} (elapsed={}ms)",
-                TRAY_UPDATE_TIMEOUT,
-                elapsed
-            );
-        } else if let Ok(Err(err)) = update_tooltip {
-            logging!(
-                warn,
-                Type::Cmd,
-                "Failed to update tray tooltip asynchronously: {}",
-                err
-            );
-        } else {
-            logging!(
-                debug,
-                Type::Cmd,
-                "update_tray_tooltip_with_timeout completed in {}ms",
-                elapsed
-            );
-        }
-    }
-
-    pub(super) async fn update_tray_menu_with_timeout(&self) {
-        let start = Instant::now();
-        let update_menu = time::timeout(TRAY_UPDATE_TIMEOUT, async {
-            Tray::global().update_menu().await
-        })
-        .await;
-        let elapsed = start.elapsed().as_millis();
-
-        if update_menu.is_err() {
-            logging!(
-                warn,
-                Type::Cmd,
-                "Updating tray menu timed out after {:?} (elapsed={}ms)",
-                TRAY_UPDATE_TIMEOUT,
-                elapsed
-            );
-        } else if let Ok(Err(err)) = update_menu {
-            logging!(
-                warn,
-                Type::Cmd,
-                "Failed to update tray menu asynchronously: {}",
-                err
-            );
-        } else {
-            logging!(
-                debug,
-                Type::Cmd,
-                "update_tray_menu_with_timeout completed in {}ms",
-                elapsed
-            );
-        }
-    }
-
-    pub(super) async fn persist_profiles_with_timeout(&self) -> CmdResult<()> {
-        let start = Instant::now();
-        let save_future = AsyncHandler::spawn_blocking(|| {
-            futures::executor::block_on(async { profiles_save_file_safe().await })
-        });
-
-        let elapsed = start.elapsed().as_millis();
-        match time::timeout(SAVE_PROFILES_TIMEOUT, save_future).await {
-            Err(_) => {
-                let message = format!(
-                    "Persisting configuration file timed out after {:?} (elapsed={}ms)",
-                    SAVE_PROFILES_TIMEOUT, elapsed
-                );
-                logging!(warn, Type::Cmd, "{}", message);
-                Err(message.into())
-            }
-            Ok(join_result) => match join_result {
-                Err(join_err) => {
-                    let message = format!(
-                        "Persisting configuration file failed: blocking task join error: {join_err}"
-                    );
-                    logging!(error, Type::Cmd, "{}", message);
-                    Err(message.into())
-                }
-                Ok(save_result) => match save_result {
-                    Ok(()) => {
-                        logging!(
-                            debug,
-                            Type::Cmd,
-                            "persist_profiles_with_timeout completed in {}ms",
-                            elapsed
-                        );
-                        Ok(())
-                    }
-                    Err(err) => {
-                        let message = format!("Persisting configuration file failed: {}", err);
-                        logging!(error, Type::Cmd, "{}", message);
-                        Err(message.into())
-                    }
-                },
-            },
-        }
-    }
-
-    pub(super) fn emit_profile_change_event(&self) {
-        if let Some(current) = self.ctx.new_profile_for_event.clone() {
-            logging!(
-                info,
-                Type::Cmd,
-                "Emitting configuration change event to frontend: {}, sequence: {}",
-                current,
-                self.ctx.sequence()
-            );
-            handle::Handle::notify_profile_changed(current);
-        }
-    }
-}
diff --git a/src-tauri/src/core/handle.rs b/src-tauri/src/core/handle.rs
index e3735d461..ef868f59d 100644
--- a/src-tauri/src/core/handle.rs
+++ b/src-tauri/src/core/handle.rs
@@ -1,14 +1,7 @@
-use crate::{
-    APP_HANDLE, config::Config, constants::timing, logging, singleton, utils::logging::Type,
-};
+use crate::{APP_HANDLE, constants::timing, singleton};
 use parking_lot::RwLock;
-use serde_json::{Value, json};
 use smartstring::alias::String;
-use std::{
-    sync::Arc,
-    thread,
-    time::{SystemTime, UNIX_EPOCH},
-};
+use std::{sync::Arc, thread};
 use tauri::{AppHandle, Manager, WebviewWindow};
 use tauri_plugin_mihomo::{Mihomo, MihomoExt};
 use tokio::sync::RwLockReadGuard;
@@ -73,14 +66,10 @@ impl Handle {
             return;
         }
 
-        {
-            let system_opt = handle.notification_system.read();
-            if let Some(system) = system_opt.as_ref() {
-                system.send_event(FrontendEvent::RefreshClash);
-            }
+        let system_opt = handle.notification_system.read();
+        if let Some(system) = system_opt.as_ref() {
+            system.send_event(FrontendEvent::RefreshClash);
         }
-
-        Self::spawn_proxy_snapshot();
     }
 
     pub fn refresh_verge() {
@@ -96,37 +85,11 @@ impl Handle {
     }
 
     pub fn notify_profile_changed(profile_id: String) {
-        let handle = Self::global();
-        if handle.is_exiting() {
-            return;
-        }
-
-        let system_opt = handle.notification_system.read();
-        if let Some(system) = system_opt.as_ref() {
-            system.send_event(FrontendEvent::ProfileChanged {
-                current_profile_id: profile_id,
-            });
-        }
-    }
-
-    pub fn notify_profile_switch_finished(
-        profile_id: String,
-        success: bool,
-        notify: bool,
-        task_id: u64,
-    ) {
-        Self::send_event(FrontendEvent::ProfileSwitchFinished {
-            profile_id,
-            success,
-            notify,
-            task_id,
+        Self::send_event(FrontendEvent::ProfileChanged {
+            current_profile_id: profile_id,
         });
     }
 
-    pub fn notify_rust_panic(message: String, location: String) {
-        Self::send_event(FrontendEvent::RustPanic { message, location });
-    }
-
     pub fn notify_timer_updated(profile_index: String) {
         Self::send_event(FrontendEvent::TimerUpdated { profile_index });
     }
@@ -137,86 +100,6 @@ impl Handle {
 
     pub fn notify_profile_update_completed(uid: String) {
         Self::send_event(FrontendEvent::ProfileUpdateCompleted { uid });
-        Self::spawn_proxy_snapshot();
-    }
-
-    pub fn notify_proxies_updated(payload: Value) {
-        Self::send_event(FrontendEvent::ProxiesUpdated { payload });
-    }
-
-    pub async fn build_proxy_snapshot() -> Option<Value> {
-        let mihomo_guard = Self::mihomo().await;
-        let proxies = match mihomo_guard.get_proxies().await {
-            Ok(data) => match serde_json::to_value(&data) {
-                Ok(value) => value,
-                Err(error) => {
-                    logging!(
-                        warn,
-                        Type::Frontend,
-                        "Failed to serialize proxies snapshot: {error}"
-                    );
-                    return None;
-                }
-            },
-            Err(error) => {
-                logging!(
-                    warn,
-                    Type::Frontend,
-                    "Failed to fetch proxies for snapshot: {error}"
-                );
-                return None;
-            }
-        };
-
-        drop(mihomo_guard);
-
-        let providers_guard = Self::mihomo().await;
-        let providers_value = match providers_guard.get_proxy_providers().await {
-            Ok(data) => serde_json::to_value(&data).unwrap_or_else(|error| {
-                logging!(
-                    warn,
-                    Type::Frontend,
-                    "Failed to serialize proxy providers for snapshot: {error}"
-                );
-                Value::Null
-            }),
-            Err(error) => {
-                logging!(
-                    warn,
-                    Type::Frontend,
-                    "Failed to fetch proxy providers for snapshot: {error}"
-                );
-                Value::Null
-            }
-        };
-
-        drop(providers_guard);
-
-        let profile_guard = Config::profiles().await;
-        let profile_id = profile_guard.latest_ref().current.clone();
-        drop(profile_guard);
-
-        let emitted_at = SystemTime::now()
-            .duration_since(UNIX_EPOCH)
-            .map(|duration| duration.as_millis() as i64)
-            .unwrap_or(0);
-
-        let payload = json!({
-            "proxies": proxies,
-            "providers": providers_value,
-            "profileId": profile_id,
-            "emittedAt": emitted_at,
-        });
-
-        Some(payload)
-    }
-
-    fn spawn_proxy_snapshot() {
-        tauri::async_runtime::spawn(async {
-            if let Some(payload) = Handle::build_proxy_snapshot().await {
-                Handle::notify_proxies_updated(payload);
-            }
-        });
     }
 
     pub fn notice_message<S: Into<String>, M: Into<String>>(status: S, msg: M) {
diff --git a/src-tauri/src/core/manager/config.rs b/src-tauri/src/core/manager/config.rs
index e93d5244a..263ddb4b0 100644
--- a/src-tauri/src/core/manager/config.rs
+++ b/src-tauri/src/core/manager/config.rs
@@ -10,10 +10,7 @@ use anyhow::{Result, anyhow};
 use smartstring::alias::String;
 use std::{path::PathBuf, time::Instant};
 use tauri_plugin_mihomo::Error as MihomoError;
-use tokio::time::{sleep, timeout};
-
-const RELOAD_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(5);
-const MAX_RELOAD_ATTEMPTS: usize = 3;
+use tokio::time::sleep;
 
 impl CoreManager {
     pub async fn use_default_config(&self, error_key: &str, error_msg: &str) -> Result<()> {
@@ -42,38 +39,12 @@ impl CoreManager {
             return Ok((true, String::new()));
         }
 
-        let start = Instant::now();
-
         let _permit = self
             .update_semaphore
             .try_acquire()
             .map_err(|_| anyhow!("Config update already in progress"))?;
 
-        let result = self.perform_config_update().await;
-
-        match &result {
-            Ok((success, msg)) => {
-                logging!(
-                    info,
-                    Type::Core,
-                    "[ConfigUpdate] Finished (success={}, elapsed={}ms, msg={})",
-                    success,
-                    start.elapsed().as_millis(),
-                    msg
-                );
-            }
-            Err(err) => {
-                logging!(
-                    error,
-                    Type::Core,
-                    "[ConfigUpdate] Failed after {}ms: {}",
-                    start.elapsed().as_millis(),
-                    err
-                );
-            }
-        }
-
-        result
+        self.perform_config_update().await
     }
 
     fn should_update_config(&self) -> Result<bool> {
@@ -91,73 +62,20 @@ impl CoreManager {
     }
 
     async fn perform_config_update(&self) -> Result<(bool, String)> {
-        logging!(debug, Type::Core, "[ConfigUpdate] Pipeline start");
-        let total_start = Instant::now();
-
-        let mut stage_timer = Instant::now();
         Config::generate().await?;
-        logging!(
-            debug,
-            Type::Core,
-            "[ConfigUpdate] Generation completed in {}ms",
-            stage_timer.elapsed().as_millis()
-        );
 
-        stage_timer = Instant::now();
-        let validation_result = CoreConfigValidator::global().validate_config().await;
-        logging!(
-            debug,
-            Type::Core,
-            "[ConfigUpdate] Validation completed in {}ms",
-            stage_timer.elapsed().as_millis()
-        );
-
-        match validation_result {
+        match CoreConfigValidator::global().validate_config().await {
             Ok((true, _)) => {
-                stage_timer = Instant::now();
                 let run_path = Config::generate_file(ConfigType::Run).await?;
-                logging!(
-                    debug,
-                    Type::Core,
-                    "[ConfigUpdate] Runtime file generated in {}ms",
-                    stage_timer.elapsed().as_millis()
-                );
 
-                stage_timer = Instant::now();
                 self.apply_config(run_path).await?;
-                logging!(
-                    debug,
-                    Type::Core,
-                    "[ConfigUpdate] Core apply completed in {}ms",
-                    stage_timer.elapsed().as_millis()
-                );
-                logging!(
-                    debug,
-                    Type::Core,
-                    "[ConfigUpdate] Pipeline succeeded in {}ms",
-                    total_start.elapsed().as_millis()
-                );
                 Ok((true, String::new()))
             }
             Ok((false, error_msg)) => {
                 Config::runtime().await.discard();
-                logging!(
-                    warn,
-                    Type::Core,
-                    "[ConfigUpdate] Validation reported failure after {}ms: {}",
-                    total_start.elapsed().as_millis(),
-                    error_msg
-                );
                 Ok((false, error_msg))
             }
             Err(e) => {
                 Config::runtime().await.discard();
-                logging!(
-                    error,
-                    Type::Core,
-                    "[ConfigUpdate] Validation errored after {}ms: {}",
-                    total_start.elapsed().as_millis(),
-                    e
-                );
                 Err(e)
             }
         }
@@ -170,49 +88,17 @@ impl CoreManager {
 
     pub(super) async fn apply_config(&self, path: PathBuf) -> Result<()> {
         let path_str = dirs::path_to_str(&path)?;
-        let reload_start = Instant::now();
-        match self.reload_config_with_retry(path_str).await {
+        match self.reload_config(path_str).await {
             Ok(_) => {
                 Config::runtime().await.apply();
-                logging!(
-                    debug,
-                    Type::Core,
-                    "Configuration applied (reload={}ms)",
-                    reload_start.elapsed().as_millis()
-                );
+                logging!(info, Type::Core, "Configuration applied");
                 Ok(())
             }
+            Err(err) if Self::should_restart_on_error(&err) => {
+                self.retry_with_restart(path_str).await
+            }
             Err(err) => {
-                if Self::should_restart_for_anyhow(&err) {
-                    logging!(
-                        warn,
-                        Type::Core,
-                        "Reload failed after {}ms with retryable/timeout error; attempting restart: {}",
-                        reload_start.elapsed().as_millis(),
-                        err
-                    );
-                    match self.retry_with_restart(path_str).await {
-                        Ok(_) => return Ok(()),
-                        Err(retry_err) => {
-                            logging!(
-                                error,
-                                Type::Core,
-                                "Reload retry with restart failed: {}",
-                                retry_err
-                            );
-                            Config::runtime().await.discard();
-                            return Err(retry_err);
-                        }
-                    }
-                }
                 Config::runtime().await.discard();
-                logging!(
-                    error,
-                    Type::Core,
-                    "Failed to apply config after {}ms: {}",
-                    reload_start.elapsed().as_millis(),
-                    err
-                );
                 Err(anyhow!("Failed to apply config: {}", err))
             }
         }
@@ -227,116 +113,17 @@ impl CoreManager {
         self.restart_core().await?;
         sleep(timing::CONFIG_RELOAD_DELAY).await;
 
-        self.reload_config_with_retry(config_path).await?;
+        self.reload_config(config_path).await?;
         Config::runtime().await.apply();
         logging!(info, Type::Core, "Configuration applied after restart");
 
         Ok(())
     }
 
-    async fn reload_config_with_retry(&self, path: &str) -> Result<()> {
-        for attempt in 1..=MAX_RELOAD_ATTEMPTS {
-            let attempt_start = Instant::now();
-            let reload_future = self.reload_config_once(path);
-            match timeout(RELOAD_TIMEOUT, reload_future).await {
-                Ok(Ok(())) => {
-                    logging!(
-                        debug,
-                        Type::Core,
-                        "reload_config attempt {}/{} succeeded in {}ms",
-                        attempt,
-                        MAX_RELOAD_ATTEMPTS,
-                        attempt_start.elapsed().as_millis()
-                    );
-                    return Ok(());
-                }
-                Ok(Err(err)) => {
-                    logging!(
-                        warn,
-                        Type::Core,
-                        "reload_config attempt {}/{} failed after {}ms: {}",
-                        attempt,
-                        MAX_RELOAD_ATTEMPTS,
-                        attempt_start.elapsed().as_millis(),
-                        err
-                    );
-                    if attempt == MAX_RELOAD_ATTEMPTS {
-                        return Err(anyhow!(
-                            "Failed to reload config after {} attempts: {}",
-                            attempt,
-                            err
-                        ));
-                    }
-                }
-                Err(_) => {
-                    logging!(
-                        warn,
-                        Type::Core,
-                        "reload_config attempt {}/{} timed out after {:?}",
-                        attempt,
-                        MAX_RELOAD_ATTEMPTS,
-                        RELOAD_TIMEOUT
-                    );
-                    if attempt == MAX_RELOAD_ATTEMPTS {
-                        return Err(anyhow!(
-                            "Config reload timed out after {:?} ({} attempts)",
-                            RELOAD_TIMEOUT,
-                            MAX_RELOAD_ATTEMPTS
-                        ));
-                    }
-                }
-            }
-        }
-
-        Err(anyhow!(
-            "Config reload retry loop exited unexpectedly ({} attempts)",
-            MAX_RELOAD_ATTEMPTS
-        ))
-    }
-
-    async fn reload_config_once(&self, path: &str) -> Result<(), MihomoError> {
-        logging!(
-            info,
-            Type::Core,
-            "[ConfigUpdate] reload_config_once begin path={} ",
-            path
-        );
-        let start = Instant::now();
-        let result = handle::Handle::mihomo()
-            .await
-            .reload_config(true, path)
-            .await;
-        let elapsed = start.elapsed().as_millis();
-        match result {
-            Ok(()) => {
-                logging!(
-                    info,
-                    Type::Core,
-                    "[ConfigUpdate] reload_config_once succeeded (elapsed={}ms)",
-                    elapsed
-                );
-                Ok(())
-            }
-            Err(err) => {
-                logging!(
-                    warn,
-                    Type::Core,
-                    "[ConfigUpdate] reload_config_once failed (elapsed={}ms, err={})",
-                    elapsed,
-                    err
-                );
-                Err(err)
-            }
-        }
-    }
-
-    fn should_restart_for_anyhow(err: &anyhow::Error) -> bool {
-        if let Some(mihomo_err) = err.downcast_ref::<MihomoError>() {
-            return Self::should_restart_on_error(mihomo_err);
-        }
-        let msg = err.to_string();
-        msg.contains("timed out")
-            || msg.contains("reload")
-            || msg.contains("Failed to apply config")
+    async fn reload_config(&self, path: &str) -> Result<(), MihomoError> {
+        handle::Handle::mihomo()
+            .await
+            .reload_config(true, path)
+            .await
     }
 
     fn should_restart_on_error(err: &MihomoError) -> bool {
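// ---- Editor's note -------------------------------------------------------
// reload_config_with_retry above bounds each attempt with its own timeout and
// gives up after a fixed number of tries. The skeleton of that loop, in
// isolation (names and limits are illustrative, not from this codebase):
use std::time::Duration;
use tokio::time;

async fn retry_with_deadline<F, Fut, T, E>(
    mut op: F,
    attempts: usize,
    per_try: Duration,
) -> Option<T>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
{
    for _ in 0..attempts {
        match time::timeout(per_try, op()).await {
            Ok(Ok(value)) => return Some(value), // success: stop retrying
            Ok(Err(_)) => {}                     // operation failed: retry
            Err(_) => {}                         // attempt timed out: retry
        }
    }
    None // every attempt failed or timed out
}
// ---------------------------------------------------------------------------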
diff --git a/src-tauri/src/core/notification.rs b/src-tauri/src/core/notification.rs
index 5754fecb2..071bcedb9 100644
--- a/src-tauri/src/core/notification.rs
+++ b/src-tauri/src/core/notification.rs
@@ -1,71 +1,38 @@
-use crate::{constants::retry, logging, utils::logging::Type};
-use once_cell::sync::Lazy;
+use crate::{
+    constants::{retry, timing},
+    logging,
+    utils::logging::Type,
+};
 use parking_lot::RwLock;
 use smartstring::alias::String;
 use std::{
     sync::{
-        Arc,
-        atomic::{AtomicBool, AtomicU64, Ordering},
+        atomic::{AtomicU64, Ordering},
         mpsc,
     },
     thread,
     time::Instant,
 };
-use tauri::Emitter;
-use tauri::async_runtime;
+use tauri::{Emitter, WebviewWindow};
 
-#[allow(dead_code)] // Temporarily suppress warnings while diagnostics disable certain events
 #[derive(Debug, Clone)]
 pub enum FrontendEvent {
     RefreshClash,
     RefreshVerge,
-    RefreshProxy,
-    ProxiesUpdated {
-        payload: serde_json::Value,
-    },
-    NoticeMessage {
-        status: String,
-        message: String,
-    },
-    ProfileChanged {
-        current_profile_id: String,
-    },
-    ProfileSwitchFinished {
-        profile_id: String,
-        success: bool,
-        notify: bool,
-        task_id: u64,
-    },
-    TimerUpdated {
-        profile_index: String,
-    },
-    ProfileUpdateStarted {
-        uid: String,
-    },
-    ProfileUpdateCompleted {
-        uid: String,
-    },
-    RustPanic {
-        message: String,
-        location: String,
-    },
+    NoticeMessage { status: String, message: String },
+    ProfileChanged { current_profile_id: String },
+    TimerUpdated { profile_index: String },
+    ProfileUpdateStarted { uid: String },
+    ProfileUpdateCompleted { uid: String },
 }
 
-static EMIT_SERIALIZER: Lazy<tokio::sync::Mutex<()>> = Lazy::new(|| tokio::sync::Mutex::new(()));
-
 #[derive(Debug, Default)]
 struct EventStats {
+    total_sent: AtomicU64,
     total_errors: AtomicU64,
     last_error_time: RwLock<Option<Instant>>,
 }
 
-#[derive(Debug, Default)]
-#[allow(dead_code)]
-struct BufferedProxies {
-    pending: parking_lot::Mutex<Option<serde_json::Value>>,
-    in_flight: AtomicBool,
-}
-
 #[derive(Debug, Clone)]
 pub struct ErrorMessage {
     pub status: String,
@@ -80,7 +47,6 @@ pub struct NotificationSystem {
     pub(super) is_running: bool,
     stats: EventStats,
     emergency_mode: RwLock<bool>,
-    proxies_buffer: Arc<BufferedProxies>,
 }
 
 impl Default for NotificationSystem {
@@ -97,7 +63,6 @@ impl NotificationSystem {
             is_running: false,
             stats: EventStats::default(),
             emergency_mode: RwLock::new(false),
-            proxies_buffer: Arc::new(BufferedProxies::default()),
         }
     }
 
@@ -152,78 +117,13 @@ impl NotificationSystem {
             return;
         };
 
-        let event_label = Self::describe_event(&event);
-
-        match event {
-            FrontendEvent::ProxiesUpdated { payload } => {
-                logging!(
-                    debug,
-                    Type::Frontend,
-                    "Queueing proxies-updated event for buffered emit: {}",
-                    event_label
-                );
-                system.enqueue_proxies_updated(payload);
-            }
-            other => {
-                logging!(
-                    debug,
-                    Type::Frontend,
-                    "Queueing event for async emit: {}",
-                    event_label
-                );
-
-                let (event_name, payload_result) = system.serialize_event(other);
-                let payload = match payload_result {
-                    Ok(value) => value,
-                    Err(err) => {
-                        logging!(
-                            warn,
-                            Type::Frontend,
-                            "Failed to serialize event {}: {}",
-                            event_name,
-                            err
-                        );
-                        return;
-                    }
-                };
-
-                logging!(
-                    debug,
-                    Type::Frontend,
-                    "Dispatching async emit: {}",
-                    event_name
-                );
-                let _ = Self::emit_via_app(event_name, payload);
-            }
-        }
-    }
-
-    fn enqueue_proxies_updated(&self, payload: serde_json::Value) {
-        let replaced = {
-            let mut slot = self.proxies_buffer.pending.lock();
-            let had_pending = slot.is_some();
-            *slot = Some(payload);
-            had_pending
-        };
-
-        if replaced {
-            logging!(
-                debug,
-                Type::Frontend,
-                "Replaced pending proxies-updated payload with latest snapshot"
-            );
+        if system.should_skip_event(&event) {
+            return;
         }
 
-        if self
-            .proxies_buffer
-            .in_flight
-            .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
-            .is_ok()
-        {
-            let buffer = Arc::clone(&self.proxies_buffer);
-            async_runtime::spawn(async move {
-                Self::flush_proxies(buffer).await;
-            });
+        if let Some(window) = super::handle::Handle::get_window() {
+            system.emit_to_window(&window, event);
+            thread::sleep(timing::EVENT_EMIT_DELAY);
         }
     }
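// ---- Editor's note -------------------------------------------------------
// enqueue_proxies_updated/flush_proxies above implement a "latest wins,
// single flight" coalescer: writers overwrite the pending slot (stale
// snapshots are dropped) and a compare_exchange on in_flight ensures at most
// one drain task exists; the drain re-checks the slot after clearing
// in_flight to close the race with a concurrent writer. The core shape,
// simplified and with illustrative names:
use std::sync::{
    Mutex,
    atomic::{AtomicBool, Ordering},
};

struct Coalescer<T> {
    pending: Mutex<Option<T>>,
    in_flight: AtomicBool,
}

impl<T> Coalescer<T> {
    fn new() -> Self {
        Self {
            pending: Mutex::new(None),
            in_flight: AtomicBool::new(false),
        }
    }

    /// Returns true when the caller just became the (single) drainer.
    fn offer(&self, value: T) -> bool {
        *self.pending.lock().unwrap() = Some(value); // latest wins
        self.in_flight
            .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
            .is_ok()
    }

    fn drain(&self, mut emit: impl FnMut(T)) {
        loop {
            let taken = self.pending.lock().unwrap().take();
            match taken {
                Some(value) => emit(value),
                None => {
                    self.in_flight.store(false, Ordering::Release);
                    // Re-check: a writer may have enqueued between take() and store().
                    if self.pending.lock().unwrap().is_some()
                        && self
                            .in_flight
                            .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
                            .is_ok()
                    {
                        continue;
                    }
                    return;
                }
            }
        }
    }
}
// ---------------------------------------------------------------------------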
@@ -235,95 +135,25 @@ impl NotificationSystem {
         )
     }
 
-    fn emit_via_app(event_name: &'static str, payload: serde_json::Value) -> Result<(), String> {
-        let app_handle = super::handle::Handle::app_handle().clone();
-        let event_name = event_name.to_string();
-        async_runtime::spawn(async move {
-            if let Err(err) = app_handle.emit_to("main", event_name.as_str(), payload) {
-                logging!(
-                    warn,
-                    Type::Frontend,
-                    "emit_to failed for {}: {}",
-                    event_name,
-                    err
-                );
+    fn emit_to_window(&self, window: &WebviewWindow, event: FrontendEvent) {
+        let (event_name, payload) = self.serialize_event(event);
+
+        let Ok(payload) = payload else {
+            self.stats.total_errors.fetch_add(1, Ordering::Relaxed);
+            return;
+        };
+
+        match window.emit(event_name, payload) {
+            Ok(_) => {
+                self.stats.total_sent.fetch_add(1, Ordering::Relaxed);
             }
-        });
-        Ok(())
-    }
-
-    async fn flush_proxies(buffer: Arc<BufferedProxies>) {
-        const EVENT_NAME: &str = "proxies-updated";
-
-        loop {
-            let payload_opt = {
-                let mut guard = buffer.pending.lock();
-                guard.take()
-            };
-
-            let Some(payload) = payload_opt else {
-                buffer.in_flight.store(false, Ordering::Release);
-
-                if buffer.pending.lock().is_some()
-                    && buffer
-                        .in_flight
-                        .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
-                        .is_ok()
-                {
-                    continue;
-                }
-
-                break;
-            };
-
-            logging!(debug, Type::Frontend, "Dispatching buffered proxies emit");
-            let _guard = EMIT_SERIALIZER.lock().await;
-            if let Err(err) = Self::emit_via_app(EVENT_NAME, payload) {
-                logging!(
-                    warn,
-                    Type::Frontend,
-                    "Buffered proxies emit failed: {}",
-                    err
-                );
+            Err(e) => {
+                logging!(warn, Type::Frontend, "Event emit failed: {}", e);
+                self.handle_emit_error();
             }
         }
     }
 
-    fn describe_event(event: &FrontendEvent) -> String {
-        match event {
-            FrontendEvent::RefreshClash => "RefreshClash".into(),
-            FrontendEvent::RefreshVerge => "RefreshVerge".into(),
-            FrontendEvent::RefreshProxy => "RefreshProxy".into(),
-            FrontendEvent::ProxiesUpdated { .. } => "ProxiesUpdated".into(),
-            FrontendEvent::NoticeMessage { status, .. } => {
-                format!("NoticeMessage({})", status).into()
-            }
-            FrontendEvent::ProfileChanged { current_profile_id } => {
-                format!("ProfileChanged({})", current_profile_id).into()
-            }
-            FrontendEvent::ProfileSwitchFinished {
-                profile_id,
-                task_id,
-                ..
-            } => format!(
-                "ProfileSwitchFinished(profile={}, task={})",
-                profile_id, task_id
-            )
-            .into(),
-            FrontendEvent::TimerUpdated { profile_index } => {
-                format!("TimerUpdated({})", profile_index).into()
-            }
-            FrontendEvent::ProfileUpdateStarted { uid } => {
-                format!("ProfileUpdateStarted({})", uid).into()
-            }
-            FrontendEvent::ProfileUpdateCompleted { uid } => {
-                format!("ProfileUpdateCompleted({})", uid).into()
-            }
-            FrontendEvent::RustPanic { message, .. } => format!("RustPanic({})", message).into(),
} => format!("RustPanic({})", message).into(), - } - } - - #[allow(dead_code)] fn serialize_event( &self, event: FrontendEvent, @@ -337,25 +167,9 @@ impl NotificationSystem { "verge://notice-message", serde_json::to_value((status, message)), ), - FrontendEvent::RefreshProxy => ("verge://refresh-proxy-config", Ok(json!("yes"))), - FrontendEvent::ProxiesUpdated { payload } => ("proxies-updated", Ok(payload)), FrontendEvent::ProfileChanged { current_profile_id } => { ("profile-changed", Ok(json!(current_profile_id))) } - FrontendEvent::ProfileSwitchFinished { - profile_id, - success, - notify, - task_id, - } => ( - "profile-switch-finished", - Ok(json!({ - "profileId": profile_id, - "success": success, - "notify": notify, - "taskId": task_id - })), - ), FrontendEvent::TimerUpdated { profile_index } => { ("verge://timer-updated", Ok(json!(profile_index))) } @@ -365,10 +179,6 @@ impl NotificationSystem { FrontendEvent::ProfileUpdateCompleted { uid } => { ("profile-update-completed", Ok(json!({ "uid": uid }))) } - FrontendEvent::RustPanic { message, location } => ( - "rust-panic", - Ok(json!({ "message": message, "location": location })), - ), } } @@ -394,19 +204,10 @@ impl NotificationSystem { } if let Some(sender) = &self.sender { - if sender.send(event).is_err() { - logging!( - warn, - Type::Frontend, - "Failed to send event to worker thread" - ); - self.handle_emit_error(); - return false; - } - return true; + sender.send(event).is_ok() + } else { + false } - - false } pub fn shutdown(&mut self) { diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 848389627..bbd16795e 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -194,7 +194,6 @@ mod app_init { cmd::get_profiles, cmd::enhance_profiles, cmd::patch_profiles_config, - cmd::switch_profile, cmd::view_profile, cmd::patch_profile, cmd::create_profile, @@ -205,8 +204,6 @@ mod app_init { cmd::read_profile_file, cmd::save_profile_file, cmd::get_next_update_time, - cmd::get_profile_switch_status, - cmd::get_profile_switch_events, cmd::script_validate_notice, cmd::validate_script_file, cmd::create_local_backup, @@ -223,7 +220,6 @@ mod app_init { cmd::get_system_info, cmd::get_unlock_items, cmd::check_media_unlock, - cmd::frontend_log, ] } } @@ -362,28 +358,6 @@ pub fn run() { } } - std::panic::set_hook(Box::new(|info| { - let payload = info - .payload() - .downcast_ref::<&'static str>() - .map(|s| (*s).to_string()) - .or_else(|| info.payload().downcast_ref::().cloned()) - .unwrap_or_else(|| "Unknown panic".to_string()); - let location = info - .location() - .map(|loc| format!("{}:{}", loc.file(), loc.line())) - .unwrap_or_else(|| "unknown location".to_string()); - - logging!( - error, - Type::System, - "Rust panic captured: {} @ {}", - payload, - location - ); - handle::Handle::notify_rust_panic(payload.into(), location.into()); - })); - #[cfg(feature = "clippy")] let context = tauri::test::mock_context(tauri::test::noop_assets()); #[cfg(feature = "clippy")] diff --git a/src-tauri/src/utils/draft.rs b/src-tauri/src/utils/draft.rs index cc72f9c32..044f6f1fa 100644 --- a/src-tauri/src/utils/draft.rs +++ b/src-tauri/src/utils/draft.rs @@ -68,13 +68,6 @@ impl Draft> { }) } - /// 尝试获取最新只读视图,若当前持有写锁则返回 `None` - pub fn try_latest_ref(&self) -> Option>> { - self.inner - .try_read() - .map(|guard| RwLockReadGuard::map(guard, |inner| inner.1.as_ref().unwrap_or(&inner.0))) - } - /// 提交草稿,返回旧正式数据 pub fn apply(&self) -> Option> { let mut inner = self.inner.write(); diff --git a/src/components/home/current-proxy-card.tsx 
b/src/components/home/current-proxy-card.tsx index ed74def85..ceea82d78 100644 --- a/src/components/home/current-proxy-card.tsx +++ b/src/components/home/current-proxy-card.tsx @@ -100,12 +100,10 @@ export const CurrentProxyCard = () => { const { t } = useTranslation(); const navigate = useNavigate(); const theme = useTheme(); - const { proxies, proxyHydration, clashConfig, refreshProxy, rules } = - useAppData(); + const { proxies, clashConfig, refreshProxy, rules } = useAppData(); const { verge } = useVerge(); const { current: currentProfile } = useProfiles(); const autoDelayEnabled = verge?.enable_auto_delay_detection ?? false; - const isLiveHydration = proxyHydration === "live"; const currentProfileId = currentProfile?.uid || null; const getProfileStorageKey = useCallback( @@ -717,6 +715,7 @@ export const CurrentProxyCard = () => { ); } } + refreshProxy(); if (sortType === 1) { setDelaySortRefresh((prev) => prev + 1); @@ -841,24 +840,13 @@ export const CurrentProxyCard = () => { iconColor={currentProxy ? "primary" : undefined} action={ - {!isLiveHydration && ( - - )} @@ -972,7 +960,7 @@ export const CurrentProxyCard = () => { value={state.selection.group} onChange={handleGroupChange} label={t("Group")} - disabled={isGlobalMode || isDirectMode || !isLiveHydration} + disabled={isGlobalMode || isDirectMode} > {state.proxyData.groups.map((group) => ( @@ -990,7 +978,7 @@ export const CurrentProxyCard = () => { value={state.selection.proxy} onChange={handleProxyChange} label={t("Proxy")} - disabled={isDirectMode || !isLiveHydration} + disabled={isDirectMode} renderValue={renderProxyValue} MenuProps={{ PaperProps: { diff --git a/src/components/proxy/provider-button.tsx b/src/components/proxy/provider-button.tsx index 4d0f2c397..e22b856e0 100644 --- a/src/components/proxy/provider-button.tsx +++ b/src/components/proxy/provider-button.tsx @@ -1,7 +1,6 @@ import { RefreshRounded, StorageOutlined } from "@mui/icons-material"; import { Box, - Chip, Button, Dialog, DialogActions, @@ -19,7 +18,7 @@ import { } from "@mui/material"; import { useLockFn } from "ahooks"; import dayjs from "dayjs"; -import { useMemo, useState } from "react"; +import { useState } from "react"; import { useTranslation } from "react-i18next"; import { updateProxyProvider } from "tauri-plugin-mihomo-api"; @@ -49,61 +48,29 @@ const parseExpire = (expire?: number) => { export const ProviderButton = () => { const { t } = useTranslation(); const [open, setOpen] = useState(false); - const { - proxyProviders, - proxyHydration, - refreshProxy, - refreshProxyProviders, - } = useAppData(); - - const isHydrating = proxyHydration !== "live"; + const { proxyProviders, refreshProxy, refreshProxyProviders } = useAppData(); const [updating, setUpdating] = useState>({}); // 检查是否有提供者 const hasProviders = Object.keys(proxyProviders || {}).length > 0; - // Hydration hint badge keeps users aware of sync state - const hydrationChip = useMemo(() => { - if (proxyHydration === "live") return null; - - return ( - - ); - }, [proxyHydration, t]); - // 更新单个代理提供者 const updateProvider = useLockFn(async (name: string) => { - if (isHydrating) { - showNotice("info", t("Proxy data is syncing, please wait")); - return; - } - try { // 设置更新状态 setUpdating((prev) => ({ ...prev, [name]: true })); + await updateProxyProvider(name); - await refreshProxyProviders(); + + // 刷新数据 await refreshProxy(); - showNotice( - "success", - t("Provider {{name}} updated successfully", { name }), - ); + await refreshProxyProviders(); + + showNotice("success", `${name} 更新成功`); } catch 
(err: any) { showNotice( "error", - t("Provider {{name}} update failed: {{message}}", { - name, - message: err?.message || err.toString(), - }), + `${name} 更新失败: ${err?.message || err.toString()}`, ); } finally { // 清除更新状态 @@ -113,16 +80,11 @@ export const ProviderButton = () => { // 更新所有代理提供者 const updateAllProviders = useLockFn(async () => { - if (isHydrating) { - showNotice("info", t("Proxy data is syncing, please wait")); - return; - } - try { // 获取所有provider的名称 const allProviders = Object.keys(proxyProviders || {}); if (allProviders.length === 0) { - showNotice("info", t("No providers to update")); + showNotice("info", "没有可更新的代理提供者"); return; } @@ -148,67 +110,54 @@ export const ProviderButton = () => { } } - await refreshProxyProviders(); + // 刷新数据 await refreshProxy(); - showNotice("success", t("All providers updated successfully")); + await refreshProxyProviders(); + + showNotice("success", "全部代理提供者更新成功"); } catch (err: any) { - showNotice( - "error", - t("Failed to update providers: {{message}}", { - message: err?.message || err.toString(), - }), - ); + showNotice("error", `更新失败: ${err?.message || err.toString()}`); } finally { // 清除所有更新状态 setUpdating({}); } }); - const handleClose = () => setOpen(false); + const handleClose = () => { + setOpen(false); + }; if (!hasProviders) return null; return ( <> - - - {hydrationChip} - + {t("Proxy Provider")} - + + + @@ -217,63 +166,54 @@ export const ProviderButton = () => { {Object.entries(proxyProviders || {}) .sort() .map(([key, item]) => { - if (!item) return null; - - const time = dayjs(item.updatedAt); + const provider = item; + const time = dayjs(provider.updatedAt); const isUpdating = updating[key]; - const sub = item.subscriptionInfo; - const hasSubInfo = Boolean(sub); - const upload = sub?.Upload ?? 0; - const download = sub?.Download ?? 0; - const total = sub?.Total ?? 0; - const expire = sub?.Expire ?? 0; + + // 订阅信息 + const sub = provider.subscriptionInfo; + const hasSubInfo = !!sub; + const upload = sub?.Upload || 0; + const download = sub?.Download || 0; + const total = sub?.Total || 0; + const expire = sub?.Expire || 0; + + // 流量使用进度 const progress = total > 0 ? Math.min( + Math.round(((download + upload) * 100) / total) + 1, 100, - Math.max(0, ((upload + download) / total) * 100), ) : 0; return ( - updateProvider(key)} - disabled={isUpdating || isHydrating} - sx={{ - animation: isUpdating - ? "spin 1s linear infinite" - : "none", - "@keyframes spin": { - "0%": { transform: "rotate(0deg)" }, - "100%": { transform: "rotate(360deg)" }, - }, - }} - title={t("Update Provider") as string} - > - - - - } - sx={{ - mb: 1, - borderRadius: 1, - border: "1px solid", - borderColor: alpha("#ccc", 0.4), - backgroundColor: alpha("#fff", 0.02), - }} + sx={[ + { + p: 0, + mb: "8px", + borderRadius: 2, + overflow: "hidden", + transition: "all 0.2s", + }, + ({ palette: { mode, primary } }) => { + const bgcolor = + mode === "light" ? "#ffffff" : "#24252f"; + const hoverColor = + mode === "light" + ? alpha(primary.main, 0.1) + : alpha(primary.main, 0.2); + + return { + backgroundColor: bgcolor, + "&:hover": { + backgroundColor: hoverColor, + }, + }; + }, + ]} > { display: "flex", justifyContent: "space-between", alignItems: "center", - gap: 1, }} > { title={key} sx={{ display: "flex", alignItems: "center" }} > - {key} + {key} - {item.proxies.length} + {provider.proxies.length} - {item.vehicleType} + {provider.vehicleType} @@ -313,39 +252,72 @@ export const ProviderButton = () => { } secondary={ - hasSubInfo ? 
( - <> - - - {parseTraffic(upload + download)} /{" "} - {parseTraffic(total)} - - - {parseExpire(expire)} - - + <> + {/* 订阅信息 */} + {hasSubInfo && ( + <> + + + {parseTraffic(upload + download)} /{" "} + {parseTraffic(total)} + + + {parseExpire(expire)} + + - 0 ? 1 : 0, - }} - /> - - ) : null + {/* 进度条 */} + 0 ? 1 : 0, + }} + /> + + )} + } /> + + { + updateProvider(key); + }} + disabled={isUpdating} + sx={{ + animation: isUpdating + ? "spin 1s linear infinite" + : "none", + "@keyframes spin": { + "0%": { transform: "rotate(0deg)" }, + "100%": { transform: "rotate(360deg)" }, + }, + }} + title={t("Update Provider") as string} + > + + + ); })} diff --git a/src/components/proxy/proxy-groups.tsx b/src/components/proxy/proxy-groups.tsx index 1a82f2c83..eec3f2e88 100644 --- a/src/components/proxy/proxy-groups.tsx +++ b/src/components/proxy/proxy-groups.tsx @@ -61,17 +61,10 @@ export const ProxyGroups = (props: Props) => { }>({ open: false, message: "" }); const { verge } = useVerge(); - const { - proxies: proxiesData, - proxyHydration, - proxyTargetProfileId, - proxyDisplayProfileId, - isProxyRefreshPending, - } = useAppData(); + const { proxies: proxiesData } = useAppData(); const groups = proxiesData?.groups; const availableGroups = useMemo(() => groups ?? [], [groups]); - const showHydrationOverlay = isProxyRefreshPending; - const pendingProfileSwitch = proxyTargetProfileId !== proxyDisplayProfileId; + const defaultRuleGroup = useMemo(() => { if (isChainMode && mode === "rule" && availableGroups.length > 0) { return availableGroups[0].name; @@ -83,35 +76,6 @@ export const ProxyGroups = (props: Props) => { () => selectedGroup ?? defaultRuleGroup, [selectedGroup, defaultRuleGroup], ); - const hydrationChip = useMemo(() => { - if (proxyHydration === "live") return null; - - const label = - proxyHydration === "snapshot" ? t("Snapshot data") : t("Syncing..."); - - return ( - - ); - }, [proxyHydration, t]); - - const overlayMessage = useMemo(() => { - if (!showHydrationOverlay) return null; - - if (pendingProfileSwitch) { - return t("Loading proxy data for the selected profile..."); - } - - if (proxyHydration === "snapshot") { - return t("Preparing proxy snapshot..."); - } - - return t("Syncing proxy data..."); - }, [showHydrationOverlay, pendingProfileSwitch, proxyHydration, t]); const { renderList, onProxies, onHeadState } = useRenderList( mode, @@ -129,7 +93,7 @@ export const ProxyGroups = (props: Props) => { [renderList], ); - // 系统代理选择 + // 统代理选择 const { handleProxyGroupChange } = useProxySelection({ onSuccess: () => { onProxies(); @@ -342,7 +306,12 @@ export const ProxyGroups = (props: Props) => { try { await Promise.race([ delayManager.checkListDelay(names, groupName, timeout), - delayGroup(groupName, url, timeout), + delayGroup(groupName, url, timeout).then((result) => { + console.log( + `[ProxyGroups] getGroupProxyDelays返回结果数量:`, + Object.keys(result || {}).length, + ); + }), // 查询group delays 将清除fixed(不关注调用结果) ]); console.log(`[ProxyGroups] 延迟测试完成,组: ${groupName}`); } catch (error) { @@ -407,11 +376,6 @@ export const ProxyGroups = (props: Props) => { } if (isChainMode) { - const chainVirtuosoHeight = - mode === "rule" && proxyGroupNames.length > 0 - ? "calc(100% - 80px)" - : "calc(100% - 14px)"; - // 获取所有代理组 const proxyGroups = proxiesData?.groups || []; @@ -490,7 +454,10 @@ export const ProxyGroups = (props: Props) => { 0 + ? 
"calc(100% - 80px)" // 只有标题的高度 + : "calc(100% - 14px)", }} totalCount={renderList.length} increaseViewportBy={{ top: 200, bottom: 200 }} @@ -581,9 +548,7 @@ export const ProxyGroups = (props: Props) => { {group.name} - {`${t("Group Type")}: ${group.type} · ${t("Proxy Count")}: ${ - Array.isArray(group.all) ? group.all.length : 0 - }`} + {group.type} · {group.all.length} 节点 @@ -591,7 +556,7 @@ export const ProxyGroups = (props: Props) => { {availableGroups.length === 0 && ( - {t("Empty")} + 暂无可用代理组 )} @@ -602,29 +567,9 @@ export const ProxyGroups = (props: Props) => { return (
- {hydrationChip && ( - - {hydrationChip} - - )} + {/* 代理组导航栏 */} {mode === "rule" && ( { )} /> - {showHydrationOverlay && overlayMessage && ( - - - - {overlayMessage} - - - - )}
); }; diff --git a/src/components/proxy/use-render-list.ts b/src/components/proxy/use-render-list.ts index 1e6e0fd6a..7a5949ae3 100644 --- a/src/components/proxy/use-render-list.ts +++ b/src/components/proxy/use-render-list.ts @@ -14,13 +14,50 @@ import { } from "./use-head-state"; import { useWindowWidth } from "./use-window-width"; -type RenderGroup = IProxyGroupItem; +// 定义代理项接口 +interface IProxyItem { + name: string; + type: string; + udp: boolean; + xudp: boolean; + tfo: boolean; + mptcp: boolean; + smux: boolean; + history: { + time: string; + delay: number; + }[]; + provider?: string; + testUrl?: string; + [key: string]: any; // 添加索引签名以适应其他可能的属性 +} + +// 代理组类型 +type ProxyGroup = { + name: string; + type: string; + udp: boolean; + xudp: boolean; + tfo: boolean; + mptcp: boolean; + smux: boolean; + history: { + time: string; + delay: number; + }[]; + now: string; + all: IProxyItem[]; + hidden?: boolean; + icon?: string; + testUrl?: string; + provider?: string; +}; export interface IRenderItem { // 组 | head | item | empty | item col type: 0 | 1 | 2 | 3 | 4; key: string; - group: RenderGroup; + group: ProxyGroup; proxy?: IProxyItem; col?: number; proxyCol?: IProxyItem[]; @@ -62,7 +99,7 @@ export const useRenderList = ( selectedGroup?: string | null, ) => { // 使用全局数据提供者 - const { proxies: proxiesData, proxyHydration, refreshProxy } = useAppData(); + const { proxies: proxiesData, refreshProxy } = useAppData(); const { verge } = useVerge(); const { width } = useWindowWidth(); const [headStates, setHeadState] = useHeadStateNew(); @@ -86,29 +123,17 @@ export const useRenderList = ( // 确保代理数据加载 useEffect(() => { - if (!proxiesData || proxyHydration !== "live") return; + if (!proxiesData) return; const { groups, proxies } = proxiesData; if ( (mode === "rule" && !groups.length) || (mode === "global" && proxies.length < 2) ) { - const handle = setTimeout(() => { - void refreshProxy().catch(() => {}); - }, 500); + const handle = setTimeout(() => refreshProxy(), 500); return () => clearTimeout(handle); } - }, [proxiesData, proxyHydration, mode, refreshProxy]); - - useEffect(() => { - if (proxyHydration !== "snapshot") return; - - const handle = setTimeout(() => { - void refreshProxy().catch(() => {}); - }, 1800); - - return () => clearTimeout(handle); - }, [proxyHydration, refreshProxy]); + }, [proxiesData, mode, refreshProxy]); // 链式代理模式节点自动计算延迟 useEffect(() => { @@ -122,7 +147,7 @@ export const useRenderList = ( // 设置组监听器,当有延迟更新时自动刷新 const groupListener = () => { console.log("[ChainMode] 延迟更新,刷新UI"); - void refreshProxy().catch(() => {}); + refreshProxy(); }; delayManager.setGroupListener("chain-mode", groupListener); @@ -163,12 +188,9 @@ export const useRenderList = ( // 链式代理模式下,显示代理组和其节点 if (isChainMode && runtimeConfig && mode === "rule") { // 使用正常的规则模式代理组 - const chainGroups = proxiesData.groups ?? []; - const allGroups = chainGroups.length - ? chainGroups - : proxiesData.global - ? [proxiesData.global] - : []; + const allGroups = proxiesData.groups.length + ? 
proxiesData.groups + : [proxiesData.global!]; // 如果选择了特定代理组,只显示该组的节点 if (selectedGroup) { @@ -260,7 +282,7 @@ export const useRenderList = ( }); // 创建一个虚拟的组来容纳所有节点 - const virtualGroup: RenderGroup = { + const virtualGroup: ProxyGroup = { name: "All Proxies", type: "Selector", udp: false, @@ -318,7 +340,7 @@ export const useRenderList = ( }); // 创建一个虚拟的组来容纳所有节点 - const virtualGroup: RenderGroup = { + const virtualGroup: ProxyGroup = { name: "All Proxies", type: "Selector", udp: false, @@ -358,15 +380,12 @@ export const useRenderList = ( // 正常模式的渲染逻辑 const useRule = mode === "rule" || mode === "script"; - const renderGroups = (() => { - const groups = proxiesData.groups ?? []; - if (useRule && groups.length) { - return groups; - } - return proxiesData.global ? [proxiesData.global] : groups; - })(); + const renderGroups = + useRule && proxiesData.groups.length + ? proxiesData.groups + : [proxiesData.global!]; - const retList = renderGroups.flatMap((group: RenderGroup) => { + const retList = renderGroups.flatMap((group: ProxyGroup) => { const headState = headStates[group.name] || DEFAULT_STATE; const ret: IRenderItem[] = [ { diff --git a/src/hooks/use-current-proxy.ts b/src/hooks/use-current-proxy.ts index 0c7108ffb..7d3523269 100644 --- a/src/hooks/use-current-proxy.ts +++ b/src/hooks/use-current-proxy.ts @@ -2,6 +2,12 @@ import { useMemo } from "react"; import { useAppData } from "@/providers/app-data-context"; +// 定义代理组类型 +interface ProxyGroup { + name: string; + now: string; +} + // 获取当前代理节点信息的自定义Hook export const useCurrentProxy = () => { // 从AppDataProvider获取数据 @@ -31,15 +37,15 @@ export const useCurrentProxy = () => { "自动选择", ]; const primaryGroup = - groups.find((group) => + groups.find((group: ProxyGroup) => primaryKeywords.some((keyword) => group.name.toLowerCase().includes(keyword.toLowerCase()), ), - ) || groups.find((group) => group.name !== "GLOBAL"); + ) || groups.filter((g: ProxyGroup) => g.name !== "GLOBAL")[0]; if (primaryGroup) { primaryGroupName = primaryGroup.name; - currentName = primaryGroup.now ?? 
currentName; + currentName = primaryGroup.now; } } diff --git a/src/hooks/use-profiles.ts b/src/hooks/use-profiles.ts index c412ec307..fdb734627 100644 --- a/src/hooks/use-profiles.ts +++ b/src/hooks/use-profiles.ts @@ -5,54 +5,33 @@ import { getProfiles, patchProfile, patchProfilesConfig, - calcuProxies, } from "@/services/cmds"; -import { - useProfileStore, - selectEffectiveProfiles, - selectIsHydrating, - selectLastResult, -} from "@/stores/profile-store"; +import { calcuProxies } from "@/services/cmds"; export const useProfiles = () => { - const profilesFromStore = useProfileStore(selectEffectiveProfiles); - const storeHydrating = useProfileStore(selectIsHydrating); - const lastResult = useProfileStore(selectLastResult); - const commitProfileSnapshot = useProfileStore( - (state) => state.commitHydrated, - ); - const { - data: swrProfiles, + data: profiles, mutate: mutateProfiles, error, isValidating, } = useSWR("getProfiles", getProfiles, { revalidateOnFocus: false, revalidateOnReconnect: false, - dedupingInterval: 500, + dedupingInterval: 500, // 减少去重时间,提高响应性 errorRetryCount: 3, errorRetryInterval: 1000, - refreshInterval: 0, - onError: (err) => { - console.error("[useProfiles] SWR错误:", err); + refreshInterval: 0, // 完全由手动控制 + onError: (error) => { + console.error("[useProfiles] SWR错误:", error); }, onSuccess: (data) => { - commitProfileSnapshot(data); console.log( - "[useProfiles] 配置数据更新成功,配置数量", + "[useProfiles] 配置数据更新成功,配置数量:", data?.items?.length || 0, ); }, }); - const rawProfiles = profilesFromStore ?? swrProfiles; - const profiles = (rawProfiles ?? { - current: null, - items: [], - }) as IProfilesConfig; - const hasProfiles = rawProfiles != null; - const patchProfiles = async ( value: Partial, signal?: AbortSignal, @@ -70,30 +49,32 @@ export const useProfiles = () => { await mutateProfiles(); return success; - } catch (err) { - if (err instanceof DOMException && err.name === "AbortError") { - throw err; + } catch (error) { + if (error instanceof DOMException && error.name === "AbortError") { + throw error; } await mutateProfiles(); - throw err; + throw error; } }; const patchCurrent = async (value: Partial) => { - if (!hasProfiles || !profiles.current) { - return; + if (profiles?.current) { + await patchProfile(profiles.current, value); + mutateProfiles(); } - await patchProfile(profiles.current, value); - mutateProfiles(); }; + // 根据selected的节点选择 const activateSelected = async () => { try { console.log("[ActivateSelected] 开始处理代理选择"); - const proxiesData = await calcuProxies(); - const profileData = hasProfiles ? 
profiles : null; + const [proxiesData, profileData] = await Promise.all([ + calcuProxies(), + getProfiles(), + ]); if (!profileData || !proxiesData) { console.log("[ActivateSelected] 代理或配置数据不可用,跳过处理"); @@ -109,6 +90,7 @@ export const useProfiles = () => { return; } + // 检查是否有saved的代理选择 const { selected = [] } = current; if (selected.length === 0) { console.log("[ActivateSelected] 当前profile无保存的代理选择,跳过"); @@ -116,7 +98,7 @@ export const useProfiles = () => { } console.log( - `[ActivateSelected] 当前profile有${selected.length} 个代理选择配置`, + `[ActivateSelected] 当前profile有 ${selected.length} 个代理选择配置`, ); const selectedMap = Object.fromEntries( @@ -133,6 +115,7 @@ export const useProfiles = () => { "LoadBalance", ]); + // 处理所有代理组 [global, ...groups].forEach((group) => { if (!group) { return; @@ -167,7 +150,7 @@ export const useProfiles = () => { if (!existsInGroup) { console.warn( - `[ActivateSelected] 保存的代理${savedProxy} 不存在于代理组${name}`, + `[ActivateSelected] 保存的代理 ${savedProxy} 不存在于代理组 ${name}`, ); hasChange = true; newSelected.push({ name, now: now ?? savedProxy }); @@ -190,7 +173,7 @@ export const useProfiles = () => { return; } - console.log("[ActivateSelected] 完成代理切换,保存新的选择配置"); + console.log(`[ActivateSelected] 完成代理切换,保存新的选择配置`); try { await patchProfile(profileData.current!, { selected: newSelected }); @@ -212,18 +195,14 @@ export const useProfiles = () => { return { profiles, - hasProfiles, - current: hasProfiles - ? (profiles.items?.find((p) => p && p.uid === profiles.current) ?? null) - : null, + current: profiles?.items?.find((p) => p && p.uid === profiles.current), activateSelected, patchProfiles, patchCurrent, mutateProfiles, - isLoading: isValidating || storeHydrating, - isHydrating: storeHydrating, - lastResult, + // 新增故障检测状态 + isLoading: isValidating, error, - isStale: !hasProfiles && !error && !isValidating, + isStale: !profiles && !error && !isValidating, // 检测是否处于异常状态 }; }; diff --git a/src/pages/_layout/useLayoutEvents.ts b/src/pages/_layout/useLayoutEvents.ts index e3c9ecbc1..c26084a3f 100644 --- a/src/pages/_layout/useLayoutEvents.ts +++ b/src/pages/_layout/useLayoutEvents.ts @@ -1,9 +1,11 @@ import { listen } from "@tauri-apps/api/event"; import { getCurrentWebviewWindow } from "@tauri-apps/api/webviewWindow"; import { useEffect } from "react"; +import { mutate } from "swr"; import { useListen } from "@/hooks/use-listen"; -import { refreshClashData, refreshVergeData } from "@/services/refresh"; +import { getAxios } from "@/services/api"; + export const useLayoutEvents = ( handleNotice: (payload: [string, string]) => void, ) => { @@ -35,32 +37,32 @@ export const useLayoutEvents = ( .catch((error) => console.error("[事件监听] 注册失败", error)); }; - register( - addListener("verge://notice-message", ({ payload }) => - handleNotice(payload as [string, string]), - ), - ); - register( addListener("verge://refresh-clash-config", async () => { - try { - await refreshClashData(); - } catch (error) { - console.error("[事件监听] 刷新 Clash 配置失败", error); - } + await getAxios(true); + mutate("getProxies"); + mutate("getVersion"); + mutate("getClashConfig"); + mutate("getProxyProviders"); }), ); register( addListener("verge://refresh-verge-config", () => { - try { - refreshVergeData(); - } catch (error) { - console.error("[事件监听] 刷新 Verge 配置失败", error); - } + mutate("getVergeConfig"); + mutate("getSystemProxy"); + mutate("getAutotemProxy"); + mutate("getRunningMode"); + mutate("isServiceAvailable"); }), ); + register( + addListener("verge://notice-message", ({ payload }) => + handleNotice(payload as 
[string, string]), + ), + ); + const appWindow = getCurrentWebviewWindow(); register( (async () => { diff --git a/src/pages/profiles.tsx b/src/pages/profiles.tsx index 0d29fe34f..597ec4ec6 100644 --- a/src/pages/profiles.tsx +++ b/src/pages/profiles.tsx @@ -25,23 +25,16 @@ import { } from "@mui/icons-material"; import { LoadingButton } from "@mui/lab"; import { Box, Button, Divider, Grid, IconButton, Stack } from "@mui/material"; -import { invoke } from "@tauri-apps/api/core"; import { listen, TauriEvent } from "@tauri-apps/api/event"; import { readText } from "@tauri-apps/plugin-clipboard-manager"; import { readTextFile } from "@tauri-apps/plugin-fs"; import { useLockFn } from "ahooks"; import { throttle } from "lodash-es"; -import { - useCallback, - useEffect, - useMemo, - useReducer, - useRef, - useState, -} from "react"; +import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { useTranslation } from "react-i18next"; import { useLocation } from "react-router"; import useSWR, { mutate } from "swr"; +import { closeAllConnections } from "tauri-plugin-mihomo-api"; import { BasePage, DialogRef } from "@/components/base"; import { BaseStyledTextField } from "@/components/base/base-styled-text-field"; @@ -54,7 +47,6 @@ import { import { ConfigViewer } from "@/components/setting/mods/config-viewer"; import { useListen } from "@/hooks/use-listen"; import { useProfiles } from "@/hooks/use-profiles"; -import { useAppData } from "@/providers/app-data-context"; import { createProfile, deleteProfile, @@ -65,16 +57,11 @@ import { importProfile, reorderProfile, updateProfile, - switchProfileCommand, - type ProfileSwitchStatus, - type SwitchTaskStatus, } from "@/services/cmds"; import { showNotice } from "@/services/noticeService"; -import { refreshClashData } from "@/services/refresh"; import { useSetLoadingCache, useThemeMode } from "@/services/states"; -import { AsyncEventQueue, afterPaint } from "@/utils/asyncQueue"; -// Record profile switch state +// 记录profile切换状态 const debugProfileSwitch = (action: string, profile: string, extra?: any) => { const timestamp = new Date().toISOString().substring(11, 23); console.log( @@ -83,80 +70,33 @@ const debugProfileSwitch = (action: string, profile: string, extra?: any) => { ); }; -type RustPanicPayload = { - message: string; - location: string; +// 检查请求是否已过期 +const isRequestOutdated = ( + currentSequence: number, + requestSequenceRef: any, + profile: string, +) => { + if (currentSequence !== requestSequenceRef.current) { + debugProfileSwitch( + "REQUEST_OUTDATED", + profile, + `当前序列号: ${currentSequence}, 最新序列号: ${requestSequenceRef.current}`, + ); + return true; + } + return false; }; -type SwitchTaskMeta = { profileId: string; notify: boolean }; - -const collectSwitchingProfileIds = ( - status: ProfileSwitchStatus | null, -): string[] => { - if (!status) return []; - const ids = new Set(); - if (status.active) { - ids.add(status.active.profileId); - } - status.queue.forEach((task) => ids.add(task.profileId)); - return Array.from(ids); -}; - -type ManualActivatingAction = - | { type: "reset" } - | { type: "set"; value: string[] } - | { type: "add"; ids: string[] } - | { type: "remove"; id: string } - | { type: "filterAllowed"; allowed: Set }; - -const manualActivatingReducer = ( - state: string[], - action: ManualActivatingAction, -): string[] => { - switch (action.type) { - case "reset": - return state.length > 0 ? 
[] : state; - case "set": { - const unique = Array.from( - new Set(action.value.filter((id) => typeof id === "string" && id)), - ); - if ( - unique.length === state.length && - unique.every((id, index) => id === state[index]) - ) { - return state; - } - return unique; - } - case "add": { - const incoming = action.ids.filter((id) => typeof id === "string" && id); - if (incoming.length === 0) { - return state; - } - const next = new Set(state); - let changed = false; - incoming.forEach((id) => { - const before = next.size; - next.add(id); - if (next.size !== before) { - changed = true; - } - }); - return changed ? Array.from(next) : state; - } - case "remove": { - if (!state.includes(action.id)) { - return state; - } - return state.filter((id) => id !== action.id); - } - case "filterAllowed": { - const next = state.filter((id) => action.allowed.has(id)); - return next.length === state.length ? state : next; - } - default: - return state; +// 检查是否被中断 +const isOperationAborted = ( + abortController: AbortController, + profile: string, +) => { + if (abortController.signal.aborted) { + debugProfileSwitch("OPERATION_ABORTED", profile); + return true; } + return false; }; const normalizeProfileUrl = (value?: string) => { @@ -177,7 +117,7 @@ const normalizeProfileUrl = (value?: string) => { } catch { const schemeNormalized = trimmed.replace( /^([a-z]+):\/\//i, - (_match, scheme: string) => `${scheme.toLowerCase()}://`, + (match, scheme: string) => `${scheme.toLowerCase()}://`, ); return schemeNormalized.replace(/\/+$/, ""); } @@ -219,7 +159,7 @@ const createImportLandingVerifier = ( if (currentCount > baselineCount) { console.log( - `[Import Verify] Configuration count increased: ${baselineCount} -> ${currentCount}`, + `[导入验证] 配置数量已增加: ${baselineCount} -> ${currentCount}`, ); return true; } @@ -237,9 +177,7 @@ const createImportLandingVerifier = ( } if (!hadBaselineProfile) { - console.log( - "[Import Verify] Detected new profile record; treating as success", - ); + console.log("[导入验证] 检测到新的订阅记录,判定为导入成功"); return true; } @@ -248,15 +186,13 @@ const createImportLandingVerifier = ( if (currentUpdated > baselineUpdated) { console.log( - `[Import Verify] Profile timestamp updated ${baselineUpdated} -> ${currentUpdated}`, + `[导入验证] 订阅更新时间已更新 ${baselineUpdated} -> ${currentUpdated}`, ); return true; } if (currentSignature !== baselineSignature) { - console.log( - "[Import Verify] Profile details changed; treating as success", - ); + console.log("[导入验证] 订阅详情发生变化,判定为导入成功"); return true; } @@ -269,110 +205,14 @@ const createImportLandingVerifier = ( }; }; -const isDev = import.meta.env.DEV; - const ProfilePage = () => { - // Serialize profile switch events so state transitions stay deterministic. - const switchEventQueue = useMemo(() => new AsyncEventQueue(), []); - // Stage follow-up effects (hydration, refresh) to run sequentially after switch completion. 
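The two `AsyncEventQueue` instances created here come from `src/utils/asyncQueue.ts`, which this revert deletes; the patch shows only the `enqueue`/`clear` surface the component uses, not the queue's body. A minimal sketch of a queue with that surface, assuming it ran tasks strictly one at a time and let `clear()` drop tasks that had not yet started:

type Task = () => void | Promise<void>;

// Sketch only: the real asyncQueue.ts removed by this revert may differ.
class AsyncEventQueue {
  private chain: Promise<void> = Promise.resolve();
  private generation = 0;

  // Append a task; tasks run strictly in enqueue order, one at a time.
  enqueue(task: Task): void {
    const gen = this.generation;
    this.chain = this.chain.then(() => {
      // A clear() issued after enqueue invalidates this task.
      if (gen !== this.generation) return;
      return Promise.resolve(task()).catch((err) => {
        console.warn("[AsyncEventQueue] task failed:", err);
      });
    });
  }

  // Drop every task that has not started yet; the running one finishes.
  clear(): void {
    this.generation += 1;
  }
}

Under this reading, `clear()` never touches the promise chain itself: stale tasks still reach the front of the chain but bail out before running, which matches the unmount path above that clears both queues.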
- const postSwitchEffectQueue = useMemo(() => new AsyncEventQueue(), []); - const mountedRef = useRef(false); - const { t } = useTranslation(); const location = useLocation(); - const logToBackend = useCallback( - ( - level: "debug" | "info" | "warn" | "error", - message: string, - context?: Record, - ) => { - const payload: Record = { - level, - message, - }; - if (context !== undefined) { - payload.context = context; - } - invoke("frontend_log", { payload }).catch(() => {}); - }, - [], - ); const { addListener } = useListen(); - const { switchStatus } = useAppData(); const [url, setUrl] = useState(""); const [disabled, setDisabled] = useState(false); - const [manualActivatings, dispatchManualActivatings] = useReducer( - manualActivatingReducer, - [], - ); - const taskMetaRef = useRef>(new Map()); - const lastResultAtRef = useRef(0); - const initialLastResultSyncRef = useRef(true); - - useEffect(() => { - mountedRef.current = true; - return () => { - mountedRef.current = false; - switchEventQueue.clear(); - postSwitchEffectQueue.clear(); - if (isDev) { - console.debug("[ProfileSwitch] component unmounted, queues cleared"); - } - }; - }, [postSwitchEffectQueue, switchEventQueue]); - useEffect(() => { - const handleError = (event: ErrorEvent) => { - logToBackend("error", "[ProfileSwitch] window error captured", { - message: event.message, - filename: event.filename, - lineno: event.lineno, - colno: event.colno, - stack: event.error?.stack, - }); - console.error( - "[ProfileSwitch] window error captured", - event.message, - event.error, - ); - }; - const handleRejection = (event: PromiseRejectionEvent) => { - let reasonSummary: string; - if (typeof event.reason === "object") { - try { - reasonSummary = JSON.stringify(event.reason); - } catch (error) { - reasonSummary = `[unserializable reason: ${String(error)}]`; - } - } else { - reasonSummary = String(event.reason); - } - logToBackend("error", "[ProfileSwitch] unhandled rejection captured", { - reason: reasonSummary, - }); - console.error( - "[ProfileSwitch] unhandled rejection captured", - event.reason, - ); - }; - window.addEventListener("error", handleError); - window.addEventListener("unhandledrejection", handleRejection); - return () => { - window.removeEventListener("error", handleError); - window.removeEventListener("unhandledrejection", handleRejection); - }; - }, [logToBackend]); + const [activatings, setActivatings] = useState([]); const [loading, setLoading] = useState(false); - const postSwitchGenerationRef = useRef(0); - const switchingProfileId = switchStatus?.active?.profileId ?? null; - const switchActivatingIds = useMemo( - () => collectSwitchingProfileIds(switchStatus ?? 
null), - [switchStatus], - ); - const activatings = useMemo(() => { - const merged = new Set(manualActivatings); - switchActivatingIds.forEach((id) => merged.add(id)); - return Array.from(merged); - }, [manualActivatings, switchActivatingIds]); // Batch selection states const [batchMode, setBatchMode] = useState(false); @@ -380,6 +220,57 @@ const ProfilePage = () => { () => new Set(), ); + // 防止重复切换 + const switchingProfileRef = useRef(null); + + // 支持中断当前切换操作 + const abortControllerRef = useRef(null); + + // 只处理最新的切换请求 + const requestSequenceRef = useRef(0); + + // 待处理请求跟踪,取消排队的请求 + const pendingRequestRef = useRef | null>(null); + + // 处理profile切换中断 + const handleProfileInterrupt = useCallback( + (previousSwitching: string, newProfile: string) => { + debugProfileSwitch( + "INTERRUPT_PREVIOUS", + previousSwitching, + `被 ${newProfile} 中断`, + ); + + if (abortControllerRef.current) { + abortControllerRef.current.abort(); + debugProfileSwitch("ABORT_CONTROLLER_TRIGGERED", previousSwitching); + } + + if (pendingRequestRef.current) { + debugProfileSwitch("CANCEL_PENDING_REQUEST", previousSwitching); + } + + setActivatings((prev) => prev.filter((id) => id !== previousSwitching)); + showNotice( + "info", + `${t("Profile switch interrupted by new selection")}: ${previousSwitching} → ${newProfile}`, + 3000, + ); + }, + [t], + ); + + // 清理切换状态 + const cleanupSwitchState = useCallback( + (profile: string, sequence: number) => { + setActivatings((prev) => prev.filter((id) => id !== profile)); + switchingProfileRef.current = null; + abortControllerRef.current = null; + pendingRequestRef.current = null; + debugProfileSwitch("SWITCH_END", profile, `序列号: ${sequence}`); + }, + [], + ); const sensors = useSensors( useSensor(PointerSensor), useSensor(KeyboardSensor, { @@ -391,32 +282,11 @@ const ProfilePage = () => { const { profiles = {}, activateSelected, + patchProfiles, mutateProfiles, error, isStale, } = useProfiles(); - const activateSelectedRef = useRef(activateSelected); - const mutateProfilesRef = useRef(mutateProfiles); - const profileMutateScheduledRef = useRef(false); - const mutateLogsRef = useRef<(() => Promise | void) | null>(null); - const tRef = useRef(t); - const showNoticeRef = useRef(showNotice); - const refreshClashDataRef = useRef(refreshClashData); - - useEffect(() => { - activateSelectedRef.current = activateSelected; - }, [activateSelected]); - - useEffect(() => { - mutateProfilesRef.current = mutateProfiles; - }, [mutateProfiles]); - - useEffect(() => { - tRef.current = t; - }, [t]); - - showNoticeRef.current = showNotice; - refreshClashDataRef.current = refreshClashData; useEffect(() => { const handleFileDrop = async () => { @@ -457,28 +327,28 @@ const ProfilePage = () => { }; }, [addListener, mutateProfiles, t]); - // Add emergency recovery capability + // 添加紧急恢复功能 const onEmergencyRefresh = useLockFn(async () => { - console.log("[Emergency Refresh] Starting forced refresh of all data"); + console.log("[紧急刷新] 开始强制刷新所有数据"); try { - // Clear all SWR caches + // 清除所有SWR缓存 await mutate(() => true, undefined, { revalidate: false }); - // Force fetching profile data + // 强制重新获取配置数据 await mutateProfiles(undefined, { revalidate: true, rollbackOnError: false, }); - // Wait for state to stabilize before enhancing the profile + // 等待状态稳定后增强配置 await new Promise((resolve) => setTimeout(resolve, 500)); await onEnhance(false); - showNotice("success", "Data forcibly refreshed", 2000); + showNotice("success", "数据已强制刷新", 2000); } catch (error: any) { - console.error("[Emergency Refresh] Failed:", 
error); - showNotice("error", `Emergency refresh failed: ${error.message}`, 4000); + console.error("[紧急刷新] 失败:", error); + showNotice("error", `紧急刷新失败: ${error.message}`, 4000); } }); @@ -486,156 +356,6 @@ const ProfilePage = () => { "getRuntimeLogs", getRuntimeLogs, ); - useEffect(() => { - mutateLogsRef.current = mutateLogs; - }, [mutateLogs]); - - useEffect(() => { - activateSelectedRef.current = activateSelected; - }, [activateSelected]); - - useEffect(() => { - mutateProfilesRef.current = mutateProfiles; - }, [mutateProfiles]); - - const scheduleProfileMutate = useCallback(() => { - if (profileMutateScheduledRef.current) return; - if (!mountedRef.current) return; - profileMutateScheduledRef.current = true; - requestAnimationFrame(() => { - profileMutateScheduledRef.current = false; - const mutateProfilesFn = mutateProfilesRef.current; - if (mutateProfilesFn) { - void mutateProfilesFn(); - if (isDev) { - console.debug( - "[ProfileSwitch] mutateProfiles executed from schedule", - ); - } - } - }); - }, []); - - useEffect(() => { - if (!switchStatus) { - taskMetaRef.current.clear(); - dispatchManualActivatings({ type: "reset" }); - return; - } - - const trackedProfiles = new Set(); - const registerTask = (task: SwitchTaskStatus | null | undefined) => { - if (!task) return; - taskMetaRef.current.set(task.taskId, { - profileId: task.profileId, - notify: task.notify, - }); - trackedProfiles.add(task.profileId); - }; - - registerTask(switchStatus.active ?? null); - switchStatus.queue.forEach((task) => registerTask(task)); - - dispatchManualActivatings({ - type: "filterAllowed", - allowed: trackedProfiles, - }); - - const lastResult = switchStatus.lastResult ?? null; - if (initialLastResultSyncRef.current) { - initialLastResultSyncRef.current = false; - if (lastResult) { - lastResultAtRef.current = lastResult.finishedAt; - } - } - - if (lastResult && lastResult.finishedAt !== lastResultAtRef.current) { - lastResultAtRef.current = lastResult.finishedAt; - const { profileId, success, finishedAt, errorDetail, cancelled } = - lastResult; - const isCancelled = Boolean(cancelled); - const meta = taskMetaRef.current.get(lastResult.taskId); - const notifySuccess = meta?.notify ?? true; - taskMetaRef.current.delete(lastResult.taskId); - - debugProfileSwitch("STATUS_RESULT", profileId, { - success, - finishedAt, - notifySuccess, - cancelled: isCancelled, - }); - - switchEventQueue.enqueue(() => { - if (!mountedRef.current) return; - - dispatchManualActivatings({ type: "remove", id: profileId }); - - const eventGeneration = postSwitchGenerationRef.current; - - postSwitchEffectQueue.enqueue(async () => { - if (!mountedRef.current) return; - if (postSwitchGenerationRef.current !== eventGeneration) { - return; - } - - logToBackend( - success || isCancelled ? 
"info" : "warn", - "[ProfileSwitch] status result received", - { - profileId, - success, - cancelled: isCancelled, - finishedAt, - }, - ); - - scheduleProfileMutate(); - - if (success) { - if (notifySuccess) { - await afterPaint(); - showNoticeRef.current?.( - "success", - tRef.current("Profile Switched"), - 1000, - ); - } - - const operations: Promise[] = []; - const mutateLogs = mutateLogsRef.current; - if (mutateLogs) { - operations.push(Promise.resolve(mutateLogs())); - } - const activateSelected = activateSelectedRef.current; - if (activateSelected) { - operations.push(Promise.resolve(activateSelected())); - } - const refreshFn = refreshClashDataRef.current; - if (refreshFn) { - operations.push(Promise.resolve(refreshFn())); - } - - if (operations.length > 0) { - void Promise.resolve().then(() => Promise.allSettled(operations)); - } - } else if (!isCancelled) { - await afterPaint(); - showNoticeRef.current?.( - "error", - errorDetail ?? tRef.current("Profile switch failed"), - ); - } - }); - }); - } - }, [ - dispatchManualActivatings, - logToBackend, - postSwitchEffectQueue, - scheduleProfileMutate, - switchEventQueue, - switchStatus, - ]); const viewerRef = useRef(null); const configRef = useRef(null); @@ -655,7 +375,7 @@ const ProfilePage = () => { const onImport = async () => { if (!url) return; - // Validate that the URL uses http/https + // 校验url是否为http/https if (!/^https?:\/\//i.test(url)) { showNotice("error", t("Invalid Profile URL")); return; @@ -685,10 +405,7 @@ const ProfilePage = () => { ); } } catch (verifyErr) { - console.warn( - "[Import Verify] Failed to fetch profile state:", - verifyErr, - ); + console.warn("[导入验证] 获取配置状态失败:", verifyErr); break; } } @@ -697,33 +414,33 @@ const ProfilePage = () => { }; try { - // Attempt standard import + // 尝试正常导入 await importProfile(url); await handleImportSuccess("Profile Imported Successfully"); return; } catch (initialErr) { - console.warn("[Profile Import] Initial import failed:", initialErr); + console.warn("[订阅导入] 首次导入失败:", initialErr); const alreadyImported = await waitForImportLanding(); if (alreadyImported) { console.warn( - "[Profile Import] API reported failure, but profile already imported; skipping rollback", + "[订阅导入] 接口返回失败,但检测到订阅已导入,跳过回退导入流程", ); await handleImportSuccess("Profile Imported Successfully"); return; } - // Initial import failed without data change; try built-in proxy + // 首次导入失败且未检测到数据变更,尝试使用自身代理 showNotice("info", t("Import failed, retrying with Clash proxy...")); try { - // Attempt import using built-in proxy + // 使用自身代理尝试导入 await importProfile(url, { with_proxy: false, self_proxy: true, }); await handleImportSuccess("Profile Imported with Clash proxy"); } catch (retryErr: any) { - // Rollback import also failed + // 回退导入也失败 const retryErrmsg = retryErr?.message || retryErr.toString(); showNotice( "error", @@ -736,9 +453,7 @@ const ProfilePage = () => { } }; - const currentProfileId = profiles.current ?? 
null; - - // Enhanced refresh strategy + // 强化的刷新策略 const performRobustRefresh = async ( importVerifier: ImportLandingVerifier, ) => { @@ -749,50 +464,43 @@ const ProfilePage = () => { while (retryCount < maxRetries) { try { - console.log( - `[Import Refresh] Attempt ${retryCount + 1} to refresh profile data`, - ); + console.log(`[导入刷新] 第${retryCount + 1}次尝试刷新配置数据`); - // Force refresh and bypass caches + // 强制刷新,绕过所有缓存 await mutateProfiles(undefined, { revalidate: true, rollbackOnError: false, }); - // Wait for state to stabilize + // 等待状态稳定 await new Promise((resolve) => setTimeout(resolve, baseDelay * (retryCount + 1)), ); - // Verify whether refresh succeeded + // 验证刷新是否成功 const currentProfiles = await getProfiles(); const currentCount = currentProfiles?.items?.length || 0; if (currentCount > baselineCount) { console.log( - `[Import Refresh] Profile refresh succeeded; count ${baselineCount} -> ${currentCount}`, + `[导入刷新] 配置刷新成功,配置数量 ${baselineCount} -> ${currentCount}`, ); await onEnhance(false); return; } if (hasLanding(currentProfiles)) { - console.log( - "[Import Refresh] Detected profile update; treating as success", - ); + console.log("[导入刷新] 检测到订阅内容更新,判定刷新成功"); await onEnhance(false); return; } console.warn( - `[Import Refresh] Profile count unchanged (${currentCount}), retrying...`, + `[导入刷新] 配置数量未增加 (${currentCount}), 继续重试...`, ); retryCount++; } catch (error) { - console.error( - `[Import Refresh] Attempt ${retryCount + 1} failed:`, - error, - ); + console.error(`[导入刷新] 第${retryCount + 1}次刷新失败:`, error); retryCount++; await new Promise((resolve) => setTimeout(resolve, baseDelay * retryCount), @@ -800,12 +508,10 @@ const ProfilePage = () => { } } - // Final attempt after all retries fail - console.warn( - `[Import Refresh] Regular refresh failed; clearing cache and retrying`, - ); + // 所有重试失败后的最后尝试 + console.warn(`[导入刷新] 常规刷新失败,尝试清除缓存重新获取`); try { - // Clear SWR cache and refetch + // 清除SWR缓存并重新获取 await mutate("getProfiles", getProfiles(), { revalidate: true }); await onEnhance(false); showNotice( @@ -814,10 +520,7 @@ const ProfilePage = () => { 3000, ); } catch (finalError) { - console.error( - `[Import Refresh] Final refresh attempt failed:`, - finalError, - ); + console.error(`[导入刷新] 最终刷新尝试失败:`, finalError); showNotice( "error", t("Profile imported successfully, please restart if not visible"), @@ -828,108 +531,209 @@ const ProfilePage = () => { const onDragEnd = async (event: DragEndEvent) => { const { active, over } = event; - if (over && active.id !== over.id) { - await reorderProfile(active.id.toString(), over.id.toString()); - mutateProfiles(); + if (over) { + if (active.id !== over.id) { + await reorderProfile(active.id.toString(), over.id.toString()); + mutateProfiles(); + } } }; - const requestSwitch = useCallback( - (targetProfile: string, notifySuccess: boolean) => { - const nextGeneration = postSwitchGenerationRef.current + 1; - postSwitchGenerationRef.current = nextGeneration; - postSwitchEffectQueue.clear(); - - debugProfileSwitch("REQUEST_SWITCH", targetProfile, { - notifySuccess, - generation: nextGeneration, - }); - - logToBackend("info", "[ProfileSwitch] request switch", { - targetProfile, - notifySuccess, - generation: nextGeneration, - }); - - dispatchManualActivatings({ type: "add", ids: [targetProfile] }); - - void (async () => { - try { - const accepted = await switchProfileCommand( - targetProfile, - notifySuccess, + const executeBackgroundTasks = useCallback( + async ( + profile: string, + sequence: number, + abortController: AbortController, + ) => { 
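The restored `executeBackgroundTasks` body that follows, like the rest of the restored switching code, re-checks two guards around every await: the request's sequence number must still be the newest one issued, and its `AbortController` must not have fired. Distilled into a standalone latest-wins helper (names here are illustrative, not the component's real API — the component keeps this state in refs):

// Illustrative module-level state; the restored code uses
// requestSequenceRef / abortControllerRef instead.
let latestSeq = 0;
let activeController: AbortController | null = null;

async function switchLatestWins(
  profile: string,
  doSwitch: (profile: string, signal: AbortSignal) => Promise<void>,
): Promise<boolean> {
  const mySeq = ++latestSeq;    // this call is now the newest request
  activeController?.abort();    // interrupt any in-flight switch
  const controller = new AbortController();
  activeController = controller;

  try {
    await doSwitch(profile, controller.signal);
  } catch (err) {
    // An aborted request was superseded; drop it instead of surfacing.
    if (controller.signal.aborted) return false;
    throw err;
  }
  // Results from requests overtaken while awaiting are discarded.
  return mySeq === latestSeq && !controller.signal.aborted;
}

Only a call that still holds the newest sequence number after its await may commit follow-up work, which is exactly the condition the restored function checks before running `activateSelected()`.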
+ try { + if ( + sequence === requestSequenceRef.current && + switchingProfileRef.current === profile && + !abortController.signal.aborted + ) { + await activateSelected(); + console.log(`[Profile] 后台处理完成,序列号: ${sequence}`); + } else { + debugProfileSwitch( + "BACKGROUND_TASK_SKIPPED", + profile, + `序列号过期或被中断: ${sequence} vs ${requestSequenceRef.current}`, ); - if (!accepted) { - throw new Error(tRef.current("Profile switch failed")); - } - } catch (error: any) { - const message = - error?.message || error?.toString?.() || String(error); - logToBackend("error", "[ProfileSwitch] switch command failed", { - profileId: targetProfile, - message, - }); - dispatchManualActivatings({ type: "remove", id: targetProfile }); - scheduleProfileMutate(); - await afterPaint(); - showNoticeRef.current?.("error", message); } - })(); + } catch (err: any) { + console.warn("Failed to activate selected proxies:", err); + } }, - [ - dispatchManualActivatings, - logToBackend, - postSwitchEffectQueue, - scheduleProfileMutate, - ], + [activateSelected], ); - const onSelect = useCallback( - (targetProfile: string, force: boolean) => { - if (!force && targetProfile === currentProfileId) { - debugProfileSwitch("ALREADY_CURRENT_IGNORED", targetProfile); + const activateProfile = useCallback( + async (profile: string, notifySuccess: boolean) => { + if (profiles.current === profile && !notifySuccess) { + console.log( + `[Profile] 目标profile ${profile} 已经是当前配置,跳过切换`, + ); return; } - requestSwitch(targetProfile, true); + + const currentSequence = ++requestSequenceRef.current; + debugProfileSwitch("NEW_REQUEST", profile, `序列号: ${currentSequence}`); + + // 处理中断逻辑 + const previousSwitching = switchingProfileRef.current; + if (previousSwitching && previousSwitching !== profile) { + handleProfileInterrupt(previousSwitching, profile); + } + + // 防止重复切换同一个profile + if (switchingProfileRef.current === profile) { + debugProfileSwitch("DUPLICATE_SWITCH_BLOCKED", profile); + return; + } + + // 初始化切换状态 + switchingProfileRef.current = profile; + debugProfileSwitch("SWITCH_START", profile, `序列号: ${currentSequence}`); + + const currentAbortController = new AbortController(); + abortControllerRef.current = currentAbortController; + + setActivatings((prev) => { + if (prev.includes(profile)) return prev; + return [...prev, profile]; + }); + + try { + console.log( + `[Profile] 开始切换到: ${profile},序列号: ${currentSequence}`, + ); + + // 检查请求有效性 + if ( + isRequestOutdated(currentSequence, requestSequenceRef, profile) || + isOperationAborted(currentAbortController, profile) + ) { + return; + } + + // 执行切换请求 + const requestPromise = patchProfiles( + { current: profile }, + currentAbortController.signal, + ); + pendingRequestRef.current = requestPromise; + + const success = await requestPromise; + + if (pendingRequestRef.current === requestPromise) { + pendingRequestRef.current = null; + } + + // 再次检查有效性 + if ( + isRequestOutdated(currentSequence, requestSequenceRef, profile) || + isOperationAborted(currentAbortController, profile) + ) { + return; + } + + // 完成切换 + await mutateLogs(); + closeAllConnections(); + + if (notifySuccess && success) { + showNotice("success", t("Profile Switched"), 1000); + } + + console.log( + `[Profile] 切换到 ${profile} 完成,序列号: ${currentSequence},开始后台处理`, + ); + + // 延迟执行后台任务 + setTimeout( + () => + executeBackgroundTasks( + profile, + currentSequence, + currentAbortController, + ), + 50, + ); + } catch (err: any) { + if (pendingRequestRef.current) { + pendingRequestRef.current = null; + } + + // 检查是否因为中断或过期而出错 + if ( + 
isOperationAborted(currentAbortController, profile) || + isRequestOutdated(currentSequence, requestSequenceRef, profile) + ) { + return; + } + + console.error(`[Profile] 切换失败:`, err); + showNotice("error", err?.message || err.toString(), 4000); + } finally { + // 只有当前profile仍然是正在切换的profile且序列号匹配时才清理状态 + if ( + switchingProfileRef.current === profile && + currentSequence === requestSequenceRef.current + ) { + cleanupSwitchState(profile, currentSequence); + } else { + debugProfileSwitch( + "CLEANUP_SKIPPED", + profile, + `序列号不匹配或已被接管: ${currentSequence} vs ${requestSequenceRef.current}`, + ); + } + } }, - [currentProfileId, requestSwitch], + [ + profiles, + patchProfiles, + mutateLogs, + t, + executeBackgroundTasks, + handleProfileInterrupt, + cleanupSwitchState, + ], ); + const onSelect = async (current: string, force: boolean) => { + // 阻止重复点击或已激活的profile + if (switchingProfileRef.current === current) { + debugProfileSwitch("DUPLICATE_CLICK_IGNORED", current); + return; + } + + if (!force && current === profiles.current) { + debugProfileSwitch("ALREADY_CURRENT_IGNORED", current); + return; + } + + await activateProfile(current, true); + }; useEffect(() => { - if (!current) return; - if (current === currentProfileId) return; - if (switchActivatingIds.includes(current)) return; - requestSwitch(current, false); - }, [current, currentProfileId, requestSwitch, switchActivatingIds]); - - useEffect(() => { - let mounted = true; - const panicListener = listen("rust-panic", (event) => { - if (!mounted) return; - const payload = event.payload; - if (!payload) return; - showNotice( - "error", - `Rust panic: ${payload.message} @ ${payload.location}`, - ); - console.error("Rust panic reported from backend:", payload); - }); - return () => { - mounted = false; - panicListener.then((unlisten) => unlisten()).catch(() => {}); - }; - }, [t]); + (async () => { + if (current) { + mutateProfiles(); + await activateProfile(current, false); + } + })(); + }, [current, activateProfile, mutateProfiles]); const onEnhance = useLockFn(async (notifySuccess: boolean) => { - if (switchingProfileId) { + if (switchingProfileRef.current) { console.log( - `[Profile] A profile is currently switching (${switchingProfileId}); skipping enhance operation`, + `[Profile] 有profile正在切换中(${switchingProfileRef.current}),跳过enhance操作`, ); return; } const currentProfiles = currentActivatings(); - dispatchManualActivatings({ type: "add", ids: currentProfiles }); + setActivatings((prev) => [...new Set([...prev, ...currentProfiles])]); try { await enhanceProfiles(); @@ -940,17 +744,17 @@ const ProfilePage = () => { } catch (err: any) { showNotice("error", err.message || err.toString(), 3000); } finally { - dispatchManualActivatings({ type: "reset" }); + // 保留正在切换的profile,清除其他状态 + setActivatings((prev) => + prev.filter((id) => id === switchingProfileRef.current), + ); } }); const onDelete = useLockFn(async (uid: string) => { const current = profiles.current === uid; try { - dispatchManualActivatings({ - type: "set", - value: [...new Set([...(current ? currentActivatings() : []), uid])], - }); + setActivatings([...(current ? 
currentActivatings() : []), uid]); await deleteProfile(uid); mutateProfiles(); mutateLogs(); @@ -960,11 +764,11 @@ const ProfilePage = () => { } catch (err: any) { showNotice("error", err?.message || err.toString()); } finally { - dispatchManualActivatings({ type: "reset" }); + setActivatings([]); } }); - // Update all profiles + // 更新所有订阅 const setLoadingCache = useSetLoadingCache(); const onUpdateAll = useLockFn(async () => { const throttleMutate = throttle(mutateProfiles, 2000, { @@ -975,7 +779,7 @@ const ProfilePage = () => { await updateProfile(uid); throttleMutate(); } catch (err: any) { - console.error(`Failed to update profile ${uid}:`, err); + console.error(`更新订阅 ${uid} 失败:`, err); } finally { setLoadingCache((cache) => ({ ...cache, [uid]: false })); } @@ -983,7 +787,7 @@ const ProfilePage = () => { return new Promise((resolve) => { setLoadingCache((cache) => { - // Gather profiles that are not updating + // 获取没有正在更新的订阅 const items = profileItems.filter( (e) => e.type === "remote" && !cache[e.uid], ); @@ -1037,11 +841,11 @@ const ProfilePage = () => { const getSelectionState = () => { if (selectedProfiles.size === 0) { - return "none"; // no selection + return "none"; // 无选择 } else if (selectedProfiles.size === profileItems.length) { - return "all"; // all selected + return "all"; // 全选 } else { - return "partial"; // partially selected + return "partial"; // 部分选择 } }; @@ -1055,7 +859,7 @@ const ProfilePage = () => { ? [profiles.current] : []; - dispatchManualActivatings({ type: "add", ids: currentActivating }); + setActivatings((prev) => [...new Set([...prev, ...currentActivating])]); // Delete all selected profiles for (const uid of selectedProfiles) { @@ -1078,17 +882,17 @@ const ProfilePage = () => { } catch (err: any) { showNotice("error", err?.message || err.toString()); } finally { - dispatchManualActivatings({ type: "reset" }); + setActivatings([]); } }); const mode = useThemeMode(); - const islight = mode === "light"; + const islight = mode === "light" ? true : false; const dividercolor = islight ? 
"rgba(0, 0, 0, 0.06)" : "rgba(255, 255, 255, 0.06)"; - // Observe configuration changes from backend + // 监听后端配置变更 useEffect(() => { let unlistenPromise: Promise<() => void> | undefined; let lastProfileId: string | null = null; @@ -1102,29 +906,29 @@ const ProfilePage = () => { const newProfileId = event.payload; const now = Date.now(); - console.log(`[Profile] Received profile-change event: ${newProfileId}`); + console.log(`[Profile] 收到配置变更事件: ${newProfileId}`); if ( lastProfileId === newProfileId && now - lastUpdateTime < debounceDelay ) { - console.log(`[Profile] Duplicate event throttled; skipping`); + console.log(`[Profile] 重复事件被防抖,跳过`); return; } lastProfileId = newProfileId; lastUpdateTime = now; - console.log(`[Profile] Performing profile data refresh`); + console.log(`[Profile] 执行配置数据刷新`); if (refreshTimer !== null) { window.clearTimeout(refreshTimer); } - // Use async scheduling to avoid blocking event handling + // 使用异步调度避免阻塞事件处理 refreshTimer = window.setTimeout(() => { mutateProfiles().catch((error) => { - console.error("[Profile] Profile data refresh failed:", error); + console.error("[Profile] 配置数据刷新失败:", error); }); refreshTimer = null; }, 0); @@ -1141,6 +945,16 @@ const ProfilePage = () => { }; }, [mutateProfiles]); + // 组件卸载时清理中断控制器 + useEffect(() => { + return () => { + if (abortControllerRef.current) { + abortControllerRef.current.abort(); + debugProfileSwitch("COMPONENT_UNMOUNT_CLEANUP", "all"); + } + }; + }, []); + return ( { - {/* Fault detection and emergency recovery button */} + {/* 故障检测和紧急恢复按钮 */} {(error || isStale) && ( { ref={viewerRef} onChange={async (isActivating) => { mutateProfiles(); - // Only trigger global reload when the active profile changes + // 只有更改当前激活的配置时才触发全局重新加载 if (isActivating) { await onEnhance(false); } diff --git a/src/providers/app-data-context.ts b/src/providers/app-data-context.ts index 6bf02313f..7b7244aba 100644 --- a/src/providers/app-data-context.ts +++ b/src/providers/app-data-context.ts @@ -6,15 +6,8 @@ import { RuleProvider, } from "tauri-plugin-mihomo-api"; -import { ProxiesView, type ProfileSwitchStatus } from "@/services/cmds"; - export interface AppDataContextType { - proxies: ProxiesView | null; - proxyHydration: "none" | "snapshot" | "live"; - proxyTargetProfileId: string | null; - proxyDisplayProfileId: string | null; - isProxyRefreshPending: boolean; - switchStatus: ProfileSwitchStatus | null; + proxies: any; clashConfig: BaseConfig; rules: Rule[]; sysproxy: any; diff --git a/src/providers/app-data-provider.tsx b/src/providers/app-data-provider.tsx index 9c97c61f3..c71528c40 100644 --- a/src/providers/app-data-provider.tsx +++ b/src/providers/app-data-provider.tsx @@ -1,6 +1,6 @@ import { listen } from "@tauri-apps/api/event"; -import React, { useCallback, useEffect, useMemo, useRef } from "react"; -import useSWR, { mutate as globalMutate } from "swr"; +import React, { useCallback, useEffect, useMemo } from "react"; +import useSWR from "swr"; import { getBaseConfig, getRuleProviders, @@ -9,53 +9,31 @@ import { import { useVerge } from "@/hooks/use-verge"; import { + calcuProxies, calcuProxyProviders, getAppUptime, - getProfileSwitchStatus, - getProfileSwitchEvents, - getProfiles as fetchProfilesConfig, getRunningMode, - readProfileFile, getSystemProxy, - type ProxiesView, - type ProfileSwitchStatus, - type SwitchResultStatus, } from "@/services/cmds"; -import { SWR_DEFAULTS, SWR_SLOW_POLL } from "@/services/config"; -import { useProfileStore } from "@/stores/profile-store"; -import { - applyLiveProxyPayload, - 
diff --git a/src/providers/app-data-provider.tsx b/src/providers/app-data-provider.tsx index 9c97c61f3..c71528c40 100644 --- a/src/providers/app-data-provider.tsx +++ b/src/providers/app-data-provider.tsx @@ -1,6 +1,6 @@ import { listen } from "@tauri-apps/api/event"; -import React, { useCallback, useEffect, useMemo, useRef } from "react"; -import useSWR, { mutate as globalMutate } from "swr"; +import React, { useCallback, useEffect, useMemo } from "react"; +import useSWR from "swr"; import { getBaseConfig, getRuleProviders, @@ -9,53 +9,31 @@ import { import { useVerge } from "@/hooks/use-verge"; import { + calcuProxies, calcuProxyProviders, getAppUptime, - getProfileSwitchStatus, - getProfileSwitchEvents, - getProfiles as fetchProfilesConfig, getRunningMode, - readProfileFile, getSystemProxy, - type ProxiesView, - type ProfileSwitchStatus, - type SwitchResultStatus, } from "@/services/cmds"; -import { SWR_DEFAULTS, SWR_SLOW_POLL } from "@/services/config"; -import { useProfileStore } from "@/stores/profile-store"; -import { - applyLiveProxyPayload, - fetchLiveProxies, - type ProxiesUpdatedPayload, - useProxyStore, -} from "@/stores/proxy-store"; -import { createProxySnapshotFromProfile } from "@/utils/proxy-snapshot"; +import { SWR_DEFAULTS, SWR_REALTIME, SWR_SLOW_POLL } from "@/services/config"; import { AppDataContext, AppDataContextType } from "./app-data-context"; -// Global app data provider +// Global app data provider component export const AppDataProvider = ({ children, }: { children: React.ReactNode; }) => { const { verge } = useVerge(); - const applyProfileSwitchResult = useProfileStore( - (state) => state.applySwitchResult, - ); - const commitProfileSnapshot = useProfileStore( - (state) => state.commitHydrated, - ); - const setSwitchEventSeq = useProfileStore((state) => state.setLastEventSeq); - const proxyView = useProxyStore((state) => state.data); - const proxyHydration = useProxyStore((state) => state.hydration); - const proxyProfileId = useProxyStore((state) => state.lastProfileId); - const pendingProxyProfileId = useProxyStore( - (state) => state.pendingProfileId, - ); - const setProxySnapshot = useProxyStore((state) => state.setSnapshot); - const clearPendingProxyProfile = useProxyStore( - (state) => state.clearPendingProfile, + + const { data: proxiesData, mutate: refreshProxy } = useSWR( "getProxies", calcuProxies, { ...SWR_REALTIME, onError: (err) => console.warn("[DataProvider] Proxy fetch failed:", err), }, ); const { data: clashConfig, mutate: refreshClashConfig } = useSWR( @@ -82,259 +60,25 @@ export const AppDataProvider = ({ SWR_DEFAULTS, ); - const { data: switchStatus, mutate: mutateSwitchStatus } = - useSWR( "getProfileSwitchStatus", getProfileSwitchStatus, { refreshInterval: (status) => status && (status.isSwitching || (status.queue?.length ?? 0) > 0) ? 400 : 4000, dedupingInterval: 200, }, ); - - const isUnmountedRef = useRef(false); - // Keep track of pending timers so we can cancel them on unmount and avoid stray updates. - const scheduledTimeoutsRef = useRef>(new Set()); - // Shared metadata to dedupe switch events coming from both polling and subscriptions. - const switchMetaRef = useRef<{ - pendingProfileId: string | null; - lastResultTaskId: number | null; - }>({ - pendingProfileId: null, - lastResultTaskId: null, - }); - const switchEventSeqRef = useRef(0); - const profileChangeMetaRef = useRef<{ - lastProfileId: string | null; - lastEventTs: number; - }>({ - lastProfileId: null, - lastEventTs: 0, - }); - const lastClashRefreshAtRef = useRef(0); - const PROFILE_EVENT_DEDUP_MS = 400; - const CLASH_REFRESH_DEDUP_MS = 300; - - // Thin wrapper around setTimeout that no-ops once the provider unmounts. - const scheduleTimeout = useCallback( - (callback: () => void | Promise, delay: number) => { - if (isUnmountedRef.current) return -1; - - const timeoutId = window.setTimeout(() => { - scheduledTimeoutsRef.current.delete(timeoutId); - if (!isUnmountedRef.current) { - void callback(); - } - }, delay); - - scheduledTimeoutsRef.current.add(timeoutId); - return timeoutId; - }, - [], - ); - - const clearAllTimeouts = useCallback(() => { - scheduledTimeoutsRef.current.forEach((timeoutId) => - clearTimeout(timeoutId), - ); - scheduledTimeoutsRef.current.clear(); - }, []); - - // Delay live proxy refreshes slightly so we don't hammer Mihomo while a switch is still applying.
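The hunk above also deletes the unmount-safe timer helpers (`scheduleTimeout`/`clearAllTimeouts`). The idea is small enough to capture standalone; a sketch of the technique, not the deleted implementation verbatim:

    // Track every pending timeout so a single dispose() can cancel them all,
    // and so callbacks firing after teardown become no-ops.
    export function createTimerPool() {
      let disposed = false;
      const pending = new Set<number>();

      return {
        schedule(callback: () => void, delay: number): number {
          if (disposed) return -1;
          const id = window.setTimeout(() => {
            pending.delete(id);
            if (!disposed) callback();
          }, delay);
          pending.add(id);
          return id;
        },
        dispose(): void {
          disposed = true;
          pending.forEach((id) => window.clearTimeout(id));
          pending.clear();
        },
      };
    }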
- const queueProxyRefresh = useCallback( - (reason: string, delay = 1500) => { - scheduleTimeout(() => { - fetchLiveProxies().catch((error) => - console.warn( - `[DataProvider] Proxy refresh failed (${reason}, fallback):`, - error, - ), - ); - }, delay); - }, - [scheduleTimeout], - ); - // Prime the proxy store with the static selections from the profile YAML before live data arrives. - const seedProxySnapshot = useCallback( - async (profileId: string) => { - if (!profileId) return; - - try { - const yamlContent = await readProfileFile(profileId); - const snapshot = createProxySnapshotFromProfile(yamlContent); - if (!snapshot) return; - - setProxySnapshot(snapshot, profileId); - } catch (error) { - console.warn( - "[DataProvider] Failed to seed proxy snapshot from profile:", - error, - ); - } - }, - [setProxySnapshot], - ); - - const handleSwitchResult = useCallback( - (result: SwitchResultStatus) => { - // Ignore duplicate notifications for the same switch execution. - const meta = switchMetaRef.current; - if (result.taskId === meta.lastResultTaskId) { - return; - } - meta.lastResultTaskId = result.taskId; - - // Optimistically update the SWR cache so the UI shows the new profile immediately. - void globalMutate( - "getProfiles", - (current?: IProfilesConfig | null) => { - if (!current || !result.success) { - return current; - } - if (current.current === result.profileId) { - return current; - } - return { - ...current, - current: result.profileId, - }; - }, - false, - ); - - applyProfileSwitchResult(result); - if (!result.success) { - clearPendingProxyProfile(); - } - - if (result.success && result.cancelled !== true) { - // Once the backend settles, refresh all dependent data in the background. - scheduleTimeout(() => { - void Promise.allSettled([ - fetchProfilesConfig().then((data) => { - commitProfileSnapshot(data); - globalMutate("getProfiles", data, false); - }), - fetchLiveProxies(), - refreshProxyProviders(), - refreshRules(), - refreshRuleProviders(), - ]).catch((error) => { - console.warn( - "[DataProvider] Background refresh after profile switch failed:", - error, - ); - }); - }, 100); - } - - void mutateSwitchStatus((current) => { - if (!current) { - return current; - } - const filteredQueue = current.queue.filter( - (task) => task.taskId !== result.taskId, - ); - const active = - current.active && current.active.taskId === result.taskId - ? null - : current.active; - const isSwitching = filteredQueue.length > 0; - return { - ...current, - active, - queue: filteredQueue, - isSwitching, - lastResult: result, - }; - }, false); - }, - [ - scheduleTimeout, - refreshProxyProviders, - refreshRules, - refreshRuleProviders, - mutateSwitchStatus, - applyProfileSwitchResult, - commitProfileSnapshot, - clearPendingProxyProfile, - ], - ); - useEffect(() => { - isUnmountedRef.current = false; - return () => { - isUnmountedRef.current = true; - clearAllTimeouts(); - }; - }, [clearAllTimeouts]); + let lastProfileId: string | null = null; + let lastUpdateTime = 0; + const refreshThrottle = 800; - useEffect(() => { - if (!switchStatus) { - return; - } - - const meta = switchMetaRef.current; - const nextTarget = - switchStatus.active?.profileId ?? - (switchStatus.queue.length > 0 ? switchStatus.queue[0].profileId : null); - - if (nextTarget && nextTarget !== meta.pendingProfileId) { - meta.pendingProfileId = nextTarget; - void seedProxySnapshot(nextTarget); - } else if (!nextTarget) { - meta.pendingProfileId = null; - } - - const lastResult = switchStatus.lastResult ?? 
null; - if (lastResult) { - handleSwitchResult(lastResult); - } - }, [switchStatus, seedProxySnapshot, handleSwitchResult]); - - useEffect(() => { - let disposed = false; - - const pollEvents = async () => { - if (disposed) { - return; - } - try { - const events = await getProfileSwitchEvents(switchEventSeqRef.current); - if (events.length > 0) { - switchEventSeqRef.current = events[events.length - 1].sequence; - setSwitchEventSeq(switchEventSeqRef.current); - events.forEach((event) => handleSwitchResult(event.result)); - } - } catch (error) { - console.warn("[DataProvider] Failed to poll switch events:", error); - } finally { - if (!disposed) { - const nextDelay = - switchStatus && - (switchStatus.isSwitching || (switchStatus.queue?.length ?? 0) > 0) - ? 250 - : 1000; - scheduleTimeout(pollEvents, nextDelay); - } - } - }; - - scheduleTimeout(pollEvents, 0); - - return () => { - disposed = true; - }; - }, [scheduleTimeout, handleSwitchResult, switchStatus, setSwitchEventSeq]); - - useEffect(() => { + let isUnmounted = false; + const scheduledTimeouts = new Set(); const cleanupFns: Array<() => void> = []; const registerCleanup = (fn: () => void) => { - cleanupFns.push(fn); + if (isUnmounted) { + try { + fn(); + } catch (error) { + console.error("[DataProvider] Immediate cleanup failed:", error); + } + } else { + cleanupFns.push(fn); + } }; const addWindowListener = (eventName: string, handler: EventListener) => { @@ -343,319 +87,140 @@ export const AppDataProvider = ({ return () => window.removeEventListener(eventName, handler); }; - const runProfileChangedPipeline = ( - profileId: string | null, - source: "tauri" | "window", + const scheduleTimeout = ( + callback: () => void | Promise, + delay: number, ) => { + if (isUnmounted) return -1; + + const timeoutId = window.setTimeout(() => { + scheduledTimeouts.delete(timeoutId); + if (!isUnmounted) { + void callback(); + } + }, delay); + + scheduledTimeouts.add(timeoutId); + return timeoutId; + }; + + const clearAllTimeouts = () => { + scheduledTimeouts.forEach((timeoutId) => clearTimeout(timeoutId)); + scheduledTimeouts.clear(); + }; + + const handleProfileChanged = (event: { payload: string }) => { + const newProfileId = event.payload; const now = Date.now(); - const meta = profileChangeMetaRef.current; if ( - meta.lastProfileId === profileId && - now - meta.lastEventTs < PROFILE_EVENT_DEDUP_MS + lastProfileId === newProfileId && + now - lastUpdateTime < refreshThrottle ) { return; } - meta.lastProfileId = profileId; - meta.lastEventTs = now; - - if (profileId) { - void seedProxySnapshot(profileId); - } - - queueProxyRefresh(`profile-changed-${source}`, 500); + lastProfileId = newProfileId; + lastUpdateTime = now; scheduleTimeout(() => { - void fetchProfilesConfig() - .then((data) => { - commitProfileSnapshot(data); - globalMutate("getProfiles", data, false); - }) - .catch((error) => - console.warn( - "[AppDataProvider] Failed to refresh profiles after profile change:", - error, - ), - ); - void refreshProxyProviders().catch((error) => - console.warn( - "[AppDataProvider] Proxy providers refresh failed after profile change:", - error, - ), + refreshRules().catch((error) => + console.warn("[DataProvider] Rules refresh failed:", error), ); - void refreshRules().catch((error) => - console.warn( - "[AppDataProvider] Rules refresh failed after profile change:", - error, - ), - ); - void refreshRuleProviders().catch((error) => - console.warn( - "[AppDataProvider] Rule providers refresh failed after profile change:", - error, - ), + 
refreshRuleProviders().catch((error) => + console.warn("[DataProvider] Rule providers refresh failed:", error), ); }, 200); }; - const handleProfileChanged = (event: { payload: string }) => { - runProfileChangedPipeline(event.payload ?? null, "tauri"); - }; - - const runRefreshClashPipeline = (source: "tauri" | "window") => { + const handleRefreshClash = () => { const now = Date.now(); - if (now - lastClashRefreshAtRef.current < CLASH_REFRESH_DEDUP_MS) { - return; - } - - lastClashRefreshAtRef.current = now; + if (now - lastUpdateTime <= refreshThrottle) return; + lastUpdateTime = now; scheduleTimeout(() => { - void refreshClashConfig().catch((error) => - console.warn( - "[AppDataProvider] Clash config refresh failed after backend update:", - error, - ), + refreshProxy().catch((error) => + console.error("[DataProvider] Proxy refresh failed:", error), ); - void refreshRules().catch((error) => - console.warn( - "[AppDataProvider] Rules refresh failed after backend update:", - error, - ), - ); - void refreshRuleProviders().catch((error) => - console.warn( - "[AppDataProvider] Rule providers refresh failed after backend update:", - error, - ), - ); - void refreshProxyProviders().catch((error) => - console.warn( - "[AppDataProvider] Proxy providers refresh failed after backend update:", - error, - ), - ); - }, 0); - - queueProxyRefresh(`refresh-clash-config-${source}`, 400); + }, 200); }; - const handleProfileUpdateCompleted = (_: { payload: { uid: string } }) => { - queueProxyRefresh("profile-update-completed", 3000); - if (!isUnmountedRef.current) { - scheduleTimeout(() => { - void refreshProxyProviders().catch((error) => - console.warn( - "[DataProvider] Proxy providers refresh failed after profile update completed:", - error, - ), - ); - }, 0); - } - }; + const handleRefreshProxy = () => { + const now = Date.now(); + if (now - lastUpdateTime <= refreshThrottle) return; - const isProxiesPayload = ( - value: unknown, - ): value is ProxiesUpdatedPayload => { - if (!value || typeof value !== "object") { - return false; - } - const candidate = value as Partial; - return candidate.proxies !== undefined && candidate.proxies !== null; - }; - - const handleProxiesUpdatedPayload = ( - rawPayload: unknown, - source: "tauri" | "window", - ) => { - if (!isProxiesPayload(rawPayload)) { - console.warn( - `[AppDataProvider] Ignored ${source} proxies-updated payload`, - rawPayload, + lastUpdateTime = now; + scheduleTimeout(() => { + refreshProxy().catch((error) => + console.warn("[DataProvider] Proxy refresh failed:", error), ); - queueProxyRefresh(`proxies-updated-${source}-invalid`, 500); - return; + }, 200); + }; + + const initializeListeners = async () => { + try { + const unlistenProfile = await listen( + "profile-changed", + handleProfileChanged, + ); + registerCleanup(unlistenProfile); + } catch (error) { + console.error("[AppDataProvider] Failed to listen for profile events:", error); } try { - applyLiveProxyPayload(rawPayload); - } catch (error) { - console.warn( - `[AppDataProvider] Failed to apply ${source} proxies-updated payload`, - error, + const unlistenClash = await listen( + "verge://refresh-clash-config", + handleRefreshClash, ); - queueProxyRefresh(`proxies-updated-${source}-apply-failed`, 500); + const unlistenProxy = await listen( + "verge://refresh-proxy-config", + handleRefreshProxy, ); + + registerCleanup(() => { + unlistenClash(); + unlistenProxy(); + }); + } catch (error) { + console.warn("[AppDataProvider] Failed to set up Tauri event listeners:", error); + + const fallbackHandlers: Array<[string, EventListener]> = [ +
["verge://refresh-clash-config", handleRefreshClash], + ["verge://refresh-proxy-config", handleRefreshProxy], + ]; + + fallbackHandlers.forEach(([eventName, handler]) => { + registerCleanup(addWindowListener(eventName, handler)); + }); } }; - listen<{ uid: string }>( - "profile-update-completed", - handleProfileUpdateCompleted, - ) - .then(registerCleanup) - .catch((error) => - console.error( - "[AppDataProvider] failed to attach profile update listeners:", - error, - ), - ); - - listen("profile-changed", handleProfileChanged) - .then(registerCleanup) - .catch((error) => - console.error( - "[AppDataProvider] failed to attach profile-changed listener:", - error, - ), - ); - - listen("proxies-updated", (event) => { - handleProxiesUpdatedPayload(event.payload, "tauri"); - }) - .then(registerCleanup) - .catch((error) => - console.error( - "[AppDataProvider] failed to attach proxies-updated listener:", - error, - ), - ); - - listen("verge://refresh-clash-config", () => { - runRefreshClashPipeline("tauri"); - }) - .then(registerCleanup) - .catch((error) => - console.error( - "[AppDataProvider] failed to attach refresh-clash-config listener:", - error, - ), - ); - - listen("verge://refresh-proxy-config", () => { - queueProxyRefresh("refresh-proxy-config-tauri", 500); - }) - .then(registerCleanup) - .catch((error) => - console.error( - "[AppDataProvider] failed to attach refresh-proxy-config listener:", - error, - ), - ); - - const fallbackHandlers: Array<[string, EventListener]> = [ - [ - "profile-update-completed", - ((event: Event) => { - const payload = (event as CustomEvent<{ uid: string }>).detail ?? { - uid: "", - }; - handleProfileUpdateCompleted({ payload }); - }) as EventListener, - ], - [ - "profile-changed", - ((event: Event) => { - const payload = (event as CustomEvent).detail ?? null; - runProfileChangedPipeline(payload, "window"); - }) as EventListener, - ], - [ - "proxies-updated", - ((event: Event) => { - const payload = (event as CustomEvent).detail; - handleProxiesUpdatedPayload(payload, "window"); - }) as EventListener, - ], - [ - "verge://refresh-clash-config", - (() => { - runRefreshClashPipeline("window"); - }) as EventListener, - ], - [ - "verge://refresh-proxy-config", - (() => { - queueProxyRefresh("refresh-proxy-config-window", 500); - }) as EventListener, - ], - ]; - - fallbackHandlers.forEach(([eventName, handler]) => { - registerCleanup(addWindowListener(eventName, handler)); - }); + void initializeListeners(); return () => { - cleanupFns.forEach((fn) => { + isUnmounted = true; + clearAllTimeouts(); + + const errors: Error[] = []; + cleanupFns.splice(0).forEach((fn) => { try { fn(); } catch (error) { - console.error("[AppDataProvider] cleanup error:", error); + errors.push( + error instanceof Error ? error : new Error(String(error)), + ); } }); + + if (errors.length > 0) { + console.error( + `[DataProvider] ${errors.length} errors during cleanup:`, + errors, + ); + } }; - }, [ - commitProfileSnapshot, - queueProxyRefresh, - refreshClashConfig, - refreshProxyProviders, - refreshRuleProviders, - refreshRules, - scheduleTimeout, - seedProxySnapshot, - ]); - - const switchTargetProfileId = - switchStatus?.active?.profileId ?? - (switchStatus && switchStatus.queue.length > 0 - ? switchStatus.queue[0].profileId - : null); - - const proxyTargetProfileId = - switchTargetProfileId ?? pendingProxyProfileId ?? proxyProfileId ?? 
null; - const displayProxyStateRef = useRef<{ - view: ProxiesView | null; - profileId: string | null; - }>({ - view: proxyView, - profileId: proxyTargetProfileId, - }); - - const currentDisplay = displayProxyStateRef.current; - - if (!proxyView) { - if ( - currentDisplay.view !== null || - currentDisplay.profileId !== proxyTargetProfileId - ) { - displayProxyStateRef.current = { - view: null, - profileId: proxyTargetProfileId, - }; - } - } else if (proxyHydration === "live") { - if ( - currentDisplay.view !== proxyView || - currentDisplay.profileId !== proxyTargetProfileId - ) { - displayProxyStateRef.current = { - view: proxyView, - profileId: proxyTargetProfileId, - }; - } - } else if (!currentDisplay.view) { - displayProxyStateRef.current = { - view: proxyView, - profileId: proxyTargetProfileId, - }; - } - const displayProxyState = displayProxyStateRef.current; - const proxyDisplayProfileId = displayProxyState.profileId; - const proxiesForRender = displayProxyState.view ?? proxyView; - const isProxyRefreshPending = - (switchStatus?.isSwitching ?? false) || - proxyHydration !== "live" || - proxyTargetProfileId !== proxyDisplayProfileId; + }, [refreshProxy, refreshRules, refreshRuleProviders]); const { data: sysproxy, mutate: refreshSysproxy } = useSWR( "getSystemProxy", @@ -675,10 +240,10 @@ export const AppDataProvider = ({ errorRetryCount: 1, }); - // Provide unified refresh method + // Provide a unified refresh method const refreshAll = useCallback(async () => { await Promise.all([ - fetchLiveProxies(), + refreshProxy(), refreshClashConfig(), refreshRules(), refreshSysproxy(), @@ -686,6 +251,7 @@ export const AppDataProvider = ({ refreshRuleProviders(), ]); }, [ + refreshProxy, refreshClashConfig, refreshRules, refreshSysproxy, @@ -693,22 +259,22 @@ export const AppDataProvider = ({ refreshRuleProviders, ]); - // Aggregate data into context value + // Aggregate all data const value = useMemo(() => { - // Compute the system proxy address + // Compute the system proxy address const calculateSystemProxyAddress = () => { if (!verge || !clashConfig) return "-"; const isPacMode = verge.proxy_auto_config ?? false; if (isPacMode) { - // PAC mode: display the desired proxy address + // PAC mode: show the proxy address we intend to set const proxyHost = verge.proxy_host || "127.0.0.1"; const proxyPort = verge.verge_mixed_port || clashConfig.mixedPort || 7897; return `${proxyHost}:${proxyPort}`; } else { - // HTTP proxy mode: prefer system address, fallback to desired address if invalid + // HTTP proxy mode: prefer the system address, but fall back to the intended address if it is malformed const systemServer = sysproxy?.server; if ( systemServer && @@ -717,7 +283,7 @@ export const AppDataProvider = ({ ) { return systemServer; } else { - // System address invalid: fallback to desired proxy address + // System address invalid; return the intended proxy address const proxyHost = verge.proxy_host || "127.0.0.1"; const proxyPort = verge.verge_mixed_port || clashConfig.mixedPort || 7897; @@ -727,27 +293,22 @@ export const AppDataProvider = ({ };
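The `calculateSystemProxyAddress` logic restored above reduces to a small pure function: PAC mode always reports the intended address, while HTTP mode trusts the OS-reported server only when it looks valid (the exact validity check is partly elided by the hunk; a host:port shape is assumed here). A sketch, with field names taken from the hunk above:

    interface VergeLike {
      proxy_auto_config?: boolean;
      proxy_host?: string;
      verge_mixed_port?: number;
    }

    export function systemProxyAddress(
      verge: VergeLike,
      mixedPort: number,
      systemServer?: string,
    ): string {
      const host = verge.proxy_host || "127.0.0.1";
      const port = verge.verge_mixed_port || mixedPort || 7897;
      if (verge.proxy_auto_config) return `${host}:${port}`; // PAC: intended address
      // Assumed validity check: a non-empty OS value that looks like host:port.
      if (systemServer && systemServer !== "-" && systemServer.includes(":")) {
        return systemServer;
      }
      return `${host}:${port}`; // fall back to the intended address
    }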
return { - // Data - proxies: proxiesForRender, - proxyHydration, - proxyTargetProfileId, - proxyDisplayProfileId, - isProxyRefreshPending, - switchStatus: switchStatus ?? null, + // Data + proxies: proxiesData, clashConfig, rules: rulesData?.rules || [], sysproxy, runningMode, uptime: uptimeData || 0, - // Provider data + // Provider data proxyProviders: proxyProviders || {}, ruleProviders: ruleProviders?.providers || {}, systemProxyAddress: calculateSystemProxyAddress(), - // Refresh helpers - refreshProxy: fetchLiveProxies, + // Refresh helpers + refreshProxy, refreshClashConfig, refreshRules, refreshSysproxy, @@ -756,12 +317,7 @@ export const AppDataProvider = ({ refreshAll, } as AppDataContextType; }, [ - proxiesForRender, - proxyHydration, - proxyTargetProfileId, - proxyDisplayProfileId, - isProxyRefreshPending, - switchStatus, + proxiesData, clashConfig, rulesData, sysproxy, @@ -770,6 +326,7 @@ export const AppDataProvider = ({ proxyProviders, ruleProviders, verge, + refreshProxy, refreshClashConfig, refreshRules, refreshSysproxy,
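The hunks above restore a context value built in one `useMemo`, so consumers re-render only when one of the listed inputs changes. A stripped-down sketch of that provider/consumer shape (names are illustrative, not the real `AppDataContext`):

    import React, { createContext, useContext, useMemo } from "react";

    interface AppDataLike {
      proxies: unknown;
      refreshProxy: () => Promise<unknown>;
    }

    const Ctx = createContext<AppDataLike | null>(null);

    export function useAppDataLike(): AppDataLike {
      const value = useContext(Ctx);
      if (!value) throw new Error("must be used inside the provider");
      return value;
    }

    export function Provider(props: {
      proxies: unknown;
      refreshProxy: () => Promise<unknown>;
      children: React.ReactNode;
    }) {
      // Memoize so the context object identity stays stable between renders
      // unless an input actually changed.
      const value = useMemo(
        () => ({ proxies: props.proxies, refreshProxy: props.refreshProxy }),
        [props.proxies, props.refreshProxy],
      );
      return <Ctx.Provider value={value}>{props.children}</Ctx.Provider>;
    }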
diff --git a/src/services/cmds.ts b/src/services/cmds.ts index 098944181..e1e686bd3 100644 --- a/src/services/cmds.ts +++ b/src/services/cmds.ts @@ -4,52 +4,6 @@ import { getProxies, getProxyProviders } from "tauri-plugin-mihomo-api"; import { showNotice } from "@/services/noticeService"; -export type ProxyProviderRecord = Record< - string, - IProxyProviderItem | undefined ->; - -export interface SwitchTaskStatus { - taskId: number; - profileId: string; - notify: boolean; - stage?: number | null; - queued: boolean; -} - -export interface SwitchResultStatus { - taskId: number; - profileId: string; - success: boolean; - cancelled?: boolean; - finishedAt: number; - errorStage?: string | null; - errorDetail?: string | null; -} - -export interface ProfileSwitchStatus { - isSwitching: boolean; - active?: SwitchTaskStatus | null; - queue: SwitchTaskStatus[]; - cleanupProfiles: string[]; - lastResult?: SwitchResultStatus | null; - lastUpdated: number; -} - -export interface SwitchResultEvent { - sequence: number; - result: SwitchResultStatus; -} - -// Persist the last proxy provider payload so UI can render while waiting on Mihomo. -let cachedProxyProviders: ProxyProviderRecord | null = null; - -export const getCachedProxyProviders = () => cachedProxyProviders; - -export const setCachedProxyProviders = (record: ProxyProviderRecord | null) => { - cachedProxyProviders = record; -}; - export async function copyClashEnv() { return invoke("copy_clash_env"); } @@ -66,14 +20,6 @@ export async function patchProfilesConfig(profiles: IProfilesConfig) { return invoke("patch_profiles_config", { profiles }); } -// Triggers the async state-machine driven switch flow on the backend. -export async function switchProfileCommand( - profileIndex: string, - notifySuccess: boolean, -) { - return invoke("switch_profile", { profileIndex, notifySuccess }); -} - export async function createProfile( item: Partial, fileData?: string | null, @@ -167,29 +113,27 @@ export async function syncTrayProxySelection() { return invoke("sync_tray_proxy_selection"); } -export interface ProxiesView { +export async function calcuProxies(): Promise<{ global: IProxyGroupItem; direct: IProxyItem; groups: IProxyGroupItem[]; records: Record; proxies: IProxyItem[]; -} +}> { + const [proxyResponse, providerResponse] = await Promise.all([ + getProxies(), + calcuProxyProviders(), + ]); -export function buildProxyView( - proxyResponse: Awaited>, - providerRecord?: ProxyProviderRecord | null, -): ProxiesView { const proxyRecord = proxyResponse.proxies; + const providerRecord = providerResponse; // provider name map - const providerMap = providerRecord - ? Object.fromEntries( - Object.entries(providerRecord).flatMap(([provider, item]) => { - if (!item) return []; - return item.proxies.map((p) => [p.name, { ...p, provider }]); - }), - ) - : {}; + const providerMap = Object.fromEntries( + Object.entries(providerRecord).flatMap(([provider, item]) => + item!.proxies.map((p) => [p.name, { ...p, provider }]), + ), + ); // compatible with proxy-providers const generateItem = (name: string) => { @@ -263,56 +207,16 @@ export const AppDataProvider = ({ }; } -export async function calcuProxies(): Promise { - const proxyResponse = await getProxies(); - - let providerRecord = cachedProxyProviders; - if (!providerRecord) { - try { - providerRecord = await calcuProxyProviders(); - } catch (error) { - console.warn("[calcuProxies] Failed to load proxy providers:", error); - } - } - - return buildProxyView(proxyResponse, providerRecord); -} - export async function calcuProxyProviders() { const providers = await getProxyProviders(); - const mappedEntries = Object.entries(providers.providers) - .sort() - .filter( - ([, item]) => - item?.vehicleType === "HTTP" || item?.vehicleType === "File", - ) - .map(([name, item]) => { - if (!item) return [name, undefined] as const; - - const subscriptionInfo = - item.subscriptionInfo && typeof item.subscriptionInfo === "object" - ? { - Upload: item.subscriptionInfo.Upload ?? 0, - Download: item.subscriptionInfo.Download ?? 0, - Total: item.subscriptionInfo.Total ?? 0, - Expire: item.subscriptionInfo.Expire ?? 0, - } - : undefined; - - const normalized: IProxyProviderItem = { - name: item.name, - type: item.type, - proxies: item.proxies ?? [], - updatedAt: item.updatedAt ?? "", - vehicleType: item.vehicleType ?? "", - subscriptionInfo, - }; - return [name, normalized] as const; - }); - - const mapped = Object.fromEntries(mappedEntries) as ProxyProviderRecord; - cachedProxyProviders = mapped; - return mapped; + return Object.fromEntries( + Object.entries(providers.providers) + .sort() + .filter( + ([_, item]) => + item?.vehicleType === "HTTP" || item?.vehicleType === "File", + ), + ); } export async function getClashLogs() { @@ -651,13 +555,3 @@ export const isAdmin = async () => { export async function getNextUpdateTime(uid: string) { return invoke("get_next_update_time", { uid }); } - -export async function getProfileSwitchStatus() { - return invoke("get_profile_switch_status"); } - -export async function getProfileSwitchEvents(afterSequence: number) { - return invoke("get_profile_switch_events", { - afterSequence, - }); -} diff --git a/src/services/noticeService.ts b/src/services/noticeService.ts index 7275dd458..0a3505dac 100644 --- a/src/services/noticeService.ts +++ b/src/services/noticeService.ts @@ -14,20 +14,10 @@ let nextId = 0; let notices: NoticeItem[] = []; const listeners: Set = new Set(); -function flushListeners() { +function notifyListeners() { listeners.forEach((listener) => listener([...notices])); // Pass a copy } -let notifyScheduled = false; -function scheduleNotify() { - if (notifyScheduled) return; - notifyScheduled = true; - requestAnimationFrame(() => { - notifyScheduled = false; - flushListeners(); - }); -} - // Shows a notification.
export function showNotice( @@ -54,7 +44,7 @@ export function showNotice( } notices = [...notices, newNotice]; - scheduleNotify(); + notifyListeners(); return id; } @@ -66,7 +56,7 @@ export function hideNotice(id: number) { clearTimeout(notice.timerId); // Clear timeout if manually closed } notices = notices.filter((n) => n.id !== id); - scheduleNotify(); + notifyListeners(); } // Subscribes a listener function to notice state changes. @@ -87,5 +77,5 @@ export function clearAllNotices() { if (n.timerId) clearTimeout(n.timerId); }); notices = []; - scheduleNotify(); + notifyListeners(); } diff --git a/src/services/refresh.ts b/src/services/refresh.ts deleted file mode 100644 index 6150680da..000000000 --- a/src/services/refresh.ts +++ /dev/null @@ -1,24 +0,0 @@ -import { mutate } from "swr"; - -import { getAxios } from "@/services/api"; - -export const refreshClashData = async () => { - try { - await getAxios(true); - } catch (error) { - console.warn("[Refresh] getAxios failed during clash refresh:", error); - } - - mutate("getProxies"); - mutate("getVersion"); - mutate("getClashConfig"); - mutate("getProxyProviders"); -}; - -export const refreshVergeData = () => { - mutate("getVergeConfig"); - mutate("getSystemProxy"); - mutate("getAutotemProxy"); - mutate("getRunningMode"); - mutate("isServiceAvailable"); -}; diff --git a/src/stores/profile-store.ts b/src/stores/profile-store.ts deleted file mode 100644 index 446b62267..000000000 --- a/src/stores/profile-store.ts +++ /dev/null @@ -1,59 +0,0 @@ -import { create } from "zustand"; - -import type { SwitchResultStatus } from "@/services/cmds"; - -interface ProfileStoreState { - data: IProfilesConfig | null; - optimisticCurrent: string | null; - isHydrating: boolean; - lastEventSeq: number; - lastResult: SwitchResultStatus | null; - applySwitchResult: (result: SwitchResultStatus) => void; - commitHydrated: (data: IProfilesConfig) => void; - setLastEventSeq: (sequence: number) => void; -} - -export const useProfileStore = create((set) => ({ - data: null, - optimisticCurrent: null, - isHydrating: false, - lastEventSeq: 0, - lastResult: null, - applySwitchResult(result) { - // Record the optimistic switch outcome so the UI reflects the desired profile immediately. - set((state) => ({ - lastResult: result, - optimisticCurrent: result.success ? result.profileId : null, - isHydrating: result.success ? true : state.isHydrating, - })); - }, - commitHydrated(data) { - set({ - data, - optimisticCurrent: null, - isHydrating: false, - }); - }, - setLastEventSeq(sequence) { - set({ lastEventSeq: sequence }); - }, -})); - -export const selectEffectiveProfiles = (state: ProfileStoreState) => { - if (!state.data) { - return null; - } - // Prefer the optimistic selection while hydration is pending. - const current = state.optimisticCurrent ?? 
state.data.current; - if ( - state.optimisticCurrent && - state.optimisticCurrent !== state.data.current - ) { - return { ...state.data, current } as IProfilesConfig; - } - return state.data; -}; - -export const selectIsHydrating = (state: ProfileStoreState) => - state.isHydrating; -export const selectLastResult = (state: ProfileStoreState) => state.lastResult; diff --git a/src/stores/proxy-store.ts b/src/stores/proxy-store.ts deleted file mode 100644 index b8c87ef80..000000000 --- a/src/stores/proxy-store.ts +++ /dev/null @@ -1,298 +0,0 @@ -import type { getProxies } from "tauri-plugin-mihomo-api"; -import { create } from "zustand"; - -import { - ProxiesView, - ProxyProviderRecord, - buildProxyView, - calcuProxies, - getCachedProxyProviders, - setCachedProxyProviders, -} from "@/services/cmds"; -import { AsyncEventQueue, nextTick } from "@/utils/asyncQueue"; - -type ProxyHydration = "none" | "snapshot" | "live"; -type RawProxiesResponse = Awaited>; - -export interface ProxiesUpdatedPayload { - proxies: RawProxiesResponse; - providers?: Record | null; - emittedAt?: number; - profileId?: string | null; -} - -interface ProxyStoreState { - data: ProxiesView | null; - hydration: ProxyHydration; - lastUpdated: number | null; - lastProfileId: string | null; - liveFetchRequestId: number; - lastAppliedFetchId: number; - pendingProfileId: string | null; - pendingSnapshotFetchId: number | null; - setSnapshot: (snapshot: ProxiesView, profileId: string) => void; - setLive: (payload: ProxiesUpdatedPayload) => void; - startLiveFetch: () => number; - completeLiveFetch: (requestId: number, view: ProxiesView) => void; - clearPendingProfile: () => void; - reset: () => void; -} - -const normalizeProviderPayload = ( - raw: ProxiesUpdatedPayload["providers"], -): ProxyProviderRecord | null => { - if (!raw || typeof raw !== "object") return null; - - const rawRecord = raw as Record; - const source = - rawRecord.providers && typeof rawRecord.providers === "object" - ? (rawRecord.providers as Record) - : rawRecord; - - const entries = Object.entries(source) - .sort(([a], [b]) => a.localeCompare(b)) - .filter(([, value]) => { - if (!value || typeof value !== "object") { - return false; - } - const vehicleType = value.vehicleType; - return vehicleType === "HTTP" || vehicleType === "File"; - }) - .map(([name, value]) => { - const normalized: IProxyProviderItem = { - name: value.name ?? name, - type: value.type ?? "", - proxies: Array.isArray(value.proxies) ? value.proxies : [], - updatedAt: value.updatedAt ?? "", - vehicleType: value.vehicleType ?? "", - subscriptionInfo: - value.subscriptionInfo && typeof value.subscriptionInfo === "object" - ? { - Upload: Number(value.subscriptionInfo.Upload ?? 0), - Download: Number(value.subscriptionInfo.Download ?? 0), - Total: Number(value.subscriptionInfo.Total ?? 0), - Expire: Number(value.subscriptionInfo.Expire ?? 
0), - } - : undefined, - }; - - return [name, normalized] as const; - }); - - return Object.fromEntries(entries) as ProxyProviderRecord; -}; - -export const useProxyStore = create((set, get) => ({ - data: null, - hydration: "none", - lastUpdated: null, - lastProfileId: null, - liveFetchRequestId: 0, - lastAppliedFetchId: 0, - pendingProfileId: null, - pendingSnapshotFetchId: null, - setSnapshot(snapshot, profileId) { - const stateBefore = get(); - - set((state) => ({ - data: snapshot, - hydration: "snapshot", - lastUpdated: null, - pendingProfileId: profileId, - pendingSnapshotFetchId: state.liveFetchRequestId, - })); - - const hasLiveHydration = - stateBefore.hydration === "live" && - stateBefore.lastProfileId === profileId; - - if (profileId && !hasLiveHydration) { - void fetchLiveProxies().catch((error) => { - console.warn( - "[ProxyStore] Failed to bootstrap live proxies from snapshot:", - error, - ); - scheduleBootstrapLiveFetch(800); - }); - } - }, - setLive(payload) { - const state = get(); - const emittedAt = payload.emittedAt ?? Date.now(); - - if ( - state.hydration === "live" && - state.lastUpdated !== null && - emittedAt <= state.lastUpdated - ) { - return; - } - - const providersRecord = - normalizeProviderPayload(payload.providers) ?? getCachedProxyProviders(); - - if (providersRecord) { - setCachedProxyProviders(providersRecord); - } - - const view = buildProxyView(payload.proxies, providersRecord); - const nextProfileId = payload.profileId ?? state.lastProfileId; - - set((current) => ({ - data: view, - hydration: "live", - lastUpdated: emittedAt, - lastProfileId: nextProfileId ?? null, - lastAppliedFetchId: current.liveFetchRequestId, - pendingProfileId: null, - pendingSnapshotFetchId: null, - })); - }, - startLiveFetch() { - let nextRequestId = 0; - set((state) => { - nextRequestId = state.liveFetchRequestId + 1; - return { - liveFetchRequestId: nextRequestId, - }; - }); - return nextRequestId; - }, - completeLiveFetch(requestId, view) { - const state = get(); - if (requestId <= state.lastAppliedFetchId) { - return; - } - - const shouldAdoptPending = - state.pendingProfileId !== null && - requestId >= (state.pendingSnapshotFetchId ?? 0); - - set({ - data: view, - hydration: "live", - lastUpdated: Date.now(), - lastProfileId: shouldAdoptPending - ? state.pendingProfileId - : state.lastProfileId, - lastAppliedFetchId: requestId, - pendingProfileId: shouldAdoptPending ? null : state.pendingProfileId, - pendingSnapshotFetchId: shouldAdoptPending - ? 
null - : state.pendingSnapshotFetchId, - }); - }, - clearPendingProfile() { - set({ - pendingProfileId: null, - pendingSnapshotFetchId: null, - }); - }, - reset() { - set({ - data: null, - hydration: "none", - lastUpdated: null, - lastProfileId: null, - liveFetchRequestId: 0, - lastAppliedFetchId: 0, - pendingProfileId: null, - pendingSnapshotFetchId: null, - }); - scheduleBootstrapLiveFetch(200); - }, -})); - -const liveApplyQueue = new AsyncEventQueue(); -let pendingLivePayload: ProxiesUpdatedPayload | null = null; -let liveApplyScheduled = false; - -const scheduleLiveApply = () => { - if (liveApplyScheduled) return; - liveApplyScheduled = true; - - const dispatch = () => { - liveApplyScheduled = false; - const payload = pendingLivePayload; - pendingLivePayload = null; - if (!payload) return; - - liveApplyQueue.enqueue(async () => { - await nextTick(); - useProxyStore.getState().setLive(payload); - }); - }; - - if ( - typeof window !== "undefined" && - typeof window.requestAnimationFrame === "function" - ) { - window.requestAnimationFrame(dispatch); - } else { - setTimeout(dispatch, 16); - } -}; - -export const applyLiveProxyPayload = (payload: ProxiesUpdatedPayload) => { - pendingLivePayload = payload; - scheduleLiveApply(); -}; - -export const fetchLiveProxies = async () => { - const requestId = useProxyStore.getState().startLiveFetch(); - const view = await calcuProxies(); - useProxyStore.getState().completeLiveFetch(requestId, view); -}; - -const MAX_BOOTSTRAP_ATTEMPTS = 5; -const BOOTSTRAP_BASE_DELAY_MS = 600; -let bootstrapAttempts = 0; -let bootstrapTimer: number | null = null; - -function attemptBootstrapLiveFetch() { - const state = useProxyStore.getState(); - if (state.hydration === "live") { - bootstrapAttempts = 0; - return; - } - - if (bootstrapAttempts >= MAX_BOOTSTRAP_ATTEMPTS) { - return; - } - - const attemptNumber = ++bootstrapAttempts; - - void fetchLiveProxies() - .then(() => { - bootstrapAttempts = 0; - }) - .catch((error) => { - console.warn( - `[ProxyStore] Bootstrap live fetch attempt ${attemptNumber} failed:`, - error, - ); - if (attemptNumber < MAX_BOOTSTRAP_ATTEMPTS) { - scheduleBootstrapLiveFetch(BOOTSTRAP_BASE_DELAY_MS * attemptNumber); - } - }); -} - -function scheduleBootstrapLiveFetch(delay = 0) { - if (typeof window === "undefined") { - return; - } - - if (bootstrapTimer !== null) { - window.clearTimeout(bootstrapTimer); - bootstrapTimer = null; - } - - bootstrapTimer = window.setTimeout(() => { - bootstrapTimer = null; - attemptBootstrapLiveFetch(); - }, delay); -} - -if (typeof window !== "undefined") { - void nextTick().then(() => scheduleBootstrapLiveFetch(0)); -} diff --git a/src/utils/asyncQueue.ts b/src/utils/asyncQueue.ts deleted file mode 100644 index 927faa837..000000000 --- a/src/utils/asyncQueue.ts +++ /dev/null @@ -1,31 +0,0 @@ -export class AsyncEventQueue { - private tail: Promise = Promise.resolve(); - - enqueue(task: () => Promise | void) { - this.tail = this.tail - .then(async () => { - await task(); - }) - .catch((error) => { - console.error("AsyncEventQueue task failed", error); - }); - } - - clear() { - this.tail = Promise.resolve(); - } -} - -export const nextTick = () => - new Promise((resolve) => { - if (typeof queueMicrotask === "function") { - queueMicrotask(resolve); - } else { - Promise.resolve().then(() => resolve()); - } - }); - -export const afterPaint = () => - new Promise((resolve) => { - requestAnimationFrame(() => resolve()); - }); diff --git a/src/utils/proxy-snapshot.ts b/src/utils/proxy-snapshot.ts deleted file mode 
100644 index 2e451db2f..000000000 --- a/src/utils/proxy-snapshot.ts +++ /dev/null @@ -1,205 +0,0 @@ -import yaml from "js-yaml"; - -const createProxyItem = ( - name: string, - partial: Partial = {}, -): IProxyItem => ({ - name, - type: partial.type ?? "unknown", - udp: partial.udp ?? false, - xudp: partial.xudp ?? false, - tfo: partial.tfo ?? false, - mptcp: partial.mptcp ?? false, - smux: partial.smux ?? false, - history: [], - provider: partial.provider, - testUrl: partial.testUrl, - hidden: partial.hidden, - icon: partial.icon, - fixed: partial.fixed, -}); - -const createGroupItem = ( - name: string, - all: IProxyItem[], - partial: Partial = {}, -): IProxyGroupItem => { - const rest = { ...partial } as Partial; - delete (rest as Partial).all; - const base = createProxyItem(name, rest); - return { - ...base, - all, - now: partial.now ?? base.now, - }; -}; - -const ensureProxyItem = ( - map: Map, - name: string, - source?: Partial, -) => { - const key = String(name); - if (map.has(key)) return map.get(key)!; - const item = createProxyItem(key, source); - map.set(key, item); - return item; -}; - -const parseProxyEntry = (entry: any): IProxyItem | null => { - if (!entry || typeof entry !== "object") return null; - const name = entry.name || entry.uid || entry.id; - if (!name) return null; - return createProxyItem(String(name), { - type: entry.type ? String(entry.type) : undefined, - udp: Boolean(entry.udp), - xudp: Boolean(entry.xudp), - tfo: Boolean(entry.tfo), - mptcp: Boolean(entry.mptcp), - smux: Boolean(entry.smux), - testUrl: entry.test_url || entry.testUrl, - }); -}; - -const isNonEmptyString = (value: unknown): value is string => - typeof value === "string" && value.trim().length > 0; - -const parseProxyGroup = ( - entry: any, - proxyMap: Map, -): IProxyGroupItem | null => { - if (!entry || typeof entry !== "object") return null; - const name = entry.name; - if (!name) return null; - - const rawProxies: unknown[] = Array.isArray(entry.proxies) - ? entry.proxies - : []; - - const proxyRefs: string[] = rawProxies - .filter(isNonEmptyString) - .map((item) => item.trim()); - - const uniqueNames: string[] = Array.from(new Set(proxyRefs)); - - const all = uniqueNames.map((proxyName) => - ensureProxyItem(proxyMap, proxyName), - ); - - return createGroupItem(String(name), all, { - type: entry.type ? String(entry.type) : "Selector", - provider: entry.provider, - testUrl: entry.testUrl || entry.test_url, - now: typeof entry.now === "string" ? 
entry.now : undefined, - }); -}; - -const mapRecords = ( - proxies: Map, - groups: IProxyGroupItem[], - extra: IProxyItem[] = [], -): Record => { - const result: Record = {}; - proxies.forEach((item, key) => { - result[key] = item; - }); - groups.forEach((group) => { - result[group.name] = group as unknown as IProxyItem; - }); - extra.forEach((item) => { - result[item.name] = item; - }); - return result; -}; - -export const createProxySnapshotFromProfile = ( - yamlContent: string, -): { - global: IProxyGroupItem; - direct: IProxyItem; - groups: IProxyGroupItem[]; - records: Record; - proxies: IProxyItem[]; -} | null => { - let parsed: any; - try { - parsed = yaml.load(yamlContent); - } catch (error) { - console.warn("[ProxySnapshot] Failed to parse YAML:", error); - return null; - } - - if (!parsed || typeof parsed !== "object") { - return null; - } - - const proxyMap = new Map(); - - if (Array.isArray((parsed as any).proxies)) { - for (const entry of (parsed as any).proxies) { - const item = parseProxyEntry(entry); - if (item) { - proxyMap.set(item.name, item); - } - } - } - - const proxyProviders = (parsed as any)["proxy-providers"]; - if (proxyProviders && typeof proxyProviders === "object") { - for (const key of Object.keys(proxyProviders)) { - const provider = proxyProviders[key]; - if (provider && Array.isArray(provider.proxies)) { - provider.proxies - .filter( - (proxyName: unknown): proxyName is string => - typeof proxyName === "string", - ) - .forEach((proxyName: string) => ensureProxyItem(proxyMap, proxyName)); - } - } - } - - const groups: IProxyGroupItem[] = []; - if (Array.isArray((parsed as any)["proxy-groups"])) { - for (const entry of (parsed as any)["proxy-groups"]) { - const groupItem = parseProxyGroup(entry, proxyMap); - if (groupItem) { - groups.push(groupItem); - } - } - } - - const direct = createProxyItem("DIRECT", { type: "Direct" }); - const reject = createProxyItem("REJECT", { type: "Reject" }); - - ensureProxyItem(proxyMap, direct.name, direct); - ensureProxyItem(proxyMap, reject.name, reject); - - let global = groups.find((group) => group.name === "GLOBAL"); - if (!global) { - const globalRefs = groups.flatMap((group) => - group.all.map((proxy) => proxy.name), - ); - const unique = Array.from(new Set(globalRefs)); - const all = unique.map((name) => ensureProxyItem(proxyMap, name)); - global = createGroupItem("GLOBAL", all, { - type: "Selector", - hidden: true, - }); - groups.unshift(global); - } - - const proxies = Array.from(proxyMap.values()).filter( - (item) => !groups.some((group) => group.name === item.name), - ); - - const records = mapRecords(proxyMap, groups, [direct, reject]); - - return { - global, - direct, - groups, - records, - proxies, - }; -};
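For reference, the serialization trick inside the deleted `asyncQueue.ts` above is worth keeping in mind: chaining every task onto a single tail promise runs tasks strictly in enqueue order, and catching per task keeps one failure from poisoning the chain. A minimal sketch of the same technique:

    export class SerialQueue {
      private tail: Promise<void> = Promise.resolve();

      enqueue(task: () => Promise<void> | void): Promise<void> {
        this.tail = this.tail
          .then(() => task())
          .catch((error) => {
            // Swallow so the next task still runs; real code may want reporting.
            console.error("queued task failed", error);
          });
        return this.tail;
      }
    }

    // Usage: strict FIFO ordering, even across independent callers.
    const queue = new SerialQueue();
    queue.enqueue(async () => {
      /* apply snapshot */
    });
    queue.enqueue(() => {
      /* apply live payload afterwards */
    });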
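Similarly, the deleted `proxy-snapshot.ts` boiled down to parsing the profile YAML and lifting out the `proxies`/`proxy-groups` sections before live data arrives. A much smaller sketch of that parse step, assuming only `js-yaml` (types and names here are illustrative, not the deleted API):

    import yaml from "js-yaml";

    interface GroupSketch {
      name: string;
      proxies: string[];
      now?: string;
    }

    // Pull selector groups out of a profile YAML; returns [] on malformed input.
    export function readProxyGroups(yamlContent: string): GroupSketch[] {
      let parsed: unknown;
      try {
        parsed = yaml.load(yamlContent);
      } catch {
        return [];
      }
      if (!parsed || typeof parsed !== "object") return [];

      const groups = (parsed as Record<string, unknown>)["proxy-groups"];
      if (!Array.isArray(groups)) return [];

      return groups.flatMap((entry): GroupSketch[] => {
        if (!entry || typeof entry !== "object") return [];
        const g = entry as Record<string, unknown>;
        if (typeof g.name !== "string") return [];
        return [
          {
            name: g.name,
            proxies: Array.isArray(g.proxies)
              ? g.proxies.filter((p): p is string => typeof p === "string")
              : [],
            now: typeof g.now === "string" ? g.now : undefined,
          },
        ];
      });
    }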