Mirror of https://github.com/clash-verge-rev/clash-verge-rev.git (synced 2026-01-29 00:35:38 +08:00)
Revert "refactor: profile switch (#5197)"
This reverts commit c2dcd86722.
@@ -30,7 +30,6 @@
 - 修复悬浮跳转导航失效
 - 修复小键盘热键映射错误
 - 修复前端无法及时刷新操作状态
-- 修复切换订阅卡死
 
 <details>
 <summary><strong> ✨ 新增功能 </strong></summary>
@@ -78,7 +77,6 @@
 - 优化首页当前节点对MATCH规则的支持
 - 允许在 `界面设置` 修改 `悬浮跳转导航延迟`
 - 添加热键绑定错误的提示信息
-- 重构订阅切换,保证代理页面的及时刷新
 - 在 macOS 10.15 及更高版本默认包含 Mihomo-go122,以解决 Intel 架构 Mac 无法运行内核的问题
 
 </details>
@@ -1,48 +0,0 @@
-use super::CmdResult;
-use crate::{logging, utils::logging::Type};
-use serde::Deserialize;
-
-#[derive(Debug, Deserialize)]
-#[serde(rename_all = "camelCase")]
-pub struct FrontendLogPayload {
-    pub level: Option<String>,
-    pub message: String,
-    pub context: Option<serde_json::Value>,
-}
-
-#[tauri::command]
-pub fn frontend_log(payload: FrontendLogPayload) -> CmdResult<()> {
-    let level = payload.level.as_deref().unwrap_or("info");
-    match level {
-        "trace" | "debug" => logging!(
-            debug,
-            Type::Frontend,
-            "[frontend] {}",
-            payload.message.as_str()
-        ),
-        "warn" => logging!(
-            warn,
-            Type::Frontend,
-            "[frontend] {}",
-            payload.message.as_str()
-        ),
-        "error" => logging!(
-            error,
-            Type::Frontend,
-            "[frontend] {}",
-            payload.message.as_str()
-        ),
-        _ => logging!(
-            info,
-            Type::Frontend,
-            "[frontend] {}",
-            payload.message.as_str()
-        ),
-    }
-
-    if let Some(context) = payload.context {
-        logging!(info, Type::Frontend, "[frontend] context: {}", context);
-    }
-
-    Ok(())
-}
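The removed `frontend_log` command above accepts a camelCase JSON payload from the webview. A minimal standalone sketch (not part of the diff; it assumes the `serde` derive feature and `serde_json`) of how `#[serde(rename_all = "camelCase")]` maps such a payload onto a Rust struct:

```rust
// Standalone illustration: the struct mirrors FrontendLogPayload but is not the real type.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct LogPayload {
    level: Option<String>,
    message: String,
    context: Option<serde_json::Value>,
}

fn main() {
    // Field names arrive in camelCase from JavaScript and are renamed automatically.
    let json = r#"{"level":"warn","message":"proxy list empty","context":{"page":"proxies"}}"#;
    let payload: LogPayload = serde_json::from_str(json).expect("valid payload");
    assert_eq!(payload.level.as_deref(), Some("warn"));
    println!("{:?}", payload);
}
```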
@@ -7,12 +7,10 @@ pub type CmdResult<T = ()> = Result<T, String>;
 pub mod app;
 pub mod backup;
 pub mod clash;
-pub mod frontend;
 pub mod lightweight;
 pub mod media_unlock_checker;
 pub mod network;
 pub mod profile;
-mod profile_switch;
 pub mod proxy;
 pub mod runtime;
 pub mod save_profile;
@@ -27,7 +25,6 @@ pub mod webdav;
 pub use app::*;
 pub use backup::*;
 pub use clash::*;
-pub use frontend::*;
 pub use lightweight::*;
 pub use media_unlock_checker::*;
 pub use network::*;
@@ -1,4 +1,5 @@
-use super::{CmdResult, StringifyErr, profile_switch};
+use super::CmdResult;
+use super::StringifyErr;
 use crate::{
     config::{
         Config, IProfiles, PrfItem, PrfOption,
@@ -8,191 +9,77 @@ use crate::{
|
|||||||
},
|
},
|
||||||
profiles_append_item_safe,
|
profiles_append_item_safe,
|
||||||
},
|
},
|
||||||
core::{CoreManager, handle, timer::Timer},
|
core::{CoreManager, handle, timer::Timer, tray::Tray},
|
||||||
feat, logging, ret_err,
|
feat, logging,
|
||||||
|
process::AsyncHandler,
|
||||||
|
ret_err,
|
||||||
utils::{dirs, help, logging::Type},
|
utils::{dirs, help, logging::Type},
|
||||||
};
|
};
|
||||||
use once_cell::sync::Lazy;
|
|
||||||
use parking_lot::RwLock;
|
|
||||||
use smartstring::alias::String;
|
use smartstring::alias::String;
|
||||||
use std::sync::atomic::{AtomicU64, Ordering};
|
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
|
||||||
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
use std::time::Duration;
|
||||||
|
|
||||||
use crate::cmd::profile_switch::{ProfileSwitchStatus, SwitchResultEvent};
|
// 全局请求序列号跟踪,用于避免队列化执行
|
||||||
|
static CURRENT_REQUEST_SEQUENCE: AtomicU64 = AtomicU64::new(0);
|
||||||
|
|
||||||
#[derive(Clone)]
|
static CURRENT_SWITCHING_PROFILE: AtomicBool = AtomicBool::new(false);
|
||||||
struct CachedProfiles {
|
|
||||||
snapshot: IProfiles,
|
|
||||||
captured_at: Instant,
|
|
||||||
}
|
|
||||||
|
|
||||||
static PROFILES_CACHE: Lazy<RwLock<Option<CachedProfiles>>> = Lazy::new(|| RwLock::new(None));
|
|
||||||
|
|
||||||
#[derive(Default)]
|
|
||||||
struct SnapshotMetrics {
|
|
||||||
fast_hits: AtomicU64,
|
|
||||||
cache_hits: AtomicU64,
|
|
||||||
blocking_hits: AtomicU64,
|
|
||||||
refresh_scheduled: AtomicU64,
|
|
||||||
last_log_ms: AtomicU64,
|
|
||||||
}
|
|
||||||
|
|
||||||
static SNAPSHOT_METRICS: Lazy<SnapshotMetrics> = Lazy::new(SnapshotMetrics::default);
|
|
||||||
|
|
||||||
/// Store the latest snapshot so cache consumers can reuse it without hitting the lock again.
|
|
||||||
fn update_profiles_cache(snapshot: &IProfiles) {
|
|
||||||
*PROFILES_CACHE.write() = Some(CachedProfiles {
|
|
||||||
snapshot: snapshot.clone(),
|
|
||||||
captured_at: Instant::now(),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the cached snapshot and how old it is, if present.
|
|
||||||
fn cached_profiles_snapshot() -> Option<(IProfiles, u128)> {
|
|
||||||
PROFILES_CACHE.read().as_ref().map(|entry| {
|
|
||||||
(
|
|
||||||
entry.snapshot.clone(),
|
|
||||||
entry.captured_at.elapsed().as_millis(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the latest profiles snapshot, preferring cached data so UI requests never block.
|
|
||||||
#[tauri::command]
|
#[tauri::command]
|
||||||
pub async fn get_profiles() -> CmdResult<IProfiles> {
|
pub async fn get_profiles() -> CmdResult<IProfiles> {
|
||||||
let started_at = Instant::now();
|
// 策略1: 尝试快速获取latest数据
|
||||||
|
let latest_result = tokio::time::timeout(Duration::from_millis(500), async {
|
||||||
|
let profiles = Config::profiles().await;
|
||||||
|
let latest = profiles.latest_ref();
|
||||||
|
IProfiles {
|
||||||
|
current: latest.current.clone(),
|
||||||
|
items: latest.items.clone(),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.await;
|
||||||
|
|
||||||
// Resolve snapshots in three tiers so UI reads never stall on a mutex:
|
match latest_result {
|
||||||
// 1) try a non-blocking read, 2) fall back to the last cached copy while a
|
Ok(profiles) => {
|
||||||
// writer holds the lock, 3) block and refresh the cache as a final resort.
|
logging!(info, Type::Cmd, "快速获取配置列表成功");
|
||||||
if let Some(snapshot) = read_profiles_snapshot_nonblocking().await {
|
return Ok(profiles);
|
||||||
let item_count = snapshot
|
}
|
||||||
.items
|
Err(_) => {
|
||||||
.as_ref()
|
logging!(warn, Type::Cmd, "快速获取配置超时(500ms)");
|
||||||
.map(|items| items.len())
|
}
|
||||||
.unwrap_or(0);
|
|
||||||
update_profiles_cache(&snapshot);
|
|
||||||
SNAPSHOT_METRICS.fast_hits.fetch_add(1, Ordering::Relaxed);
|
|
||||||
logging!(
|
|
||||||
debug,
|
|
||||||
Type::Cmd,
|
|
||||||
"[Profiles] Snapshot served (path=fast, items={}, elapsed={}ms)",
|
|
||||||
item_count,
|
|
||||||
started_at.elapsed().as_millis()
|
|
||||||
);
|
|
||||||
maybe_log_snapshot_metrics();
|
|
||||||
return Ok(snapshot);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some((cached, age_ms)) = cached_profiles_snapshot() {
|
// 策略2: 如果快速获取失败,尝试获取data()
|
||||||
SNAPSHOT_METRICS.cache_hits.fetch_add(1, Ordering::Relaxed);
|
let data_result = tokio::time::timeout(Duration::from_secs(2), async {
|
||||||
logging!(
|
let profiles = Config::profiles().await;
|
||||||
debug,
|
let data = profiles.latest_ref();
|
||||||
Type::Cmd,
|
IProfiles {
|
||||||
"[Profiles] Served cached snapshot while lock busy (age={}ms)",
|
current: data.current.clone(),
|
||||||
age_ms
|
items: data.items.clone(),
|
||||||
);
|
}
|
||||||
schedule_profiles_snapshot_refresh();
|
})
|
||||||
maybe_log_snapshot_metrics();
|
.await;
|
||||||
return Ok(cached);
|
|
||||||
|
match data_result {
|
||||||
|
Ok(profiles) => {
|
||||||
|
logging!(info, Type::Cmd, "获取draft配置列表成功");
|
||||||
|
return Ok(profiles);
|
||||||
|
}
|
||||||
|
Err(join_err) => {
|
||||||
|
logging!(
|
||||||
|
error,
|
||||||
|
Type::Cmd,
|
||||||
|
"获取draft配置任务失败或超时: {}",
|
||||||
|
join_err
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let snapshot = read_profiles_snapshot_blocking().await;
|
// 策略3: fallback,尝试重新创建配置
|
||||||
let item_count = snapshot
|
logging!(warn, Type::Cmd, "所有获取配置策略都失败,尝试fallback");
|
||||||
.items
|
|
||||||
.as_ref()
|
Ok(IProfiles::new().await)
|
||||||
.map(|items| items.len())
|
|
||||||
.unwrap_or(0);
|
|
||||||
update_profiles_cache(&snapshot);
|
|
||||||
SNAPSHOT_METRICS
|
|
||||||
.blocking_hits
|
|
||||||
.fetch_add(1, Ordering::Relaxed);
|
|
||||||
logging!(
|
|
||||||
debug,
|
|
||||||
Type::Cmd,
|
|
||||||
"[Profiles] Snapshot served (path=blocking, items={}, elapsed={}ms)",
|
|
||||||
item_count,
|
|
||||||
started_at.elapsed().as_millis()
|
|
||||||
);
|
|
||||||
maybe_log_snapshot_metrics();
|
|
||||||
Ok(snapshot)
|
|
||||||
}
|
}
|
||||||
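The removed `get_profiles` above resolves snapshots in three tiers so UI reads never stall on the config lock: a non-blocking read first, the last cached copy while a writer is busy, and a blocking read only as a last resort. A standalone sketch of that tiered-read pattern (assumes the `parking_lot` and `once_cell` crates; `Snapshot` stands in for `IProfiles`):

```rust
use once_cell::sync::Lazy;
use parking_lot::RwLock;

#[derive(Clone, Debug, Default)]
struct Snapshot {
    current: Option<String>,
}

static LIVE: Lazy<RwLock<Snapshot>> = Lazy::new(|| RwLock::new(Snapshot::default()));
static CACHE: Lazy<RwLock<Option<Snapshot>>> = Lazy::new(|| RwLock::new(None));

fn read_snapshot() -> Snapshot {
    // Tier 1: non-blocking read of the live data; also refresh the cache.
    if let Some(guard) = LIVE.try_read() {
        let snap = (*guard).clone();
        *CACHE.write() = Some(snap.clone());
        return snap;
    }
    // Tier 2: serve the last cached copy while a writer holds the lock.
    if let Some(cached) = CACHE.read().as_ref().cloned() {
        return cached;
    }
    // Tier 3: block until the writer releases the lock.
    (*LIVE.read()).clone()
}

fn main() {
    println!("{:?}", read_snapshot());
}
```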
|
|
||||||
/// Try to grab the latest profile data without waiting for the writer.
|
/// 增强配置文件
|
||||||
async fn read_profiles_snapshot_nonblocking() -> Option<IProfiles> {
|
|
||||||
let profiles = Config::profiles().await;
|
|
||||||
profiles.try_latest_ref().map(|guard| (**guard).clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Fall back to a blocking read when we absolutely must have fresh data.
|
|
||||||
async fn read_profiles_snapshot_blocking() -> IProfiles {
|
|
||||||
let profiles = Config::profiles().await;
|
|
||||||
let guard = profiles.latest_ref();
|
|
||||||
(**guard).clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Schedule a background cache refresh once the exclusive lock becomes available again.
|
|
||||||
fn schedule_profiles_snapshot_refresh() {
|
|
||||||
crate::process::AsyncHandler::spawn(|| async {
|
|
||||||
// Once the lock is released we refresh the cached snapshot so the next
|
|
||||||
// request observes the latest data instead of the stale fallback.
|
|
||||||
SNAPSHOT_METRICS
|
|
||||||
.refresh_scheduled
|
|
||||||
.fetch_add(1, Ordering::Relaxed);
|
|
||||||
let snapshot = read_profiles_snapshot_blocking().await;
|
|
||||||
update_profiles_cache(&snapshot);
|
|
||||||
logging!(
|
|
||||||
debug,
|
|
||||||
Type::Cmd,
|
|
||||||
"[Profiles] Cache refreshed after busy snapshot"
|
|
||||||
);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
fn maybe_log_snapshot_metrics() {
|
|
||||||
const LOG_INTERVAL_MS: u64 = 5_000;
|
|
||||||
let now_ms = current_millis();
|
|
||||||
let last_ms = SNAPSHOT_METRICS.last_log_ms.load(Ordering::Relaxed);
|
|
||||||
if now_ms.saturating_sub(last_ms) < LOG_INTERVAL_MS {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if SNAPSHOT_METRICS
|
|
||||||
.last_log_ms
|
|
||||||
.compare_exchange(last_ms, now_ms, Ordering::SeqCst, Ordering::Relaxed)
|
|
||||||
.is_err()
|
|
||||||
{
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
let fast = SNAPSHOT_METRICS.fast_hits.swap(0, Ordering::SeqCst);
|
|
||||||
let cache = SNAPSHOT_METRICS.cache_hits.swap(0, Ordering::SeqCst);
|
|
||||||
let blocking = SNAPSHOT_METRICS.blocking_hits.swap(0, Ordering::SeqCst);
|
|
||||||
let refresh = SNAPSHOT_METRICS.refresh_scheduled.swap(0, Ordering::SeqCst);
|
|
||||||
|
|
||||||
if fast == 0 && cache == 0 && blocking == 0 && refresh == 0 {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
logging!(
|
|
||||||
debug,
|
|
||||||
Type::Cmd,
|
|
||||||
"[Profiles][Metrics] 5s window => fast={}, cache={}, blocking={}, refresh_jobs={}",
|
|
||||||
fast,
|
|
||||||
cache,
|
|
||||||
blocking,
|
|
||||||
refresh
|
|
||||||
);
|
|
||||||
}
|
|
||||||
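`maybe_log_snapshot_metrics` above rate-limits its output with an atomic timestamp plus a `compare_exchange`, so only one caller per 5-second window actually emits the log line. A standalone sketch of that pattern (hypothetical names, not from the diff):

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{SystemTime, UNIX_EPOCH};

static LAST_LOG_MS: AtomicU64 = AtomicU64::new(0);
const WINDOW_MS: u64 = 5_000;

fn now_ms() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_millis() as u64)
        .unwrap_or(0)
}

fn log_if_window_elapsed(msg: &str) {
    let now = now_ms();
    let last = LAST_LOG_MS.load(Ordering::Relaxed);
    if now.saturating_sub(last) < WINDOW_MS {
        return; // still inside the window
    }
    // Only the caller that wins the CAS logs; losers skip silently.
    if LAST_LOG_MS
        .compare_exchange(last, now, Ordering::SeqCst, Ordering::Relaxed)
        .is_ok()
    {
        println!("{msg}");
    }
}

fn main() {
    log_if_window_elapsed("metrics window elapsed");
    log_if_window_elapsed("suppressed (same window)");
}
```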
|
|
||||||
fn current_millis() -> u64 {
|
|
||||||
SystemTime::now()
|
|
||||||
.duration_since(UNIX_EPOCH)
|
|
||||||
.unwrap_or(Duration::ZERO)
|
|
||||||
.as_millis() as u64
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Run the optional enhancement pipeline and refresh Clash when it completes.
|
|
||||||
#[tauri::command]
|
#[tauri::command]
|
||||||
pub async fn enhance_profiles() -> CmdResult {
|
pub async fn enhance_profiles() -> CmdResult {
|
||||||
match feat::enhance_profiles().await {
|
match feat::enhance_profiles().await {
|
||||||
@@ -206,106 +93,79 @@ pub async fn enhance_profiles() -> CmdResult {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Download a profile from the given URL and persist it to the local catalog.
|
/// 导入配置文件
|
||||||
#[tauri::command]
|
#[tauri::command]
|
||||||
pub async fn import_profile(url: std::string::String, option: Option<PrfOption>) -> CmdResult {
|
pub async fn import_profile(url: std::string::String, option: Option<PrfOption>) -> CmdResult {
|
||||||
logging!(info, Type::Cmd, "[Profile Import] Begin: {}", url);
|
logging!(info, Type::Cmd, "[导入订阅] 开始导入: {}", url);
|
||||||
|
|
||||||
// Rely on PrfItem::from_url internal timeout/retry logic instead of wrapping with tokio::time::timeout
|
// 直接依赖 PrfItem::from_url 自身的超时/重试逻辑,不再使用 tokio::time::timeout 包裹
|
||||||
let item = match PrfItem::from_url(&url, None, None, option).await {
|
let item = match PrfItem::from_url(&url, None, None, option).await {
|
||||||
Ok(it) => {
|
Ok(it) => {
|
||||||
logging!(
|
logging!(info, Type::Cmd, "[导入订阅] 下载完成,开始保存配置");
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"[Profile Import] Download complete; saving configuration"
|
|
||||||
);
|
|
||||||
it
|
it
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
logging!(error, Type::Cmd, "[Profile Import] Download failed: {}", e);
|
logging!(error, Type::Cmd, "[导入订阅] 下载失败: {}", e);
|
||||||
return Err(format!("Profile import failed: {}", e).into());
|
return Err(format!("导入订阅失败: {}", e).into());
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
match profiles_append_item_safe(item.clone()).await {
|
match profiles_append_item_safe(item.clone()).await {
|
||||||
Ok(_) => match profiles_save_file_safe().await {
|
Ok(_) => match profiles_save_file_safe().await {
|
||||||
Ok(_) => {
|
Ok(_) => {
|
||||||
logging!(
|
logging!(info, Type::Cmd, "[导入订阅] 配置文件保存成功");
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"[Profile Import] Configuration file saved successfully"
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
logging!(
|
logging!(error, Type::Cmd, "[导入订阅] 保存配置文件失败: {}", e);
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"[Profile Import] Failed to save configuration file: {}",
|
|
||||||
e
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
logging!(
|
logging!(error, Type::Cmd, "[导入订阅] 保存配置失败: {}", e);
|
||||||
error,
|
return Err(format!("导入订阅失败: {}", e).into());
|
||||||
Type::Cmd,
|
|
||||||
"[Profile Import] Failed to persist configuration: {}",
|
|
||||||
e
|
|
||||||
);
|
|
||||||
return Err(format!("Profile import failed: {}", e).into());
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Immediately emit a configuration change notification
|
// 立即发送配置变更通知
|
||||||
if let Some(uid) = &item.uid {
|
if let Some(uid) = &item.uid {
|
||||||
logging!(
|
logging!(info, Type::Cmd, "[导入订阅] 发送配置变更通知: {}", uid);
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"[Profile Import] Emitting configuration change event: {}",
|
|
||||||
uid
|
|
||||||
);
|
|
||||||
handle::Handle::notify_profile_changed(uid.clone());
|
handle::Handle::notify_profile_changed(uid.clone());
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save configuration asynchronously and emit a global notification
|
// 异步保存配置文件并发送全局通知
|
||||||
let uid_clone = item.uid.clone();
|
let uid_clone = item.uid.clone();
|
||||||
if let Some(uid) = uid_clone {
|
if let Some(uid) = uid_clone {
|
||||||
// Delay notification to ensure the file is fully written
|
// 延迟发送,确保文件已完全写入
|
||||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||||
handle::Handle::notify_profile_changed(uid);
|
handle::Handle::notify_profile_changed(uid);
|
||||||
}
|
}
|
||||||
|
|
||||||
logging!(info, Type::Cmd, "[Profile Import] Completed: {}", url);
|
logging!(info, Type::Cmd, "[导入订阅] 导入完成: {}", url);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Move a profile in the list relative to another entry.
|
/// 调整profile的顺序
|
||||||
#[tauri::command]
|
#[tauri::command]
|
||||||
pub async fn reorder_profile(active_id: String, over_id: String) -> CmdResult {
|
pub async fn reorder_profile(active_id: String, over_id: String) -> CmdResult {
|
||||||
match profiles_reorder_safe(active_id, over_id).await {
|
match profiles_reorder_safe(active_id, over_id).await {
|
||||||
Ok(_) => {
|
Ok(_) => {
|
||||||
log::info!(target: "app", "Reordered profiles");
|
log::info!(target: "app", "重新排序配置文件");
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
log::error!(target: "app", "Failed to reorder profiles: {}", err);
|
log::error!(target: "app", "重新排序配置文件失败: {}", err);
|
||||||
Err(format!("Failed to reorder profiles: {}", err).into())
|
Err(format!("重新排序配置文件失败: {}", err).into())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Create a new profile entry and optionally write its backing file.
|
/// 创建新的profile
|
||||||
|
/// 创建一个新的配置文件
|
||||||
#[tauri::command]
|
#[tauri::command]
|
||||||
pub async fn create_profile(item: PrfItem, file_data: Option<String>) -> CmdResult {
|
pub async fn create_profile(item: PrfItem, file_data: Option<String>) -> CmdResult {
|
||||||
match profiles_append_item_with_filedata_safe(item.clone(), file_data).await {
|
match profiles_append_item_with_filedata_safe(item.clone(), file_data).await {
|
||||||
Ok(_) => {
|
Ok(_) => {
|
||||||
// Emit configuration change notification
|
// 发送配置变更通知
|
||||||
if let Some(uid) = &item.uid {
|
if let Some(uid) = &item.uid {
|
||||||
logging!(
|
logging!(info, Type::Cmd, "[创建订阅] 发送配置变更通知: {}", uid);
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"[Profile Create] Emitting configuration change event: {}",
|
|
||||||
uid
|
|
||||||
);
|
|
||||||
handle::Handle::notify_profile_changed(uid.clone());
|
handle::Handle::notify_profile_changed(uid.clone());
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
@@ -317,7 +177,7 @@ pub async fn create_profile(item: PrfItem, file_data: Option<String>) -> CmdResu
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Force-refresh a profile from its remote source, if available.
|
/// 更新配置文件
|
||||||
#[tauri::command]
|
#[tauri::command]
|
||||||
pub async fn update_profile(index: String, option: Option<PrfOption>) -> CmdResult {
|
pub async fn update_profile(index: String, option: Option<PrfOption>) -> CmdResult {
|
||||||
match feat::update_profile(index, option, Some(true)).await {
|
match feat::update_profile(index, option, Some(true)).await {
|
||||||
@@ -329,11 +189,11 @@ pub async fn update_profile(index: String, option: Option<PrfOption>) -> CmdResu
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Remove a profile and refresh the running configuration if necessary.
|
/// 删除配置文件
|
||||||
#[tauri::command]
|
#[tauri::command]
|
||||||
pub async fn delete_profile(index: String) -> CmdResult {
|
pub async fn delete_profile(index: String) -> CmdResult {
|
||||||
println!("delete_profile: {}", index);
|
println!("delete_profile: {}", index);
|
||||||
// Use send-safe helper function
|
// 使用Send-safe helper函数
|
||||||
let should_update = profiles_delete_item_safe(index.clone())
|
let should_update = profiles_delete_item_safe(index.clone())
|
||||||
.await
|
.await
|
||||||
.stringify_err()?;
|
.stringify_err()?;
|
||||||
@@ -343,13 +203,8 @@ pub async fn delete_profile(index: String) -> CmdResult {
|
|||||||
match CoreManager::global().update_config().await {
|
match CoreManager::global().update_config().await {
|
||||||
Ok(_) => {
|
Ok(_) => {
|
||||||
handle::Handle::refresh_clash();
|
handle::Handle::refresh_clash();
|
||||||
// Emit configuration change notification
|
// 发送配置变更通知
|
||||||
logging!(
|
logging!(info, Type::Cmd, "[删除订阅] 发送配置变更通知: {}", index);
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"[Profile Delete] Emitting configuration change event: {}",
|
|
||||||
index
|
|
||||||
);
|
|
||||||
handle::Handle::notify_profile_changed(index);
|
handle::Handle::notify_profile_changed(index);
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
@@ -361,28 +216,361 @@ pub async fn delete_profile(index: String) -> CmdResult {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Apply partial profile list updates through the switching workflow.
|
/// 验证新配置文件的语法
|
||||||
|
async fn validate_new_profile(new_profile: &String) -> Result<(), ()> {
|
||||||
|
logging!(info, Type::Cmd, "正在切换到新配置: {}", new_profile);
|
||||||
|
|
||||||
|
// 获取目标配置文件路径
|
||||||
|
let config_file_result = {
|
||||||
|
let profiles_config = Config::profiles().await;
|
||||||
|
let profiles_data = profiles_config.latest_ref();
|
||||||
|
match profiles_data.get_item(new_profile) {
|
||||||
|
Ok(item) => {
|
||||||
|
if let Some(file) = &item.file {
|
||||||
|
let path = dirs::app_profiles_dir().map(|dir| dir.join(file.as_str()));
|
||||||
|
path.ok()
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
logging!(error, Type::Cmd, "获取目标配置信息失败: {}", e);
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// 如果获取到文件路径,检查YAML语法
|
||||||
|
if let Some(file_path) = config_file_result {
|
||||||
|
if !file_path.exists() {
|
||||||
|
logging!(
|
||||||
|
error,
|
||||||
|
Type::Cmd,
|
||||||
|
"目标配置文件不存在: {}",
|
||||||
|
file_path.display()
|
||||||
|
);
|
||||||
|
handle::Handle::notice_message(
|
||||||
|
"config_validate::file_not_found",
|
||||||
|
format!("{}", file_path.display()),
|
||||||
|
);
|
||||||
|
return Err(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// 超时保护
|
||||||
|
let file_read_result = tokio::time::timeout(
|
||||||
|
Duration::from_secs(5),
|
||||||
|
tokio::fs::read_to_string(&file_path),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match file_read_result {
|
||||||
|
Ok(Ok(content)) => {
|
||||||
|
let yaml_parse_result = AsyncHandler::spawn_blocking(move || {
|
||||||
|
serde_yaml_ng::from_str::<serde_yaml_ng::Value>(&content)
|
||||||
|
})
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match yaml_parse_result {
|
||||||
|
Ok(Ok(_)) => {
|
||||||
|
logging!(info, Type::Cmd, "目标配置文件语法正确");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
Ok(Err(err)) => {
|
||||||
|
let error_msg = format!(" {err}");
|
||||||
|
logging!(
|
||||||
|
error,
|
||||||
|
Type::Cmd,
|
||||||
|
"目标配置文件存在YAML语法错误:{}",
|
||||||
|
error_msg
|
||||||
|
);
|
||||||
|
handle::Handle::notice_message(
|
||||||
|
"config_validate::yaml_syntax_error",
|
||||||
|
error_msg.clone(),
|
||||||
|
);
|
||||||
|
Err(())
|
||||||
|
}
|
||||||
|
Err(join_err) => {
|
||||||
|
let error_msg = format!("YAML解析任务失败: {join_err}");
|
||||||
|
logging!(error, Type::Cmd, "{}", error_msg);
|
||||||
|
handle::Handle::notice_message(
|
||||||
|
"config_validate::yaml_parse_error",
|
||||||
|
error_msg.clone(),
|
||||||
|
);
|
||||||
|
Err(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(Err(err)) => {
|
||||||
|
let error_msg = format!("无法读取目标配置文件: {err}");
|
||||||
|
logging!(error, Type::Cmd, "{}", error_msg);
|
||||||
|
handle::Handle::notice_message(
|
||||||
|
"config_validate::file_read_error",
|
||||||
|
error_msg.clone(),
|
||||||
|
);
|
||||||
|
Err(())
|
||||||
|
}
|
||||||
|
Err(_) => {
|
||||||
|
let error_msg = "读取配置文件超时(5秒)".to_string();
|
||||||
|
logging!(error, Type::Cmd, "{}", error_msg);
|
||||||
|
handle::Handle::notice_message(
|
||||||
|
"config_validate::file_read_timeout",
|
||||||
|
error_msg.clone(),
|
||||||
|
);
|
||||||
|
Err(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
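The restored `validate_new_profile` above bounds the file read with a timeout and pushes the CPU-bound YAML parse onto a blocking thread. A simplified sketch of that shape (assumes tokio with the fs/time features and the `serde_yaml_ng` crate used above; `yaml_file_is_valid` is a hypothetical helper, not the real function):

```rust
use std::time::Duration;

async fn yaml_file_is_valid(path: std::path::PathBuf) -> bool {
    // Give the read five seconds so a hung filesystem cannot stall the switch.
    let read = tokio::time::timeout(Duration::from_secs(5), tokio::fs::read_to_string(&path)).await;
    let content = match read {
        Ok(Ok(text)) => text,
        _ => return false, // read error or timeout
    };
    // Parsing large profiles is CPU-bound, so keep it off the async runtime threads.
    tokio::task::spawn_blocking(move || {
        serde_yaml_ng::from_str::<serde_yaml_ng::Value>(&content).is_ok()
    })
    .await
    .unwrap_or(false)
}

#[tokio::main]
async fn main() {
    println!("{}", yaml_file_is_valid("profile.yaml".into()).await);
}
```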
|
|
||||||
|
/// 执行配置更新并处理结果
|
||||||
|
async fn restore_previous_profile(prev_profile: String) -> CmdResult<()> {
|
||||||
|
logging!(info, Type::Cmd, "尝试恢复到之前的配置: {}", prev_profile);
|
||||||
|
let restore_profiles = IProfiles {
|
||||||
|
current: Some(prev_profile),
|
||||||
|
items: None,
|
||||||
|
};
|
||||||
|
Config::profiles()
|
||||||
|
.await
|
||||||
|
.draft_mut()
|
||||||
|
.patch_config(restore_profiles)
|
||||||
|
.stringify_err()?;
|
||||||
|
Config::profiles().await.apply();
|
||||||
|
crate::process::AsyncHandler::spawn(|| async move {
|
||||||
|
if let Err(e) = profiles_save_file_safe().await {
|
||||||
|
log::warn!(target: "app", "异步保存恢复配置文件失败: {e}");
|
||||||
|
}
|
||||||
|
});
|
||||||
|
logging!(info, Type::Cmd, "成功恢复到之前的配置");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_success(current_sequence: u64, current_value: Option<String>) -> CmdResult<bool> {
|
||||||
|
let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst);
|
||||||
|
if current_sequence < latest_sequence {
|
||||||
|
logging!(
|
||||||
|
info,
|
||||||
|
Type::Cmd,
|
||||||
|
"内核操作后发现更新的请求 (序列号: {} < {}),忽略当前结果",
|
||||||
|
current_sequence,
|
||||||
|
latest_sequence
|
||||||
|
);
|
||||||
|
Config::profiles().await.discard();
|
||||||
|
return Ok(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
logging!(
|
||||||
|
info,
|
||||||
|
Type::Cmd,
|
||||||
|
"配置更新成功,序列号: {}",
|
||||||
|
current_sequence
|
||||||
|
);
|
||||||
|
Config::profiles().await.apply();
|
||||||
|
handle::Handle::refresh_clash();
|
||||||
|
|
||||||
|
if let Err(e) = Tray::global().update_tooltip().await {
|
||||||
|
log::warn!(target: "app", "异步更新托盘提示失败: {e}");
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Err(e) = Tray::global().update_menu().await {
|
||||||
|
log::warn!(target: "app", "异步更新托盘菜单失败: {e}");
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Err(e) = profiles_save_file_safe().await {
|
||||||
|
log::warn!(target: "app", "异步保存配置文件失败: {e}");
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(current) = ¤t_value {
|
||||||
|
logging!(
|
||||||
|
info,
|
||||||
|
Type::Cmd,
|
||||||
|
"向前端发送配置变更事件: {}, 序列号: {}",
|
||||||
|
current,
|
||||||
|
current_sequence
|
||||||
|
);
|
||||||
|
handle::Handle::notify_profile_changed(current.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst);
|
||||||
|
Ok(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_validation_failure(
|
||||||
|
error_msg: String,
|
||||||
|
current_profile: Option<String>,
|
||||||
|
) -> CmdResult<bool> {
|
||||||
|
logging!(warn, Type::Cmd, "配置验证失败: {}", error_msg);
|
||||||
|
Config::profiles().await.discard();
|
||||||
|
if let Some(prev_profile) = current_profile {
|
||||||
|
restore_previous_profile(prev_profile).await?;
|
||||||
|
}
|
||||||
|
handle::Handle::notice_message("config_validate::error", error_msg);
|
||||||
|
CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst);
|
||||||
|
Ok(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_update_error<E: std::fmt::Display>(e: E, current_sequence: u64) -> CmdResult<bool> {
|
||||||
|
logging!(
|
||||||
|
warn,
|
||||||
|
Type::Cmd,
|
||||||
|
"更新过程发生错误: {}, 序列号: {}",
|
||||||
|
e,
|
||||||
|
current_sequence
|
||||||
|
);
|
||||||
|
Config::profiles().await.discard();
|
||||||
|
handle::Handle::notice_message("config_validate::boot_error", e.to_string());
|
||||||
|
CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst);
|
||||||
|
Ok(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_timeout(current_profile: Option<String>, current_sequence: u64) -> CmdResult<bool> {
|
||||||
|
let timeout_msg = "配置更新超时(30秒),可能是配置验证或核心通信阻塞";
|
||||||
|
logging!(
|
||||||
|
error,
|
||||||
|
Type::Cmd,
|
||||||
|
"{}, 序列号: {}",
|
||||||
|
timeout_msg,
|
||||||
|
current_sequence
|
||||||
|
);
|
||||||
|
Config::profiles().await.discard();
|
||||||
|
if let Some(prev_profile) = current_profile {
|
||||||
|
restore_previous_profile(prev_profile).await?;
|
||||||
|
}
|
||||||
|
handle::Handle::notice_message("config_validate::timeout", timeout_msg);
|
||||||
|
CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst);
|
||||||
|
Ok(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn perform_config_update(
|
||||||
|
current_sequence: u64,
|
||||||
|
current_value: Option<String>,
|
||||||
|
current_profile: Option<String>,
|
||||||
|
) -> CmdResult<bool> {
|
||||||
|
logging!(
|
||||||
|
info,
|
||||||
|
Type::Cmd,
|
||||||
|
"开始内核配置更新,序列号: {}",
|
||||||
|
current_sequence
|
||||||
|
);
|
||||||
|
let update_result = tokio::time::timeout(
|
||||||
|
Duration::from_secs(30),
|
||||||
|
CoreManager::global().update_config(),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match update_result {
|
||||||
|
Ok(Ok((true, _))) => handle_success(current_sequence, current_value).await,
|
||||||
|
Ok(Ok((false, error_msg))) => handle_validation_failure(error_msg, current_profile).await,
|
||||||
|
Ok(Err(e)) => handle_update_error(e, current_sequence).await,
|
||||||
|
Err(_) => handle_timeout(current_profile, current_sequence).await,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// 修改profiles的配置
|
||||||
#[tauri::command]
|
#[tauri::command]
|
||||||
pub async fn patch_profiles_config(profiles: IProfiles) -> CmdResult<bool> {
|
pub async fn patch_profiles_config(profiles: IProfiles) -> CmdResult<bool> {
|
||||||
profile_switch::patch_profiles_config(profiles).await
|
if CURRENT_SWITCHING_PROFILE.load(Ordering::SeqCst) {
|
||||||
|
logging!(info, Type::Cmd, "当前正在切换配置,放弃请求");
|
||||||
|
return Ok(false);
|
||||||
|
}
|
||||||
|
CURRENT_SWITCHING_PROFILE.store(true, Ordering::SeqCst);
|
||||||
|
|
||||||
|
// 为当前请求分配序列号
|
||||||
|
let current_sequence = CURRENT_REQUEST_SEQUENCE.fetch_add(1, Ordering::SeqCst) + 1;
|
||||||
|
let target_profile = profiles.current.clone();
|
||||||
|
|
||||||
|
logging!(
|
||||||
|
info,
|
||||||
|
Type::Cmd,
|
||||||
|
"开始修改配置文件,请求序列号: {}, 目标profile: {:?}",
|
||||||
|
current_sequence,
|
||||||
|
target_profile
|
||||||
|
);
|
||||||
|
|
||||||
|
let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst);
|
||||||
|
if current_sequence < latest_sequence {
|
||||||
|
logging!(
|
||||||
|
info,
|
||||||
|
Type::Cmd,
|
||||||
|
"获取锁后发现更新的请求 (序列号: {} < {}),放弃当前请求",
|
||||||
|
current_sequence,
|
||||||
|
latest_sequence
|
||||||
|
);
|
||||||
|
return Ok(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 保存当前配置,以便在验证失败时恢复
|
||||||
|
let current_profile = Config::profiles().await.latest_ref().current.clone();
|
||||||
|
logging!(info, Type::Cmd, "当前配置: {:?}", current_profile);
|
||||||
|
|
||||||
|
// 如果要切换配置,先检查目标配置文件是否有语法错误
|
||||||
|
if let Some(new_profile) = profiles.current.as_ref()
|
||||||
|
&& current_profile.as_ref() != Some(new_profile)
|
||||||
|
&& validate_new_profile(new_profile).await.is_err()
|
||||||
|
{
|
||||||
|
CURRENT_SWITCHING_PROFILE.store(false, Ordering::SeqCst);
|
||||||
|
return Ok(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 检查请求有效性
|
||||||
|
let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst);
|
||||||
|
if current_sequence < latest_sequence {
|
||||||
|
logging!(
|
||||||
|
info,
|
||||||
|
Type::Cmd,
|
||||||
|
"在核心操作前发现更新的请求 (序列号: {} < {}),放弃当前请求",
|
||||||
|
current_sequence,
|
||||||
|
latest_sequence
|
||||||
|
);
|
||||||
|
return Ok(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 更新profiles配置
|
||||||
|
logging!(
|
||||||
|
info,
|
||||||
|
Type::Cmd,
|
||||||
|
"正在更新配置草稿,序列号: {}",
|
||||||
|
current_sequence
|
||||||
|
);
|
||||||
|
|
||||||
|
let current_value = profiles.current.clone();
|
||||||
|
|
||||||
|
let _ = Config::profiles().await.draft_mut().patch_config(profiles);
|
||||||
|
|
||||||
|
// 在调用内核前再次验证请求有效性
|
||||||
|
let latest_sequence = CURRENT_REQUEST_SEQUENCE.load(Ordering::SeqCst);
|
||||||
|
if current_sequence < latest_sequence {
|
||||||
|
logging!(
|
||||||
|
info,
|
||||||
|
Type::Cmd,
|
||||||
|
"在内核交互前发现更新的请求 (序列号: {} < {}),放弃当前请求",
|
||||||
|
current_sequence,
|
||||||
|
latest_sequence
|
||||||
|
);
|
||||||
|
Config::profiles().await.discard();
|
||||||
|
return Ok(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
perform_config_update(current_sequence, current_value, current_profile).await
|
||||||
}
|
}
|
||||||
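The restored `patch_profiles_config` guards itself with an `AtomicBool` (only one switch runs at a time) and a monotonically increasing request sequence number (a stale request notices a newer one has arrived and abandons its work). A standalone sketch of those two guards; note the flag check here uses a single `swap` for brevity, a simplification of the load/store pair used above:

```rust
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};

static SWITCHING: AtomicBool = AtomicBool::new(false);
static SEQUENCE: AtomicU64 = AtomicU64::new(0);

fn try_begin_switch() -> Option<u64> {
    // Reject re-entrant switches outright instead of queueing them.
    if SWITCHING.swap(true, Ordering::SeqCst) {
        return None;
    }
    Some(SEQUENCE.fetch_add(1, Ordering::SeqCst) + 1)
}

fn is_superseded(my_seq: u64) -> bool {
    // A later caller bumped the counter; this request should bail out.
    my_seq < SEQUENCE.load(Ordering::SeqCst)
}

fn end_switch() {
    SWITCHING.store(false, Ordering::SeqCst);
}

fn main() {
    if let Some(seq) = try_begin_switch() {
        assert!(!is_superseded(seq));
        end_switch();
    }
}
```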
|
|
||||||
/// Switch to the provided profile index and wait for completion before returning.
|
/// 根据profile name修改profiles
|
||||||
#[tauri::command]
|
#[tauri::command]
|
||||||
pub async fn patch_profiles_config_by_profile_index(profile_index: String) -> CmdResult<bool> {
|
pub async fn patch_profiles_config_by_profile_index(profile_index: String) -> CmdResult<bool> {
|
||||||
profile_switch::patch_profiles_config_by_profile_index(profile_index).await
|
logging!(info, Type::Cmd, "切换配置到: {}", profile_index);
|
||||||
|
|
||||||
|
let profiles = IProfiles {
|
||||||
|
current: Some(profile_index),
|
||||||
|
items: None,
|
||||||
|
};
|
||||||
|
patch_profiles_config(profiles).await
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Enqueue a profile switch request and optionally notify on success.
|
/// 修改某个profile item的
|
||||||
#[tauri::command]
|
|
||||||
pub async fn switch_profile(profile_index: String, notify_success: bool) -> CmdResult<bool> {
|
|
||||||
profile_switch::switch_profile(profile_index, notify_success).await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Update a specific profile item and refresh timers if its schedule changed.
|
|
||||||
#[tauri::command]
|
#[tauri::command]
|
||||||
pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult {
|
pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult {
|
||||||
// Check for update_interval changes before saving
|
// 保存修改前检查是否有更新 update_interval
|
||||||
let profiles = Config::profiles().await;
|
let profiles = Config::profiles().await;
|
||||||
let should_refresh_timer = if let Ok(old_profile) = profiles.latest_ref().get_item(&index) {
|
let should_refresh_timer = if let Ok(old_profile) = profiles.latest_ref().get_item(&index) {
|
||||||
let old_interval = old_profile.option.as_ref().and_then(|o| o.update_interval);
|
let old_interval = old_profile.option.as_ref().and_then(|o| o.update_interval);
|
||||||
@@ -401,19 +589,15 @@ pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult {
|
|||||||
.await
|
.await
|
||||||
.stringify_err()?;
|
.stringify_err()?;
|
||||||
|
|
||||||
// If the interval or auto-update flag changes, refresh the timer asynchronously
|
// 如果更新间隔或允许自动更新变更,异步刷新定时器
|
||||||
if should_refresh_timer {
|
if should_refresh_timer {
|
||||||
let index_clone = index.clone();
|
let index_clone = index.clone();
|
||||||
crate::process::AsyncHandler::spawn(move || async move {
|
crate::process::AsyncHandler::spawn(move || async move {
|
||||||
logging!(
|
logging!(info, Type::Timer, "定时器更新间隔已变更,正在刷新定时器...");
|
||||||
info,
|
|
||||||
Type::Timer,
|
|
||||||
"Timer interval changed; refreshing timer..."
|
|
||||||
);
|
|
||||||
if let Err(e) = crate::core::Timer::global().refresh().await {
|
if let Err(e) = crate::core::Timer::global().refresh().await {
|
||||||
logging!(error, Type::Timer, "Failed to refresh timer: {}", e);
|
logging!(error, Type::Timer, "刷新定时器失败: {}", e);
|
||||||
} else {
|
} else {
|
||||||
// After refreshing successfully, emit a custom event without triggering a reload
|
// 刷新成功后发送自定义事件,不触发配置重载
|
||||||
crate::core::handle::Handle::notify_timer_updated(index_clone);
|
crate::core::handle::Handle::notify_timer_updated(index_clone);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
@@ -422,7 +606,7 @@ pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Open the profile file in the system viewer.
|
/// 查看配置文件
|
||||||
#[tauri::command]
|
#[tauri::command]
|
||||||
pub async fn view_profile(index: String) -> CmdResult {
|
pub async fn view_profile(index: String) -> CmdResult {
|
||||||
let profiles = Config::profiles().await;
|
let profiles = Config::profiles().await;
|
||||||
@@ -444,7 +628,7 @@ pub async fn view_profile(index: String) -> CmdResult {
|
|||||||
help::open_file(path).stringify_err()
|
help::open_file(path).stringify_err()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the raw YAML contents for the given profile file.
|
/// 读取配置文件内容
|
||||||
#[tauri::command]
|
#[tauri::command]
|
||||||
pub async fn read_profile_file(index: String) -> CmdResult<String> {
|
pub async fn read_profile_file(index: String) -> CmdResult<String> {
|
||||||
let profiles = Config::profiles().await;
|
let profiles = Config::profiles().await;
|
||||||
@@ -454,22 +638,10 @@ pub async fn read_profile_file(index: String) -> CmdResult<String> {
|
|||||||
Ok(data)
|
Ok(data)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Report the scheduled refresh timestamp (if any) for the profile timer.
|
/// 获取下一次更新时间
|
||||||
#[tauri::command]
|
#[tauri::command]
|
||||||
pub async fn get_next_update_time(uid: String) -> CmdResult<Option<i64>> {
|
pub async fn get_next_update_time(uid: String) -> CmdResult<Option<i64>> {
|
||||||
let timer = Timer::global();
|
let timer = Timer::global();
|
||||||
let next_time = timer.get_next_update_time(&uid).await;
|
let next_time = timer.get_next_update_time(&uid).await;
|
||||||
Ok(next_time)
|
Ok(next_time)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the latest driver snapshot describing active and queued switch tasks.
|
|
||||||
#[tauri::command]
|
|
||||||
pub async fn get_profile_switch_status() -> CmdResult<ProfileSwitchStatus> {
|
|
||||||
profile_switch::get_switch_status()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Fetch switch result events newer than the provided sequence number.
|
|
||||||
#[tauri::command]
|
|
||||||
pub async fn get_profile_switch_events(after_sequence: u64) -> CmdResult<Vec<SwitchResultEvent>> {
|
|
||||||
profile_switch::get_switch_events(after_sequence)
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,683 +0,0 @@
|
|||||||
use super::{
|
|
||||||
CmdResult,
|
|
||||||
state::{
|
|
||||||
ProfileSwitchStatus, SwitchCancellation, SwitchManager, SwitchRequest, SwitchResultStatus,
|
|
||||||
SwitchTaskStatus, current_millis, manager,
|
|
||||||
},
|
|
||||||
workflow::{self, SwitchPanicInfo, SwitchStage},
|
|
||||||
};
|
|
||||||
use crate::{logging, utils::logging::Type};
|
|
||||||
use futures::FutureExt;
|
|
||||||
use once_cell::sync::OnceCell;
|
|
||||||
use smartstring::alias::String as SmartString;
|
|
||||||
use std::{
|
|
||||||
collections::{HashMap, VecDeque},
|
|
||||||
panic::AssertUnwindSafe,
|
|
||||||
time::Duration,
|
|
||||||
};
|
|
||||||
use tokio::{
|
|
||||||
sync::{
|
|
||||||
Mutex as AsyncMutex,
|
|
||||||
mpsc::{self, error::TrySendError},
|
|
||||||
oneshot,
|
|
||||||
},
|
|
||||||
time::{self, MissedTickBehavior},
|
|
||||||
};
|
|
||||||
|
|
||||||
// Single shared queue so profile switches are executed sequentially and can
|
|
||||||
// collapse redundant requests for the same profile.
|
|
||||||
const SWITCH_QUEUE_CAPACITY: usize = 32;
|
|
||||||
static SWITCH_QUEUE: OnceCell<mpsc::Sender<SwitchDriverMessage>> = OnceCell::new();
|
|
||||||
|
|
||||||
type CompletionRegistry = AsyncMutex<HashMap<u64, oneshot::Sender<SwitchResultStatus>>>;
|
|
||||||
|
|
||||||
static SWITCH_COMPLETION_WAITERS: OnceCell<CompletionRegistry> = OnceCell::new();
|
|
||||||
|
|
||||||
/// Global map of task id -> completion channel sender used when callers await the result.
|
|
||||||
fn completion_waiters() -> &'static CompletionRegistry {
|
|
||||||
SWITCH_COMPLETION_WAITERS.get_or_init(|| AsyncMutex::new(HashMap::new()))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Register a oneshot sender so `switch_profile_and_wait` can be notified when its task finishes.
|
|
||||||
async fn register_completion_waiter(task_id: u64) -> oneshot::Receiver<SwitchResultStatus> {
|
|
||||||
let (sender, receiver) = oneshot::channel();
|
|
||||||
let mut guard = completion_waiters().lock().await;
|
|
||||||
if guard.insert(task_id, sender).is_some() {
|
|
||||||
logging!(
|
|
||||||
warn,
|
|
||||||
Type::Cmd,
|
|
||||||
"Replacing existing completion waiter for task {}",
|
|
||||||
task_id
|
|
||||||
);
|
|
||||||
}
|
|
||||||
receiver
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Remove an outstanding completion waiter; used when enqueue fails or succeeds immediately.
|
|
||||||
async fn remove_completion_waiter(task_id: u64) -> Option<oneshot::Sender<SwitchResultStatus>> {
|
|
||||||
completion_waiters().lock().await.remove(&task_id)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Fire-and-forget notify helper so we do not block the driver loop.
|
|
||||||
fn notify_completion_waiter(task_id: u64, result: SwitchResultStatus) {
|
|
||||||
tokio::spawn(async move {
|
|
||||||
let sender = completion_waiters().lock().await.remove(&task_id);
|
|
||||||
if let Some(sender) = sender {
|
|
||||||
let _ = sender.send(result);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
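The deleted module above keeps a registry of oneshot senders keyed by task id so `switch_profile_and_wait` can block until its task finishes. A standalone sketch of that completion-waiter pattern (assumes tokio and once_cell; the names are illustrative, not the real API):

```rust
use std::collections::HashMap;
use std::sync::Mutex;

use once_cell::sync::Lazy;
use tokio::sync::oneshot;

static WAITERS: Lazy<Mutex<HashMap<u64, oneshot::Sender<bool>>>> =
    Lazy::new(|| Mutex::new(HashMap::new()));

fn register(task_id: u64) -> oneshot::Receiver<bool> {
    let (tx, rx) = oneshot::channel();
    // A second registration for the same id would replace the first sender.
    WAITERS.lock().unwrap().insert(task_id, tx);
    rx
}

fn complete(task_id: u64, success: bool) {
    // Whoever finishes the task looks the sender up and fires the result through it.
    if let Some(tx) = WAITERS.lock().unwrap().remove(&task_id) {
        let _ = tx.send(success);
    }
}

#[tokio::main]
async fn main() {
    let rx = register(7);
    complete(7, true);
    println!("task 7 succeeded: {}", rx.await.unwrap());
}
```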
|
|
||||||
const WATCHDOG_TIMEOUT: Duration = Duration::from_secs(5);
|
|
||||||
const WATCHDOG_TICK: Duration = Duration::from_millis(500);
|
|
||||||
|
|
||||||
// Mutable snapshot of the driver's world; all mutations happen on the driver task.
|
|
||||||
#[derive(Debug, Default)]
|
|
||||||
struct SwitchDriverState {
|
|
||||||
active: Option<SwitchRequest>,
|
|
||||||
queue: VecDeque<SwitchRequest>,
|
|
||||||
latest_tokens: HashMap<SmartString, SwitchCancellation>,
|
|
||||||
cleanup_profiles: HashMap<SmartString, tokio::task::JoinHandle<()>>,
|
|
||||||
last_result: Option<SwitchResultStatus>,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Messages passed through SWITCH_QUEUE so the driver can react to events in order.
|
|
||||||
#[derive(Debug)]
|
|
||||||
enum SwitchDriverMessage {
|
|
||||||
Request {
|
|
||||||
request: SwitchRequest,
|
|
||||||
respond_to: oneshot::Sender<bool>,
|
|
||||||
},
|
|
||||||
Completion {
|
|
||||||
request: SwitchRequest,
|
|
||||||
outcome: SwitchJobOutcome,
|
|
||||||
},
|
|
||||||
CleanupDone {
|
|
||||||
profile: SmartString,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
enum SwitchJobOutcome {
|
|
||||||
Completed {
|
|
||||||
success: bool,
|
|
||||||
cleanup: workflow::CleanupHandle,
|
|
||||||
},
|
|
||||||
Panicked {
|
|
||||||
info: SwitchPanicInfo,
|
|
||||||
cleanup: workflow::CleanupHandle,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) async fn switch_profile(
|
|
||||||
profile_index: impl Into<SmartString>,
|
|
||||||
notify_success: bool,
|
|
||||||
) -> CmdResult<bool> {
|
|
||||||
switch_profile_impl(profile_index.into(), notify_success, false).await
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) async fn switch_profile_and_wait(
|
|
||||||
profile_index: impl Into<SmartString>,
|
|
||||||
notify_success: bool,
|
|
||||||
) -> CmdResult<bool> {
|
|
||||||
switch_profile_impl(profile_index.into(), notify_success, true).await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn switch_profile_impl(
|
|
||||||
profile_index: SmartString,
|
|
||||||
notify_success: bool,
|
|
||||||
wait_for_completion: bool,
|
|
||||||
) -> CmdResult<bool> {
|
|
||||||
// wait_for_completion is used by CLI flows that must block until the switch finishes.
|
|
||||||
let manager = manager();
|
|
||||||
let sender = switch_driver_sender();
|
|
||||||
|
|
||||||
let request = SwitchRequest::new(
|
|
||||||
manager.next_task_id(),
|
|
||||||
profile_index.clone(),
|
|
||||||
notify_success,
|
|
||||||
);
|
|
||||||
|
|
||||||
logging!(
|
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"Queue profile switch task {} -> {} (notify={})",
|
|
||||||
request.task_id(),
|
|
||||||
profile_index,
|
|
||||||
notify_success
|
|
||||||
);
|
|
||||||
|
|
||||||
let task_id = request.task_id();
|
|
||||||
let mut completion_rx = if wait_for_completion {
|
|
||||||
Some(register_completion_waiter(task_id).await)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
let (tx, rx) = oneshot::channel();
|
|
||||||
|
|
||||||
let enqueue_result = match sender.try_send(SwitchDriverMessage::Request {
|
|
||||||
request,
|
|
||||||
respond_to: tx,
|
|
||||||
}) {
|
|
||||||
Ok(_) => match rx.await {
|
|
||||||
Ok(result) => Ok(result),
|
|
||||||
Err(err) => {
|
|
||||||
logging!(
|
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"Failed to receive enqueue result for profile {}: {}",
|
|
||||||
profile_index,
|
|
||||||
err
|
|
||||||
);
|
|
||||||
Err("switch profile queue unavailable".into())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
Err(TrySendError::Full(msg)) => {
|
|
||||||
logging!(
|
|
||||||
warn,
|
|
||||||
Type::Cmd,
|
|
||||||
"Profile switch queue is full; waiting for space: {}",
|
|
||||||
profile_index
|
|
||||||
);
|
|
||||||
match sender.send(msg).await {
|
|
||||||
Ok(_) => match rx.await {
|
|
||||||
Ok(result) => Ok(result),
|
|
||||||
Err(err) => {
|
|
||||||
logging!(
|
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"Failed to receive enqueue result after wait for {}: {}",
|
|
||||||
profile_index,
|
|
||||||
err
|
|
||||||
);
|
|
||||||
Err("switch profile queue unavailable".into())
|
|
||||||
}
|
|
||||||
},
|
|
||||||
Err(err) => {
|
|
||||||
logging!(
|
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"Profile switch queue closed while waiting ({}): {}",
|
|
||||||
profile_index,
|
|
||||||
err
|
|
||||||
);
|
|
||||||
Err("switch profile queue unavailable".into())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(TrySendError::Closed(_)) => {
|
|
||||||
logging!(
|
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"Profile switch queue is closed, cannot enqueue: {}",
|
|
||||||
profile_index
|
|
||||||
);
|
|
||||||
Err("switch profile queue unavailable".into())
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let accepted = match enqueue_result {
|
|
||||||
Ok(result) => result,
|
|
||||||
Err(err) => {
|
|
||||||
if completion_rx.is_some() {
|
|
||||||
remove_completion_waiter(task_id).await;
|
|
||||||
}
|
|
||||||
return Err(err);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
if !accepted {
|
|
||||||
if completion_rx.is_some() {
|
|
||||||
remove_completion_waiter(task_id).await;
|
|
||||||
}
|
|
||||||
return Ok(false);
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(rx_completion) = completion_rx.take() {
|
|
||||||
match rx_completion.await {
|
|
||||||
Ok(status) => Ok(status.success),
|
|
||||||
Err(err) => {
|
|
||||||
logging!(
|
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"Switch task {} completion channel dropped: {}",
|
|
||||||
task_id,
|
|
||||||
err
|
|
||||||
);
|
|
||||||
Err("profile switch completion unavailable".into())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Ok(true)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn switch_driver_sender() -> &'static mpsc::Sender<SwitchDriverMessage> {
|
|
||||||
SWITCH_QUEUE.get_or_init(|| {
|
|
||||||
let (tx, rx) = mpsc::channel::<SwitchDriverMessage>(SWITCH_QUEUE_CAPACITY);
|
|
||||||
let driver_tx = tx.clone();
|
|
||||||
tokio::spawn(async move {
|
|
||||||
let manager = manager();
|
|
||||||
let driver = SwitchDriver::new(manager, driver_tx);
|
|
||||||
driver.run(rx).await;
|
|
||||||
});
|
|
||||||
tx
|
|
||||||
})
|
|
||||||
}
|
|
||||||
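`switch_driver_sender` above lazily spawns exactly one driver task and hands every caller a clone of the same bounded channel, so all switch requests funnel through a single loop. A standalone sketch of that OnceCell + mpsc pattern (assumes tokio and once_cell; `String` stands in for `SwitchDriverMessage`):

```rust
use std::time::Duration;

use once_cell::sync::OnceCell;
use tokio::sync::mpsc;

static QUEUE: OnceCell<mpsc::Sender<String>> = OnceCell::new();

fn driver_sender() -> &'static mpsc::Sender<String> {
    QUEUE.get_or_init(|| {
        let (tx, mut rx) = mpsc::channel::<String>(32);
        // Exactly one consumer: requests are handled strictly in arrival order.
        tokio::spawn(async move {
            while let Some(msg) = rx.recv().await {
                println!("handling {msg}");
            }
        });
        tx
    })
}

#[tokio::main]
async fn main() {
    driver_sender().send("switch-to-profile-A".into()).await.unwrap();
    // Give the driver task a moment to drain the queue before exiting.
    tokio::time::sleep(Duration::from_millis(50)).await;
}
```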
|
|
||||||
struct SwitchDriver {
|
|
||||||
manager: &'static SwitchManager,
|
|
||||||
sender: mpsc::Sender<SwitchDriverMessage>,
|
|
||||||
state: SwitchDriverState,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SwitchDriver {
|
|
||||||
fn new(manager: &'static SwitchManager, sender: mpsc::Sender<SwitchDriverMessage>) -> Self {
|
|
||||||
let state = SwitchDriverState::default();
|
|
||||||
manager.set_status(state.snapshot(manager));
|
|
||||||
Self {
|
|
||||||
manager,
|
|
||||||
sender,
|
|
||||||
state,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn run(mut self, mut rx: mpsc::Receiver<SwitchDriverMessage>) {
|
|
||||||
while let Some(message) = rx.recv().await {
|
|
||||||
match message {
|
|
||||||
SwitchDriverMessage::Request {
|
|
||||||
request,
|
|
||||||
respond_to,
|
|
||||||
} => {
|
|
||||||
self.handle_enqueue(request, respond_to);
|
|
||||||
}
|
|
||||||
SwitchDriverMessage::Completion { request, outcome } => {
|
|
||||||
self.handle_completion(request, outcome);
|
|
||||||
}
|
|
||||||
SwitchDriverMessage::CleanupDone { profile } => {
|
|
||||||
self.handle_cleanup_done(profile);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn handle_enqueue(&mut self, request: SwitchRequest, respond_to: oneshot::Sender<bool>) {
|
|
||||||
// Each new request supersedes older ones for the same profile to avoid thrashing the core.
|
|
||||||
let mut responder = Some(respond_to);
|
|
||||||
let accepted = true;
|
|
||||||
let profile_key = request.profile_id().clone();
|
|
||||||
let cleanup_pending =
|
|
||||||
self.state.active.is_none() && !self.state.cleanup_profiles.is_empty();
|
|
||||||
|
|
||||||
if cleanup_pending && self.state.cleanup_profiles.contains_key(&profile_key) {
|
|
||||||
logging!(
|
|
||||||
debug,
|
|
||||||
Type::Cmd,
|
|
||||||
"Cleanup running for {}; queueing switch task {} -> {} to run afterwards",
|
|
||||||
profile_key,
|
|
||||||
request.task_id(),
|
|
||||||
profile_key
|
|
||||||
);
|
|
||||||
if let Some(previous) = self
|
|
||||||
.state
|
|
||||||
.latest_tokens
|
|
||||||
.insert(profile_key.clone(), request.cancel_token().clone())
|
|
||||||
{
|
|
||||||
previous.cancel();
|
|
||||||
}
|
|
||||||
self.state
|
|
||||||
.queue
|
|
||||||
.retain(|queued| queued.profile_id() != &profile_key);
|
|
||||||
self.state.queue.push_back(request);
|
|
||||||
if let Some(sender) = responder.take() {
|
|
||||||
let _ = sender.send(accepted);
|
|
||||||
}
|
|
||||||
self.publish_status();
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if cleanup_pending {
|
|
||||||
logging!(
|
|
||||||
debug,
|
|
||||||
Type::Cmd,
|
|
||||||
"Cleanup running for {} profile(s); queueing task {} -> {} to run after cleanup without clearing existing requests",
|
|
||||||
self.state.cleanup_profiles.len(),
|
|
||||||
request.task_id(),
|
|
||||||
profile_key
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(previous) = self
|
|
||||||
.state
|
|
||||||
.latest_tokens
|
|
||||||
.insert(profile_key.clone(), request.cancel_token().clone())
|
|
||||||
{
|
|
||||||
previous.cancel();
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(active) = self.state.active.as_mut()
|
|
||||||
&& active.profile_id() == &profile_key
|
|
||||||
{
|
|
||||||
active.cancel_token().cancel();
|
|
||||||
active.merge_notify(request.notify());
|
|
||||||
self.state
|
|
||||||
.queue
|
|
||||||
.retain(|queued| queued.profile_id() != &profile_key);
|
|
||||||
self.state.queue.push_front(request.clone());
|
|
||||||
if let Some(sender) = responder.take() {
|
|
||||||
let _ = sender.send(accepted);
|
|
||||||
}
|
|
||||||
self.publish_status();
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(active) = self.state.active.as_ref() {
|
|
||||||
logging!(
|
|
||||||
debug,
|
|
||||||
Type::Cmd,
|
|
||||||
"Cancelling active switch task {} (profile={}) in favour of task {} -> {}",
|
|
||||||
active.task_id(),
|
|
||||||
active.profile_id(),
|
|
||||||
request.task_id(),
|
|
||||||
profile_key
|
|
||||||
);
|
|
||||||
active.cancel_token().cancel();
|
|
||||||
}
|
|
||||||
|
|
||||||
self.state
|
|
||||||
.queue
|
|
||||||
.retain(|queued| queued.profile_id() != &profile_key);
|
|
||||||
|
|
||||||
self.state.queue.push_back(request.clone());
|
|
||||||
if let Some(sender) = responder.take() {
|
|
||||||
let _ = sender.send(accepted);
|
|
||||||
}
|
|
||||||
|
|
||||||
self.start_next_job();
|
|
||||||
self.publish_status();
|
|
||||||
}
|
|
||||||
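`handle_enqueue` above coalesces requests: any queued request for the same profile is dropped (and its token cancelled) before the newest one is appended, so rapid repeated clicks do not pile up switches for one profile. A standalone sketch of just the queue-coalescing step (`Request` is a stand-in for `SwitchRequest`; cancellation is omitted):

```rust
use std::collections::VecDeque;

#[derive(Clone, Debug, PartialEq)]
struct Request {
    profile: String,
    task_id: u64,
}

fn enqueue(queue: &mut VecDeque<Request>, request: Request) {
    // Drop every queued request that targets the same profile...
    queue.retain(|queued| queued.profile != request.profile);
    // ...then append the newcomer, which now represents that profile alone.
    queue.push_back(request);
}

fn main() {
    let mut queue = VecDeque::new();
    enqueue(&mut queue, Request { profile: "a".into(), task_id: 1 });
    enqueue(&mut queue, Request { profile: "b".into(), task_id: 2 });
    enqueue(&mut queue, Request { profile: "a".into(), task_id: 3 });
    assert_eq!(queue.len(), 2);
    assert_eq!(queue.back().unwrap().task_id, 3);
    println!("{queue:?}");
}
```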
|
|
||||||
fn handle_completion(&mut self, request: SwitchRequest, outcome: SwitchJobOutcome) {
|
|
||||||
// Translate the workflow result into an event the frontend can understand.
|
|
||||||
let result_record = match &outcome {
|
|
||||||
SwitchJobOutcome::Completed { success, .. } => {
|
|
||||||
logging!(
|
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"Switch task {} completed (success={})",
|
|
||||||
request.task_id(),
|
|
||||||
success
|
|
||||||
);
|
|
||||||
if *success {
|
|
||||||
SwitchResultStatus::success(request.task_id(), request.profile_id())
|
|
||||||
} else {
|
|
||||||
SwitchResultStatus::failed(request.task_id(), request.profile_id(), None, None)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
SwitchJobOutcome::Panicked { info, .. } => {
|
|
||||||
logging!(
|
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"Switch task {} panicked at stage {:?}: {}",
|
|
||||||
request.task_id(),
|
|
||||||
info.stage,
|
|
||||||
info.detail
|
|
||||||
);
|
|
||||||
SwitchResultStatus::failed(
|
|
||||||
request.task_id(),
|
|
||||||
request.profile_id(),
|
|
||||||
Some(format!("{:?}", info.stage)),
|
|
||||||
Some(info.detail.clone()),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(active) = self.state.active.as_ref()
|
|
||||||
&& active.task_id() == request.task_id()
|
|
||||||
{
|
|
||||||
self.state.active = None;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(latest) = self.state.latest_tokens.get(request.profile_id())
|
|
||||||
&& latest.same_token(request.cancel_token())
|
|
||||||
{
|
|
||||||
self.state.latest_tokens.remove(request.profile_id());
|
|
||||||
}
|
|
||||||
|
|
||||||
let cleanup = match outcome {
|
|
||||||
SwitchJobOutcome::Completed { cleanup, .. } => cleanup,
|
|
||||||
SwitchJobOutcome::Panicked { cleanup, .. } => cleanup,
|
|
||||||
};
|
|
||||||
|
|
||||||
self.track_cleanup(request.profile_id().clone(), cleanup);
|
|
||||||
|
|
||||||
let event_record = result_record.clone();
|
|
||||||
self.state.last_result = Some(result_record);
|
|
||||||
notify_completion_waiter(request.task_id(), event_record.clone());
|
|
||||||
self.manager.push_event(event_record);
|
|
||||||
self.start_next_job();
|
|
||||||
self.publish_status();
|
|
||||||
}
|
|
||||||
|
|
||||||
fn handle_cleanup_done(&mut self, profile: SmartString) {
|
|
||||||
if let Some(handle) = self.state.cleanup_profiles.remove(&profile) {
|
|
||||||
handle.abort();
|
|
||||||
}
|
|
||||||
self.start_next_job();
|
|
||||||
self.publish_status();
|
|
||||||
}
|
|
||||||
|
|
||||||
fn start_next_job(&mut self) {
|
|
||||||
if self.state.active.is_some() || !self.state.cleanup_profiles.is_empty() {
|
|
||||||
self.publish_status();
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
while let Some(request) = self.state.queue.pop_front() {
|
|
||||||
if request.cancel_token().is_cancelled() {
|
|
||||||
self.discard_request(request);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
self.state.active = Some(request.clone());
|
|
||||||
self.start_switch_job(request);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
self.publish_status();
|
|
||||||
}
|
|
||||||
|
|
||||||
fn track_cleanup(&mut self, profile: SmartString, cleanup: workflow::CleanupHandle) {
|
|
||||||
if let Some(existing) = self.state.cleanup_profiles.remove(&profile) {
|
|
||||||
existing.abort();
|
|
||||||
}
|
|
||||||
|
|
||||||
let driver_tx = self.sender.clone();
|
|
||||||
let profile_clone = profile.clone();
|
|
||||||
let handle = tokio::spawn(async move {
|
|
||||||
let profile_label = profile_clone.clone();
|
|
||||||
if let Err(err) = cleanup.await {
|
|
||||||
logging!(
|
|
||||||
warn,
|
|
||||||
Type::Cmd,
|
|
||||||
"Cleanup task for profile {} failed: {}",
|
|
||||||
profile_label.as_str(),
|
|
||||||
err
|
|
||||||
);
|
|
||||||
}
|
|
||||||
if let Err(err) = driver_tx
|
|
||||||
.send(SwitchDriverMessage::CleanupDone {
|
|
||||||
profile: profile_clone,
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
logging!(
|
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"Failed to push cleanup completion for profile {}: {}",
|
|
||||||
profile_label.as_str(),
|
|
||||||
err
|
|
||||||
);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
self.state.cleanup_profiles.insert(profile, handle);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn start_switch_job(&self, request: SwitchRequest) {
|
|
||||||
// Run the workflow in a background task while the driver keeps processing messages.
|
|
||||||
let driver_tx = self.sender.clone();
|
|
||||||
let manager = self.manager;
|
|
||||||
|
|
||||||
let completion_request = request.clone();
|
|
||||||
let heartbeat = request.heartbeat().clone();
|
|
||||||
let cancel_token = request.cancel_token().clone();
|
|
||||||
let task_id = request.task_id();
|
|
||||||
let profile_label = request.profile_id().clone();
|
|
||||||
|
|
||||||
tokio::spawn(async move {
|
|
||||||
let mut watchdog_interval = time::interval(WATCHDOG_TICK);
|
|
||||||
watchdog_interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
|
|
||||||
|
|
||||||
let workflow_fut =
|
|
||||||
AssertUnwindSafe(workflow::run_switch_job(manager, request)).catch_unwind();
|
|
||||||
tokio::pin!(workflow_fut);
|
|
||||||
|
|
||||||
let job_result = loop {
|
|
||||||
tokio::select! {
|
|
||||||
res = workflow_fut.as_mut() => {
|
|
||||||
break match res {
|
|
||||||
Ok(Ok(result)) => SwitchJobOutcome::Completed {
|
|
||||||
success: result.success,
|
|
||||||
cleanup: result.cleanup,
|
|
||||||
},
|
|
||||||
Ok(Err(error)) => SwitchJobOutcome::Panicked {
|
|
||||||
info: error.info,
|
|
||||||
cleanup: error.cleanup,
|
|
||||||
},
|
|
||||||
Err(payload) => {
|
|
||||||
let info = SwitchPanicInfo::driver_task(
|
|
||||||
workflow::describe_panic_payload(payload.as_ref()),
|
|
||||||
);
|
|
||||||
let cleanup = workflow::schedule_post_switch_failure(
|
|
||||||
profile_label.clone(),
|
|
||||||
completion_request.notify(),
|
|
||||||
completion_request.task_id(),
|
|
||||||
);
|
|
||||||
SwitchJobOutcome::Panicked { info, cleanup }
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
_ = watchdog_interval.tick() => {
|
|
||||||
if cancel_token.is_cancelled() {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
let elapsed = heartbeat.elapsed();
|
|
||||||
if elapsed > WATCHDOG_TIMEOUT {
|
|
||||||
let stage = SwitchStage::from_code(heartbeat.stage_code())
|
|
||||||
.unwrap_or(SwitchStage::Workflow);
|
|
||||||
logging!(
|
|
||||||
warn,
|
|
||||||
Type::Cmd,
|
|
||||||
"Switch task {} watchdog timeout (profile={} stage={:?}, elapsed={:?}); cancelling",
|
|
||||||
task_id,
|
|
||||||
profile_label.as_str(),
|
|
||||||
stage,
|
|
||||||
elapsed
|
|
||||||
);
|
|
||||||
cancel_token.cancel();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let request_for_error = completion_request.clone();
|
|
||||||
|
|
||||||
if let Err(err) = driver_tx
|
|
||||||
.send(SwitchDriverMessage::Completion {
|
|
||||||
request: completion_request,
|
|
||||||
outcome: job_result,
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
logging!(
|
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"Failed to push switch completion to driver: {}",
|
|
||||||
err
|
|
||||||
);
|
|
||||||
notify_completion_waiter(
|
|
||||||
request_for_error.task_id(),
|
|
||||||
SwitchResultStatus::failed(
|
|
||||||
request_for_error.task_id(),
|
|
||||||
request_for_error.profile_id(),
|
|
||||||
Some("driver".to_string()),
|
|
||||||
Some(format!("completion dispatch failed: {}", err)),
|
|
||||||
),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
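The driver above runs the switch workflow as a background task and pairs it with a watchdog tick, so a job that stops making progress gets cancelled instead of blocking the queue. A self-contained sketch of that heartbeat-plus-watchdog shape, assuming only the tokio crate (the Heartbeat type, run_job, and the timing constants are illustrative stand-ins, not the crate's own):

use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::time::{self, MissedTickBehavior};

/// Illustrative heartbeat: the worker bumps a shared millisecond timestamp,
/// the watchdog measures how long it has been since the last bump.
#[derive(Clone)]
struct Heartbeat(Arc<AtomicU64>);

impl Heartbeat {
    fn new() -> Self {
        Self(Arc::new(AtomicU64::new(Self::now())))
    }
    fn now() -> u64 {
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or(Duration::ZERO)
            .as_millis() as u64
    }
    fn touch(&self) {
        self.0.store(Self::now(), Ordering::SeqCst);
    }
    fn elapsed(&self) -> Duration {
        Duration::from_millis(Self::now().saturating_sub(self.0.load(Ordering::SeqCst)))
    }
}

async fn run_job(heartbeat: Heartbeat) -> bool {
    // Stand-in for the real workflow: do some work and keep the heartbeat fresh.
    for _ in 0..5 {
        time::sleep(Duration::from_millis(200)).await;
        heartbeat.touch();
    }
    true
}

#[tokio::main]
async fn main() {
    let heartbeat = Heartbeat::new();
    let mut tick = time::interval(Duration::from_millis(500));
    tick.set_missed_tick_behavior(MissedTickBehavior::Skip);

    let job = run_job(heartbeat.clone());
    tokio::pin!(job);

    let outcome = loop {
        tokio::select! {
            result = job.as_mut() => break Some(result),
            _ = tick.tick() => {
                // If the worker stops touching the heartbeat, give up instead of hanging.
                if heartbeat.elapsed() > Duration::from_secs(2) {
                    break None;
                }
            }
        }
    };
    println!("job outcome: {:?}", outcome);
}

The real driver layers the same select loop over its SwitchRequest heartbeat and cancellation token; the sketch keeps only the progress-or-timeout decision.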
|
|
||||||
/// Mark a request as cancelled because a newer request superseded it.
fn discard_request(&mut self, request: SwitchRequest) {
|
|
||||||
let key = request.profile_id().clone();
|
|
||||||
let should_remove = self
|
|
||||||
.state
|
|
||||||
.latest_tokens
|
|
||||||
.get(&key)
|
|
||||||
.map(|latest| latest.same_token(request.cancel_token()))
|
|
||||||
.unwrap_or(false);
|
|
||||||
|
|
||||||
if should_remove {
|
|
||||||
self.state.latest_tokens.remove(&key);
|
|
||||||
}
|
|
||||||
|
|
||||||
if !request.cancel_token().is_cancelled() {
|
|
||||||
request.cancel_token().cancel();
|
|
||||||
}
|
|
||||||
|
|
||||||
let event = SwitchResultStatus::cancelled(
|
|
||||||
request.task_id(),
|
|
||||||
request.profile_id(),
|
|
||||||
Some("request superseded".to_string()),
|
|
||||||
);
|
|
||||||
|
|
||||||
self.state.last_result = Some(event.clone());
|
|
||||||
notify_completion_waiter(request.task_id(), event.clone());
|
|
||||||
self.manager.push_event(event);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn publish_status(&self) {
|
|
||||||
self.manager.set_status(self.state.snapshot(self.manager));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SwitchDriverState {
|
|
||||||
/// Build a lightweight status snapshot suitable for sharing across the command boundary.
fn snapshot(&self, manager: &SwitchManager) -> ProfileSwitchStatus {
|
|
||||||
let active = self
|
|
||||||
.active
|
|
||||||
.as_ref()
|
|
||||||
.map(|req| SwitchTaskStatus::from_request(req, false));
|
|
||||||
let queue = self
|
|
||||||
.queue
|
|
||||||
.iter()
|
|
||||||
.map(|req| SwitchTaskStatus::from_request(req, true))
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
let cleanup_profiles = self
|
|
||||||
.cleanup_profiles
|
|
||||||
.keys()
|
|
||||||
.map(|key| key.to_string())
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
ProfileSwitchStatus {
|
|
||||||
is_switching: manager.is_switching(),
|
|
||||||
active,
|
|
||||||
queue,
|
|
||||||
cleanup_profiles,
|
|
||||||
last_result: self.last_result.clone(),
|
|
||||||
last_updated: current_millis(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,34 +0,0 @@
|
|||||||
// Profile switch orchestration: plumbing between the public tauri commands,
// the async driver queue, validation helpers, and the state machine workflow.
mod driver;
mod state;
mod validation;
mod workflow;

pub use state::{ProfileSwitchStatus, SwitchResultEvent};

use smartstring::alias::String;

use super::CmdResult;

pub(super) async fn patch_profiles_config(profiles: crate::config::IProfiles) -> CmdResult<bool> {
    workflow::patch_profiles_config(profiles).await
}

pub(super) async fn patch_profiles_config_by_profile_index(
    profile_index: String,
) -> CmdResult<bool> {
    driver::switch_profile_and_wait(profile_index, false).await
}

pub(super) async fn switch_profile(profile_index: String, notify_success: bool) -> CmdResult<bool> {
    driver::switch_profile(profile_index, notify_success).await
}

pub(super) fn get_switch_status() -> CmdResult<ProfileSwitchStatus> {
    Ok(state::manager().status_snapshot())
}

pub(super) fn get_switch_events(after_sequence: u64) -> CmdResult<Vec<SwitchResultEvent>> {
    Ok(state::manager().events_after(after_sequence))
}
|
|
||||||
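For orientation, a thin command layer can delegate straight to the functions above. The wrapper below is a hypothetical illustration; the command names, the module path, and the way commands are registered with the app are assumptions, not the exact wiring this revert removes:

use super::{profile_switch, CmdResult};

// Hypothetical tauri command wrappers around the orchestration module; the real
// crate registers its commands elsewhere and may use different names.
#[tauri::command]
pub async fn switch_profile_cmd(profile_index: String, notify: bool) -> CmdResult<bool> {
    profile_switch::switch_profile(profile_index.into(), notify).await
}

#[tauri::command]
pub fn get_switch_status_cmd() -> CmdResult<profile_switch::ProfileSwitchStatus> {
    profile_switch::get_switch_status()
}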
@@ -1,353 +0,0 @@
|
|||||||
use once_cell::sync::OnceCell;
|
|
||||||
use parking_lot::RwLock;
|
|
||||||
use serde::Serialize;
|
|
||||||
use smartstring::alias::String as SmartString;
|
|
||||||
use std::collections::VecDeque;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering};
|
|
||||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
|
||||||
use tokio::sync::{Mutex, Notify};
|
|
||||||
|
|
||||||
pub(super) const SWITCH_JOB_TIMEOUT: Duration = Duration::from_secs(30);
|
|
||||||
pub(super) const SWITCH_CLEANUP_TIMEOUT: Duration = Duration::from_secs(5);
|
|
||||||
|
|
||||||
static SWITCH_MANAGER: OnceCell<SwitchManager> = OnceCell::new();
|
|
||||||
|
|
||||||
pub(super) fn manager() -> &'static SwitchManager {
|
|
||||||
SWITCH_MANAGER.get_or_init(SwitchManager::default)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
// Central coordination point shared between the driver and workflow state machine.
|
|
||||||
pub(super) struct SwitchManager {
|
|
||||||
core_mutex: Mutex<()>,
|
|
||||||
request_sequence: AtomicU64,
|
|
||||||
switching: AtomicBool,
|
|
||||||
task_sequence: AtomicU64,
|
|
||||||
status: RwLock<ProfileSwitchStatus>,
|
|
||||||
event_sequence: AtomicU64,
|
|
||||||
recent_events: RwLock<VecDeque<SwitchResultEvent>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for SwitchManager {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
core_mutex: Mutex::new(()),
|
|
||||||
request_sequence: AtomicU64::new(0),
|
|
||||||
switching: AtomicBool::new(false),
|
|
||||||
task_sequence: AtomicU64::new(0),
|
|
||||||
status: RwLock::new(ProfileSwitchStatus::default()),
|
|
||||||
event_sequence: AtomicU64::new(0),
|
|
||||||
recent_events: RwLock::new(VecDeque::with_capacity(32)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SwitchManager {
|
|
||||||
pub(super) fn core_mutex(&self) -> &Mutex<()> {
|
|
||||||
&self.core_mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// Monotonic identifiers so logs can correlate enqueue/finish pairs.
|
|
||||||
pub(super) fn next_task_id(&self) -> u64 {
|
|
||||||
self.task_sequence.fetch_add(1, Ordering::SeqCst) + 1
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Sequence id assigned to each enqueue request so we can spot stale work.
|
|
||||||
pub(super) fn next_request_sequence(&self) -> u64 {
|
|
||||||
self.request_sequence.fetch_add(1, Ordering::SeqCst) + 1
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn latest_request_sequence(&self) -> u64 {
|
|
||||||
self.request_sequence.load(Ordering::SeqCst)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn begin_switch(&'static self) -> SwitchScope<'static> {
|
|
||||||
self.switching.store(true, Ordering::SeqCst);
|
|
||||||
SwitchScope { manager: self }
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn is_switching(&self) -> bool {
|
|
||||||
self.switching.load(Ordering::SeqCst)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn set_status(&self, status: ProfileSwitchStatus) {
|
|
||||||
*self.status.write() = status;
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn status_snapshot(&self) -> ProfileSwitchStatus {
|
|
||||||
self.status.read().clone()
|
|
||||||
}
|
|
||||||
pub(super) fn push_event(&self, result: SwitchResultStatus) {
|
|
||||||
const MAX_EVENTS: usize = 64;
|
|
||||||
let sequence = self.event_sequence.fetch_add(1, Ordering::SeqCst) + 1;
|
|
||||||
let mut guard = self.recent_events.write();
|
|
||||||
if guard.len() == MAX_EVENTS {
|
|
||||||
guard.pop_front();
|
|
||||||
}
|
|
||||||
guard.push_back(SwitchResultEvent { sequence, result });
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn events_after(&self, sequence: u64) -> Vec<SwitchResultEvent> {
|
|
||||||
self.recent_events
|
|
||||||
.read()
|
|
||||||
.iter()
|
|
||||||
.filter(|event| event.sequence > sequence)
|
|
||||||
.cloned()
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
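push_event and events_after above implement a small bounded event log: a VecDeque capped at 64 entries plus an ever-increasing sequence number, so a poller can ask only for entries newer than the last one it processed. The same idea in a standalone form, with simplified event types that are not the crate's own:

use std::collections::VecDeque;

#[derive(Debug, Clone)]
struct Event {
    sequence: u64,
    message: String,
}

struct EventLog {
    next_sequence: u64,
    recent: VecDeque<Event>,
    capacity: usize,
}

impl EventLog {
    fn new(capacity: usize) -> Self {
        Self { next_sequence: 0, recent: VecDeque::with_capacity(capacity), capacity }
    }

    // Push drops the oldest entry once the buffer is full, so memory stays bounded.
    fn push(&mut self, message: impl Into<String>) {
        self.next_sequence += 1;
        if self.recent.len() == self.capacity {
            self.recent.pop_front();
        }
        self.recent.push_back(Event { sequence: self.next_sequence, message: message.into() });
    }

    // A poller remembers the last sequence it processed and asks only for newer entries.
    fn events_after(&self, cursor: u64) -> Vec<Event> {
        self.recent.iter().filter(|e| e.sequence > cursor).cloned().collect()
    }
}

fn main() {
    let mut log = EventLog::new(3);
    for i in 0..5 {
        log.push(format!("switch result {i}"));
    }
    // Entries 1 and 2 have been evicted; only 3..=5 remain.
    let mut cursor = 2;
    for event in log.events_after(cursor) {
        cursor = event.sequence;
        println!("{} -> {}", event.sequence, event.message);
    }
    assert_eq!(cursor, 5);
}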
|
|
||||||
pub(super) struct SwitchScope<'a> {
|
|
||||||
manager: &'a SwitchManager,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for SwitchScope<'_> {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
self.manager.switching.store(false, Ordering::SeqCst);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub(super) struct SwitchCancellation {
|
|
||||||
flag: Arc<AtomicBool>,
|
|
||||||
notify: Arc<Notify>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SwitchCancellation {
|
|
||||||
pub(super) fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
flag: Arc::new(AtomicBool::new(false)),
|
|
||||||
notify: Arc::new(Notify::new()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn cancel(&self) {
|
|
||||||
self.flag.store(true, Ordering::SeqCst);
|
|
||||||
self.notify.notify_waiters();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// True if another request already cancelled this job.
|
|
||||||
pub(super) fn is_cancelled(&self) -> bool {
|
|
||||||
self.flag.load(Ordering::SeqCst)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn same_token(&self, other: &SwitchCancellation) -> bool {
|
|
||||||
Arc::ptr_eq(&self.flag, &other.flag)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) async fn cancelled_future(&self) {
|
|
||||||
// Used by async blocks that want to pause until a newer request pre-empts them.
|
|
||||||
if self.is_cancelled() {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
self.notify.notified().await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
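A token with this shape is normally consumed by racing the real work against the cancellation future at each await point. The sketch below shows that consumption pattern using the off-the-shelf tokio_util::sync::CancellationToken instead of the hand-rolled token above, an assumption made purely to keep the example short:

use std::time::Duration;
use tokio::time;
use tokio_util::sync::CancellationToken;

async fn do_switch_work(token: CancellationToken) -> Option<&'static str> {
    // Each await point is a chance to observe cancellation and stop early.
    for step in ["validate", "patch draft", "apply to core"] {
        tokio::select! {
            _ = token.cancelled() => return None,
            _ = time::sleep(Duration::from_millis(300)) => {
                println!("finished step: {step}");
            }
        }
    }
    Some("switched")
}

#[tokio::main]
async fn main() {
    let token = CancellationToken::new();
    let worker = tokio::spawn(do_switch_work(token.clone()));

    // A newer request arrives shortly after; pre-empt the running job.
    time::sleep(Duration::from_millis(450)).await;
    token.cancel();

    println!("outcome: {:?}", worker.await.unwrap());
}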
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub(super) struct SwitchRequest {
|
|
||||||
task_id: u64,
|
|
||||||
profile_id: SmartString,
|
|
||||||
notify: bool,
|
|
||||||
cancel_token: SwitchCancellation,
|
|
||||||
heartbeat: SwitchHeartbeat,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SwitchRequest {
|
|
||||||
pub(super) fn new(task_id: u64, profile_id: SmartString, notify: bool) -> Self {
|
|
||||||
Self {
|
|
||||||
task_id,
|
|
||||||
profile_id,
|
|
||||||
notify,
|
|
||||||
cancel_token: SwitchCancellation::new(),
|
|
||||||
heartbeat: SwitchHeartbeat::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn task_id(&self) -> u64 {
|
|
||||||
self.task_id
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn profile_id(&self) -> &SmartString {
|
|
||||||
&self.profile_id
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn notify(&self) -> bool {
|
|
||||||
self.notify
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn merge_notify(&mut self, notify: bool) {
|
|
||||||
// When a new request wants a toast, remember it even if an older request did not.
|
|
||||||
if notify {
|
|
||||||
self.notify = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn cancel_token(&self) -> &SwitchCancellation {
|
|
||||||
&self.cancel_token
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn heartbeat(&self) -> &SwitchHeartbeat {
|
|
||||||
&self.heartbeat
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub(super) struct SwitchHeartbeat {
|
|
||||||
last_tick_millis: Arc<AtomicU64>,
|
|
||||||
stage_code: Arc<AtomicU32>,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn now_millis() -> u64 {
|
|
||||||
SystemTime::now()
|
|
||||||
.duration_since(UNIX_EPOCH)
|
|
||||||
.unwrap_or(Duration::ZERO)
|
|
||||||
.as_millis() as u64
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Default)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct ProfileSwitchStatus {
|
|
||||||
pub is_switching: bool,
|
|
||||||
pub active: Option<SwitchTaskStatus>,
|
|
||||||
pub queue: Vec<SwitchTaskStatus>,
|
|
||||||
pub cleanup_profiles: Vec<String>,
|
|
||||||
pub last_result: Option<SwitchResultStatus>,
|
|
||||||
pub last_updated: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct SwitchTaskStatus {
|
|
||||||
pub task_id: u64,
|
|
||||||
pub profile_id: String,
|
|
||||||
pub notify: bool,
|
|
||||||
pub stage: Option<u32>,
|
|
||||||
pub queued: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SwitchTaskStatus {
|
|
||||||
pub(super) fn from_request(request: &SwitchRequest, queued: bool) -> Self {
|
|
||||||
Self {
|
|
||||||
task_id: request.task_id(),
|
|
||||||
profile_id: request.profile_id().to_string(),
|
|
||||||
notify: request.notify(),
|
|
||||||
stage: if queued {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
Some(request.heartbeat().stage_code())
|
|
||||||
},
|
|
||||||
queued,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct SwitchResultStatus {
|
|
||||||
pub task_id: u64,
|
|
||||||
pub profile_id: String,
|
|
||||||
pub success: bool,
|
|
||||||
pub cancelled: bool,
|
|
||||||
pub finished_at: u64,
|
|
||||||
pub error_stage: Option<String>,
|
|
||||||
pub error_detail: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SwitchResultStatus {
|
|
||||||
pub(super) fn success(task_id: u64, profile_id: &SmartString) -> Self {
|
|
||||||
Self {
|
|
||||||
task_id,
|
|
||||||
profile_id: profile_id.to_string(),
|
|
||||||
success: true,
|
|
||||||
cancelled: false,
|
|
||||||
finished_at: now_millis(),
|
|
||||||
error_stage: None,
|
|
||||||
error_detail: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn failed(
|
|
||||||
task_id: u64,
|
|
||||||
profile_id: &SmartString,
|
|
||||||
stage: Option<String>,
|
|
||||||
detail: Option<String>,
|
|
||||||
) -> Self {
|
|
||||||
Self {
|
|
||||||
task_id,
|
|
||||||
profile_id: profile_id.to_string(),
|
|
||||||
success: false,
|
|
||||||
cancelled: false,
|
|
||||||
finished_at: now_millis(),
|
|
||||||
error_stage: stage,
|
|
||||||
error_detail: detail,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn cancelled(
|
|
||||||
task_id: u64,
|
|
||||||
profile_id: &SmartString,
|
|
||||||
detail: Option<String>,
|
|
||||||
) -> Self {
|
|
||||||
Self {
|
|
||||||
task_id,
|
|
||||||
profile_id: profile_id.to_string(),
|
|
||||||
success: false,
|
|
||||||
cancelled: true,
|
|
||||||
finished_at: now_millis(),
|
|
||||||
error_stage: Some("cancelled".to_string()),
|
|
||||||
error_detail: detail,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
|
||||||
#[serde(rename_all = "camelCase")]
|
|
||||||
pub struct SwitchResultEvent {
|
|
||||||
pub sequence: u64,
|
|
||||||
pub result: SwitchResultStatus,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn current_millis() -> u64 {
|
|
||||||
now_millis()
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SwitchHeartbeat {
|
|
||||||
fn now_millis() -> u64 {
|
|
||||||
SystemTime::now()
|
|
||||||
.duration_since(UNIX_EPOCH)
|
|
||||||
.unwrap_or(Duration::ZERO)
|
|
||||||
.as_millis() as u64
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn new() -> Self {
|
|
||||||
let heartbeat = Self {
|
|
||||||
last_tick_millis: Arc::new(AtomicU64::new(Self::now_millis())),
|
|
||||||
stage_code: Arc::new(AtomicU32::new(0)),
|
|
||||||
};
|
|
||||||
heartbeat.touch();
|
|
||||||
heartbeat
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn touch(&self) {
|
|
||||||
self.last_tick_millis
|
|
||||||
.store(Self::now_millis(), Ordering::SeqCst);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Time elapsed since the last heartbeat tick (read-only; `touch` and `set_stage` update the timer).
pub(super) fn elapsed(&self) -> Duration {
|
|
||||||
let last = self.last_tick_millis.load(Ordering::SeqCst);
|
|
||||||
let now = Self::now_millis();
|
|
||||||
Duration::from_millis(now.saturating_sub(last))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn set_stage(&self, stage: u32) {
|
|
||||||
self.stage_code.store(stage, Ordering::SeqCst);
|
|
||||||
self.touch();
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn stage_code(&self) -> u32 {
|
|
||||||
self.stage_code.load(Ordering::SeqCst)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,113 +0,0 @@
|
|||||||
use crate::{
|
|
||||||
config::Config,
|
|
||||||
logging,
|
|
||||||
process::AsyncHandler,
|
|
||||||
utils::{dirs, logging::Type},
|
|
||||||
};
|
|
||||||
use serde_yaml_ng as serde_yaml;
|
|
||||||
use smartstring::alias::String;
|
|
||||||
use std::time::Duration;
|
|
||||||
use tokio::{fs as tokio_fs, time};
|
|
||||||
|
|
||||||
const YAML_READ_TIMEOUT: Duration = Duration::from_secs(5);
|
|
||||||
|
|
||||||
/// Verify that the requested profile exists locally and is well-formed before switching.
|
|
||||||
pub(super) async fn validate_switch_request(task_id: u64, profile_id: &str) -> Result<(), String> {
|
|
||||||
logging!(
|
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"Validating profile switch task {} -> {}",
|
|
||||||
task_id,
|
|
||||||
profile_id
|
|
||||||
);
|
|
||||||
|
|
||||||
let profile_key: String = profile_id.into();
|
|
||||||
let (file_path, profile_type, is_current, remote_url) = {
|
|
||||||
let profiles_guard = Config::profiles().await;
|
|
||||||
let latest = profiles_guard.latest_ref();
|
|
||||||
let item = latest.get_item(&profile_key).map_err(|err| -> String {
|
|
||||||
format!("Target profile {} not found: {}", profile_id, err).into()
|
|
||||||
})?;
|
|
||||||
(
|
|
||||||
item.file.clone().map(|f| f.to_string()),
|
|
||||||
item.itype.clone().map(|t| t.to_string()),
|
|
||||||
latest
|
|
||||||
.current
|
|
||||||
.as_ref()
|
|
||||||
.map(|current| current.as_str() == profile_id)
|
|
||||||
.unwrap_or(false),
|
|
||||||
item.url.clone().map(|u| u.to_string()),
|
|
||||||
)
|
|
||||||
};
|
|
||||||
|
|
||||||
if is_current {
|
|
||||||
logging!(
|
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"Switch task {} is targeting the current profile {}; skipping validation",
|
|
||||||
task_id,
|
|
||||||
profile_id
|
|
||||||
);
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
if matches!(profile_type.as_deref(), Some("remote")) {
|
|
||||||
// Remote profiles must retain a URL so the subsequent refresh job knows where to download.
|
|
||||||
let has_url = remote_url.as_ref().map(|u| !u.is_empty()).unwrap_or(false);
|
|
||||||
if !has_url {
|
|
||||||
return Err({
|
|
||||||
let msg = format!("Remote profile {} is missing a download URL", profile_id);
|
|
||||||
msg.into()
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(file) = file_path {
|
|
||||||
let profiles_dir = dirs::app_profiles_dir().map_err(|err| -> String {
|
|
||||||
format!("Failed to resolve profiles directory: {}", err).into()
|
|
||||||
})?;
|
|
||||||
let path = profiles_dir.join(&file);
|
|
||||||
|
|
||||||
let contents = match time::timeout(YAML_READ_TIMEOUT, tokio_fs::read_to_string(&path)).await
|
|
||||||
{
|
|
||||||
Ok(Ok(contents)) => contents,
|
|
||||||
Ok(Err(err)) => {
|
|
||||||
return Err(
|
|
||||||
format!("Failed to read profile file {}: {}", path.display(), err).into(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
Err(_) => {
|
|
||||||
return Err(format!(
|
|
||||||
"Timed out reading profile file {} after {:?}",
|
|
||||||
path.display(),
|
|
||||||
YAML_READ_TIMEOUT
|
|
||||||
)
|
|
||||||
.into());
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let parse_result = AsyncHandler::spawn_blocking(move || {
|
|
||||||
serde_yaml::from_str::<serde_yaml::Value>(&contents)
|
|
||||||
})
|
|
||||||
.await;
|
|
||||||
|
|
||||||
match parse_result {
|
|
||||||
Ok(Ok(_)) => {}
|
|
||||||
Ok(Err(err)) => {
|
|
||||||
return Err(
|
|
||||||
format!("Profile YAML parse failed for {}: {}", path.display(), err).into(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
Err(join_err) => {
|
|
||||||
return Err(format!(
|
|
||||||
"Profile YAML parse task panicked for {}: {}",
|
|
||||||
path.display(),
|
|
||||||
join_err
|
|
||||||
)
|
|
||||||
.into());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
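Two guards in the validation above are worth calling out: the file read is bounded by a wall-clock timeout, and the YAML parse is pushed onto a blocking thread so a large document cannot stall the async runtime. A standalone sketch of that combination, assuming the serde_yaml and tokio crates (the path and the five-second limit are illustrative):

use std::time::Duration;
use tokio::{fs, task, time};

async fn validate_yaml_file(path: &str) -> Result<(), String> {
    // Bound the read so a hung filesystem cannot wedge the caller.
    let contents = time::timeout(Duration::from_secs(5), fs::read_to_string(path))
        .await
        .map_err(|_| format!("timed out reading {path}"))?
        .map_err(|err| format!("failed to read {path}: {err}"))?;

    // Parsing can be CPU-heavy for big documents; keep it off the async reactor.
    task::spawn_blocking(move || serde_yaml::from_str::<serde_yaml::Value>(&contents).map(|_| ()))
        .await
        .map_err(|join_err| format!("parse task panicked: {join_err}"))?
        .map_err(|err| format!("invalid YAML: {err}"))
}

#[tokio::main]
async fn main() {
    match validate_yaml_file("profile.yaml").await {
        Ok(()) => println!("profile.yaml looks syntactically valid"),
        Err(err) => eprintln!("{err}"),
    }
}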
@@ -1,385 +0,0 @@
|
|||||||
use super::{
|
|
||||||
CmdResult,
|
|
||||||
state::{SWITCH_JOB_TIMEOUT, SwitchManager, SwitchRequest, manager},
|
|
||||||
validation::validate_switch_request,
|
|
||||||
};
|
|
||||||
use crate::cmd::StringifyErr;
|
|
||||||
use crate::{
|
|
||||||
config::{Config, IProfiles, profiles::profiles_save_file_safe},
|
|
||||||
core::handle,
|
|
||||||
logging,
|
|
||||||
process::AsyncHandler,
|
|
||||||
utils::{dirs, logging::Type},
|
|
||||||
};
|
|
||||||
use futures::FutureExt;
|
|
||||||
use serde_yaml_ng as serde_yaml;
|
|
||||||
use smartstring::alias::String as SmartString;
|
|
||||||
use std::{any::Any, panic::AssertUnwindSafe, time::Duration};
|
|
||||||
use tokio::{fs as tokio_fs, time};
|
|
||||||
|
|
||||||
mod cleanup;
|
|
||||||
mod state_machine;
|
|
||||||
pub(super) use cleanup::{
|
|
||||||
CleanupHandle, schedule_post_switch_failure, schedule_post_switch_success,
|
|
||||||
};
|
|
||||||
|
|
||||||
use state_machine::{CONFIG_APPLY_TIMEOUT, SAVE_PROFILES_TIMEOUT, SwitchStateMachine};
|
|
||||||
pub(super) use state_machine::{SwitchPanicInfo, SwitchStage};
|
|
||||||
|
|
||||||
pub(super) struct SwitchWorkflowResult {
|
|
||||||
pub success: bool,
|
|
||||||
pub cleanup: CleanupHandle,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) struct SwitchWorkflowError {
|
|
||||||
pub info: SwitchPanicInfo,
|
|
||||||
pub cleanup: CleanupHandle,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) async fn run_switch_job(
|
|
||||||
manager: &'static SwitchManager,
|
|
||||||
request: SwitchRequest,
|
|
||||||
) -> Result<SwitchWorkflowResult, SwitchWorkflowError> {
|
|
||||||
// Short-circuit cancelled jobs before we allocate resources or emit events.
|
|
||||||
if request.cancel_token().is_cancelled() {
|
|
||||||
logging!(
|
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"Switch task {} cancelled before validation",
|
|
||||||
request.task_id()
|
|
||||||
);
|
|
||||||
let cleanup = schedule_post_switch_failure(
|
|
||||||
request.profile_id().clone(),
|
|
||||||
request.notify(),
|
|
||||||
request.task_id(),
|
|
||||||
);
|
|
||||||
return Ok(SwitchWorkflowResult {
|
|
||||||
success: false,
|
|
||||||
cleanup,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
let profile_id = request.profile_id().clone();
|
|
||||||
let task_id = request.task_id();
|
|
||||||
let notify = request.notify();
|
|
||||||
|
|
||||||
if let Err(err) = validate_switch_request(task_id, profile_id.as_str()).await {
|
|
||||||
logging!(
|
|
||||||
warn,
|
|
||||||
Type::Cmd,
|
|
||||||
"Validation failed for switch task {} -> {}: {}",
|
|
||||||
task_id,
|
|
||||||
profile_id,
|
|
||||||
err
|
|
||||||
);
|
|
||||||
handle::Handle::notice_message("config_validate::error", err.clone());
|
|
||||||
let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id);
|
|
||||||
return Ok(SwitchWorkflowResult {
|
|
||||||
success: false,
|
|
||||||
cleanup,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
logging!(
|
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"Starting switch task {} for profile {} (notify={})",
|
|
||||||
task_id,
|
|
||||||
profile_id,
|
|
||||||
notify
|
|
||||||
);
|
|
||||||
|
|
||||||
let pipeline_request = request;
|
|
||||||
// The state machine owns the heavy lifting. We wrap it with timeout/panic guards so the driver never hangs.
|
|
||||||
let pipeline = async move {
|
|
||||||
let target_profile = pipeline_request.profile_id().clone();
|
|
||||||
SwitchStateMachine::new(
|
|
||||||
manager,
|
|
||||||
Some(pipeline_request),
|
|
||||||
IProfiles {
|
|
||||||
current: Some(target_profile),
|
|
||||||
items: None,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.run()
|
|
||||||
.await
|
|
||||||
};
|
|
||||||
|
|
||||||
match time::timeout(
|
|
||||||
SWITCH_JOB_TIMEOUT,
|
|
||||||
AssertUnwindSafe(pipeline).catch_unwind(),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Err(_) => {
|
|
||||||
logging!(
|
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"Profile switch task {} timed out after {:?}",
|
|
||||||
task_id,
|
|
||||||
SWITCH_JOB_TIMEOUT
|
|
||||||
);
|
|
||||||
handle::Handle::notice_message(
|
|
||||||
"config_validate::error",
|
|
||||||
format!("profile switch timed out: {}", profile_id),
|
|
||||||
);
|
|
||||||
let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id);
|
|
||||||
Ok(SwitchWorkflowResult {
|
|
||||||
success: false,
|
|
||||||
cleanup,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
Ok(Err(panic_payload)) => {
|
|
||||||
let panic_message = describe_panic_payload(panic_payload.as_ref());
|
|
||||||
logging!(
|
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"Panic captured during profile switch task {} ({}): {}",
|
|
||||||
task_id,
|
|
||||||
profile_id,
|
|
||||||
panic_message
|
|
||||||
);
|
|
||||||
handle::Handle::notice_message(
|
|
||||||
"config_validate::panic",
|
|
||||||
format!("profile switch panic: {}", profile_id),
|
|
||||||
);
|
|
||||||
let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id);
|
|
||||||
Err(SwitchWorkflowError {
|
|
||||||
info: SwitchPanicInfo::workflow_root(panic_message),
|
|
||||||
cleanup,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
Ok(Ok(machine_result)) => match machine_result {
|
|
||||||
Ok(cmd_result) => match cmd_result {
|
|
||||||
Ok(success) => {
|
|
||||||
let cleanup =
|
|
||||||
schedule_post_switch_success(profile_id.clone(), success, notify, task_id);
|
|
||||||
Ok(SwitchWorkflowResult { success, cleanup })
|
|
||||||
}
|
|
||||||
Err(err) => {
|
|
||||||
logging!(
|
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"Profile switch failed ({}): {}",
|
|
||||||
profile_id,
|
|
||||||
err
|
|
||||||
);
|
|
||||||
handle::Handle::notice_message("config_validate::error", err.clone());
|
|
||||||
let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id);
|
|
||||||
Ok(SwitchWorkflowResult {
|
|
||||||
success: false,
|
|
||||||
cleanup,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
},
|
|
||||||
Err(panic_info) => {
|
|
||||||
logging!(
|
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"State machine panic during profile switch task {} ({} {:?}): {}",
|
|
||||||
task_id,
|
|
||||||
profile_id,
|
|
||||||
panic_info.stage,
|
|
||||||
panic_info.detail
|
|
||||||
);
|
|
||||||
handle::Handle::notice_message(
|
|
||||||
"config_validate::panic",
|
|
||||||
format!("profile switch panic: {}", profile_id),
|
|
||||||
);
|
|
||||||
let cleanup = schedule_post_switch_failure(profile_id.clone(), notify, task_id);
|
|
||||||
Err(SwitchWorkflowError {
|
|
||||||
info: panic_info,
|
|
||||||
cleanup,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
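run_switch_job wraps the whole pipeline in a timeout and a catch_unwind, so a hung or panicking job is converted into a value the driver can report instead of a task that silently dies. A minimal sketch of that double guard, assuming the futures crate for FutureExt::catch_unwind (the Outcome type and the limits are illustrative):

use futures::FutureExt;
use std::{panic::AssertUnwindSafe, time::Duration};
use tokio::time;

#[derive(Debug)]
enum Outcome {
    Ok(&'static str),
    Panicked(String),
    TimedOut,
}

async fn guarded<F>(job: F, limit: Duration) -> Outcome
where
    F: std::future::Future<Output = &'static str>,
{
    // catch_unwind turns a panic inside the job into an Err payload we can describe,
    // and the outer timeout keeps a stuck job from blocking its caller forever.
    match time::timeout(limit, AssertUnwindSafe(job).catch_unwind()).await {
        Err(_) => Outcome::TimedOut,
        Ok(Err(payload)) => {
            let detail = payload
                .downcast_ref::<&str>()
                .map(|s| s.to_string())
                .or_else(|| payload.downcast_ref::<String>().cloned())
                .unwrap_or_else(|| "unknown panic".to_string());
            Outcome::Panicked(detail)
        }
        Ok(Ok(value)) => Outcome::Ok(value),
    }
}

#[tokio::main]
async fn main() {
    let fast = guarded(async { "done" }, Duration::from_secs(1)).await;
    let boom = guarded(async { panic!("exploded mid-switch") }, Duration::from_secs(1)).await;
    println!("{fast:?} / {boom:?}");
}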
|
|
||||||
/// Allow patch operations (no driver request) to use the same state machine pipeline.
|
|
||||||
pub(super) async fn patch_profiles_config(profiles: IProfiles) -> CmdResult<bool> {
|
|
||||||
match SwitchStateMachine::new(manager(), None, profiles)
|
|
||||||
.run()
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(result) => result,
|
|
||||||
Err(panic_info) => Err(format!(
|
|
||||||
"profile switch panic ({:?}): {}",
|
|
||||||
panic_info.stage, panic_info.detail
|
|
||||||
)
|
|
||||||
.into()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parse the target profile YAML on a background thread to catch syntax errors early.
|
|
||||||
pub(super) async fn validate_profile_yaml(profile: &SmartString) -> CmdResult<bool> {
|
|
||||||
let file_path = {
|
|
||||||
let profiles_guard = Config::profiles().await;
|
|
||||||
let profiles_data = profiles_guard.latest_ref();
|
|
||||||
match profiles_data.get_item(profile) {
|
|
||||||
Ok(item) => item.file.as_ref().and_then(|file| {
|
|
||||||
dirs::app_profiles_dir()
|
|
||||||
.ok()
|
|
||||||
.map(|dir| dir.join(file.as_str()))
|
|
||||||
}),
|
|
||||||
Err(e) => {
|
|
||||||
logging!(
|
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"Failed to load target profile metadata: {}",
|
|
||||||
e
|
|
||||||
);
|
|
||||||
return Ok(false);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let Some(path) = file_path else {
|
|
||||||
return Ok(true);
|
|
||||||
};
|
|
||||||
|
|
||||||
if !path.exists() {
|
|
||||||
logging!(
|
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"Target profile file does not exist: {}",
|
|
||||||
path.display()
|
|
||||||
);
|
|
||||||
handle::Handle::notice_message(
|
|
||||||
"config_validate::file_not_found",
|
|
||||||
format!("{}", path.display()),
|
|
||||||
);
|
|
||||||
return Ok(false);
|
|
||||||
}
|
|
||||||
|
|
||||||
let file_read_result =
|
|
||||||
time::timeout(Duration::from_secs(5), tokio_fs::read_to_string(&path)).await;
|
|
||||||
|
|
||||||
match file_read_result {
|
|
||||||
Ok(Ok(content)) => {
|
|
||||||
let yaml_parse_result = AsyncHandler::spawn_blocking(move || {
|
|
||||||
serde_yaml::from_str::<serde_yaml::Value>(&content)
|
|
||||||
})
|
|
||||||
.await;
|
|
||||||
|
|
||||||
match yaml_parse_result {
|
|
||||||
Ok(Ok(_)) => {
|
|
||||||
logging!(info, Type::Cmd, "Target profile YAML syntax is valid");
|
|
||||||
Ok(true)
|
|
||||||
}
|
|
||||||
Ok(Err(err)) => {
|
|
||||||
let error_msg = format!(" {err}");
|
|
||||||
logging!(
|
|
||||||
error,
|
|
||||||
Type::Cmd,
|
|
||||||
"Target profile contains YAML syntax errors: {}",
|
|
||||||
error_msg
|
|
||||||
);
|
|
||||||
handle::Handle::notice_message(
|
|
||||||
"config_validate::yaml_syntax_error",
|
|
||||||
error_msg.clone(),
|
|
||||||
);
|
|
||||||
Ok(false)
|
|
||||||
}
|
|
||||||
Err(join_err) => {
|
|
||||||
let error_msg = format!("YAML parsing task failed: {join_err}");
|
|
||||||
logging!(error, Type::Cmd, "{}", error_msg);
|
|
||||||
handle::Handle::notice_message(
|
|
||||||
"config_validate::yaml_parse_error",
|
|
||||||
error_msg.clone(),
|
|
||||||
);
|
|
||||||
Ok(false)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(Err(err)) => {
|
|
||||||
let error_msg = format!("Failed to read target profile file: {err}");
|
|
||||||
logging!(error, Type::Cmd, "{}", error_msg);
|
|
||||||
handle::Handle::notice_message("config_validate::file_read_error", error_msg.clone());
|
|
||||||
Ok(false)
|
|
||||||
}
|
|
||||||
Err(_) => {
|
|
||||||
let error_msg = "Timed out reading profile file (5s)".to_string();
|
|
||||||
logging!(error, Type::Cmd, "{}", error_msg);
|
|
||||||
handle::Handle::notice_message("config_validate::file_read_timeout", error_msg.clone());
|
|
||||||
Err(error_msg.into())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Best-effort rollback invoked when a switch fails midway through the pipeline.
|
|
||||||
pub(super) async fn restore_previous_profile(previous: Option<SmartString>) -> CmdResult<()> {
|
|
||||||
if let Some(prev_profile) = previous {
|
|
||||||
logging!(
|
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"Attempting to restore previous configuration: {}",
|
|
||||||
prev_profile
|
|
||||||
);
|
|
||||||
let restore_profiles = IProfiles {
|
|
||||||
current: Some(prev_profile),
|
|
||||||
items: None,
|
|
||||||
};
|
|
||||||
Config::profiles()
|
|
||||||
.await
|
|
||||||
.draft_mut()
|
|
||||||
.patch_config(restore_profiles)
|
|
||||||
.stringify_err()?;
|
|
||||||
if time::timeout(CONFIG_APPLY_TIMEOUT, async {
|
|
||||||
Config::profiles().await.apply();
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.is_err()
|
|
||||||
{
|
|
||||||
logging!(
|
|
||||||
warn,
|
|
||||||
Type::Cmd,
|
|
||||||
"Restoring previous configuration timed out after {:?}",
|
|
||||||
CONFIG_APPLY_TIMEOUT
|
|
||||||
);
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
AsyncHandler::spawn(|| async move {
|
|
||||||
let save_future = AsyncHandler::spawn_blocking(|| {
|
|
||||||
futures::executor::block_on(async { profiles_save_file_safe().await })
|
|
||||||
});
|
|
||||||
match time::timeout(SAVE_PROFILES_TIMEOUT, save_future).await {
|
|
||||||
Ok(join_res) => match join_res {
|
|
||||||
Ok(Ok(())) => {}
|
|
||||||
Ok(Err(err)) => {
|
|
||||||
logging!(
|
|
||||||
warn,
|
|
||||||
Type::Cmd,
|
|
||||||
"Failed to persist restored configuration asynchronously: {}",
|
|
||||||
err
|
|
||||||
);
|
|
||||||
}
|
|
||||||
Err(join_err) => {
|
|
||||||
logging!(warn, Type::Cmd, "Blocking save task failed: {}", join_err);
|
|
||||||
}
|
|
||||||
},
|
|
||||||
Err(_) => {
|
|
||||||
logging!(
|
|
||||||
warn,
|
|
||||||
Type::Cmd,
|
|
||||||
"Persisting restored configuration timed out after {:?}",
|
|
||||||
SAVE_PROFILES_TIMEOUT
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn describe_panic_payload(payload: &(dyn Any + Send)) -> String {
|
|
||||||
if let Some(message) = payload.downcast_ref::<&str>() {
|
|
||||||
(*message).to_string()
|
|
||||||
} else if let Some(message) = payload.downcast_ref::<std::string::String>() {
|
|
||||||
message.clone()
|
|
||||||
} else {
|
|
||||||
"unknown panic".into()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,65 +0,0 @@
|
|||||||
use super::super::state::SWITCH_CLEANUP_TIMEOUT;
|
|
||||||
use crate::{core::handle, logging, process::AsyncHandler, utils::logging::Type};
|
|
||||||
use smartstring::alias::String as SmartString;
|
|
||||||
use tokio::time;
|
|
||||||
|
|
||||||
pub(crate) type CleanupHandle = tauri::async_runtime::JoinHandle<()>;
|
|
||||||
|
|
||||||
pub(crate) fn schedule_post_switch_success(
|
|
||||||
profile_id: SmartString,
|
|
||||||
success: bool,
|
|
||||||
notify: bool,
|
|
||||||
task_id: u64,
|
|
||||||
) -> CleanupHandle {
|
|
||||||
// Post-success cleanup runs detached from the driver so the queue keeps moving.
|
|
||||||
AsyncHandler::spawn(move || async move {
|
|
||||||
handle::Handle::notify_profile_switch_finished(
|
|
||||||
profile_id.clone(),
|
|
||||||
success,
|
|
||||||
notify,
|
|
||||||
task_id,
|
|
||||||
);
|
|
||||||
if success {
|
|
||||||
close_connections_after_switch(profile_id).await;
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn schedule_post_switch_failure(
|
|
||||||
profile_id: SmartString,
|
|
||||||
notify: bool,
|
|
||||||
task_id: u64,
|
|
||||||
) -> CleanupHandle {
|
|
||||||
// Failures or cancellations do not alter the active profile, so skip draining live connections.
|
|
||||||
AsyncHandler::spawn(move || async move {
|
|
||||||
handle::Handle::notify_profile_switch_finished(profile_id.clone(), false, notify, task_id);
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn close_connections_after_switch(profile_id: SmartString) {
|
|
||||||
match time::timeout(SWITCH_CLEANUP_TIMEOUT, async {
|
|
||||||
handle::Handle::mihomo().await.close_all_connections().await
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(Ok(())) => {}
|
|
||||||
Ok(Err(err)) => {
|
|
||||||
logging!(
|
|
||||||
warn,
|
|
||||||
Type::Cmd,
|
|
||||||
"Failed to close connections after profile switch ({}): {}",
|
|
||||||
profile_id,
|
|
||||||
err
|
|
||||||
);
|
|
||||||
}
|
|
||||||
Err(_) => {
|
|
||||||
logging!(
|
|
||||||
warn,
|
|
||||||
Type::Cmd,
|
|
||||||
"Closing connections after profile switch ({}) timed out after {:?}",
|
|
||||||
profile_id,
|
|
||||||
SWITCH_CLEANUP_TIMEOUT
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
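Cleanup here is deliberately fire-and-forget; the driver keeps the JoinHandle only so that a newer switch for the same profile can abort a cleanup that is still running (see track_cleanup above). A small sketch of that keep-only-the-latest-handle pattern with plain tokio tasks (types and timings are illustrative):

use std::collections::HashMap;
use std::time::Duration;
use tokio::{task::JoinHandle, time};

struct CleanupTracker {
    by_profile: HashMap<String, JoinHandle<()>>,
}

impl CleanupTracker {
    fn new() -> Self {
        Self { by_profile: HashMap::new() }
    }

    // Only the latest cleanup per profile is allowed to keep running.
    fn track(&mut self, profile: &str, work: impl std::future::Future<Output = ()> + Send + 'static) {
        if let Some(previous) = self.by_profile.remove(profile) {
            previous.abort();
        }
        self.by_profile.insert(profile.to_string(), tokio::spawn(work));
    }
}

#[tokio::main]
async fn main() {
    let mut tracker = CleanupTracker::new();
    tracker.track("profile-a", async {
        time::sleep(Duration::from_secs(10)).await; // slow cleanup
        println!("old cleanup finished"); // never printed: aborted below
    });
    tracker.track("profile-a", async {
        time::sleep(Duration::from_millis(50)).await;
        println!("new cleanup finished");
    });
    time::sleep(Duration::from_millis(200)).await;
}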
@@ -1,178 +0,0 @@
|
|||||||
use super::{CmdResult, core::SwitchStage};
|
|
||||||
use crate::{
|
|
||||||
cmd::profile_switch::state::{
|
|
||||||
SwitchCancellation, SwitchHeartbeat, SwitchManager, SwitchRequest, SwitchScope,
|
|
||||||
},
|
|
||||||
config::IProfiles,
|
|
||||||
logging,
|
|
||||||
utils::logging::Type,
|
|
||||||
};
|
|
||||||
use smartstring::alias::String as SmartString;
|
|
||||||
use tokio::sync::MutexGuard;
|
|
||||||
|
|
||||||
pub(super) struct SwitchContext {
|
|
||||||
pub(super) manager: &'static SwitchManager,
|
|
||||||
pub(super) request: Option<SwitchRequest>,
|
|
||||||
pub(super) profiles_patch: Option<IProfiles>,
|
|
||||||
pub(super) sequence: Option<u64>,
|
|
||||||
pub(super) target_profile: Option<SmartString>,
|
|
||||||
pub(super) previous_profile: Option<SmartString>,
|
|
||||||
pub(super) new_profile_for_event: Option<SmartString>,
|
|
||||||
pub(super) switch_scope: Option<SwitchScope<'static>>,
|
|
||||||
pub(super) core_guard: Option<MutexGuard<'static, ()>>,
|
|
||||||
pub(super) heartbeat: SwitchHeartbeat,
|
|
||||||
pub(super) task_id: Option<u64>,
|
|
||||||
pub(super) profile_label: SmartString,
|
|
||||||
pub(super) active_stage: SwitchStage,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SwitchContext {
|
|
||||||
// Captures all mutable data required across states (locks, profile ids, etc).
|
|
||||||
pub(super) fn new(
|
|
||||||
manager: &'static SwitchManager,
|
|
||||||
request: Option<SwitchRequest>,
|
|
||||||
profiles: IProfiles,
|
|
||||||
heartbeat: SwitchHeartbeat,
|
|
||||||
) -> Self {
|
|
||||||
let task_id = request.as_ref().map(|req| req.task_id());
|
|
||||||
let profile_label = request
|
|
||||||
.as_ref()
|
|
||||||
.map(|req| req.profile_id().clone())
|
|
||||||
.or_else(|| profiles.current.clone())
|
|
||||||
.unwrap_or_else(|| SmartString::from("unknown"));
|
|
||||||
heartbeat.touch();
|
|
||||||
Self {
|
|
||||||
manager,
|
|
||||||
request,
|
|
||||||
profiles_patch: Some(profiles),
|
|
||||||
sequence: None,
|
|
||||||
target_profile: None,
|
|
||||||
previous_profile: None,
|
|
||||||
new_profile_for_event: None,
|
|
||||||
switch_scope: None,
|
|
||||||
core_guard: None,
|
|
||||||
heartbeat,
|
|
||||||
task_id,
|
|
||||||
profile_label,
|
|
||||||
active_stage: SwitchStage::Start,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn ensure_target_profile(&mut self) {
|
|
||||||
// Lazily determine which profile we're switching to so shared paths (patch vs. driver) behave the same.
|
|
||||||
if let Some(patch) = self.profiles_patch.as_mut() {
|
|
||||||
if patch.current.is_none()
|
|
||||||
&& let Some(request) = self.request.as_ref()
|
|
||||||
{
|
|
||||||
patch.current = Some(request.profile_id().clone());
|
|
||||||
}
|
|
||||||
self.target_profile = patch.current.clone();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn take_profiles_patch(&mut self) -> CmdResult<IProfiles> {
|
|
||||||
self.profiles_patch
|
|
||||||
.take()
|
|
||||||
.ok_or_else(|| "profiles patch already consumed".into())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn cancel_token(&self) -> Option<SwitchCancellation> {
|
|
||||||
self.request.as_ref().map(|req| req.cancel_token().clone())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn cancelled(&self) -> bool {
|
|
||||||
self.request
|
|
||||||
.as_ref()
|
|
||||||
.map(|req| req.cancel_token().is_cancelled())
|
|
||||||
.unwrap_or(false)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn log_cancelled(&self, stage: &str) {
|
|
||||||
if let Some(request) = self.request.as_ref() {
|
|
||||||
logging!(
|
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"Switch task {} cancelled {}; profile={}",
|
|
||||||
request.task_id(),
|
|
||||||
stage,
|
|
||||||
request.profile_id()
|
|
||||||
);
|
|
||||||
} else {
|
|
||||||
logging!(info, Type::Cmd, "Profile switch cancelled {}", stage);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn should_validate_target(&self) -> bool {
|
|
||||||
match (&self.target_profile, &self.previous_profile) {
|
|
||||||
(Some(target), Some(current)) => current != target,
|
|
||||||
(Some(_), None) => true,
|
|
||||||
_ => false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn stale(&self) -> bool {
|
|
||||||
self.sequence
|
|
||||||
.map(|seq| seq < self.manager.latest_request_sequence())
|
|
||||||
.unwrap_or(false)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn sequence(&self) -> u64 {
|
|
||||||
self.sequence.unwrap_or_else(|| {
|
|
||||||
logging!(
|
|
||||||
warn,
|
|
||||||
Type::Cmd,
|
|
||||||
"Sequence unexpectedly missing in switch context; defaulting to 0"
|
|
||||||
);
|
|
||||||
0
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn record_stage(&mut self, stage: SwitchStage) {
|
|
||||||
let since_last = self.heartbeat.elapsed();
|
|
||||||
let previous = self.active_stage;
|
|
||||||
self.active_stage = stage;
|
|
||||||
self.heartbeat.set_stage(stage.as_code());
|
|
||||||
|
|
||||||
match self.task_id {
|
|
||||||
Some(task_id) => logging!(
|
|
||||||
debug,
|
|
||||||
Type::Cmd,
|
|
||||||
"Switch task {} (profile={}) transitioned {:?} -> {:?} after {:?}",
|
|
||||||
task_id,
|
|
||||||
self.profile_label,
|
|
||||||
previous,
|
|
||||||
stage,
|
|
||||||
since_last
|
|
||||||
),
|
|
||||||
None => logging!(
|
|
||||||
debug,
|
|
||||||
Type::Cmd,
|
|
||||||
"Profile patch {} transitioned {:?} -> {:?} after {:?}",
|
|
||||||
self.profile_label,
|
|
||||||
previous,
|
|
||||||
stage,
|
|
||||||
since_last
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn release_core_guard(&mut self) {
|
|
||||||
self.core_guard = None;
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn release_switch_scope(&mut self) {
|
|
||||||
self.switch_scope = None;
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn release_locks(&mut self) {
|
|
||||||
self.release_core_guard();
|
|
||||||
self.release_switch_scope();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for SwitchContext {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
self.core_guard.take();
|
|
||||||
self.switch_scope.take();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
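stale() above reduces to comparing the sequence number this request captured against the newest sequence the manager has handed out; anything newer means the current request should quietly abandon its work. The same check in a standalone form with a shared atomic counter (simplified types, not the crate's own):

use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};

#[derive(Default)]
struct RequestCounter(AtomicU64);

impl RequestCounter {
    fn next(&self) -> u64 {
        self.0.fetch_add(1, Ordering::SeqCst) + 1
    }
    fn latest(&self) -> u64 {
        self.0.load(Ordering::SeqCst)
    }
}

struct Request {
    sequence: u64,
    counter: Arc<RequestCounter>,
}

impl Request {
    fn new(counter: Arc<RequestCounter>) -> Self {
        let sequence = counter.next();
        Self { sequence, counter }
    }
    // Stale means "someone enqueued after me", so my result would overwrite newer intent.
    fn stale(&self) -> bool {
        self.sequence < self.counter.latest()
    }
}

fn main() {
    let counter = Arc::new(RequestCounter::default());
    let first = Request::new(counter.clone());
    assert!(!first.stale());
    let _second = Request::new(counter.clone());
    assert!(first.stale()); // the first request should now abandon its work
    println!("stale detection works as expected");
}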
@@ -1,284 +0,0 @@
|
|||||||
use super::{CmdResult, context::SwitchContext, describe_panic_payload};
|
|
||||||
use crate::{
|
|
||||||
cmd::profile_switch::state::{SwitchHeartbeat, SwitchManager, SwitchRequest},
|
|
||||||
config::IProfiles,
|
|
||||||
logging,
|
|
||||||
utils::logging::Type,
|
|
||||||
};
|
|
||||||
use futures::FutureExt;
|
|
||||||
use std::{
|
|
||||||
mem,
|
|
||||||
panic::AssertUnwindSafe,
|
|
||||||
time::{Duration, Instant},
|
|
||||||
};
|
|
||||||
pub(crate) const CONFIG_APPLY_TIMEOUT: Duration = Duration::from_secs(5);
|
|
||||||
pub(crate) const TRAY_UPDATE_TIMEOUT: Duration = Duration::from_secs(3);
|
|
||||||
pub(crate) const REFRESH_TIMEOUT: Duration = Duration::from_secs(3);
|
|
||||||
pub(crate) const SAVE_PROFILES_TIMEOUT: Duration = Duration::from_secs(5);
|
|
||||||
pub(crate) const SWITCH_IDLE_WAIT_TIMEOUT: Duration = Duration::from_secs(30);
|
|
||||||
pub(crate) const SWITCH_IDLE_WAIT_POLL: Duration = Duration::from_millis(25);
|
|
||||||
pub(crate) const SWITCH_IDLE_WAIT_MAX_BACKOFF: Duration = Duration::from_millis(250);
|
|
||||||
|
|
||||||
/// Explicit state machine for profile switching so we can reason about
|
|
||||||
/// cancellation, stale requests, and side effects at each stage.
|
|
||||||
pub(crate) struct SwitchStateMachine {
|
|
||||||
pub(super) ctx: SwitchContext,
|
|
||||||
state: SwitchState,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
|
||||||
pub(crate) enum SwitchStage {
|
|
||||||
Start,
|
|
||||||
AcquireCore,
|
|
||||||
Prepare,
|
|
||||||
ValidateTarget,
|
|
||||||
PatchDraft,
|
|
||||||
UpdateCore,
|
|
||||||
Finalize,
|
|
||||||
Workflow,
|
|
||||||
DriverTask,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SwitchStage {
|
|
||||||
pub(crate) fn as_code(self) -> u32 {
|
|
||||||
match self {
|
|
||||||
SwitchStage::Start => 0,
|
|
||||||
SwitchStage::AcquireCore => 1,
|
|
||||||
SwitchStage::Prepare => 2,
|
|
||||||
SwitchStage::ValidateTarget => 3,
|
|
||||||
SwitchStage::PatchDraft => 4,
|
|
||||||
SwitchStage::UpdateCore => 5,
|
|
||||||
SwitchStage::Finalize => 6,
|
|
||||||
SwitchStage::Workflow => 7,
|
|
||||||
SwitchStage::DriverTask => 8,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn from_code(code: u32) -> Option<Self> {
|
|
||||||
Some(match code {
|
|
||||||
0 => SwitchStage::Start,
|
|
||||||
1 => SwitchStage::AcquireCore,
|
|
||||||
2 => SwitchStage::Prepare,
|
|
||||||
3 => SwitchStage::ValidateTarget,
|
|
||||||
4 => SwitchStage::PatchDraft,
|
|
||||||
5 => SwitchStage::UpdateCore,
|
|
||||||
6 => SwitchStage::Finalize,
|
|
||||||
7 => SwitchStage::Workflow,
|
|
||||||
8 => SwitchStage::DriverTask,
|
|
||||||
_ => return None,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
|
||||||
pub(crate) struct SwitchPanicInfo {
|
|
||||||
pub(crate) stage: SwitchStage,
|
|
||||||
pub(crate) detail: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SwitchPanicInfo {
|
|
||||||
pub(crate) fn new(stage: SwitchStage, detail: String) -> Self {
|
|
||||||
Self { stage, detail }
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn workflow_root(detail: String) -> Self {
|
|
||||||
Self::new(SwitchStage::Workflow, detail)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn driver_task(detail: String) -> Self {
|
|
||||||
Self::new(SwitchStage::DriverTask, detail)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// High-level state machine nodes executed in strict sequence.
|
|
||||||
pub(crate) enum SwitchState {
|
|
||||||
Start,
|
|
||||||
AcquireCore,
|
|
||||||
Prepare,
|
|
||||||
ValidateTarget,
|
|
||||||
PatchDraft,
|
|
||||||
UpdateCore,
|
|
||||||
Finalize(CoreUpdateOutcome),
|
|
||||||
Complete(bool),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Result of trying to apply the draft configuration to the core.
|
|
||||||
pub(crate) enum CoreUpdateOutcome {
|
|
||||||
Success,
|
|
||||||
ValidationFailed { message: String },
|
|
||||||
CoreError { message: String },
|
|
||||||
Timeout,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Indicates where a stale request was detected so logs stay descriptive.
|
|
||||||
pub(crate) enum StaleStage {
|
|
||||||
AfterLock,
|
|
||||||
BeforeCoreOperation,
|
|
||||||
BeforeCoreInteraction,
|
|
||||||
AfterCoreOperation,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl StaleStage {
|
|
||||||
pub(super) fn log(&self, ctx: &SwitchContext) {
|
|
||||||
let sequence = ctx.sequence();
|
|
||||||
let latest = ctx.manager.latest_request_sequence();
|
|
||||||
match self {
|
|
||||||
StaleStage::AfterLock => logging!(
|
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"Detected a newer request after acquiring the lock (sequence: {} < {}), abandoning current request",
|
|
||||||
sequence,
|
|
||||||
latest
|
|
||||||
),
|
|
||||||
StaleStage::BeforeCoreOperation => logging!(
|
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"Detected a newer request before core operation (sequence: {} < {}), abandoning current request",
|
|
||||||
sequence,
|
|
||||||
latest
|
|
||||||
),
|
|
||||||
StaleStage::BeforeCoreInteraction => logging!(
|
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"Detected a newer request before core interaction (sequence: {} < {}), abandoning current request",
|
|
||||||
sequence,
|
|
||||||
latest
|
|
||||||
),
|
|
||||||
StaleStage::AfterCoreOperation => logging!(
|
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"Detected a newer request after core operation (sequence: {} < {}), ignoring current result",
|
|
||||||
sequence,
|
|
||||||
latest
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SwitchStateMachine {
|
|
||||||
pub(crate) fn new(
|
|
||||||
manager: &'static SwitchManager,
|
|
||||||
request: Option<SwitchRequest>,
|
|
||||||
profiles: IProfiles,
|
|
||||||
) -> Self {
|
|
||||||
let heartbeat = request
|
|
||||||
.as_ref()
|
|
||||||
.map(|req| req.heartbeat().clone())
|
|
||||||
.unwrap_or_else(SwitchHeartbeat::new);
|
|
||||||
|
|
||||||
Self {
|
|
||||||
ctx: SwitchContext::new(manager, request, profiles, heartbeat),
|
|
||||||
state: SwitchState::Start,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) async fn run(mut self) -> Result<CmdResult<bool>, SwitchPanicInfo> {
|
|
||||||
// Drive the state machine until we either complete successfully or bubble up a panic.
|
|
||||||
loop {
|
|
||||||
let current_state = mem::replace(&mut self.state, SwitchState::Complete(false));
|
|
||||||
match current_state {
|
|
||||||
SwitchState::Complete(result) => return Ok(Ok(result)),
|
|
||||||
_ => match self.run_state(current_state).await? {
|
|
||||||
Ok(state) => self.state = state,
|
|
||||||
Err(err) => return Ok(Err(err)),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
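The run loop above leans on mem::replace to take ownership of the current state, execute it, and store the successor, which keeps the match exhaustive without fighting the borrow checker. Stripped of the switch-specific stages, the shape is just this toy three-state machine:

use std::mem;

enum State {
    Start,
    Working(u32),
    Done(u32),
}

struct Machine {
    state: State,
}

impl Machine {
    fn run(&mut self) -> u32 {
        loop {
            // Take the current state out, leaving a harmless placeholder behind.
            let current = mem::replace(&mut self.state, State::Done(0));
            match current {
                State::Done(result) => return result,
                other => self.state = self.step(other),
            }
        }
    }

    fn step(&mut self, state: State) -> State {
        match state {
            State::Start => State::Working(0),
            State::Working(n) if n < 3 => State::Working(n + 1),
            State::Working(n) => State::Done(n),
            State::Done(n) => State::Done(n),
        }
    }
}

fn main() {
    let mut machine = Machine { state: State::Start };
    println!("finished with {}", machine.run());
}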
|
|
||||||
async fn run_state(
|
|
||||||
&mut self,
|
|
||||||
current: SwitchState,
|
|
||||||
) -> Result<CmdResult<SwitchState>, SwitchPanicInfo> {
|
|
||||||
match current {
|
|
||||||
SwitchState::Start => {
|
|
||||||
self.with_stage(
|
|
||||||
SwitchStage::Start,
|
|
||||||
|this| async move { this.handle_start() },
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
SwitchState::AcquireCore => {
|
|
||||||
self.with_stage(SwitchStage::AcquireCore, |this| async move {
|
|
||||||
this.handle_acquire_core().await
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
SwitchState::Prepare => {
|
|
||||||
self.with_stage(SwitchStage::Prepare, |this| async move {
|
|
||||||
this.handle_prepare().await
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
SwitchState::ValidateTarget => {
|
|
||||||
self.with_stage(SwitchStage::ValidateTarget, |this| async move {
|
|
||||||
this.handle_validate_target().await
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
SwitchState::PatchDraft => {
|
|
||||||
self.with_stage(SwitchStage::PatchDraft, |this| async move {
|
|
||||||
this.handle_patch_draft().await
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
SwitchState::UpdateCore => {
|
|
||||||
self.with_stage(SwitchStage::UpdateCore, |this| async move {
|
|
||||||
this.handle_update_core().await
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
SwitchState::Finalize(outcome) => {
|
|
||||||
self.with_stage(SwitchStage::Finalize, |this| async move {
|
|
||||||
this.handle_finalize(outcome).await
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
SwitchState::Complete(result) => Ok(Ok(SwitchState::Complete(result))),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Helper that wraps each stage with consistent logging and panic reporting.
|
|
||||||
async fn with_stage<'a, F, Fut>(
|
|
||||||
&'a mut self,
|
|
||||||
stage: SwitchStage,
|
|
||||||
f: F,
|
|
||||||
) -> Result<CmdResult<SwitchState>, SwitchPanicInfo>
|
|
||||||
where
|
|
||||||
F: FnOnce(&'a mut Self) -> Fut,
|
|
||||||
Fut: std::future::Future<Output = CmdResult<SwitchState>> + 'a,
|
|
||||||
{
|
|
||||||
let sequence = self.ctx.sequence();
|
|
||||||
let task = self.ctx.task_id;
|
|
||||||
let profile = self.ctx.profile_label.clone();
|
|
||||||
logging!(
|
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"Enter {:?} (sequence={}, task={:?}, profile={})",
|
|
||||||
stage,
|
|
||||||
sequence,
|
|
||||||
task,
|
|
||||||
profile
|
|
||||||
);
|
|
||||||
let stage_start = Instant::now();
|
|
||||||
self.ctx.record_stage(stage);
|
|
||||||
AssertUnwindSafe(f(self))
|
|
||||||
.catch_unwind()
|
|
||||||
.await
|
|
||||||
.map_err(|payload| {
|
|
||||||
SwitchPanicInfo::new(stage, describe_panic_payload(payload.as_ref()))
|
|
||||||
})
|
|
||||||
.inspect(|_| {
|
|
||||||
logging!(
|
|
||||||
info,
|
|
||||||
Type::Cmd,
|
|
||||||
"Exit {:?} (sequence={}, task={:?}, profile={}, elapsed={}ms)",
|
|
||||||
stage,
|
|
||||||
sequence,
|
|
||||||
task,
|
|
||||||
profile,
|
|
||||||
stage_start.elapsed().as_millis()
|
|
||||||
);
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
mod context;
mod core;
mod stages;

pub(crate) use core::{
    CONFIG_APPLY_TIMEOUT, SAVE_PROFILES_TIMEOUT, SwitchPanicInfo, SwitchStage, SwitchStateMachine,
};

pub(super) use super::{
    CmdResult, describe_panic_payload, restore_previous_profile, validate_profile_yaml,
};
|
|
||||||
@@ -1,597 +0,0 @@
use super::{
    CmdResult,
    core::{
        CONFIG_APPLY_TIMEOUT, CoreUpdateOutcome, REFRESH_TIMEOUT, SAVE_PROFILES_TIMEOUT,
        SWITCH_IDLE_WAIT_MAX_BACKOFF, SWITCH_IDLE_WAIT_POLL, SWITCH_IDLE_WAIT_TIMEOUT, StaleStage,
        SwitchState, SwitchStateMachine, TRAY_UPDATE_TIMEOUT,
    },
    restore_previous_profile, validate_profile_yaml,
};
use crate::{
    config::{Config, profiles::profiles_save_file_safe},
    core::{CoreManager, handle, tray::Tray},
    logging,
    process::AsyncHandler,
    utils::logging::Type,
};
use anyhow::Error;
use futures::future;
use smartstring::alias::String as SmartString;
use std::{
    pin::Pin,
    time::{Duration, Instant},
};
use tokio::time;

impl SwitchStateMachine {
    pub(super) fn handle_start(&mut self) -> CmdResult<SwitchState> {
        if self.ctx.manager.is_switching() {
            logging!(
                info,
                Type::Cmd,
                "Profile switch already in progress; queuing request for task={:?}, profile={}",
                self.ctx.task_id,
                self.ctx.profile_label
            );
        }
        Ok(SwitchState::AcquireCore)
    }

    /// Grab the core lock, mark the manager as switching, and compute the target profile.
    pub(super) async fn handle_acquire_core(&mut self) -> CmdResult<SwitchState> {
        let manager = self.ctx.manager;
        let core_guard = manager.core_mutex().lock().await;

        if manager.is_switching() {
            logging!(
                info,
                Type::Cmd,
                "Active profile switch detected; waiting before acquiring scope"
            );
            let wait_start = Instant::now();
            let mut backoff = SWITCH_IDLE_WAIT_POLL;
            while manager.is_switching() {
                if self.ctx.cancelled() {
                    self.ctx
                        .log_cancelled("while waiting for active switch to finish");
                    return Ok(SwitchState::Complete(false));
                }
                if wait_start.elapsed() >= SWITCH_IDLE_WAIT_TIMEOUT {
                    let message = format!(
                        "Timed out after {:?} waiting for active profile switch to finish",
                        SWITCH_IDLE_WAIT_TIMEOUT
                    );
                    logging!(error, Type::Cmd, "{}", message);
                    return Err(message.into());
                }

                time::sleep(backoff).await;
                backoff = backoff.saturating_mul(2).min(SWITCH_IDLE_WAIT_MAX_BACKOFF);
            }
            let waited = wait_start.elapsed().as_millis();
            if waited > 0 {
                logging!(
                    info,
                    Type::Cmd,
                    "Waited {}ms for active switch to finish before acquiring scope",
                    waited
                );
            }
        }

        self.ctx.core_guard = Some(core_guard);
        self.ctx.switch_scope = Some(manager.begin_switch());
        self.ctx.sequence = Some(manager.next_request_sequence());
        self.ctx.ensure_target_profile();

        logging!(
            info,
            Type::Cmd,
            "Begin modifying configuration; sequence: {}, target profile: {:?}",
            self.ctx.sequence(),
            self.ctx.target_profile
        );

        if self.ctx.cancelled() {
            self.ctx.log_cancelled("after acquiring core lock");
            return Ok(SwitchState::Complete(false));
        }

        if self.ctx.stale() {
            StaleStage::AfterLock.log(&self.ctx);
            return Ok(SwitchState::Complete(false));
        }

        Ok(SwitchState::Prepare)
    }

    pub(super) async fn handle_prepare(&mut self) -> CmdResult<SwitchState> {
        let current_profile = {
            let profiles_guard = Config::profiles().await;
            profiles_guard.latest_ref().current.clone()
        };

        logging!(info, Type::Cmd, "Current profile: {:?}", current_profile);
        self.ctx.previous_profile = current_profile;
        Ok(SwitchState::ValidateTarget)
    }

    pub(super) async fn handle_validate_target(&mut self) -> CmdResult<SwitchState> {
        if self.ctx.cancelled() {
            self.ctx.log_cancelled("before validation");
            return Ok(SwitchState::Complete(false));
        }

        if self.ctx.should_validate_target() {
            let Some(target) = self.ctx.target_profile.clone() else {
                logging!(
                    error,
                    Type::Cmd,
                    "Missing target profile while validation was requested; aborting switch"
                );
                return Err("missing target profile at validation".into());
            };
            if !validate_profile_yaml(&target).await? {
                return Ok(SwitchState::Complete(false));
            }
        }

        if self.ctx.stale() {
            StaleStage::BeforeCoreOperation.log(&self.ctx);
            return Ok(SwitchState::Complete(false));
        }

        Ok(SwitchState::PatchDraft)
    }

    pub(super) async fn handle_patch_draft(&mut self) -> CmdResult<SwitchState> {
        if self.ctx.cancelled() {
            self.ctx.log_cancelled("before patching configuration");
            return Ok(SwitchState::Complete(false));
        }

        logging!(
            info,
            Type::Cmd,
            "Updating configuration draft, sequence: {}",
            self.ctx.sequence()
        );

        let patch = self.ctx.take_profiles_patch()?;
        self.ctx.new_profile_for_event = patch.current.clone();
        let _ = Config::profiles().await.draft_mut().patch_config(patch);

        if self.ctx.stale() {
            StaleStage::BeforeCoreInteraction.log(&self.ctx);
            Config::profiles().await.discard();
            return Ok(SwitchState::Complete(false));
        }

        Ok(SwitchState::UpdateCore)
    }

    pub(super) async fn handle_update_core(&mut self) -> CmdResult<SwitchState> {
        let sequence = self.ctx.sequence();
        let task_id = self.ctx.task_id;
        let profile = self.ctx.profile_label.clone();
        logging!(
            info,
            Type::Cmd,
            "Starting core configuration update, sequence: {}, task={:?}, profile={}",
            sequence,
            task_id,
            profile
        );

        let heartbeat = self.ctx.heartbeat.clone();
        let start = Instant::now();
        let mut ticker = time::interval(Duration::from_secs(1));
        ticker.set_missed_tick_behavior(time::MissedTickBehavior::Delay);

        let update_future = CoreManager::global().update_config();
        tokio::pin!(update_future);

        let timeout = time::sleep(Duration::from_secs(30));
        tokio::pin!(timeout);

        let cancel_token = self.ctx.cancel_token();
        let mut cancel_notifier: Pin<Box<dyn std::future::Future<Output = ()> + Send>> =
            match cancel_token {
                Some(token) => Box::pin(async move {
                    token.cancelled_future().await;
                }),
                None => Box::pin(future::pending()),
            };

        enum UpdateOutcome {
            Finished(Result<(bool, SmartString), Error>),
            Timeout,
            Cancelled,
        }

        let update_outcome = loop {
            tokio::select! {
                res = &mut update_future => break UpdateOutcome::Finished(res),
                _ = &mut timeout => break UpdateOutcome::Timeout,
                _ = &mut cancel_notifier => break UpdateOutcome::Cancelled,
                _ = ticker.tick() => {
                    let elapsed_ms = start.elapsed().as_millis();
                    heartbeat.touch();
                    match task_id {
                        Some(id) => logging!(
                            debug,
                            Type::Cmd,
                            "Switch task {} (profile={}) UpdateCore still running (elapsed={}ms)",
                            id,
                            profile,
                            elapsed_ms
                        ),
                        None => logging!(
                            debug,
                            Type::Cmd,
                            "Profile patch {} UpdateCore still running (elapsed={}ms)",
                            profile,
                            elapsed_ms
                        ),
                    }
                }
            }
        };

        let elapsed_ms = start.elapsed().as_millis();

        let outcome = match update_outcome {
            UpdateOutcome::Finished(Ok((true, _))) => {
                logging!(
                    info,
                    Type::Cmd,
                    "Core configuration update succeeded in {}ms",
                    elapsed_ms
                );
                CoreUpdateOutcome::Success
            }
            UpdateOutcome::Finished(Ok((false, msg))) => {
                logging!(
                    warn,
                    Type::Cmd,
                    "Core configuration update validation failed in {}ms: {}",
                    elapsed_ms,
                    msg
                );
                CoreUpdateOutcome::ValidationFailed {
                    message: msg.to_string(),
                }
            }
            UpdateOutcome::Finished(Err(err)) => {
                logging!(
                    error,
                    Type::Cmd,
                    "Core configuration update errored in {}ms: {}",
                    elapsed_ms,
                    err
                );
                CoreUpdateOutcome::CoreError {
                    message: err.to_string(),
                }
            }
            UpdateOutcome::Timeout => {
                logging!(
                    error,
                    Type::Cmd,
                    "Core configuration update timed out after {}ms",
                    elapsed_ms
                );
                CoreUpdateOutcome::Timeout
            }
            UpdateOutcome::Cancelled => {
                self.ctx.log_cancelled("during core update");
                logging!(
                    info,
                    Type::Cmd,
                    "Core configuration update cancelled after {}ms",
                    elapsed_ms
                );
                self.ctx.release_locks();
                Config::profiles().await.discard();
                return Ok(SwitchState::Complete(false));
            }
        };

        self.ctx.release_core_guard();

        Ok(SwitchState::Finalize(outcome))
    }

    pub(super) async fn handle_finalize(
        &mut self,
        outcome: CoreUpdateOutcome,
    ) -> CmdResult<SwitchState> {
        let next_state = match outcome {
            CoreUpdateOutcome::Success => self.finalize_success().await,
            CoreUpdateOutcome::ValidationFailed { message } => {
                self.finalize_validation_failed(message).await
            }
            CoreUpdateOutcome::CoreError { message } => self.finalize_core_error(message).await,
            CoreUpdateOutcome::Timeout => self.finalize_timeout().await,
        };

        if next_state.is_err() || matches!(next_state, Ok(SwitchState::Complete(_))) {
            self.ctx.release_switch_scope();
        }

        next_state
    }

    pub(super) async fn finalize_success(&mut self) -> CmdResult<SwitchState> {
        if self.abort_if_stale_post_core().await? {
            return Ok(SwitchState::Complete(false));
        }

        self.log_successful_update();

        if !self.apply_config_with_timeout().await? {
            logging!(
                warn,
                Type::Cmd,
                "Apply step failed; attempting to restore previous profile before completing"
            );
            restore_previous_profile(self.ctx.previous_profile.clone()).await?;
            return Ok(SwitchState::Complete(false));
        }

        self.refresh_clash_with_timeout().await;
        self.update_tray_tooltip_with_timeout().await;
        self.update_tray_menu_with_timeout().await;
        if let Err(err) = self.persist_profiles_with_timeout().await {
            logging!(
                error,
                Type::Cmd,
                "Persisting new profile configuration failed; attempting to restore previous profile: {}",
                err
            );
            restore_previous_profile(self.ctx.previous_profile.clone()).await?;
            return Err(err);
        }
        self.emit_profile_change_event();
        logging!(
            debug,
            Type::Cmd,
            "Finalize success pipeline completed for sequence {}",
            self.ctx.sequence()
        );

        Ok(SwitchState::Complete(true))
    }

    pub(super) async fn finalize_validation_failed(
        &mut self,
        message: String,
    ) -> CmdResult<SwitchState> {
        logging!(
            warn,
            Type::Cmd,
            "Configuration validation failed: {}",
            message
        );
        Config::profiles().await.discard();
        restore_previous_profile(self.ctx.previous_profile.clone()).await?;
        handle::Handle::notice_message("config_validate::error", message);
        Ok(SwitchState::Complete(false))
    }

    pub(super) async fn finalize_core_error(&mut self, message: String) -> CmdResult<SwitchState> {
        logging!(
            warn,
            Type::Cmd,
            "Error occurred during update: {}, sequence: {}",
            message,
            self.ctx.sequence()
        );
        Config::profiles().await.discard();
        handle::Handle::notice_message("config_validate::boot_error", message);
        Ok(SwitchState::Complete(false))
    }

    pub(super) async fn finalize_timeout(&mut self) -> CmdResult<SwitchState> {
        let timeout_msg =
            "Configuration update timed out (30s); possible validation or core communication stall";
        logging!(
            error,
            Type::Cmd,
            "{}, sequence: {}",
            timeout_msg,
            self.ctx.sequence()
        );
        Config::profiles().await.discard();
        restore_previous_profile(self.ctx.previous_profile.clone()).await?;
        handle::Handle::notice_message("config_validate::timeout", timeout_msg);
        Ok(SwitchState::Complete(false))
    }

    pub(super) async fn abort_if_stale_post_core(&mut self) -> CmdResult<bool> {
        if self.ctx.stale() {
            StaleStage::AfterCoreOperation.log(&self.ctx);
            Config::profiles().await.discard();
            return Ok(true);
        }

        Ok(false)
    }

    pub(super) fn log_successful_update(&self) {
        logging!(
            info,
            Type::Cmd,
            "Configuration update succeeded, sequence: {}",
            self.ctx.sequence()
        );
    }

    pub(super) async fn apply_config_with_timeout(&mut self) -> CmdResult<bool> {
        let apply_result = time::timeout(CONFIG_APPLY_TIMEOUT, async {
            Config::profiles().await.apply()
        })
        .await;

        if apply_result.is_ok() {
            Ok(true)
        } else {
            logging!(
                warn,
                Type::Cmd,
                "Applying profile configuration timed out after {:?}",
                CONFIG_APPLY_TIMEOUT
            );
            Config::profiles().await.discard();
            Ok(false)
        }
    }

    pub(super) async fn refresh_clash_with_timeout(&self) {
        let start = Instant::now();
        let result = time::timeout(REFRESH_TIMEOUT, async {
            handle::Handle::refresh_clash();
        })
        .await;

        let elapsed = start.elapsed().as_millis();
        match result {
            Ok(_) => logging!(
                debug,
                Type::Cmd,
                "refresh_clash_with_timeout completed in {}ms",
                elapsed
            ),
            Err(_) => logging!(
                warn,
                Type::Cmd,
                "Refreshing Clash state timed out after {:?} (elapsed={}ms)",
                REFRESH_TIMEOUT,
                elapsed
            ),
        }
    }

    pub(super) async fn update_tray_tooltip_with_timeout(&self) {
        let start = Instant::now();
        let update_tooltip = time::timeout(TRAY_UPDATE_TIMEOUT, async {
            Tray::global().update_tooltip().await
        })
        .await;
        let elapsed = start.elapsed().as_millis();

        if update_tooltip.is_err() {
            logging!(
                warn,
                Type::Cmd,
                "Updating tray tooltip timed out after {:?} (elapsed={}ms)",
                TRAY_UPDATE_TIMEOUT,
                elapsed
            );
        } else if let Ok(Err(err)) = update_tooltip {
            logging!(
                warn,
                Type::Cmd,
                "Failed to update tray tooltip asynchronously: {}",
                err
            );
        } else {
            logging!(
                debug,
                Type::Cmd,
                "update_tray_tooltip_with_timeout completed in {}ms",
                elapsed
            );
        }
    }

    pub(super) async fn update_tray_menu_with_timeout(&self) {
        let start = Instant::now();
        let update_menu = time::timeout(TRAY_UPDATE_TIMEOUT, async {
            Tray::global().update_menu().await
        })
        .await;
        let elapsed = start.elapsed().as_millis();

        if update_menu.is_err() {
            logging!(
                warn,
                Type::Cmd,
                "Updating tray menu timed out after {:?} (elapsed={}ms)",
                TRAY_UPDATE_TIMEOUT,
                elapsed
            );
        } else if let Ok(Err(err)) = update_menu {
            logging!(
                warn,
                Type::Cmd,
                "Failed to update tray menu asynchronously: {}",
                err
            );
        } else {
            logging!(
                debug,
                Type::Cmd,
                "update_tray_menu_with_timeout completed in {}ms",
                elapsed
            );
        }
    }

    pub(super) async fn persist_profiles_with_timeout(&self) -> CmdResult<()> {
        let start = Instant::now();
        let save_future = AsyncHandler::spawn_blocking(|| {
            futures::executor::block_on(async { profiles_save_file_safe().await })
        });

        let elapsed = start.elapsed().as_millis();
        match time::timeout(SAVE_PROFILES_TIMEOUT, save_future).await {
            Err(_) => {
                let message = format!(
                    "Persisting configuration file timed out after {:?} (elapsed={}ms)",
                    SAVE_PROFILES_TIMEOUT, elapsed
                );
                logging!(warn, Type::Cmd, "{}", message);
                Err(message.into())
            }
            Ok(join_result) => match join_result {
                Err(join_err) => {
                    let message = format!(
                        "Persisting configuration file failed: blocking task join error: {join_err}"
                    );
                    logging!(error, Type::Cmd, "{}", message);
                    Err(message.into())
                }
                Ok(save_result) => match save_result {
                    Ok(()) => {
                        logging!(
                            debug,
                            Type::Cmd,
                            "persist_profiles_with_timeout completed in {}ms",
                            elapsed
                        );
                        Ok(())
                    }
                    Err(err) => {
                        let message = format!("Persisting configuration file failed: {}", err);
                        logging!(error, Type::Cmd, "{}", message);
                        Err(message.into())
                    }
                },
            },
        }
    }

    pub(super) fn emit_profile_change_event(&self) {
        if let Some(current) = self.ctx.new_profile_for_event.clone() {
            logging!(
                info,
                Type::Cmd,
                "Emitting configuration change event to frontend: {}, sequence: {}",
                current,
                self.ctx.sequence()
            );
            handle::Handle::notify_profile_changed(current);
        }
    }
}
@@ -1,14 +1,7 @@
use crate::{
use crate::{APP_HANDLE, constants::timing, singleton};
    APP_HANDLE, config::Config, constants::timing, logging, singleton, utils::logging::Type,
};
use parking_lot::RwLock;
use serde_json::{Value, json};
use smartstring::alias::String;
use std::{
use std::{sync::Arc, thread};
    sync::Arc,
    thread,
    time::{SystemTime, UNIX_EPOCH},
};
use tauri::{AppHandle, Manager, WebviewWindow};
use tauri_plugin_mihomo::{Mihomo, MihomoExt};
use tokio::sync::RwLockReadGuard;
@@ -73,14 +66,10 @@ impl Handle {
            return;
        }

        {
            let system_opt = handle.notification_system.read();
            if let Some(system) = system_opt.as_ref() {
                system.send_event(FrontendEvent::RefreshClash);
            }
        }

        Self::spawn_proxy_snapshot();
    }

    pub fn refresh_verge() {
@@ -96,37 +85,11 @@ impl Handle {
    }

    pub fn notify_profile_changed(profile_id: String) {
        let handle = Self::global();
        Self::send_event(FrontendEvent::ProfileChanged {
        if handle.is_exiting() {
            return;
        }

        let system_opt = handle.notification_system.read();
        if let Some(system) = system_opt.as_ref() {
            system.send_event(FrontendEvent::ProfileChanged {
                current_profile_id: profile_id,
            });
        }
    }

    pub fn notify_profile_switch_finished(
        profile_id: String,
        success: bool,
        notify: bool,
        task_id: u64,
    ) {
        Self::send_event(FrontendEvent::ProfileSwitchFinished {
            profile_id,
            success,
            notify,
            task_id,
        });
    }

    pub fn notify_rust_panic(message: String, location: String) {
        Self::send_event(FrontendEvent::RustPanic { message, location });
    }

    pub fn notify_timer_updated(profile_index: String) {
        Self::send_event(FrontendEvent::TimerUpdated { profile_index });
    }
@@ -137,86 +100,6 @@ impl Handle {

    pub fn notify_profile_update_completed(uid: String) {
        Self::send_event(FrontendEvent::ProfileUpdateCompleted { uid });
        Self::spawn_proxy_snapshot();
    }

    pub fn notify_proxies_updated(payload: Value) {
        Self::send_event(FrontendEvent::ProxiesUpdated { payload });
    }

    pub async fn build_proxy_snapshot() -> Option<Value> {
        let mihomo_guard = Self::mihomo().await;
        let proxies = match mihomo_guard.get_proxies().await {
            Ok(data) => match serde_json::to_value(&data) {
                Ok(value) => value,
                Err(error) => {
                    logging!(
                        warn,
                        Type::Frontend,
                        "Failed to serialize proxies snapshot: {error}"
                    );
                    return None;
                }
            },
            Err(error) => {
                logging!(
                    warn,
                    Type::Frontend,
                    "Failed to fetch proxies for snapshot: {error}"
                );
                return None;
            }
        };

        drop(mihomo_guard);

        let providers_guard = Self::mihomo().await;
        let providers_value = match providers_guard.get_proxy_providers().await {
            Ok(data) => serde_json::to_value(&data).unwrap_or_else(|error| {
                logging!(
                    warn,
                    Type::Frontend,
                    "Failed to serialize proxy providers for snapshot: {error}"
                );
                Value::Null
            }),
            Err(error) => {
                logging!(
                    warn,
                    Type::Frontend,
                    "Failed to fetch proxy providers for snapshot: {error}"
                );
                Value::Null
            }
        };

        drop(providers_guard);

        let profile_guard = Config::profiles().await;
        let profile_id = profile_guard.latest_ref().current.clone();
        drop(profile_guard);

        let emitted_at = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map(|duration| duration.as_millis() as i64)
            .unwrap_or(0);

        let payload = json!({
            "proxies": proxies,
            "providers": providers_value,
            "profileId": profile_id,
            "emittedAt": emitted_at,
        });

        Some(payload)
    }

    fn spawn_proxy_snapshot() {
        tauri::async_runtime::spawn(async {
            if let Some(payload) = Handle::build_proxy_snapshot().await {
                Handle::notify_proxies_updated(payload);
            }
        });
    }

    pub fn notice_message<S: Into<String>, M: Into<String>>(status: S, msg: M) {
@@ -10,10 +10,7 @@ use anyhow::{Result, anyhow};
use smartstring::alias::String;
use std::{path::PathBuf, time::Instant};
use tauri_plugin_mihomo::Error as MihomoError;
use tokio::time::{sleep, timeout};
use tokio::time::sleep;

const RELOAD_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(5);
const MAX_RELOAD_ATTEMPTS: usize = 3;

impl CoreManager {
    pub async fn use_default_config(&self, error_key: &str, error_msg: &str) -> Result<()> {
@@ -42,38 +39,12 @@ impl CoreManager {
            return Ok((true, String::new()));
        }

        let start = Instant::now();

        let _permit = self
            .update_semaphore
            .try_acquire()
            .map_err(|_| anyhow!("Config update already in progress"))?;

        let result = self.perform_config_update().await;
        self.perform_config_update().await

        match &result {
            Ok((success, msg)) => {
                logging!(
                    info,
                    Type::Core,
                    "[ConfigUpdate] Finished (success={}, elapsed={}ms, msg={})",
                    success,
                    start.elapsed().as_millis(),
                    msg
                );
            }
            Err(err) => {
                logging!(
                    error,
                    Type::Core,
                    "[ConfigUpdate] Failed after {}ms: {}",
                    start.elapsed().as_millis(),
                    err
                );
            }
        }

        result
    }

    fn should_update_config(&self) -> Result<bool> {
@@ -91,73 +62,20 @@ impl CoreManager {
    }

    async fn perform_config_update(&self) -> Result<(bool, String)> {
        logging!(debug, Type::Core, "[ConfigUpdate] Pipeline start");
        let total_start = Instant::now();

        let mut stage_timer = Instant::now();
        Config::generate().await?;
        logging!(
            debug,
            Type::Core,
            "[ConfigUpdate] Generation completed in {}ms",
            stage_timer.elapsed().as_millis()
        );

        stage_timer = Instant::now();
        match CoreConfigValidator::global().validate_config().await {
        let validation_result = CoreConfigValidator::global().validate_config().await;
        logging!(
            debug,
            Type::Core,
            "[ConfigUpdate] Validation completed in {}ms",
            stage_timer.elapsed().as_millis()
        );

        match validation_result {
            Ok((true, _)) => {
                stage_timer = Instant::now();
                let run_path = Config::generate_file(ConfigType::Run).await?;
                logging!(
                    debug,
                    Type::Core,
                    "[ConfigUpdate] Runtime file generated in {}ms",
                    stage_timer.elapsed().as_millis()
                );
                stage_timer = Instant::now();
                self.apply_config(run_path).await?;
                logging!(
                    debug,
                    Type::Core,
                    "[ConfigUpdate] Core apply completed in {}ms",
                    stage_timer.elapsed().as_millis()
                );
                logging!(
                    debug,
                    Type::Core,
                    "[ConfigUpdate] Pipeline succeeded in {}ms",
                    total_start.elapsed().as_millis()
                );
                Ok((true, String::new()))
            }
            Ok((false, error_msg)) => {
                Config::runtime().await.discard();
                logging!(
                    warn,
                    Type::Core,
                    "[ConfigUpdate] Validation reported failure after {}ms: {}",
                    total_start.elapsed().as_millis(),
                    error_msg
                );
                Ok((false, error_msg))
            }
            Err(e) => {
                Config::runtime().await.discard();
                logging!(
                    error,
                    Type::Core,
                    "[ConfigUpdate] Validation errored after {}ms: {}",
                    total_start.elapsed().as_millis(),
                    e
                );
                Err(e)
            }
        }
@@ -170,49 +88,17 @@ impl CoreManager {
    pub(super) async fn apply_config(&self, path: PathBuf) -> Result<()> {
        let path_str = dirs::path_to_str(&path)?;

        let reload_start = Instant::now();
        match self.reload_config(path_str).await {
        match self.reload_config_with_retry(path_str).await {
            Ok(_) => {
                Config::runtime().await.apply();
                logging!(info, Type::Core, "Configuration applied");
                logging!(
                    debug,
                    Type::Core,
                    "Configuration applied (reload={}ms)",
                    reload_start.elapsed().as_millis()
                );
                Ok(())
            }
            Err(err) if Self::should_restart_on_error(&err) => {
                self.retry_with_restart(path_str).await
            }
            Err(err) => {
                if Self::should_restart_for_anyhow(&err) {
                    logging!(
                        warn,
                        Type::Core,
                        "Reload failed after {}ms with retryable/timeout error; attempting restart: {}",
                        reload_start.elapsed().as_millis(),
                        err
                    );
                    match self.retry_with_restart(path_str).await {
                        Ok(_) => return Ok(()),
                        Err(retry_err) => {
                            logging!(
                                error,
                                Type::Core,
                                "Reload retry with restart failed: {}",
                                retry_err
                            );
                            Config::runtime().await.discard();
                            return Err(retry_err);
                        }
                    }
                }
                Config::runtime().await.discard();
                logging!(
                    error,
                    Type::Core,
                    "Failed to apply config after {}ms: {}",
                    reload_start.elapsed().as_millis(),
                    err
                );
                Err(anyhow!("Failed to apply config: {}", err))
            }
        }
    }
@@ -227,116 +113,17 @@ impl CoreManager {
        self.restart_core().await?;
        sleep(timing::CONFIG_RELOAD_DELAY).await;

        self.reload_config_with_retry(config_path).await?;
        self.reload_config(config_path).await?;
        Config::runtime().await.apply();
        logging!(info, Type::Core, "Configuration applied after restart");
        Ok(())
    }

    async fn reload_config_with_retry(&self, path: &str) -> Result<()> {
    async fn reload_config(&self, path: &str) -> Result<(), MihomoError> {
        for attempt in 1..=MAX_RELOAD_ATTEMPTS {
        handle::Handle::mihomo()
            let attempt_start = Instant::now();
            let reload_future = self.reload_config_once(path);
            match timeout(RELOAD_TIMEOUT, reload_future).await {
                Ok(Ok(())) => {
                    logging!(
                        debug,
                        Type::Core,
                        "reload_config attempt {}/{} succeeded in {}ms",
                        attempt,
                        MAX_RELOAD_ATTEMPTS,
                        attempt_start.elapsed().as_millis()
                    );
                    return Ok(());
                }
                Ok(Err(err)) => {
                    logging!(
                        warn,
                        Type::Core,
                        "reload_config attempt {}/{} failed after {}ms: {}",
                        attempt,
                        MAX_RELOAD_ATTEMPTS,
                        attempt_start.elapsed().as_millis(),
                        err
                    );
                    if attempt == MAX_RELOAD_ATTEMPTS {
                        return Err(anyhow!(
                            "Failed to reload config after {} attempts: {}",
                            attempt,
                            err
                        ));
                    }
                }
                Err(_) => {
                    logging!(
                        warn,
                        Type::Core,
                        "reload_config attempt {}/{} timed out after {:?}",
                        attempt,
                        MAX_RELOAD_ATTEMPTS,
                        RELOAD_TIMEOUT
                    );
                    if attempt == MAX_RELOAD_ATTEMPTS {
                        return Err(anyhow!(
                            "Config reload timed out after {:?} ({} attempts)",
                            RELOAD_TIMEOUT,
                            MAX_RELOAD_ATTEMPTS
                        ));
                    }
                }
            }
        }

        Err(anyhow!(
            "Config reload retry loop exited unexpectedly ({} attempts)",
            MAX_RELOAD_ATTEMPTS
        ))
    }

    async fn reload_config_once(&self, path: &str) -> Result<(), MihomoError> {
        logging!(
            info,
            Type::Core,
            "[ConfigUpdate] reload_config_once begin path={} ",
            path
        );
        let start = Instant::now();
        let result = handle::Handle::mihomo()
            .await
            .reload_config(true, path)
            .await;
            .await
        let elapsed = start.elapsed().as_millis();
        match result {
            Ok(()) => {
                logging!(
                    info,
                    Type::Core,
                    "[ConfigUpdate] reload_config_once succeeded (elapsed={}ms)",
                    elapsed
                );
                Ok(())
            }
            Err(err) => {
                logging!(
                    warn,
                    Type::Core,
                    "[ConfigUpdate] reload_config_once failed (elapsed={}ms, err={})",
                    elapsed,
                    err
                );
                Err(err)
            }
        }
    }

    fn should_restart_for_anyhow(err: &anyhow::Error) -> bool {
        if let Some(mihomo_err) = err.downcast_ref::<MihomoError>() {
            return Self::should_restart_on_error(mihomo_err);
        }
        let msg = err.to_string();
        msg.contains("timed out")
            || msg.contains("reload")
            || msg.contains("Failed to apply config")
    }

    fn should_restart_on_error(err: &MihomoError) -> bool {
@@ -1,71 +1,38 @@
|
|||||||
use crate::{constants::retry, logging, utils::logging::Type};
|
use crate::{
|
||||||
use once_cell::sync::Lazy;
|
constants::{retry, timing},
|
||||||
|
logging,
|
||||||
|
utils::logging::Type,
|
||||||
|
};
|
||||||
use parking_lot::RwLock;
|
use parking_lot::RwLock;
|
||||||
use smartstring::alias::String;
|
use smartstring::alias::String;
|
||||||
use std::{
|
use std::{
|
||||||
sync::{
|
sync::{
|
||||||
Arc,
|
atomic::{AtomicU64, Ordering},
|
||||||
atomic::{AtomicBool, AtomicU64, Ordering},
|
|
||||||
mpsc,
|
mpsc,
|
||||||
},
|
},
|
||||||
thread,
|
thread,
|
||||||
time::Instant,
|
time::Instant,
|
||||||
};
|
};
|
||||||
use tauri::Emitter;
|
use tauri::{Emitter, WebviewWindow};
|
||||||
use tauri::async_runtime;
|
|
||||||
|
|
||||||
#[allow(dead_code)] // Temporarily suppress warnings while diagnostics disable certain events
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub enum FrontendEvent {
|
pub enum FrontendEvent {
|
||||||
RefreshClash,
|
RefreshClash,
|
||||||
RefreshVerge,
|
RefreshVerge,
|
||||||
RefreshProxy,
|
NoticeMessage { status: String, message: String },
|
||||||
ProxiesUpdated {
|
ProfileChanged { current_profile_id: String },
|
||||||
payload: serde_json::Value,
|
TimerUpdated { profile_index: String },
|
||||||
},
|
ProfileUpdateStarted { uid: String },
|
||||||
NoticeMessage {
|
ProfileUpdateCompleted { uid: String },
|
||||||
status: String,
|
|
||||||
message: String,
|
|
||||||
},
|
|
||||||
ProfileChanged {
|
|
||||||
current_profile_id: String,
|
|
||||||
},
|
|
||||||
ProfileSwitchFinished {
|
|
||||||
profile_id: String,
|
|
||||||
success: bool,
|
|
||||||
notify: bool,
|
|
||||||
task_id: u64,
|
|
||||||
},
|
|
||||||
TimerUpdated {
|
|
||||||
profile_index: String,
|
|
||||||
},
|
|
||||||
ProfileUpdateStarted {
|
|
||||||
uid: String,
|
|
||||||
},
|
|
||||||
ProfileUpdateCompleted {
|
|
||||||
uid: String,
|
|
||||||
},
|
|
||||||
RustPanic {
|
|
||||||
message: String,
|
|
||||||
location: String,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static EMIT_SERIALIZER: Lazy<tokio::sync::Mutex<()>> = Lazy::new(|| tokio::sync::Mutex::new(()));
|
|
||||||
|
|
||||||
#[derive(Debug, Default)]
|
#[derive(Debug, Default)]
|
||||||
struct EventStats {
|
struct EventStats {
|
||||||
|
total_sent: AtomicU64,
|
||||||
total_errors: AtomicU64,
|
total_errors: AtomicU64,
|
||||||
last_error_time: RwLock<Option<Instant>>,
|
last_error_time: RwLock<Option<Instant>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Default)]
|
|
||||||
#[allow(dead_code)]
|
|
||||||
struct BufferedProxies {
|
|
||||||
pending: parking_lot::Mutex<Option<serde_json::Value>>,
|
|
||||||
in_flight: AtomicBool,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
pub struct ErrorMessage {
|
pub struct ErrorMessage {
|
||||||
pub status: String,
|
pub status: String,
|
||||||
@@ -80,7 +47,6 @@ pub struct NotificationSystem {
|
|||||||
pub(super) is_running: bool,
|
pub(super) is_running: bool,
|
||||||
stats: EventStats,
|
stats: EventStats,
|
||||||
emergency_mode: RwLock<bool>,
|
emergency_mode: RwLock<bool>,
|
||||||
proxies_buffer: Arc<BufferedProxies>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for NotificationSystem {
|
impl Default for NotificationSystem {
|
||||||
@@ -97,7 +63,6 @@ impl NotificationSystem {
|
|||||||
is_running: false,
|
is_running: false,
|
||||||
stats: EventStats::default(),
|
stats: EventStats::default(),
|
||||||
emergency_mode: RwLock::new(false),
|
emergency_mode: RwLock::new(false),
|
||||||
proxies_buffer: Arc::new(BufferedProxies::default()),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -152,78 +117,13 @@ impl NotificationSystem {
|
|||||||
return;
|
return;
|
||||||
};
|
};
|
||||||
|
|
||||||
let event_label = Self::describe_event(&event);
|
if system.should_skip_event(&event) {
|
||||||
|
return;
|
||||||
match event {
|
|
||||||
FrontendEvent::ProxiesUpdated { payload } => {
|
|
||||||
logging!(
|
|
||||||
debug,
|
|
||||||
Type::Frontend,
|
|
||||||
"Queueing proxies-updated event for buffered emit: {}",
|
|
||||||
event_label
|
|
||||||
);
|
|
||||||
system.enqueue_proxies_updated(payload);
|
|
||||||
}
|
|
||||||
other => {
|
|
||||||
logging!(
|
|
||||||
debug,
|
|
||||||
Type::Frontend,
|
|
||||||
"Queueing event for async emit: {}",
|
|
||||||
event_label
|
|
||||||
);
|
|
||||||
|
|
||||||
let (event_name, payload_result) = system.serialize_event(other);
|
|
||||||
let payload = match payload_result {
|
|
||||||
Ok(value) => value,
|
|
||||||
Err(err) => {
|
|
||||||
logging!(
|
|
||||||
warn,
|
|
||||||
Type::Frontend,
|
|
||||||
"Failed to serialize event {}: {}",
|
|
||||||
event_name,
|
|
||||||
err
|
|
||||||
);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
logging!(
|
|
||||||
debug,
|
|
||||||
Type::Frontend,
|
|
||||||
"Dispatching async emit: {}",
|
|
||||||
event_name
|
|
||||||
);
|
|
||||||
let _ = Self::emit_via_app(event_name, payload);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn enqueue_proxies_updated(&self, payload: serde_json::Value) {
|
|
||||||
let replaced = {
|
|
||||||
let mut slot = self.proxies_buffer.pending.lock();
|
|
||||||
let had_pending = slot.is_some();
|
|
||||||
*slot = Some(payload);
|
|
||||||
had_pending
|
|
||||||
};
|
|
||||||
|
|
||||||
if replaced {
|
|
||||||
logging!(
|
|
||||||
debug,
|
|
||||||
Type::Frontend,
|
|
||||||
"Replaced pending proxies-updated payload with latest snapshot"
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if self
|
if let Some(window) = super::handle::Handle::get_window() {
|
||||||
.proxies_buffer
|
system.emit_to_window(&window, event);
|
||||||
.in_flight
|
thread::sleep(timing::EVENT_EMIT_DELAY);
|
||||||
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
|
|
||||||
.is_ok()
|
|
||||||
{
|
|
||||||
let buffer = Arc::clone(&self.proxies_buffer);
|
|
||||||
async_runtime::spawn(async move {
|
|
||||||
Self::flush_proxies(buffer).await;
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -235,95 +135,25 @@ impl NotificationSystem {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn emit_via_app(event_name: &'static str, payload: serde_json::Value) -> Result<(), String> {
|
fn emit_to_window(&self, window: &WebviewWindow, event: FrontendEvent) {
|
||||||
let app_handle = super::handle::Handle::app_handle().clone();
|
let (event_name, payload) = self.serialize_event(event);
|
||||||
let event_name = event_name.to_string();
|
|
||||||
async_runtime::spawn(async move {
|
let Ok(payload) = payload else {
|
||||||
if let Err(err) = app_handle.emit_to("main", event_name.as_str(), payload) {
|
self.stats.total_errors.fetch_add(1, Ordering::Relaxed);
|
||||||
logging!(
|
return;
|
||||||
warn,
|
};
|
||||||
Type::Frontend,
|
|
||||||
"emit_to failed for {}: {}",
|
match window.emit(event_name, payload) {
|
||||||
event_name,
|
Ok(_) => {
|
||||||
err
|
self.stats.total_sent.fetch_add(1, Ordering::Relaxed);
|
||||||
);
|
|
||||||
}
|
}
|
||||||
});
|
Err(e) => {
|
||||||
Ok(())
|
logging!(warn, Type::Frontend, "Event emit failed: {}", e);
|
||||||
}
|
self.handle_emit_error();
|
||||||
|
|
||||||
async fn flush_proxies(buffer: Arc<BufferedProxies>) {
|
|
||||||
const EVENT_NAME: &str = "proxies-updated";
|
|
||||||
|
|
||||||
loop {
|
|
||||||
let payload_opt = {
|
|
||||||
let mut guard = buffer.pending.lock();
|
|
||||||
guard.take()
|
|
||||||
};
|
|
||||||
|
|
||||||
let Some(payload) = payload_opt else {
|
|
||||||
buffer.in_flight.store(false, Ordering::Release);
|
|
||||||
|
|
||||||
if buffer.pending.lock().is_some()
|
|
||||||
&& buffer
|
|
||||||
.in_flight
|
|
||||||
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
|
|
||||||
.is_ok()
|
|
||||||
{
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
break;
|
|
||||||
};
|
|
||||||
|
|
||||||
logging!(debug, Type::Frontend, "Dispatching buffered proxies emit");
|
|
||||||
let _guard = EMIT_SERIALIZER.lock().await;
|
|
||||||
if let Err(err) = Self::emit_via_app(EVENT_NAME, payload) {
|
|
||||||
logging!(
|
|
||||||
warn,
|
|
||||||
Type::Frontend,
|
|
||||||
"Buffered proxies emit failed: {}",
|
|
||||||
err
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn describe_event(event: &FrontendEvent) -> String {
|
|
||||||
match event {
|
|
||||||
FrontendEvent::RefreshClash => "RefreshClash".into(),
|
|
||||||
FrontendEvent::RefreshVerge => "RefreshVerge".into(),
|
|
||||||
FrontendEvent::RefreshProxy => "RefreshProxy".into(),
|
|
||||||
FrontendEvent::ProxiesUpdated { .. } => "ProxiesUpdated".into(),
|
|
||||||
FrontendEvent::NoticeMessage { status, .. } => {
|
|
||||||
format!("NoticeMessage({})", status).into()
|
|
||||||
}
|
|
||||||
FrontendEvent::ProfileChanged { current_profile_id } => {
|
|
||||||
format!("ProfileChanged({})", current_profile_id).into()
|
|
||||||
}
|
|
||||||
FrontendEvent::ProfileSwitchFinished {
|
|
||||||
profile_id,
|
|
||||||
task_id,
|
|
||||||
..
|
|
||||||
} => format!(
|
|
||||||
"ProfileSwitchFinished(profile={}, task={})",
|
|
||||||
profile_id, task_id
|
|
||||||
)
|
|
||||||
.into(),
|
|
||||||
FrontendEvent::TimerUpdated { profile_index } => {
|
|
||||||
format!("TimerUpdated({})", profile_index).into()
|
|
||||||
}
|
|
||||||
FrontendEvent::ProfileUpdateStarted { uid } => {
|
|
||||||
format!("ProfileUpdateStarted({})", uid).into()
|
|
||||||
}
|
|
||||||
FrontendEvent::ProfileUpdateCompleted { uid } => {
|
|
||||||
format!("ProfileUpdateCompleted({})", uid).into()
|
|
||||||
}
|
|
||||||
FrontendEvent::RustPanic { message, .. } => format!("RustPanic({})", message).into(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(dead_code)]
|
|
||||||
fn serialize_event(
|
fn serialize_event(
|
||||||
&self,
|
&self,
|
||||||
event: FrontendEvent,
|
event: FrontendEvent,
|
||||||
@@ -337,25 +167,9 @@ impl NotificationSystem {
|
|||||||
"verge://notice-message",
|
"verge://notice-message",
|
||||||
serde_json::to_value((status, message)),
|
serde_json::to_value((status, message)),
|
||||||
),
|
),
|
||||||
FrontendEvent::RefreshProxy => ("verge://refresh-proxy-config", Ok(json!("yes"))),
|
|
||||||
FrontendEvent::ProxiesUpdated { payload } => ("proxies-updated", Ok(payload)),
|
|
||||||
FrontendEvent::ProfileChanged { current_profile_id } => {
|
FrontendEvent::ProfileChanged { current_profile_id } => {
|
||||||
("profile-changed", Ok(json!(current_profile_id)))
|
("profile-changed", Ok(json!(current_profile_id)))
|
||||||
}
|
}
|
||||||
FrontendEvent::ProfileSwitchFinished {
|
|
||||||
profile_id,
|
|
||||||
success,
|
|
||||||
notify,
|
|
||||||
task_id,
|
|
||||||
} => (
|
|
||||||
"profile-switch-finished",
|
|
||||||
Ok(json!({
|
|
||||||
"profileId": profile_id,
|
|
||||||
"success": success,
|
|
||||||
"notify": notify,
|
|
||||||
"taskId": task_id
|
|
||||||
})),
|
|
||||||
),
|
|
||||||
FrontendEvent::TimerUpdated { profile_index } => {
|
FrontendEvent::TimerUpdated { profile_index } => {
|
||||||
("verge://timer-updated", Ok(json!(profile_index)))
|
("verge://timer-updated", Ok(json!(profile_index)))
|
||||||
}
|
}
|
||||||
@@ -365,10 +179,6 @@ impl NotificationSystem {
|
|||||||
FrontendEvent::ProfileUpdateCompleted { uid } => {
|
FrontendEvent::ProfileUpdateCompleted { uid } => {
|
||||||
("profile-update-completed", Ok(json!({ "uid": uid })))
|
("profile-update-completed", Ok(json!({ "uid": uid })))
|
||||||
}
|
}
|
||||||
FrontendEvent::RustPanic { message, location } => (
|
|
||||||
"rust-panic",
|
|
||||||
Ok(json!({ "message": message, "location": location })),
|
|
||||||
),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -394,19 +204,10 @@ impl NotificationSystem {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if let Some(sender) = &self.sender {
|
if let Some(sender) = &self.sender {
|
||||||
if sender.send(event).is_err() {
|
sender.send(event).is_ok()
|
||||||
logging!(
|
} else {
|
||||||
warn,
|
false
|
||||||
Type::Frontend,
|
|
||||||
"Failed to send event to worker thread"
|
|
||||||
);
|
|
||||||
self.handle_emit_error();
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn shutdown(&mut self) {
|
pub fn shutdown(&mut self) {
|
||||||
|
|||||||
@@ -194,7 +194,6 @@ mod app_init {
            cmd::get_profiles,
            cmd::enhance_profiles,
            cmd::patch_profiles_config,
            cmd::switch_profile,
            cmd::view_profile,
            cmd::patch_profile,
            cmd::create_profile,
@@ -205,8 +204,6 @@ mod app_init {
            cmd::read_profile_file,
            cmd::save_profile_file,
            cmd::get_next_update_time,
            cmd::get_profile_switch_status,
            cmd::get_profile_switch_events,
            cmd::script_validate_notice,
            cmd::validate_script_file,
            cmd::create_local_backup,
@@ -223,7 +220,6 @@ mod app_init {
            cmd::get_system_info,
            cmd::get_unlock_items,
            cmd::check_media_unlock,
            cmd::frontend_log,
        ]
    }
}
@@ -362,28 +358,6 @@ pub fn run() {
        }
    }

    std::panic::set_hook(Box::new(|info| {
        let payload = info
            .payload()
            .downcast_ref::<&'static str>()
            .map(|s| (*s).to_string())
            .or_else(|| info.payload().downcast_ref::<String>().cloned())
            .unwrap_or_else(|| "Unknown panic".to_string());
        let location = info
            .location()
            .map(|loc| format!("{}:{}", loc.file(), loc.line()))
            .unwrap_or_else(|| "unknown location".to_string());

        logging!(
            error,
            Type::System,
            "Rust panic captured: {} @ {}",
            payload,
            location
        );
        handle::Handle::notify_rust_panic(payload.into(), location.into());
    }));

    #[cfg(feature = "clippy")]
    let context = tauri::test::mock_context(tauri::test::noop_assets());
    #[cfg(feature = "clippy")]
@@ -68,13 +68,6 @@ impl<T: Clone + ToOwned> Draft<Box<T>> {
        })
    }

    /// Try to obtain the latest read-only view; returns `None` if the write lock is currently held
    pub fn try_latest_ref(&self) -> Option<MappedRwLockReadGuard<'_, Box<T>>> {
        self.inner
            .try_read()
            .map(|guard| RwLockReadGuard::map(guard, |inner| inner.1.as_ref().unwrap_or(&inner.0)))
    }

    /// Commit the draft, returning the previous official data
    pub fn apply(&self) -> Option<Box<T>> {
        let mut inner = self.inner.write();
@@ -100,12 +100,10 @@ export const CurrentProxyCard = () => {
|
|||||||
const { t } = useTranslation();
|
const { t } = useTranslation();
|
||||||
const navigate = useNavigate();
|
const navigate = useNavigate();
|
||||||
const theme = useTheme();
|
const theme = useTheme();
|
||||||
const { proxies, proxyHydration, clashConfig, refreshProxy, rules } =
|
const { proxies, clashConfig, refreshProxy, rules } = useAppData();
|
||||||
useAppData();
|
|
||||||
const { verge } = useVerge();
|
const { verge } = useVerge();
|
||||||
const { current: currentProfile } = useProfiles();
|
const { current: currentProfile } = useProfiles();
|
||||||
const autoDelayEnabled = verge?.enable_auto_delay_detection ?? false;
|
const autoDelayEnabled = verge?.enable_auto_delay_detection ?? false;
|
||||||
const isLiveHydration = proxyHydration === "live";
|
|
||||||
const currentProfileId = currentProfile?.uid || null;
|
const currentProfileId = currentProfile?.uid || null;
|
||||||
|
|
||||||
const getProfileStorageKey = useCallback(
|
const getProfileStorageKey = useCallback(
|
||||||
@@ -717,6 +715,7 @@ export const CurrentProxyCard = () => {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
refreshProxy();
|
refreshProxy();
|
||||||
if (sortType === 1) {
|
if (sortType === 1) {
|
||||||
setDelaySortRefresh((prev) => prev + 1);
|
setDelaySortRefresh((prev) => prev + 1);
|
||||||
@@ -841,24 +840,13 @@ export const CurrentProxyCard = () => {
|
|||||||
iconColor={currentProxy ? "primary" : undefined}
|
iconColor={currentProxy ? "primary" : undefined}
|
||||||
action={
|
action={
|
||||||
<Box sx={{ display: "flex", alignItems: "center", gap: 1 }}>
|
<Box sx={{ display: "flex", alignItems: "center", gap: 1 }}>
|
||||||
{!isLiveHydration && (
|
|
||||||
<Chip
|
|
||||||
size="small"
|
|
||||||
color={proxyHydration === "snapshot" ? "warning" : "info"}
|
|
||||||
label={
|
|
||||||
proxyHydration === "snapshot"
|
|
||||||
? t("Snapshot data")
|
|
||||||
: t("Syncing...")
|
|
||||||
}
|
|
||||||
/>
|
|
||||||
)}
|
|
||||||
<Tooltip title={t("Delay check")}>
|
<Tooltip title={t("Delay check")}>
|
||||||
<span>
|
<span>
|
||||||
<IconButton
|
<IconButton
|
||||||
size="small"
|
size="small"
|
||||||
color="inherit"
|
color="inherit"
|
||||||
onClick={handleCheckDelay}
|
onClick={handleCheckDelay}
|
||||||
disabled={isDirectMode || !isLiveHydration}
|
disabled={isDirectMode}
|
||||||
>
|
>
|
||||||
<NetworkCheckRounded />
|
<NetworkCheckRounded />
|
||||||
</IconButton>
|
</IconButton>
|
||||||
@@ -972,7 +960,7 @@ export const CurrentProxyCard = () => {
|
|||||||
value={state.selection.group}
|
value={state.selection.group}
|
||||||
onChange={handleGroupChange}
|
onChange={handleGroupChange}
|
||||||
label={t("Group")}
|
label={t("Group")}
|
||||||
disabled={isGlobalMode || isDirectMode || !isLiveHydration}
|
disabled={isGlobalMode || isDirectMode}
|
||||||
>
|
>
|
||||||
{state.proxyData.groups.map((group) => (
|
{state.proxyData.groups.map((group) => (
|
||||||
<MenuItem key={group.name} value={group.name}>
|
<MenuItem key={group.name} value={group.name}>
|
||||||
@@ -990,7 +978,7 @@ export const CurrentProxyCard = () => {
|
|||||||
value={state.selection.proxy}
|
value={state.selection.proxy}
|
||||||
onChange={handleProxyChange}
|
onChange={handleProxyChange}
|
||||||
label={t("Proxy")}
|
label={t("Proxy")}
|
||||||
disabled={isDirectMode || !isLiveHydration}
|
disabled={isDirectMode}
|
||||||
renderValue={renderProxyValue}
|
renderValue={renderProxyValue}
|
||||||
MenuProps={{
|
MenuProps={{
|
||||||
PaperProps: {
|
PaperProps: {
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
import { RefreshRounded, StorageOutlined } from "@mui/icons-material";
|
import { RefreshRounded, StorageOutlined } from "@mui/icons-material";
|
||||||
import {
|
import {
|
||||||
Box,
|
Box,
|
||||||
Chip,
|
|
||||||
Button,
|
Button,
|
||||||
Dialog,
|
Dialog,
|
||||||
DialogActions,
|
DialogActions,
|
||||||
@@ -19,7 +18,7 @@ import {
|
|||||||
} from "@mui/material";
|
} from "@mui/material";
|
||||||
import { useLockFn } from "ahooks";
|
import { useLockFn } from "ahooks";
|
||||||
import dayjs from "dayjs";
|
import dayjs from "dayjs";
|
||||||
import { useMemo, useState } from "react";
|
import { useState } from "react";
|
||||||
import { useTranslation } from "react-i18next";
|
import { useTranslation } from "react-i18next";
|
||||||
import { updateProxyProvider } from "tauri-plugin-mihomo-api";
|
import { updateProxyProvider } from "tauri-plugin-mihomo-api";
|
||||||
|
|
||||||
@@ -49,61 +48,29 @@ const parseExpire = (expire?: number) => {
|
|||||||
export const ProviderButton = () => {
|
export const ProviderButton = () => {
|
||||||
const { t } = useTranslation();
|
const { t } = useTranslation();
|
||||||
const [open, setOpen] = useState(false);
|
const [open, setOpen] = useState(false);
|
||||||
const {
|
const { proxyProviders, refreshProxy, refreshProxyProviders } = useAppData();
|
||||||
proxyProviders,
|
|
||||||
proxyHydration,
|
|
||||||
refreshProxy,
|
|
||||||
refreshProxyProviders,
|
|
||||||
} = useAppData();
|
|
||||||
|
|
||||||
const isHydrating = proxyHydration !== "live";
|
|
||||||
const [updating, setUpdating] = useState<Record<string, boolean>>({});
|
const [updating, setUpdating] = useState<Record<string, boolean>>({});
|
||||||
|
|
||||||
// 检查是否有提供者
|
// 检查是否有提供者
|
||||||
const hasProviders = Object.keys(proxyProviders || {}).length > 0;
|
const hasProviders = Object.keys(proxyProviders || {}).length > 0;
|
||||||
|
|
||||||
// Hydration hint badge keeps users aware of sync state
|
|
||||||
const hydrationChip = useMemo(() => {
|
|
||||||
if (proxyHydration === "live") return null;
|
|
||||||
|
|
||||||
return (
|
|
||||||
<Chip
|
|
||||||
size="small"
|
|
||||||
color={proxyHydration === "snapshot" ? "warning" : "info"}
|
|
||||||
label={
|
|
||||||
proxyHydration === "snapshot"
|
|
||||||
? t("Snapshot data")
|
|
||||||
: t("Proxy data is syncing, please wait")
|
|
||||||
}
|
|
||||||
sx={{ fontWeight: 500 }}
|
|
||||||
/>
|
|
||||||
);
|
|
||||||
}, [proxyHydration, t]);
|
|
||||||
|
|
||||||
// 更新单个代理提供者
|
// 更新单个代理提供者
|
||||||
const updateProvider = useLockFn(async (name: string) => {
|
const updateProvider = useLockFn(async (name: string) => {
|
||||||
if (isHydrating) {
|
|
||||||
showNotice("info", t("Proxy data is syncing, please wait"));
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
try {
|
||||||
// 设置更新状态
|
// 设置更新状态
|
||||||
setUpdating((prev) => ({ ...prev, [name]: true }));
|
setUpdating((prev) => ({ ...prev, [name]: true }));
|
||||||
|
|
||||||
await updateProxyProvider(name);
|
await updateProxyProvider(name);
|
||||||
await refreshProxyProviders();
|
|
||||||
|
// 刷新数据
|
||||||
await refreshProxy();
|
await refreshProxy();
|
||||||
showNotice(
|
await refreshProxyProviders();
|
||||||
"success",
|
|
||||||
t("Provider {{name}} updated successfully", { name }),
|
showNotice("success", `${name} 更新成功`);
|
||||||
);
|
|
||||||
} catch (err: any) {
|
} catch (err: any) {
|
||||||
showNotice(
|
showNotice(
|
||||||
"error",
|
"error",
|
||||||
t("Provider {{name}} update failed: {{message}}", {
|
`${name} 更新失败: ${err?.message || err.toString()}`,
|
||||||
name,
|
|
||||||
message: err?.message || err.toString(),
|
|
||||||
}),
|
|
||||||
);
|
);
|
||||||
} finally {
|
} finally {
|
||||||
// 清除更新状态
|
// 清除更新状态
|
||||||
@@ -113,16 +80,11 @@ export const ProviderButton = () => {
|
|||||||
|
|
||||||
// 更新所有代理提供者
|
// 更新所有代理提供者
|
||||||
const updateAllProviders = useLockFn(async () => {
|
const updateAllProviders = useLockFn(async () => {
|
||||||
if (isHydrating) {
|
|
||||||
showNotice("info", t("Proxy data is syncing, please wait"));
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
try {
|
||||||
// 获取所有provider的名称
|
// 获取所有provider的名称
|
||||||
const allProviders = Object.keys(proxyProviders || {});
|
const allProviders = Object.keys(proxyProviders || {});
|
||||||
if (allProviders.length === 0) {
|
if (allProviders.length === 0) {
|
||||||
showNotice("info", t("No providers to update"));
|
showNotice("info", "没有可更新的代理提供者");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -148,67 +110,54 @@ export const ProviderButton = () => {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
await refreshProxyProviders();
|
// 刷新数据
|
||||||
await refreshProxy();
|
await refreshProxy();
|
||||||
showNotice("success", t("All providers updated successfully"));
|
await refreshProxyProviders();
|
||||||
|
|
||||||
|
showNotice("success", "全部代理提供者更新成功");
|
||||||
} catch (err: any) {
|
} catch (err: any) {
|
||||||
showNotice(
|
showNotice("error", `更新失败: ${err?.message || err.toString()}`);
|
||||||
"error",
|
|
||||||
t("Failed to update providers: {{message}}", {
|
|
||||||
message: err?.message || err.toString(),
|
|
||||||
}),
|
|
||||||
);
|
|
||||||
} finally {
|
} finally {
|
||||||
// 清除所有更新状态
|
// 清除所有更新状态
|
||||||
setUpdating({});
|
setUpdating({});
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
const handleClose = () => setOpen(false);
|
const handleClose = () => {
|
||||||
|
setOpen(false);
|
||||||
|
};
|
||||||
|
|
||||||
if (!hasProviders) return null;
|
if (!hasProviders) return null;
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<>
|
<>
|
||||||
<Box sx={{ display: "flex", alignItems: "center", gap: 1, mr: 1 }}>
|
<Button
|
||||||
<Button
|
variant="outlined"
|
||||||
variant="outlined"
|
size="small"
|
||||||
size="small"
|
startIcon={<StorageOutlined />}
|
||||||
startIcon={<StorageOutlined />}
|
onClick={() => setOpen(true)}
|
||||||
onClick={() => setOpen(true)}
|
sx={{ mr: 1 }}
|
||||||
disabled={isHydrating}
|
>
|
||||||
title={
|
{t("Proxy Provider")}
|
||||||
isHydrating ? t("Proxy data is syncing, please wait") : undefined
|
</Button>
|
||||||
}
|
|
||||||
>
|
|
||||||
{t("Proxy Provider")}
|
|
||||||
</Button>
|
|
||||||
{hydrationChip}
|
|
||||||
</Box>
|
|
||||||
|
|
||||||
<Dialog open={open} onClose={handleClose} maxWidth="sm" fullWidth>
|
<Dialog open={open} onClose={handleClose} maxWidth="sm" fullWidth>
|
||||||
<DialogTitle>
|
<DialogTitle>
|
||||||
<Box
|
<Box
|
||||||
sx={{
|
display="flex"
|
||||||
display: "flex",
|
justifyContent="space-between"
|
||||||
justifyContent: "space-between",
|
alignItems="center"
|
||||||
alignItems: "center",
|
|
||||||
}}
|
|
||||||
>
|
>
|
||||||
<Typography variant="h6">{t("Proxy Provider")}</Typography>
|
<Typography variant="h6">{t("Proxy Provider")}</Typography>
|
||||||
<Button
|
<Box>
|
||||||
variant="contained"
|
<Button
|
||||||
size="small"
|
variant="contained"
|
||||||
onClick={updateAllProviders}
|
size="small"
|
||||||
disabled={isHydrating}
|
onClick={updateAllProviders}
|
||||||
title={
|
>
|
||||||
isHydrating
|
{t("Update All")}
|
||||||
? t("Proxy data is syncing, please wait")
|
</Button>
|
||||||
: undefined
|
</Box>
|
||||||
}
|
|
||||||
>
|
|
||||||
{t("Update All")}
|
|
||||||
</Button>
|
|
||||||
</Box>
|
</Box>
|
||||||
</DialogTitle>
|
</DialogTitle>
|
||||||
|
|
||||||
@@ -217,63 +166,54 @@ export const ProviderButton = () => {
|
|||||||
{Object.entries(proxyProviders || {})
|
{Object.entries(proxyProviders || {})
|
||||||
.sort()
|
.sort()
|
||||||
.map(([key, item]) => {
|
.map(([key, item]) => {
|
||||||
if (!item) return null;
|
const provider = item;
|
||||||
|
const time = dayjs(provider.updatedAt);
|
||||||
const time = dayjs(item.updatedAt);
|
|
||||||
const isUpdating = updating[key];
|
const isUpdating = updating[key];
|
||||||
const sub = item.subscriptionInfo;
|
|
||||||
const hasSubInfo = Boolean(sub);
|
// 订阅信息
|
||||||
const upload = sub?.Upload ?? 0;
|
const sub = provider.subscriptionInfo;
|
||||||
const download = sub?.Download ?? 0;
|
const hasSubInfo = !!sub;
|
||||||
const total = sub?.Total ?? 0;
|
const upload = sub?.Upload || 0;
|
||||||
const expire = sub?.Expire ?? 0;
|
const download = sub?.Download || 0;
|
||||||
|
const total = sub?.Total || 0;
|
||||||
|
const expire = sub?.Expire || 0;
|
||||||
|
|
||||||
|
// 流量使用进度
|
||||||
const progress =
|
const progress =
|
||||||
total > 0
|
total > 0
|
||||||
? Math.min(
|
? Math.min(
|
||||||
|
Math.round(((download + upload) * 100) / total) + 1,
|
||||||
100,
|
100,
|
||||||
Math.max(0, ((upload + download) / total) * 100),
|
|
||||||
)
|
)
|
||||||
: 0;
|
: 0;
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<ListItem
|
<ListItem
|
||||||
key={key}
|
key={key}
|
||||||
secondaryAction={
|
sx={[
|
||||||
<Box
|
{
|
||||||
sx={{
|
p: 0,
|
||||||
width: 40,
|
mb: "8px",
|
||||||
display: "flex",
|
borderRadius: 2,
|
||||||
justifyContent: "center",
|
overflow: "hidden",
|
||||||
alignItems: "center",
|
transition: "all 0.2s",
|
||||||
}}
|
},
|
||||||
>
|
({ palette: { mode, primary } }) => {
|
||||||
<IconButton
|
const bgcolor =
|
||||||
size="small"
|
mode === "light" ? "#ffffff" : "#24252f";
|
||||||
color="primary"
|
const hoverColor =
|
||||||
onClick={() => updateProvider(key)}
|
mode === "light"
|
||||||
disabled={isUpdating || isHydrating}
|
? alpha(primary.main, 0.1)
|
||||||
sx={{
|
: alpha(primary.main, 0.2);
|
||||||
animation: isUpdating
|
|
||||||
? "spin 1s linear infinite"
|
return {
|
||||||
: "none",
|
backgroundColor: bgcolor,
|
||||||
"@keyframes spin": {
|
"&:hover": {
|
||||||
"0%": { transform: "rotate(0deg)" },
|
backgroundColor: hoverColor,
|
||||||
"100%": { transform: "rotate(360deg)" },
|
},
|
||||||
},
|
};
|
||||||
}}
|
},
|
||||||
title={t("Update Provider") as string}
|
]}
|
||||||
>
|
|
||||||
<RefreshRounded />
|
|
||||||
</IconButton>
|
|
||||||
</Box>
|
|
||||||
}
|
|
||||||
sx={{
|
|
||||||
mb: 1,
|
|
||||||
borderRadius: 1,
|
|
||||||
border: "1px solid",
|
|
||||||
borderColor: alpha("#ccc", 0.4),
|
|
||||||
backgroundColor: alpha("#fff", 0.02),
|
|
||||||
}}
|
|
||||||
>
|
>
|
||||||
<ListItemText
|
<ListItemText
|
||||||
sx={{ px: 2, py: 1 }}
|
sx={{ px: 2, py: 1 }}
|
||||||
@@ -283,7 +223,6 @@ export const ProviderButton = () => {
|
|||||||
display: "flex",
|
display: "flex",
|
||||||
justifyContent: "space-between",
|
justifyContent: "space-between",
|
||||||
alignItems: "center",
|
alignItems: "center",
|
||||||
gap: 1,
|
|
||||||
}}
|
}}
|
||||||
>
|
>
|
||||||
<Typography
|
<Typography
|
||||||
@@ -293,12 +232,12 @@ export const ProviderButton = () => {
|
|||||||
title={key}
|
title={key}
|
||||||
sx={{ display: "flex", alignItems: "center" }}
|
sx={{ display: "flex", alignItems: "center" }}
|
||||||
>
|
>
|
||||||
<span style={{ marginRight: 8 }}>{key}</span>
|
<span style={{ marginRight: "8px" }}>{key}</span>
|
||||||
<TypeBox component="span">
|
<TypeBox component="span">
|
||||||
{item.proxies.length}
|
{provider.proxies.length}
|
||||||
</TypeBox>
|
</TypeBox>
|
||||||
<TypeBox component="span">
|
<TypeBox component="span">
|
||||||
{item.vehicleType}
|
{provider.vehicleType}
|
||||||
</TypeBox>
|
</TypeBox>
|
||||||
</Typography>
|
</Typography>
|
||||||
|
|
||||||
@@ -313,39 +252,72 @@ export const ProviderButton = () => {
|
|||||||
</Box>
|
</Box>
|
||||||
}
|
}
|
||||||
secondary={
|
secondary={
|
||||||
hasSubInfo ? (
|
<>
|
||||||
<>
|
{/* 订阅信息 */}
|
||||||
<Box
|
{hasSubInfo && (
|
||||||
sx={{
|
<>
|
||||||
mb: 1,
|
<Box
|
||||||
display: "flex",
|
sx={{
|
||||||
alignItems: "center",
|
mb: 1,
|
||||||
justifyContent: "space-between",
|
display: "flex",
|
||||||
}}
|
alignItems: "center",
|
||||||
>
|
justifyContent: "space-between",
|
||||||
<span title={t("Used / Total") as string}>
|
}}
|
||||||
{parseTraffic(upload + download)} /{" "}
|
>
|
||||||
{parseTraffic(total)}
|
<span title={t("Used / Total") as string}>
|
||||||
</span>
|
{parseTraffic(upload + download)} /{" "}
|
||||||
<span title={t("Expire Time") as string}>
|
{parseTraffic(total)}
|
||||||
{parseExpire(expire)}
|
</span>
|
||||||
</span>
|
<span title={t("Expire Time") as string}>
|
||||||
</Box>
|
{parseExpire(expire)}
|
||||||
|
</span>
|
||||||
|
</Box>
|
||||||
|
|
||||||
<LinearProgress
|
{/* 进度条 */}
|
||||||
variant="determinate"
|
<LinearProgress
|
||||||
value={progress}
|
variant="determinate"
|
||||||
sx={{
|
value={progress}
|
||||||
height: 6,
|
sx={{
|
||||||
borderRadius: 3,
|
height: 6,
|
||||||
opacity: total > 0 ? 1 : 0,
|
borderRadius: 3,
|
||||||
}}
|
opacity: total > 0 ? 1 : 0,
|
||||||
/>
|
}}
|
||||||
</>
|
/>
|
||||||
) : null
|
</>
|
||||||
|
)}
|
||||||
|
</>
|
||||||
}
|
}
|
||||||
/>
|
/>
|
||||||
<Divider orientation="vertical" flexItem />
|
<Divider orientation="vertical" flexItem />
|
||||||
|
<Box
|
||||||
|
sx={{
|
||||||
|
width: 40,
|
||||||
|
display: "flex",
|
||||||
|
justifyContent: "center",
|
||||||
|
alignItems: "center",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<IconButton
|
||||||
|
size="small"
|
||||||
|
color="primary"
|
||||||
|
onClick={() => {
|
||||||
|
updateProvider(key);
|
||||||
|
}}
|
||||||
|
disabled={isUpdating}
|
||||||
|
sx={{
|
||||||
|
animation: isUpdating
|
||||||
|
? "spin 1s linear infinite"
|
||||||
|
: "none",
|
||||||
|
"@keyframes spin": {
|
||||||
|
"0%": { transform: "rotate(0deg)" },
|
||||||
|
"100%": { transform: "rotate(360deg)" },
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
title={t("Update Provider") as string}
|
||||||
|
>
|
||||||
|
<RefreshRounded />
|
||||||
|
</IconButton>
|
||||||
|
</Box>
|
||||||
</ListItem>
|
</ListItem>
|
||||||
);
|
);
|
||||||
})}
|
})}
|
||||||
|
|||||||
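The provider-button hunks above restore the original traffic-progress calculation and drop the refactor's variant. Below is a minimal TypeScript sketch (editorial, not part of the commit) that contrasts the two expressions exactly as they appear in the diff; the helper names and the GiB example values are illustrative only.

```ts
// Editorial sketch, not part of the commit: the two progress formulas from the hunk above.
// upload/download/total come from a provider's subscriptionInfo (bytes).

// Removed by the revert: plain percentage, clamped to the 0..100 range.
const progressRefactor = (upload: number, download: number, total: number): number =>
  total > 0 ? Math.min(100, Math.max(0, ((upload + download) / total) * 100)) : 0;

// Restored by the revert: rounded percentage plus one, capped at 100, so even tiny
// usage renders a visible sliver in the LinearProgress bar.
const progressRestored = (upload: number, download: number, total: number): number =>
  total > 0 ? Math.min(Math.round(((download + upload) * 100) / total) + 1, 100) : 0;

// Example: 1 GiB used out of 100 GiB.
const GiB = 1024 ** 3;
console.log(progressRefactor(0, GiB, 100 * GiB)); // 1
console.log(progressRestored(0, GiB, 100 * GiB)); // 2
```

The restored form adds 1 after rounding, so any non-zero usage produces a visible bar segment at the cost of overstating usage by up to one percentage point.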
@@ -61,17 +61,10 @@ export const ProxyGroups = (props: Props) => {
 }>({ open: false, message: "" });
 
 const { verge } = useVerge();
-const {
-proxies: proxiesData,
-proxyHydration,
-proxyTargetProfileId,
-proxyDisplayProfileId,
-isProxyRefreshPending,
-} = useAppData();
+const { proxies: proxiesData } = useAppData();
 const groups = proxiesData?.groups;
 const availableGroups = useMemo(() => groups ?? [], [groups]);
-const showHydrationOverlay = isProxyRefreshPending;
-const pendingProfileSwitch = proxyTargetProfileId !== proxyDisplayProfileId;
 const defaultRuleGroup = useMemo(() => {
 if (isChainMode && mode === "rule" && availableGroups.length > 0) {
 return availableGroups[0].name;
@@ -83,35 +76,6 @@ export const ProxyGroups = (props: Props) => {
|
|||||||
() => selectedGroup ?? defaultRuleGroup,
|
() => selectedGroup ?? defaultRuleGroup,
|
||||||
[selectedGroup, defaultRuleGroup],
|
[selectedGroup, defaultRuleGroup],
|
||||||
);
|
);
|
||||||
const hydrationChip = useMemo(() => {
|
|
||||||
if (proxyHydration === "live") return null;
|
|
||||||
|
|
||||||
const label =
|
|
||||||
proxyHydration === "snapshot" ? t("Snapshot data") : t("Syncing...");
|
|
||||||
|
|
||||||
return (
|
|
||||||
<Chip
|
|
||||||
size="small"
|
|
||||||
color={proxyHydration === "snapshot" ? "warning" : "info"}
|
|
||||||
label={label}
|
|
||||||
sx={{ fontWeight: 500, height: 22 }}
|
|
||||||
/>
|
|
||||||
);
|
|
||||||
}, [proxyHydration, t]);
|
|
||||||
|
|
||||||
const overlayMessage = useMemo(() => {
|
|
||||||
if (!showHydrationOverlay) return null;
|
|
||||||
|
|
||||||
if (pendingProfileSwitch) {
|
|
||||||
return t("Loading proxy data for the selected profile...");
|
|
||||||
}
|
|
||||||
|
|
||||||
if (proxyHydration === "snapshot") {
|
|
||||||
return t("Preparing proxy snapshot...");
|
|
||||||
}
|
|
||||||
|
|
||||||
return t("Syncing proxy data...");
|
|
||||||
}, [showHydrationOverlay, pendingProfileSwitch, proxyHydration, t]);
|
|
||||||
|
|
||||||
const { renderList, onProxies, onHeadState } = useRenderList(
|
const { renderList, onProxies, onHeadState } = useRenderList(
|
||||||
mode,
|
mode,
|
||||||
@@ -129,7 +93,7 @@ export const ProxyGroups = (props: Props) => {
 [renderList],
 );
 
-// 系统代理选择
+// 统代理选择
 const { handleProxyGroupChange } = useProxySelection({
 onSuccess: () => {
 onProxies();
@@ -342,7 +306,12 @@ export const ProxyGroups = (props: Props) => {
|
|||||||
try {
|
try {
|
||||||
await Promise.race([
|
await Promise.race([
|
||||||
delayManager.checkListDelay(names, groupName, timeout),
|
delayManager.checkListDelay(names, groupName, timeout),
|
||||||
delayGroup(groupName, url, timeout),
|
delayGroup(groupName, url, timeout).then((result) => {
|
||||||
|
console.log(
|
||||||
|
`[ProxyGroups] getGroupProxyDelays返回结果数量:`,
|
||||||
|
Object.keys(result || {}).length,
|
||||||
|
);
|
||||||
|
}), // 查询group delays 将清除fixed(不关注调用结果)
|
||||||
]);
|
]);
|
||||||
console.log(`[ProxyGroups] 延迟测试完成,组: ${groupName}`);
|
console.log(`[ProxyGroups] 延迟测试完成,组: ${groupName}`);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
@@ -407,11 +376,6 @@ export const ProxyGroups = (props: Props) => {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (isChainMode) {
|
if (isChainMode) {
|
||||||
const chainVirtuosoHeight =
|
|
||||||
mode === "rule" && proxyGroupNames.length > 0
|
|
||||||
? "calc(100% - 80px)"
|
|
||||||
: "calc(100% - 14px)";
|
|
||||||
|
|
||||||
// 获取所有代理组
|
// 获取所有代理组
|
||||||
const proxyGroups = proxiesData?.groups || [];
|
const proxyGroups = proxiesData?.groups || [];
|
||||||
|
|
||||||
@@ -490,7 +454,10 @@ export const ProxyGroups = (props: Props) => {
|
|||||||
<Virtuoso
|
<Virtuoso
|
||||||
ref={virtuosoRef}
|
ref={virtuosoRef}
|
||||||
style={{
|
style={{
|
||||||
height: chainVirtuosoHeight,
|
height:
|
||||||
|
mode === "rule" && proxyGroups.length > 0
|
||||||
|
? "calc(100% - 80px)" // 只有标题的高度
|
||||||
|
: "calc(100% - 14px)",
|
||||||
}}
|
}}
|
||||||
totalCount={renderList.length}
|
totalCount={renderList.length}
|
||||||
increaseViewportBy={{ top: 200, bottom: 200 }}
|
increaseViewportBy={{ top: 200, bottom: 200 }}
|
||||||
@@ -581,9 +548,7 @@ export const ProxyGroups = (props: Props) => {
 {group.name}
 </Typography>
 <Typography variant="caption" color="text.secondary">
-{`${t("Group Type")}: ${group.type} · ${t("Proxy Count")}: ${
-Array.isArray(group.all) ? group.all.length : 0
-}`}
+{group.type} · {group.all.length} 节点
 </Typography>
 </Box>
 </MenuItem>
@@ -591,7 +556,7 @@ export const ProxyGroups = (props: Props) => {
 {availableGroups.length === 0 && (
 <MenuItem disabled>
 <Typography variant="body2" color="text.secondary">
-{t("Empty")}
+暂无可用代理组
 </Typography>
 </MenuItem>
 )}
@@ -602,29 +567,9 @@ export const ProxyGroups = (props: Props) => {
|
|||||||
|
|
||||||
return (
|
return (
|
||||||
<div
|
<div
|
||||||
style={{
|
style={{ position: "relative", height: "100%", willChange: "transform" }}
|
||||||
position: "relative",
|
|
||||||
height: "100%",
|
|
||||||
willChange: "transform",
|
|
||||||
opacity: showHydrationOverlay ? 0.45 : 1,
|
|
||||||
transition: "opacity 120ms ease",
|
|
||||||
}}
|
|
||||||
>
|
>
|
||||||
{hydrationChip && (
|
{/* 代理组导航栏 */}
|
||||||
<Box
|
|
||||||
sx={{
|
|
||||||
position: "absolute",
|
|
||||||
top: 8,
|
|
||||||
right: 16,
|
|
||||||
zIndex: 2,
|
|
||||||
display: "flex",
|
|
||||||
alignItems: "center",
|
|
||||||
gap: 8,
|
|
||||||
}}
|
|
||||||
>
|
|
||||||
{hydrationChip}
|
|
||||||
</Box>
|
|
||||||
)}
|
|
||||||
{mode === "rule" && (
|
{mode === "rule" && (
|
||||||
<ProxyGroupNavigator
|
<ProxyGroupNavigator
|
||||||
proxyGroupNames={proxyGroupNames}
|
proxyGroupNames={proxyGroupNames}
|
||||||
@@ -663,39 +608,6 @@ export const ProxyGroups = (props: Props) => {
|
|||||||
)}
|
)}
|
||||||
/>
|
/>
|
||||||
<ScrollTopButton show={showScrollTop} onClick={scrollToTop} />
|
<ScrollTopButton show={showScrollTop} onClick={scrollToTop} />
|
||||||
{showHydrationOverlay && overlayMessage && (
|
|
||||||
<Box
|
|
||||||
sx={{
|
|
||||||
position: "absolute",
|
|
||||||
inset: 0,
|
|
||||||
zIndex: 3,
|
|
||||||
display: "flex",
|
|
||||||
alignItems: "center",
|
|
||||||
justifyContent: "center",
|
|
||||||
pointerEvents: "auto",
|
|
||||||
cursor: "wait",
|
|
||||||
backgroundColor: "rgba(8, 8, 8, 0.12)",
|
|
||||||
}}
|
|
||||||
>
|
|
||||||
<Box
|
|
||||||
sx={{
|
|
||||||
px: 2.5,
|
|
||||||
py: 1.5,
|
|
||||||
borderRadius: 1,
|
|
||||||
bgcolor: "background.paper",
|
|
||||||
boxShadow: 3,
|
|
||||||
}}
|
|
||||||
>
|
|
||||||
<Typography
|
|
||||||
variant="body2"
|
|
||||||
color="text.secondary"
|
|
||||||
sx={{ fontWeight: 500 }}
|
|
||||||
>
|
|
||||||
{overlayMessage}
|
|
||||||
</Typography>
|
|
||||||
</Box>
|
|
||||||
</Box>
|
|
||||||
)}
|
|
||||||
</div>
|
</div>
|
||||||
);
|
);
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -14,13 +14,50 @@ import {
|
|||||||
} from "./use-head-state";
|
} from "./use-head-state";
|
||||||
import { useWindowWidth } from "./use-window-width";
|
import { useWindowWidth } from "./use-window-width";
|
||||||
|
|
||||||
type RenderGroup = IProxyGroupItem;
|
// 定义代理项接口
|
||||||
|
interface IProxyItem {
|
||||||
|
name: string;
|
||||||
|
type: string;
|
||||||
|
udp: boolean;
|
||||||
|
xudp: boolean;
|
||||||
|
tfo: boolean;
|
||||||
|
mptcp: boolean;
|
||||||
|
smux: boolean;
|
||||||
|
history: {
|
||||||
|
time: string;
|
||||||
|
delay: number;
|
||||||
|
}[];
|
||||||
|
provider?: string;
|
||||||
|
testUrl?: string;
|
||||||
|
[key: string]: any; // 添加索引签名以适应其他可能的属性
|
||||||
|
}
|
||||||
|
|
||||||
|
// 代理组类型
|
||||||
|
type ProxyGroup = {
|
||||||
|
name: string;
|
||||||
|
type: string;
|
||||||
|
udp: boolean;
|
||||||
|
xudp: boolean;
|
||||||
|
tfo: boolean;
|
||||||
|
mptcp: boolean;
|
||||||
|
smux: boolean;
|
||||||
|
history: {
|
||||||
|
time: string;
|
||||||
|
delay: number;
|
||||||
|
}[];
|
||||||
|
now: string;
|
||||||
|
all: IProxyItem[];
|
||||||
|
hidden?: boolean;
|
||||||
|
icon?: string;
|
||||||
|
testUrl?: string;
|
||||||
|
provider?: string;
|
||||||
|
};
|
||||||
|
|
||||||
export interface IRenderItem {
|
export interface IRenderItem {
|
||||||
// 组 | head | item | empty | item col
|
// 组 | head | item | empty | item col
|
||||||
type: 0 | 1 | 2 | 3 | 4;
|
type: 0 | 1 | 2 | 3 | 4;
|
||||||
key: string;
|
key: string;
|
||||||
group: RenderGroup;
|
group: ProxyGroup;
|
||||||
proxy?: IProxyItem;
|
proxy?: IProxyItem;
|
||||||
col?: number;
|
col?: number;
|
||||||
proxyCol?: IProxyItem[];
|
proxyCol?: IProxyItem[];
|
||||||
@@ -62,7 +99,7 @@ export const useRenderList = (
|
|||||||
selectedGroup?: string | null,
|
selectedGroup?: string | null,
|
||||||
) => {
|
) => {
|
||||||
// 使用全局数据提供者
|
// 使用全局数据提供者
|
||||||
const { proxies: proxiesData, proxyHydration, refreshProxy } = useAppData();
|
const { proxies: proxiesData, refreshProxy } = useAppData();
|
||||||
const { verge } = useVerge();
|
const { verge } = useVerge();
|
||||||
const { width } = useWindowWidth();
|
const { width } = useWindowWidth();
|
||||||
const [headStates, setHeadState] = useHeadStateNew();
|
const [headStates, setHeadState] = useHeadStateNew();
|
||||||
@@ -86,29 +123,17 @@ export const useRenderList = (
|
|||||||
|
|
||||||
// 确保代理数据加载
|
// 确保代理数据加载
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (!proxiesData || proxyHydration !== "live") return;
|
if (!proxiesData) return;
|
||||||
const { groups, proxies } = proxiesData;
|
const { groups, proxies } = proxiesData;
|
||||||
|
|
||||||
if (
|
if (
|
||||||
(mode === "rule" && !groups.length) ||
|
(mode === "rule" && !groups.length) ||
|
||||||
(mode === "global" && proxies.length < 2)
|
(mode === "global" && proxies.length < 2)
|
||||||
) {
|
) {
|
||||||
const handle = setTimeout(() => {
|
const handle = setTimeout(() => refreshProxy(), 500);
|
||||||
void refreshProxy().catch(() => {});
|
|
||||||
}, 500);
|
|
||||||
return () => clearTimeout(handle);
|
return () => clearTimeout(handle);
|
||||||
}
|
}
|
||||||
}, [proxiesData, proxyHydration, mode, refreshProxy]);
|
}, [proxiesData, mode, refreshProxy]);
|
||||||
|
|
||||||
useEffect(() => {
|
|
||||||
if (proxyHydration !== "snapshot") return;
|
|
||||||
|
|
||||||
const handle = setTimeout(() => {
|
|
||||||
void refreshProxy().catch(() => {});
|
|
||||||
}, 1800);
|
|
||||||
|
|
||||||
return () => clearTimeout(handle);
|
|
||||||
}, [proxyHydration, refreshProxy]);
|
|
||||||
|
|
||||||
// 链式代理模式节点自动计算延迟
|
// 链式代理模式节点自动计算延迟
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
@@ -122,7 +147,7 @@ export const useRenderList = (
 // 设置组监听器,当有延迟更新时自动刷新
 const groupListener = () => {
 console.log("[ChainMode] 延迟更新,刷新UI");
-void refreshProxy().catch(() => {});
+refreshProxy();
 };
 
 delayManager.setGroupListener("chain-mode", groupListener);
@@ -163,12 +188,9 @@ export const useRenderList = (
|
|||||||
// 链式代理模式下,显示代理组和其节点
|
// 链式代理模式下,显示代理组和其节点
|
||||||
if (isChainMode && runtimeConfig && mode === "rule") {
|
if (isChainMode && runtimeConfig && mode === "rule") {
|
||||||
// 使用正常的规则模式代理组
|
// 使用正常的规则模式代理组
|
||||||
const chainGroups = proxiesData.groups ?? [];
|
const allGroups = proxiesData.groups.length
|
||||||
const allGroups = chainGroups.length
|
? proxiesData.groups
|
||||||
? chainGroups
|
: [proxiesData.global!];
|
||||||
: proxiesData.global
|
|
||||||
? [proxiesData.global]
|
|
||||||
: [];
|
|
||||||
|
|
||||||
// 如果选择了特定代理组,只显示该组的节点
|
// 如果选择了特定代理组,只显示该组的节点
|
||||||
if (selectedGroup) {
|
if (selectedGroup) {
|
||||||
@@ -260,7 +282,7 @@ export const useRenderList = (
|
|||||||
});
|
});
|
||||||
|
|
||||||
// 创建一个虚拟的组来容纳所有节点
|
// 创建一个虚拟的组来容纳所有节点
|
||||||
const virtualGroup: RenderGroup = {
|
const virtualGroup: ProxyGroup = {
|
||||||
name: "All Proxies",
|
name: "All Proxies",
|
||||||
type: "Selector",
|
type: "Selector",
|
||||||
udp: false,
|
udp: false,
|
||||||
@@ -318,7 +340,7 @@ export const useRenderList = (
|
|||||||
});
|
});
|
||||||
|
|
||||||
// 创建一个虚拟的组来容纳所有节点
|
// 创建一个虚拟的组来容纳所有节点
|
||||||
const virtualGroup: RenderGroup = {
|
const virtualGroup: ProxyGroup = {
|
||||||
name: "All Proxies",
|
name: "All Proxies",
|
||||||
type: "Selector",
|
type: "Selector",
|
||||||
udp: false,
|
udp: false,
|
||||||
@@ -358,15 +380,12 @@ export const useRenderList = (
|
|||||||
|
|
||||||
// 正常模式的渲染逻辑
|
// 正常模式的渲染逻辑
|
||||||
const useRule = mode === "rule" || mode === "script";
|
const useRule = mode === "rule" || mode === "script";
|
||||||
const renderGroups = (() => {
|
const renderGroups =
|
||||||
const groups = proxiesData.groups ?? [];
|
useRule && proxiesData.groups.length
|
||||||
if (useRule && groups.length) {
|
? proxiesData.groups
|
||||||
return groups;
|
: [proxiesData.global!];
|
||||||
}
|
|
||||||
return proxiesData.global ? [proxiesData.global] : groups;
|
|
||||||
})();
|
|
||||||
|
|
||||||
const retList = renderGroups.flatMap((group: RenderGroup) => {
|
const retList = renderGroups.flatMap((group: ProxyGroup) => {
|
||||||
const headState = headStates[group.name] || DEFAULT_STATE;
|
const headState = headStates[group.name] || DEFAULT_STATE;
|
||||||
const ret: IRenderItem[] = [
|
const ret: IRenderItem[] = [
|
||||||
{
|
{
|
||||||
|
|||||||
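Both sides of the use-render-list hunks above defer a refreshProxy() call by 500 ms when the group/proxy lists are still empty. Below is a minimal sketch of that retry-with-cleanup pattern, assuming a hypothetical hook name and an injected refresh function; these are not the repo's actual exports.

```ts
// Editorial sketch, not part of the commit: the deferred-refresh pattern seen in the
// use-render-list hunks. Hook name and parameters are illustrative; in the repo the
// data and refreshProxy come from useAppData()/SWR.
import { useEffect } from "react";

export const useDeferredProxyRefresh = (
  isEmpty: boolean,
  refreshProxy: () => Promise<unknown>,
  delayMs = 500,
) => {
  useEffect(() => {
    if (!isEmpty) return;
    // Schedule a single retry; clearing the timer on cleanup prevents the refresh
    // from firing after unmount or after data has already arrived.
    const handle = setTimeout(() => {
      void refreshProxy().catch(() => {});
    }, delayMs);
    return () => clearTimeout(handle);
  }, [isEmpty, refreshProxy, delayMs]);
};
```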
@@ -2,6 +2,12 @@ import { useMemo } from "react";
 
 import { useAppData } from "@/providers/app-data-context";
 
+// 定义代理组类型
+interface ProxyGroup {
+name: string;
+now: string;
+}
+
 // 获取当前代理节点信息的自定义Hook
 export const useCurrentProxy = () => {
 // 从AppDataProvider获取数据
@@ -31,15 +37,15 @@ export const useCurrentProxy = () => {
 "自动选择",
 ];
 const primaryGroup =
-groups.find((group) =>
+groups.find((group: ProxyGroup) =>
 primaryKeywords.some((keyword) =>
 group.name.toLowerCase().includes(keyword.toLowerCase()),
 ),
-) || groups.find((group) => group.name !== "GLOBAL");
+) || groups.filter((g: ProxyGroup) => g.name !== "GLOBAL")[0];
 
 if (primaryGroup) {
 primaryGroupName = primaryGroup.name;
-currentName = primaryGroup.now ?? currentName;
+currentName = primaryGroup.now;
 }
 }
 
|
|||||||
@@ -5,54 +5,33 @@ import {
|
|||||||
getProfiles,
|
getProfiles,
|
||||||
patchProfile,
|
patchProfile,
|
||||||
patchProfilesConfig,
|
patchProfilesConfig,
|
||||||
calcuProxies,
|
|
||||||
} from "@/services/cmds";
|
} from "@/services/cmds";
|
||||||
import {
|
import { calcuProxies } from "@/services/cmds";
|
||||||
useProfileStore,
|
|
||||||
selectEffectiveProfiles,
|
|
||||||
selectIsHydrating,
|
|
||||||
selectLastResult,
|
|
||||||
} from "@/stores/profile-store";
|
|
||||||
|
|
||||||
export const useProfiles = () => {
|
export const useProfiles = () => {
|
||||||
const profilesFromStore = useProfileStore(selectEffectiveProfiles);
|
|
||||||
const storeHydrating = useProfileStore(selectIsHydrating);
|
|
||||||
const lastResult = useProfileStore(selectLastResult);
|
|
||||||
const commitProfileSnapshot = useProfileStore(
|
|
||||||
(state) => state.commitHydrated,
|
|
||||||
);
|
|
||||||
|
|
||||||
const {
|
const {
|
||||||
data: swrProfiles,
|
data: profiles,
|
||||||
mutate: mutateProfiles,
|
mutate: mutateProfiles,
|
||||||
error,
|
error,
|
||||||
isValidating,
|
isValidating,
|
||||||
} = useSWR("getProfiles", getProfiles, {
|
} = useSWR("getProfiles", getProfiles, {
|
||||||
revalidateOnFocus: false,
|
revalidateOnFocus: false,
|
||||||
revalidateOnReconnect: false,
|
revalidateOnReconnect: false,
|
||||||
dedupingInterval: 500,
|
dedupingInterval: 500, // 减少去重时间,提高响应性
|
||||||
errorRetryCount: 3,
|
errorRetryCount: 3,
|
||||||
errorRetryInterval: 1000,
|
errorRetryInterval: 1000,
|
||||||
refreshInterval: 0,
|
refreshInterval: 0, // 完全由手动控制
|
||||||
onError: (err) => {
|
onError: (error) => {
|
||||||
console.error("[useProfiles] SWR错误:", err);
|
console.error("[useProfiles] SWR错误:", error);
|
||||||
},
|
},
|
||||||
onSuccess: (data) => {
|
onSuccess: (data) => {
|
||||||
commitProfileSnapshot(data);
|
|
||||||
console.log(
|
console.log(
|
||||||
"[useProfiles] 配置数据更新成功,配置数量",
|
"[useProfiles] 配置数据更新成功,配置数量:",
|
||||||
data?.items?.length || 0,
|
data?.items?.length || 0,
|
||||||
);
|
);
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
const rawProfiles = profilesFromStore ?? swrProfiles;
|
|
||||||
const profiles = (rawProfiles ?? {
|
|
||||||
current: null,
|
|
||||||
items: [],
|
|
||||||
}) as IProfilesConfig;
|
|
||||||
const hasProfiles = rawProfiles != null;
|
|
||||||
|
|
||||||
const patchProfiles = async (
|
const patchProfiles = async (
|
||||||
value: Partial<IProfilesConfig>,
|
value: Partial<IProfilesConfig>,
|
||||||
signal?: AbortSignal,
|
signal?: AbortSignal,
|
||||||
@@ -70,30 +49,32 @@ export const useProfiles = () => {
|
|||||||
await mutateProfiles();
|
await mutateProfiles();
|
||||||
|
|
||||||
return success;
|
return success;
|
||||||
} catch (err) {
|
} catch (error) {
|
||||||
if (err instanceof DOMException && err.name === "AbortError") {
|
if (error instanceof DOMException && error.name === "AbortError") {
|
||||||
throw err;
|
throw error;
|
||||||
}
|
}
|
||||||
|
|
||||||
await mutateProfiles();
|
await mutateProfiles();
|
||||||
throw err;
|
throw error;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
const patchCurrent = async (value: Partial<IProfileItem>) => {
|
const patchCurrent = async (value: Partial<IProfileItem>) => {
|
||||||
if (!hasProfiles || !profiles.current) {
|
if (profiles?.current) {
|
||||||
return;
|
await patchProfile(profiles.current, value);
|
||||||
|
mutateProfiles();
|
||||||
}
|
}
|
||||||
await patchProfile(profiles.current, value);
|
|
||||||
mutateProfiles();
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// 根据selected的节点选择
|
||||||
const activateSelected = async () => {
|
const activateSelected = async () => {
|
||||||
try {
|
try {
|
||||||
console.log("[ActivateSelected] 开始处理代理选择");
|
console.log("[ActivateSelected] 开始处理代理选择");
|
||||||
|
|
||||||
const proxiesData = await calcuProxies();
|
const [proxiesData, profileData] = await Promise.all([
|
||||||
const profileData = hasProfiles ? profiles : null;
|
calcuProxies(),
|
||||||
|
getProfiles(),
|
||||||
|
]);
|
||||||
|
|
||||||
if (!profileData || !proxiesData) {
|
if (!profileData || !proxiesData) {
|
||||||
console.log("[ActivateSelected] 代理或配置数据不可用,跳过处理");
|
console.log("[ActivateSelected] 代理或配置数据不可用,跳过处理");
|
||||||
@@ -109,6 +90,7 @@ export const useProfiles = () => {
 return;
 }
 
+// 检查是否有saved的代理选择
 const { selected = [] } = current;
 if (selected.length === 0) {
 console.log("[ActivateSelected] 当前profile无保存的代理选择,跳过");
@@ -116,7 +98,7 @@ export const useProfiles = () => {
 }
 
 console.log(
-`[ActivateSelected] 当前profile有${selected.length} 个代理选择配置`,
+`[ActivateSelected] 当前profile有 ${selected.length} 个代理选择配置`,
 );
 
 const selectedMap = Object.fromEntries(
@@ -133,6 +115,7 @@ export const useProfiles = () => {
 "LoadBalance",
 ]);
 
+// 处理所有代理组
 [global, ...groups].forEach((group) => {
 if (!group) {
 return;
@@ -167,7 +150,7 @@ export const useProfiles = () => {
 
 if (!existsInGroup) {
 console.warn(
-`[ActivateSelected] 保存的代理${savedProxy} 不存在于代理组${name}`,
+`[ActivateSelected] 保存的代理 ${savedProxy} 不存在于代理组 ${name}`,
 );
 hasChange = true;
 newSelected.push({ name, now: now ?? savedProxy });
@@ -190,7 +173,7 @@ export const useProfiles = () => {
 return;
 }
 
-console.log("[ActivateSelected] 完成代理切换,保存新的选择配置");
+console.log(`[ActivateSelected] 完成代理切换,保存新的选择配置`);
 
 try {
 await patchProfile(profileData.current!, { selected: newSelected });
@@ -212,18 +195,14 @@ export const useProfiles = () => {
|
|||||||
|
|
||||||
return {
|
return {
|
||||||
profiles,
|
profiles,
|
||||||
hasProfiles,
|
current: profiles?.items?.find((p) => p && p.uid === profiles.current),
|
||||||
current: hasProfiles
|
|
||||||
? (profiles.items?.find((p) => p && p.uid === profiles.current) ?? null)
|
|
||||||
: null,
|
|
||||||
activateSelected,
|
activateSelected,
|
||||||
patchProfiles,
|
patchProfiles,
|
||||||
patchCurrent,
|
patchCurrent,
|
||||||
mutateProfiles,
|
mutateProfiles,
|
||||||
isLoading: isValidating || storeHydrating,
|
// 新增故障检测状态
|
||||||
isHydrating: storeHydrating,
|
isLoading: isValidating,
|
||||||
lastResult,
|
|
||||||
error,
|
error,
|
||||||
isStale: !hasProfiles && !error && !isValidating,
|
isStale: !profiles && !error && !isValidating, // 检测是否处于异常状态
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -1,9 +1,11 @@
 import { listen } from "@tauri-apps/api/event";
 import { getCurrentWebviewWindow } from "@tauri-apps/api/webviewWindow";
 import { useEffect } from "react";
+import { mutate } from "swr";
+
 import { useListen } from "@/hooks/use-listen";
-import { refreshClashData, refreshVergeData } from "@/services/refresh";
+import { getAxios } from "@/services/api";
 
 export const useLayoutEvents = (
 handleNotice: (payload: [string, string]) => void,
 ) => {
@@ -35,32 +37,32 @@ export const useLayoutEvents = (
 .catch((error) => console.error("[事件监听] 注册失败", error));
 };
 
-register(
-addListener("verge://notice-message", ({ payload }) =>
-handleNotice(payload as [string, string]),
-),
-);
-
 register(
 addListener("verge://refresh-clash-config", async () => {
-try {
-await refreshClashData();
-} catch (error) {
-console.error("[事件监听] 刷新 Clash 配置失败", error);
-}
+await getAxios(true);
+mutate("getProxies");
+mutate("getVersion");
+mutate("getClashConfig");
+mutate("getProxyProviders");
 }),
 );
 
 register(
 addListener("verge://refresh-verge-config", () => {
-try {
-refreshVergeData();
-} catch (error) {
-console.error("[事件监听] 刷新 Verge 配置失败", error);
-}
+mutate("getVergeConfig");
+mutate("getSystemProxy");
+mutate("getAutotemProxy");
+mutate("getRunningMode");
+mutate("isServiceAvailable");
 }),
 );
 
+register(
+addListener("verge://notice-message", ({ payload }) =>
+handleNotice(payload as [string, string]),
+),
+);
+
 const appWindow = getCurrentWebviewWindow();
 register(
 (async () => {
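The restored handler above relies on SWR's key-based cache: after re-creating the axios instance it calls mutate(key) for each cached query so every subscriber re-fetches. Below is a minimal sketch of that pattern with a stand-in fetcher; only the cache key name is taken from the hunk.

```ts
// Editorial sketch, not part of the commit: SWR key-based invalidation as used by the
// restored "verge://refresh-clash-config" handler. The fetcher is a stand-in; only the
// cache key mirrors the hunk above.
import useSWR, { mutate } from "swr";

const fetchClashConfig = async () => ({ mode: "rule" }); // stand-in fetcher

// Any component using this hook shares the "getClashConfig" cache entry.
export const useClashConfig = () => useSWR("getClashConfig", fetchClashConfig);

// Called from the event handler: invalidates the entry and re-runs the fetcher
// for every mounted subscriber.
export const onRefreshClashConfig = () => mutate("getClashConfig");
```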
File diff suppressed because it is too large
@@ -6,15 +6,8 @@ import {
 RuleProvider,
 } from "tauri-plugin-mihomo-api";
 
-import { ProxiesView, type ProfileSwitchStatus } from "@/services/cmds";
-
 export interface AppDataContextType {
-proxies: ProxiesView | null;
-proxyHydration: "none" | "snapshot" | "live";
-proxyTargetProfileId: string | null;
-proxyDisplayProfileId: string | null;
-isProxyRefreshPending: boolean;
-switchStatus: ProfileSwitchStatus | null;
+proxies: any;
 clashConfig: BaseConfig;
 rules: Rule[];
 sysproxy: any;
|||||||
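The context hunk above removes the refactor's proxyHydration union ("none" | "snapshot" | "live") along with the profile-switch fields, reverting proxies to any. Below is a minimal sketch, an editorial assumption rather than the repo's API, of how such a hydration tag can be modeled and narrowed.

```ts
// Editorial sketch, not part of the commit: the hydration union removed by this revert,
// modeled standalone. Type and field names are illustrative, not the repo's API.
type ProxyHydration = "none" | "snapshot" | "live";

interface ProxyState<T> {
  hydration: ProxyHydration;
  data: T | null;
}

// UI gates badges/overlays on the tag and only reads data once it is present.
function isUsable<T>(state: ProxyState<T>): state is ProxyState<T> & { data: T } {
  return state.hydration !== "none" && state.data !== null;
}

const example: ProxyState<{ groups: string[] }> = {
  hydration: "snapshot",
  data: { groups: ["PROXY"] },
};
if (isUsable(example)) {
  console.log(example.data.groups.length); // 1
}
```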
@@ -1,6 +1,6 @@
|
|||||||
import { listen } from "@tauri-apps/api/event";
|
import { listen } from "@tauri-apps/api/event";
|
||||||
import React, { useCallback, useEffect, useMemo, useRef } from "react";
|
import React, { useCallback, useEffect, useMemo } from "react";
|
||||||
import useSWR, { mutate as globalMutate } from "swr";
|
import useSWR from "swr";
|
||||||
import {
|
import {
|
||||||
getBaseConfig,
|
getBaseConfig,
|
||||||
getRuleProviders,
|
getRuleProviders,
|
||||||
@@ -9,53 +9,31 @@ import {
|
|||||||
|
|
||||||
import { useVerge } from "@/hooks/use-verge";
|
import { useVerge } from "@/hooks/use-verge";
|
||||||
import {
|
import {
|
||||||
|
calcuProxies,
|
||||||
calcuProxyProviders,
|
calcuProxyProviders,
|
||||||
getAppUptime,
|
getAppUptime,
|
||||||
getProfileSwitchStatus,
|
|
||||||
getProfileSwitchEvents,
|
|
||||||
getProfiles as fetchProfilesConfig,
|
|
||||||
getRunningMode,
|
getRunningMode,
|
||||||
readProfileFile,
|
|
||||||
getSystemProxy,
|
getSystemProxy,
|
||||||
type ProxiesView,
|
|
||||||
type ProfileSwitchStatus,
|
|
||||||
type SwitchResultStatus,
|
|
||||||
} from "@/services/cmds";
|
} from "@/services/cmds";
|
||||||
import { SWR_DEFAULTS, SWR_SLOW_POLL } from "@/services/config";
|
import { SWR_DEFAULTS, SWR_REALTIME, SWR_SLOW_POLL } from "@/services/config";
|
||||||
import { useProfileStore } from "@/stores/profile-store";
|
|
||||||
import {
|
|
||||||
applyLiveProxyPayload,
|
|
||||||
fetchLiveProxies,
|
|
||||||
type ProxiesUpdatedPayload,
|
|
||||||
useProxyStore,
|
|
||||||
} from "@/stores/proxy-store";
|
|
||||||
import { createProxySnapshotFromProfile } from "@/utils/proxy-snapshot";
|
|
||||||
|
|
||||||
import { AppDataContext, AppDataContextType } from "./app-data-context";
|
import { AppDataContext, AppDataContextType } from "./app-data-context";
|
||||||
|
|
||||||
// Global app data provider
|
// 全局数据提供者组件
|
||||||
export const AppDataProvider = ({
|
export const AppDataProvider = ({
|
||||||
children,
|
children,
|
||||||
}: {
|
}: {
|
||||||
children: React.ReactNode;
|
children: React.ReactNode;
|
||||||
}) => {
|
}) => {
|
||||||
const { verge } = useVerge();
|
const { verge } = useVerge();
|
||||||
const applyProfileSwitchResult = useProfileStore(
|
|
||||||
(state) => state.applySwitchResult,
|
const { data: proxiesData, mutate: refreshProxy } = useSWR(
|
||||||
);
|
"getProxies",
|
||||||
const commitProfileSnapshot = useProfileStore(
|
calcuProxies,
|
||||||
(state) => state.commitHydrated,
|
{
|
||||||
);
|
...SWR_REALTIME,
|
||||||
const setSwitchEventSeq = useProfileStore((state) => state.setLastEventSeq);
|
onError: (err) => console.warn("[DataProvider] Proxy fetch failed:", err),
|
||||||
const proxyView = useProxyStore((state) => state.data);
|
},
|
||||||
const proxyHydration = useProxyStore((state) => state.hydration);
|
|
||||||
const proxyProfileId = useProxyStore((state) => state.lastProfileId);
|
|
||||||
const pendingProxyProfileId = useProxyStore(
|
|
||||||
(state) => state.pendingProfileId,
|
|
||||||
);
|
|
||||||
const setProxySnapshot = useProxyStore((state) => state.setSnapshot);
|
|
||||||
const clearPendingProxyProfile = useProxyStore(
|
|
||||||
(state) => state.clearPendingProfile,
|
|
||||||
);
|
);
|
||||||
|
|
||||||
const { data: clashConfig, mutate: refreshClashConfig } = useSWR(
|
const { data: clashConfig, mutate: refreshClashConfig } = useSWR(
|
||||||
@@ -82,259 +60,25 @@ export const AppDataProvider = ({
|
|||||||
SWR_DEFAULTS,
|
SWR_DEFAULTS,
|
||||||
);
|
);
|
||||||
|
|
||||||
const { data: switchStatus, mutate: mutateSwitchStatus } =
|
|
||||||
useSWR<ProfileSwitchStatus>(
|
|
||||||
"getProfileSwitchStatus",
|
|
||||||
getProfileSwitchStatus,
|
|
||||||
{
|
|
||||||
refreshInterval: (status) =>
|
|
||||||
status && (status.isSwitching || (status.queue?.length ?? 0) > 0)
|
|
||||||
? 400
|
|
||||||
: 4000,
|
|
||||||
dedupingInterval: 200,
|
|
||||||
},
|
|
||||||
);
|
|
||||||
|
|
||||||
const isUnmountedRef = useRef(false);
|
|
||||||
// Keep track of pending timers so we can cancel them on unmount and avoid stray updates.
|
|
||||||
const scheduledTimeoutsRef = useRef<Set<number>>(new Set());
|
|
||||||
// Shared metadata to dedupe switch events coming from both polling and subscriptions.
|
|
||||||
const switchMetaRef = useRef<{
|
|
||||||
pendingProfileId: string | null;
|
|
||||||
lastResultTaskId: number | null;
|
|
||||||
}>({
|
|
||||||
pendingProfileId: null,
|
|
||||||
lastResultTaskId: null,
|
|
||||||
});
|
|
||||||
const switchEventSeqRef = useRef(0);
|
|
||||||
const profileChangeMetaRef = useRef<{
|
|
||||||
lastProfileId: string | null;
|
|
||||||
lastEventTs: number;
|
|
||||||
}>({
|
|
||||||
lastProfileId: null,
|
|
||||||
lastEventTs: 0,
|
|
||||||
});
|
|
||||||
const lastClashRefreshAtRef = useRef(0);
|
|
||||||
const PROFILE_EVENT_DEDUP_MS = 400;
|
|
||||||
const CLASH_REFRESH_DEDUP_MS = 300;
|
|
||||||
|
|
||||||
// Thin wrapper around setTimeout that no-ops once the provider unmounts.
|
|
||||||
const scheduleTimeout = useCallback(
|
|
||||||
(callback: () => void | Promise<void>, delay: number) => {
|
|
||||||
if (isUnmountedRef.current) return -1;
|
|
||||||
|
|
||||||
const timeoutId = window.setTimeout(() => {
|
|
||||||
scheduledTimeoutsRef.current.delete(timeoutId);
|
|
||||||
if (!isUnmountedRef.current) {
|
|
||||||
void callback();
|
|
||||||
}
|
|
||||||
}, delay);
|
|
||||||
|
|
||||||
scheduledTimeoutsRef.current.add(timeoutId);
|
|
||||||
return timeoutId;
|
|
||||||
},
|
|
||||||
[],
|
|
||||||
);
|
|
||||||
|
|
||||||
const clearAllTimeouts = useCallback(() => {
|
|
||||||
scheduledTimeoutsRef.current.forEach((timeoutId) =>
|
|
||||||
clearTimeout(timeoutId),
|
|
||||||
);
|
|
||||||
scheduledTimeoutsRef.current.clear();
|
|
||||||
}, []);
|
|
||||||
|
|
||||||
// Delay live proxy refreshes slightly so we don't hammer Mihomo while a switch is still applying.
|
|
||||||
const queueProxyRefresh = useCallback(
|
|
||||||
(reason: string, delay = 1500) => {
|
|
||||||
scheduleTimeout(() => {
|
|
||||||
fetchLiveProxies().catch((error) =>
|
|
||||||
console.warn(
|
|
||||||
`[DataProvider] Proxy refresh failed (${reason}, fallback):`,
|
|
||||||
error,
|
|
||||||
),
|
|
||||||
);
|
|
||||||
}, delay);
|
|
||||||
},
|
|
||||||
[scheduleTimeout],
|
|
||||||
);
|
|
||||||
// Prime the proxy store with the static selections from the profile YAML before live data arrives.
|
|
||||||
const seedProxySnapshot = useCallback(
|
|
||||||
async (profileId: string) => {
|
|
||||||
if (!profileId) return;
|
|
||||||
|
|
||||||
try {
|
|
||||||
const yamlContent = await readProfileFile(profileId);
|
|
||||||
const snapshot = createProxySnapshotFromProfile(yamlContent);
|
|
||||||
if (!snapshot) return;
|
|
||||||
|
|
||||||
setProxySnapshot(snapshot, profileId);
|
|
||||||
} catch (error) {
|
|
||||||
console.warn(
|
|
||||||
"[DataProvider] Failed to seed proxy snapshot from profile:",
|
|
||||||
error,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
},
|
|
||||||
[setProxySnapshot],
|
|
||||||
);
|
|
||||||
|
|
||||||
const handleSwitchResult = useCallback(
|
|
||||||
(result: SwitchResultStatus) => {
|
|
||||||
// Ignore duplicate notifications for the same switch execution.
|
|
||||||
const meta = switchMetaRef.current;
|
|
||||||
if (result.taskId === meta.lastResultTaskId) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
meta.lastResultTaskId = result.taskId;
|
|
||||||
|
|
||||||
// Optimistically update the SWR cache so the UI shows the new profile immediately.
|
|
||||||
void globalMutate(
|
|
||||||
"getProfiles",
|
|
||||||
(current?: IProfilesConfig | null) => {
|
|
||||||
if (!current || !result.success) {
|
|
||||||
return current;
|
|
||||||
}
|
|
||||||
if (current.current === result.profileId) {
|
|
||||||
return current;
|
|
||||||
}
|
|
||||||
return {
|
|
||||||
...current,
|
|
||||||
current: result.profileId,
|
|
||||||
};
|
|
||||||
},
|
|
||||||
false,
|
|
||||||
);
|
|
||||||
|
|
||||||
applyProfileSwitchResult(result);
|
|
||||||
if (!result.success) {
|
|
||||||
clearPendingProxyProfile();
|
|
||||||
}
|
|
||||||
|
|
||||||
if (result.success && result.cancelled !== true) {
|
|
||||||
// Once the backend settles, refresh all dependent data in the background.
|
|
||||||
scheduleTimeout(() => {
|
|
||||||
void Promise.allSettled([
|
|
||||||
fetchProfilesConfig().then((data) => {
|
|
||||||
commitProfileSnapshot(data);
|
|
||||||
globalMutate("getProfiles", data, false);
|
|
||||||
}),
|
|
||||||
fetchLiveProxies(),
|
|
||||||
refreshProxyProviders(),
|
|
||||||
refreshRules(),
|
|
||||||
refreshRuleProviders(),
|
|
||||||
]).catch((error) => {
|
|
||||||
console.warn(
|
|
||||||
"[DataProvider] Background refresh after profile switch failed:",
|
|
||||||
error,
|
|
||||||
);
|
|
||||||
});
|
|
||||||
}, 100);
|
|
||||||
}
|
|
||||||
|
|
||||||
void mutateSwitchStatus((current) => {
|
|
||||||
if (!current) {
|
|
||||||
return current;
|
|
||||||
}
|
|
||||||
const filteredQueue = current.queue.filter(
|
|
||||||
(task) => task.taskId !== result.taskId,
|
|
||||||
);
|
|
||||||
const active =
|
|
||||||
current.active && current.active.taskId === result.taskId
|
|
||||||
? null
|
|
||||||
: current.active;
|
|
||||||
const isSwitching = filteredQueue.length > 0;
|
|
||||||
return {
|
|
||||||
...current,
|
|
||||||
active,
|
|
||||||
queue: filteredQueue,
|
|
||||||
isSwitching,
|
|
||||||
lastResult: result,
|
|
||||||
};
|
|
||||||
}, false);
|
|
||||||
},
|
|
||||||
[
|
|
||||||
scheduleTimeout,
|
|
||||||
refreshProxyProviders,
|
|
||||||
refreshRules,
|
|
||||||
refreshRuleProviders,
|
|
||||||
mutateSwitchStatus,
|
|
||||||
applyProfileSwitchResult,
|
|
||||||
commitProfileSnapshot,
|
|
||||||
clearPendingProxyProfile,
|
|
||||||
],
|
|
||||||
);
|
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
isUnmountedRef.current = false;
|
let lastProfileId: string | null = null;
|
||||||
return () => {
|
let lastUpdateTime = 0;
|
||||||
isUnmountedRef.current = true;
|
const refreshThrottle = 800;
|
||||||
clearAllTimeouts();
|
|
||||||
};
|
|
||||||
}, [clearAllTimeouts]);
|
|
||||||
|
|
||||||
useEffect(() => {
|
let isUnmounted = false;
|
||||||
if (!switchStatus) {
|
const scheduledTimeouts = new Set<number>();
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
const meta = switchMetaRef.current;
|
|
||||||
const nextTarget =
|
|
||||||
switchStatus.active?.profileId ??
|
|
||||||
(switchStatus.queue.length > 0 ? switchStatus.queue[0].profileId : null);
|
|
||||||
|
|
||||||
if (nextTarget && nextTarget !== meta.pendingProfileId) {
|
|
||||||
meta.pendingProfileId = nextTarget;
|
|
||||||
void seedProxySnapshot(nextTarget);
|
|
||||||
} else if (!nextTarget) {
|
|
||||||
meta.pendingProfileId = null;
|
|
||||||
}
|
|
||||||
|
|
||||||
const lastResult = switchStatus.lastResult ?? null;
|
|
||||||
if (lastResult) {
|
|
||||||
handleSwitchResult(lastResult);
|
|
||||||
}
|
|
||||||
}, [switchStatus, seedProxySnapshot, handleSwitchResult]);
|
|
||||||
|
|
||||||
useEffect(() => {
|
|
||||||
let disposed = false;
|
|
||||||
|
|
||||||
const pollEvents = async () => {
|
|
||||||
if (disposed) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
try {
|
|
||||||
const events = await getProfileSwitchEvents(switchEventSeqRef.current);
|
|
||||||
if (events.length > 0) {
|
|
||||||
switchEventSeqRef.current = events[events.length - 1].sequence;
|
|
||||||
setSwitchEventSeq(switchEventSeqRef.current);
|
|
||||||
events.forEach((event) => handleSwitchResult(event.result));
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
console.warn("[DataProvider] Failed to poll switch events:", error);
|
|
||||||
} finally {
|
|
||||||
if (!disposed) {
|
|
||||||
const nextDelay =
|
|
||||||
switchStatus &&
|
|
||||||
(switchStatus.isSwitching || (switchStatus.queue?.length ?? 0) > 0)
|
|
||||||
? 250
|
|
||||||
: 1000;
|
|
||||||
scheduleTimeout(pollEvents, nextDelay);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
scheduleTimeout(pollEvents, 0);
|
|
||||||
|
|
||||||
return () => {
|
|
||||||
disposed = true;
|
|
||||||
};
|
|
||||||
}, [scheduleTimeout, handleSwitchResult, switchStatus, setSwitchEventSeq]);
|
|
||||||
|
|
||||||
useEffect(() => {
|
|
||||||
const cleanupFns: Array<() => void> = [];
|
const cleanupFns: Array<() => void> = [];
|
||||||
|
|
||||||
const registerCleanup = (fn: () => void) => {
|
const registerCleanup = (fn: () => void) => {
|
||||||
cleanupFns.push(fn);
|
if (isUnmounted) {
|
||||||
|
try {
|
||||||
|
fn();
|
||||||
|
} catch (error) {
|
||||||
|
console.error("[DataProvider] Immediate cleanup failed:", error);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
cleanupFns.push(fn);
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
const addWindowListener = (eventName: string, handler: EventListener) => {
|
const addWindowListener = (eventName: string, handler: EventListener) => {
|
||||||
@@ -343,319 +87,140 @@ export const AppDataProvider = ({
|
|||||||
return () => window.removeEventListener(eventName, handler);
|
return () => window.removeEventListener(eventName, handler);
|
||||||
};
|
};
|
||||||
|
|
||||||
const runProfileChangedPipeline = (
|
const scheduleTimeout = (
|
||||||
profileId: string | null,
|
callback: () => void | Promise<void>,
|
||||||
source: "tauri" | "window",
|
delay: number,
|
||||||
) => {
|
) => {
|
||||||
|
if (isUnmounted) return -1;
|
||||||
|
|
||||||
|
const timeoutId = window.setTimeout(() => {
|
||||||
|
scheduledTimeouts.delete(timeoutId);
|
||||||
|
if (!isUnmounted) {
|
||||||
|
void callback();
|
||||||
|
}
|
||||||
|
}, delay);
|
||||||
|
|
||||||
|
scheduledTimeouts.add(timeoutId);
|
||||||
|
return timeoutId;
|
||||||
|
};
|
||||||
|
|
||||||
|
const clearAllTimeouts = () => {
|
||||||
|
scheduledTimeouts.forEach((timeoutId) => clearTimeout(timeoutId));
|
||||||
|
scheduledTimeouts.clear();
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleProfileChanged = (event: { payload: string }) => {
|
||||||
|
const newProfileId = event.payload;
|
||||||
const now = Date.now();
|
const now = Date.now();
|
||||||
const meta = profileChangeMetaRef.current;
|
|
||||||
|
|
||||||
if (
|
if (
|
||||||
meta.lastProfileId === profileId &&
|
lastProfileId === newProfileId &&
|
||||||
now - meta.lastEventTs < PROFILE_EVENT_DEDUP_MS
|
now - lastUpdateTime < refreshThrottle
|
||||||
) {
|
) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
meta.lastProfileId = profileId;
|
lastProfileId = newProfileId;
|
||||||
meta.lastEventTs = now;
|
lastUpdateTime = now;
|
||||||
|
|
||||||
if (profileId) {
|
|
||||||
void seedProxySnapshot(profileId);
|
|
||||||
}
|
|
||||||
|
|
||||||
queueProxyRefresh(`profile-changed-${source}`, 500);
|
|
||||||
|
|
||||||
scheduleTimeout(() => {
|
scheduleTimeout(() => {
|
||||||
void fetchProfilesConfig()
|
refreshRules().catch((error) =>
|
||||||
.then((data) => {
|
console.warn("[DataProvider] Rules refresh failed:", error),
|
||||||
commitProfileSnapshot(data);
|
|
||||||
globalMutate("getProfiles", data, false);
|
|
||||||
})
|
|
||||||
.catch((error) =>
|
|
||||||
console.warn(
|
|
||||||
"[AppDataProvider] Failed to refresh profiles after profile change:",
|
|
||||||
error,
|
|
||||||
),
|
|
||||||
);
|
|
||||||
void refreshProxyProviders().catch((error) =>
|
|
||||||
console.warn(
|
|
||||||
"[AppDataProvider] Proxy providers refresh failed after profile change:",
|
|
||||||
error,
|
|
||||||
),
|
|
||||||
);
|
);
|
||||||
void refreshRules().catch((error) =>
|
refreshRuleProviders().catch((error) =>
|
||||||
console.warn(
|
console.warn("[DataProvider] Rule providers refresh failed:", error),
|
||||||
"[AppDataProvider] Rules refresh failed after profile change:",
|
|
||||||
error,
|
|
||||||
),
|
|
||||||
);
|
|
||||||
void refreshRuleProviders().catch((error) =>
|
|
||||||
console.warn(
|
|
||||||
"[AppDataProvider] Rule providers refresh failed after profile change:",
|
|
||||||
error,
|
|
||||||
),
|
|
||||||
);
|
);
|
||||||
}, 200);
|
}, 200);
|
||||||
};
|
};
|
||||||
|
|
||||||
const handleProfileChanged = (event: { payload: string }) => {
|
const handleRefreshClash = () => {
|
||||||
runProfileChangedPipeline(event.payload ?? null, "tauri");
|
|
||||||
};
|
|
||||||
|
|
||||||
const runRefreshClashPipeline = (source: "tauri" | "window") => {
|
|
||||||
const now = Date.now();
|
const now = Date.now();
|
||||||
if (now - lastClashRefreshAtRef.current < CLASH_REFRESH_DEDUP_MS) {
|
if (now - lastUpdateTime <= refreshThrottle) return;
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
lastClashRefreshAtRef.current = now;
|
|
||||||
|
|
||||||
|
lastUpdateTime = now;
|
||||||
scheduleTimeout(() => {
|
scheduleTimeout(() => {
|
||||||
void refreshClashConfig().catch((error) =>
|
refreshProxy().catch((error) =>
|
||||||
console.warn(
|
console.error("[DataProvider] Proxy refresh failed:", error),
|
||||||
"[AppDataProvider] Clash config refresh failed after backend update:",
|
|
||||||
error,
|
|
-              ),
             );
-            void refreshRules().catch((error) =>
+          }, 200);
-              console.warn(
-                "[AppDataProvider] Rules refresh failed after backend update:",
-                error,
-              ),
-            );
-            void refreshRuleProviders().catch((error) =>
-              console.warn(
-                "[AppDataProvider] Rule providers refresh failed after backend update:",
-                error,
-              ),
-            );
-            void refreshProxyProviders().catch((error) =>
-              console.warn(
-                "[AppDataProvider] Proxy providers refresh failed after backend update:",
-                error,
-              ),
-            );
-          }, 0);
-
-      queueProxyRefresh(`refresh-clash-config-${source}`, 400);
     };

-    const handleProfileUpdateCompleted = (_: { payload: { uid: string } }) => {
+    const handleRefreshProxy = () => {
-      queueProxyRefresh("profile-update-completed", 3000);
+      const now = Date.now();
-      if (!isUnmountedRef.current) {
+      if (now - lastUpdateTime <= refreshThrottle) return;
-        scheduleTimeout(() => {
-          void refreshProxyProviders().catch((error) =>
-            console.warn(
-              "[DataProvider] Proxy providers refresh failed after profile update completed:",
-              error,
-            ),
-          );
-        }, 0);
-      }
-    };
-
-    const isProxiesPayload = (
+      lastUpdateTime = now;
-      value: unknown,
+      scheduleTimeout(() => {
-    ): value is ProxiesUpdatedPayload => {
+        refreshProxy().catch((error) =>
-      if (!value || typeof value !== "object") {
+          console.warn("[DataProvider] Proxy refresh failed:", error),
-        return false;
-      }
-      const candidate = value as Partial<ProxiesUpdatedPayload>;
-      return candidate.proxies !== undefined && candidate.proxies !== null;
-    };
-
-    const handleProxiesUpdatedPayload = (
-      rawPayload: unknown,
-      source: "tauri" | "window",
-    ) => {
-      if (!isProxiesPayload(rawPayload)) {
-        console.warn(
-          `[AppDataProvider] Ignored ${source} proxies-updated payload`,
-          rawPayload,
         );
-        queueProxyRefresh(`proxies-updated-${source}-invalid`, 500);
+      }, 200);
-        return;
+    };

+    const initializeListeners = async () => {
+      try {
+        const unlistenProfile = await listen<string>(
+          "profile-changed",
+          handleProfileChanged,
+        );
+        registerCleanup(unlistenProfile);
+      } catch (error) {
+        console.error("[AppDataProvider] 监听 Profile 事件失败:", error);
      }

      try {
-        applyLiveProxyPayload(rawPayload);
+        const unlistenClash = await listen(
-      } catch (error) {
+          "verge://refresh-clash-config",
-        console.warn(
+          handleRefreshClash,
-          `[AppDataProvider] Failed to apply ${source} proxies-updated payload`,
-          error,
        );
-        queueProxyRefresh(`proxies-updated-${source}-apply-failed`, 500);
+        const unlistenProxy = await listen(
+          "verge://refresh-proxy-config",
+          handleRefreshProxy,
+        );
+
+        registerCleanup(() => {
+          unlistenClash();
+          unlistenProxy();
+        });
+      } catch (error) {
+        console.warn("[AppDataProvider] 设置 Tauri 事件监听器失败:", error);
+
+        const fallbackHandlers: Array<[string, EventListener]> = [
+          ["verge://refresh-clash-config", handleRefreshClash],
+          ["verge://refresh-proxy-config", handleRefreshProxy],
+        ];
+
+        fallbackHandlers.forEach(([eventName, handler]) => {
+          registerCleanup(addWindowListener(eventName, handler));
+        });
      }
    };

-    listen<{ uid: string }>(
+    void initializeListeners();
-      "profile-update-completed",
-      handleProfileUpdateCompleted,
-    )
-      .then(registerCleanup)
-      .catch((error) =>
-        console.error(
-          "[AppDataProvider] failed to attach profile update listeners:",
-          error,
-        ),
-      );
-
-    listen<string>("profile-changed", handleProfileChanged)
-      .then(registerCleanup)
-      .catch((error) =>
-        console.error(
-          "[AppDataProvider] failed to attach profile-changed listener:",
-          error,
-        ),
-      );
-
-    listen<ProxiesUpdatedPayload>("proxies-updated", (event) => {
-      handleProxiesUpdatedPayload(event.payload, "tauri");
-    })
-      .then(registerCleanup)
-      .catch((error) =>
-        console.error(
-          "[AppDataProvider] failed to attach proxies-updated listener:",
-          error,
-        ),
-      );
-
-    listen("verge://refresh-clash-config", () => {
-      runRefreshClashPipeline("tauri");
-    })
-      .then(registerCleanup)
-      .catch((error) =>
-        console.error(
-          "[AppDataProvider] failed to attach refresh-clash-config listener:",
-          error,
-        ),
-      );
-
-    listen("verge://refresh-proxy-config", () => {
-      queueProxyRefresh("refresh-proxy-config-tauri", 500);
-    })
-      .then(registerCleanup)
-      .catch((error) =>
-        console.error(
-          "[AppDataProvider] failed to attach refresh-proxy-config listener:",
-          error,
-        ),
-      );
-
-    const fallbackHandlers: Array<[string, EventListener]> = [
-      [
-        "profile-update-completed",
-        ((event: Event) => {
-          const payload = (event as CustomEvent<{ uid: string }>).detail ?? {
-            uid: "",
-          };
-          handleProfileUpdateCompleted({ payload });
-        }) as EventListener,
-      ],
-      [
-        "profile-changed",
-        ((event: Event) => {
-          const payload = (event as CustomEvent<string | null>).detail ?? null;
-          runProfileChangedPipeline(payload, "window");
-        }) as EventListener,
-      ],
-      [
-        "proxies-updated",
-        ((event: Event) => {
-          const payload = (event as CustomEvent<ProxiesUpdatedPayload>).detail;
-          handleProxiesUpdatedPayload(payload, "window");
-        }) as EventListener,
-      ],
-      [
-        "verge://refresh-clash-config",
-        (() => {
-          runRefreshClashPipeline("window");
-        }) as EventListener,
-      ],
-      [
-        "verge://refresh-proxy-config",
-        (() => {
-          queueProxyRefresh("refresh-proxy-config-window", 500);
-        }) as EventListener,
-      ],
-    ];
-
-    fallbackHandlers.forEach(([eventName, handler]) => {
-      registerCleanup(addWindowListener(eventName, handler));
-    });

     return () => {
-      cleanupFns.forEach((fn) => {
+      isUnmounted = true;
+      clearAllTimeouts();
+
+      const errors: Error[] = [];
+      cleanupFns.splice(0).forEach((fn) => {
        try {
          fn();
        } catch (error) {
-          console.error("[AppDataProvider] cleanup error:", error);
+          errors.push(
+            error instanceof Error ? error : new Error(String(error)),
+          );
        }
      });

+      if (errors.length > 0) {
+        console.error(
+          `[DataProvider] ${errors.length} errors during cleanup:`,
+          errors,
+        );
+      }
    };
-  }, [
+  }, [refreshProxy, refreshRules, refreshRuleProviders]);
-    commitProfileSnapshot,
-    queueProxyRefresh,
-    refreshClashConfig,
-    refreshProxyProviders,
-    refreshRuleProviders,
-    refreshRules,
-    scheduleTimeout,
-    seedProxySnapshot,
-  ]);
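The removed wiring above funnels every backend event through a keyed, delayed `queueProxyRefresh(reason, delay)` call instead of refreshing immediately. A minimal sketch of that debounce-style scheduler follows; the names and behaviour are assumptions for illustration, not the project's actual implementation.

```ts
// Sketch of a keyed, delayed refresh scheduler in the spirit of queueProxyRefresh above.
type RefreshFn = () => Promise<void>;

export function createRefreshQueue(refresh: RefreshFn) {
  let timer: ReturnType<typeof setTimeout> | null = null;

  return (reason: string, delayMs: number) => {
    // Collapse bursts of triggers into a single refresh after the latest delay.
    if (timer !== null) clearTimeout(timer);
    timer = setTimeout(() => {
      timer = null;
      refresh().catch((error) =>
        console.warn(`[refresh-queue] ${reason} failed:`, error),
      );
    }, delayMs);
  };
}

// Hypothetical usage:
// const queueProxyRefresh = createRefreshQueue(() => fetchProxies());
// queueProxyRefresh("proxies-updated-tauri", 500);
```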
-  const switchTargetProfileId =
-    switchStatus?.active?.profileId ??
-    (switchStatus && switchStatus.queue.length > 0
-      ? switchStatus.queue[0].profileId
-      : null);
-
-  const proxyTargetProfileId =
-    switchTargetProfileId ?? pendingProxyProfileId ?? proxyProfileId ?? null;
-  const displayProxyStateRef = useRef<{
-    view: ProxiesView | null;
-    profileId: string | null;
-  }>({
-    view: proxyView,
-    profileId: proxyTargetProfileId,
-  });
-
-  const currentDisplay = displayProxyStateRef.current;
-
-  if (!proxyView) {
-    if (
-      currentDisplay.view !== null ||
-      currentDisplay.profileId !== proxyTargetProfileId
-    ) {
-      displayProxyStateRef.current = {
-        view: null,
-        profileId: proxyTargetProfileId,
-      };
-    }
-  } else if (proxyHydration === "live") {
-    if (
-      currentDisplay.view !== proxyView ||
-      currentDisplay.profileId !== proxyTargetProfileId
-    ) {
-      displayProxyStateRef.current = {
-        view: proxyView,
-        profileId: proxyTargetProfileId,
-      };
-    }
-  } else if (!currentDisplay.view) {
-    displayProxyStateRef.current = {
-      view: proxyView,
-      profileId: proxyTargetProfileId,
-    };
-  }
-  const displayProxyState = displayProxyStateRef.current;
-  const proxyDisplayProfileId = displayProxyState.profileId;
-  const proxiesForRender = displayProxyState.view ?? proxyView;
-  const isProxyRefreshPending =
-    (switchStatus?.isSwitching ?? false) ||
-    proxyHydration !== "live" ||
-    proxyTargetProfileId !== proxyDisplayProfileId;
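The block removed above keeps the last fully hydrated proxy view on screen while a profile switch is still resolving, so the UI does not flash an empty list. A hedged sketch of that "keep the last good value" pattern, with assumed hydration states, is shown below.

```ts
import { useRef } from "react";

// Sketch of the "render the previous live view while new data hydrates" idea.
// The Hydration union mirrors the removed code; everything else is illustrative.
type Hydration = "none" | "snapshot" | "live";

export function useLastLiveValue<T>(value: T | null, hydration: Hydration): T | null {
  const lastLiveRef = useRef<T | null>(null);

  // Only adopt the incoming value once it is fully hydrated ("live");
  // otherwise keep showing the previous live value to avoid flicker.
  if (value !== null && hydration === "live") {
    lastLiveRef.current = value;
  }
  return lastLiveRef.current ?? value;
}
```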
     const { data: sysproxy, mutate: refreshSysproxy } = useSWR(
       "getSystemProxy",
@@ -675,10 +240,10 @@ export const AppDataProvider = ({
       errorRetryCount: 1,
     });

-  // Provide unified refresh method
+  // 提供统一的刷新方法
   const refreshAll = useCallback(async () => {
     await Promise.all([
-      fetchLiveProxies(),
+      refreshProxy(),
       refreshClashConfig(),
       refreshRules(),
       refreshSysproxy(),
@@ -686,6 +251,7 @@ export const AppDataProvider = ({
       refreshRuleProviders(),
     ]);
   }, [
+    refreshProxy,
     refreshClashConfig,
     refreshRules,
     refreshSysproxy,
@@ -693,22 +259,22 @@ export const AppDataProvider = ({
     refreshRuleProviders,
   ]);

-  // Aggregate data into context value
+  // 聚合所有数据
   const value = useMemo(() => {
-    // Compute the system proxy address
+    // 计算系统代理地址
     const calculateSystemProxyAddress = () => {
       if (!verge || !clashConfig) return "-";

       const isPacMode = verge.proxy_auto_config ?? false;

       if (isPacMode) {
-        // PAC mode: display the desired proxy address
+        // PAC模式:显示我们期望设置的代理地址
         const proxyHost = verge.proxy_host || "127.0.0.1";
         const proxyPort =
           verge.verge_mixed_port || clashConfig.mixedPort || 7897;
         return `${proxyHost}:${proxyPort}`;
       } else {
-        // HTTP proxy mode: prefer system address, fallback to desired address if invalid
+        // HTTP代理模式:优先使用系统地址,但如果格式不正确则使用期望地址
         const systemServer = sysproxy?.server;
         if (
           systemServer &&
@@ -717,7 +283,7 @@ export const AppDataProvider = ({
         ) {
           return systemServer;
         } else {
-          // System address invalid: fallback to desired proxy address
+          // 系统地址无效,返回期望的代理地址
           const proxyHost = verge.proxy_host || "127.0.0.1";
           const proxyPort =
             verge.verge_mixed_port || clashConfig.mixedPort || 7897;
@@ -727,27 +293,22 @@ export const AppDataProvider = ({
     };

     return {
-      // Data
+      // 数据
-      proxies: proxiesForRender,
+      proxies: proxiesData,
-      proxyHydration,
-      proxyTargetProfileId,
-      proxyDisplayProfileId,
-      isProxyRefreshPending,
-      switchStatus: switchStatus ?? null,
       clashConfig,
       rules: rulesData?.rules || [],
       sysproxy,
       runningMode,
       uptime: uptimeData || 0,

-      // Provider data
+      // 提供者数据
       proxyProviders: proxyProviders || {},
       ruleProviders: ruleProviders?.providers || {},

       systemProxyAddress: calculateSystemProxyAddress(),

-      // Refresh helpers
+      // 刷新方法
-      refreshProxy: fetchLiveProxies,
+      refreshProxy,
       refreshClashConfig,
       refreshRules,
       refreshSysproxy,
@@ -756,12 +317,7 @@ export const AppDataProvider = ({
       refreshAll,
     } as AppDataContextType;
   }, [
-    proxiesForRender,
+    proxiesData,
-    proxyHydration,
-    proxyTargetProfileId,
-    proxyDisplayProfileId,
-    isProxyRefreshPending,
-    switchStatus,
     clashConfig,
     rulesData,
     sysproxy,
@@ -770,6 +326,7 @@ export const AppDataProvider = ({
     proxyProviders,
     ruleProviders,
     verge,
+    refreshProxy,
     refreshClashConfig,
     refreshRules,
     refreshSysproxy,
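The restored `calculateSystemProxyAddress` above picks the displayed address differently for PAC and HTTP-proxy modes. A self-contained sketch of that selection logic follows; the field names are assumptions taken from the surrounding diff, and the validity check is illustrative.

```ts
// Standalone sketch: in PAC mode show the address the app intends to serve;
// in HTTP mode prefer the system-reported address and fall back when it looks invalid.
interface ProxyPrefs {
  proxy_auto_config?: boolean;
  proxy_host?: string;
  verge_mixed_port?: number;
}

export function systemProxyAddress(
  prefs: ProxyPrefs,
  mixedPort: number | undefined,
  systemServer: string | undefined,
): string {
  const host = prefs.proxy_host || "127.0.0.1";
  const port = prefs.verge_mixed_port || mixedPort || 7897;
  if (prefs.proxy_auto_config) {
    return `${host}:${port}`;
  }
  // A usable server string looks like "host:port"; anything else falls back.
  if (systemServer && /^.+:\d+$/.test(systemServer)) {
    return systemServer;
  }
  return `${host}:${port}`;
}
```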
@@ -4,52 +4,6 @@ import { getProxies, getProxyProviders } from "tauri-plugin-mihomo-api";

 import { showNotice } from "@/services/noticeService";

-export type ProxyProviderRecord = Record<
-  string,
-  IProxyProviderItem | undefined
->;
-
-export interface SwitchTaskStatus {
-  taskId: number;
-  profileId: string;
-  notify: boolean;
-  stage?: number | null;
-  queued: boolean;
-}
-
-export interface SwitchResultStatus {
-  taskId: number;
-  profileId: string;
-  success: boolean;
-  cancelled?: boolean;
-  finishedAt: number;
-  errorStage?: string | null;
-  errorDetail?: string | null;
-}
-
-export interface ProfileSwitchStatus {
-  isSwitching: boolean;
-  active?: SwitchTaskStatus | null;
-  queue: SwitchTaskStatus[];
-  cleanupProfiles: string[];
-  lastResult?: SwitchResultStatus | null;
-  lastUpdated: number;
-}
-
-export interface SwitchResultEvent {
-  sequence: number;
-  result: SwitchResultStatus;
-}
-
-// Persist the last proxy provider payload so UI can render while waiting on Mihomo.
-let cachedProxyProviders: ProxyProviderRecord | null = null;
-
-export const getCachedProxyProviders = () => cachedProxyProviders;
-
-export const setCachedProxyProviders = (record: ProxyProviderRecord | null) => {
-  cachedProxyProviders = record;
-};
-
 export async function copyClashEnv() {
   return invoke<void>("copy_clash_env");
 }
@@ -66,14 +20,6 @@ export async function patchProfilesConfig(profiles: IProfilesConfig) {
   return invoke<void>("patch_profiles_config", { profiles });
 }

-// Triggers the async state-machine driven switch flow on the backend.
-export async function switchProfileCommand(
-  profileIndex: string,
-  notifySuccess: boolean,
-) {
-  return invoke<boolean>("switch_profile", { profileIndex, notifySuccess });
-}
-
 export async function createProfile(
   item: Partial<IProfileItem>,
   fileData?: string | null,
@@ -167,29 +113,27 @@ export async function syncTrayProxySelection() {
   return invoke<void>("sync_tray_proxy_selection");
 }

-export interface ProxiesView {
+export async function calcuProxies(): Promise<{
   global: IProxyGroupItem;
   direct: IProxyItem;
   groups: IProxyGroupItem[];
   records: Record<string, IProxyItem>;
   proxies: IProxyItem[];
-}
+}> {
+  const [proxyResponse, providerResponse] = await Promise.all([
+    getProxies(),
+    calcuProxyProviders(),
+  ]);

-export function buildProxyView(
-  proxyResponse: Awaited<ReturnType<typeof getProxies>>,
-  providerRecord?: ProxyProviderRecord | null,
-): ProxiesView {
   const proxyRecord = proxyResponse.proxies;
+  const providerRecord = providerResponse;

   // provider name map
-  const providerMap = providerRecord
-    ? Object.fromEntries(
-        Object.entries(providerRecord).flatMap(([provider, item]) => {
-          if (!item) return [];
-          return item.proxies.map((p) => [p.name, { ...p, provider }]);
-        }),
-      )
-    : {};
+  const providerMap = Object.fromEntries(
+    Object.entries(providerRecord).flatMap(([provider, item]) =>
+      item!.proxies.map((p) => [p.name, { ...p, provider }]),
+    ),
+  );

   // compatible with proxy-providers
   const generateItem = (name: string) => {
@@ -263,56 +207,16 @@ export function buildProxyView(
   };
 }

-export async function calcuProxies(): Promise<ProxiesView> {
-  const proxyResponse = await getProxies();
-
-  let providerRecord = cachedProxyProviders;
-  if (!providerRecord) {
-    try {
-      providerRecord = await calcuProxyProviders();
-    } catch (error) {
-      console.warn("[calcuProxies] 代理提供者加载失败:", error);
-    }
-  }
-
-  return buildProxyView(proxyResponse, providerRecord);
-}
-
 export async function calcuProxyProviders() {
   const providers = await getProxyProviders();
-  const mappedEntries = Object.entries(providers.providers)
-    .sort()
-    .filter(
-      ([, item]) =>
-        item?.vehicleType === "HTTP" || item?.vehicleType === "File",
-    )
-    .map(([name, item]) => {
-      if (!item) return [name, undefined] as const;
-
-      const subscriptionInfo =
-        item.subscriptionInfo && typeof item.subscriptionInfo === "object"
-          ? {
-              Upload: item.subscriptionInfo.Upload ?? 0,
-              Download: item.subscriptionInfo.Download ?? 0,
-              Total: item.subscriptionInfo.Total ?? 0,
-              Expire: item.subscriptionInfo.Expire ?? 0,
-            }
-          : undefined;
-
-      const normalized: IProxyProviderItem = {
-        name: item.name,
-        type: item.type,
-        proxies: item.proxies ?? [],
-        updatedAt: item.updatedAt ?? "",
-        vehicleType: item.vehicleType ?? "",
-        subscriptionInfo,
-      };
-      return [name, normalized] as const;
-    });
-
-  const mapped = Object.fromEntries(mappedEntries) as ProxyProviderRecord;
-  cachedProxyProviders = mapped;
-  return mapped;
+  return Object.fromEntries(
+    Object.entries(providers.providers)
+      .sort()
+      .filter(
+        ([_, item]) =>
+          item?.vehicleType === "HTTP" || item?.vehicleType === "File",
+      ),
+  );
 }

 export async function getClashLogs() {
@@ -651,13 +555,3 @@ export const isAdmin = async () => {
 export async function getNextUpdateTime(uid: string) {
   return invoke<number | null>("get_next_update_time", { uid });
 }
-
-export async function getProfileSwitchStatus() {
-  return invoke<ProfileSwitchStatus>("get_profile_switch_status");
-}
-
-export async function getProfileSwitchEvents(afterSequence: number) {
-  return invoke<SwitchResultEvent[]>("get_profile_switch_events", {
-    afterSequence,
-  });
-}
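The removed `calcuProxies`/`buildProxyView` split above reuses the last provider payload via `getCachedProxyProviders`/`setCachedProxyProviders` so a proxy refresh does not have to wait on a second request. A small, generic sketch of that caching idea follows; the helper name and shape are assumptions for illustration only.

```ts
// Sketch of a "remember the last successful result" loader, similar in spirit
// to the cached proxy-provider record that the revert removes.
export function createCachedLoader<T>(load: () => Promise<T>) {
  let cached: T | null = null;

  return {
    get: () => cached,
    set: (value: T | null) => {
      cached = value;
    },
    async loadOnce(): Promise<T | null> {
      if (cached !== null) return cached;
      try {
        cached = await load();
      } catch (error) {
        console.warn("[cached-loader] load failed:", error);
      }
      return cached;
    },
  };
}
```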
@@ -14,20 +14,10 @@ let nextId = 0;
 let notices: NoticeItem[] = [];
 const listeners: Set<Listener> = new Set();

-function flushListeners() {
+function notifyListeners() {
   listeners.forEach((listener) => listener([...notices])); // Pass a copy
 }

-let notifyScheduled = false;
-function scheduleNotify() {
-  if (notifyScheduled) return;
-  notifyScheduled = true;
-  requestAnimationFrame(() => {
-    notifyScheduled = false;
-    flushListeners();
-  });
-}
-
 // Shows a notification.

 export function showNotice(
@@ -54,7 +44,7 @@ export function showNotice(
   }

   notices = [...notices, newNotice];
-  scheduleNotify();
+  notifyListeners();
   return id;
 }

@@ -66,7 +56,7 @@ export function hideNotice(id: number) {
     clearTimeout(notice.timerId); // Clear timeout if manually closed
   }
   notices = notices.filter((n) => n.id !== id);
-  scheduleNotify();
+  notifyListeners();
 }

 // Subscribes a listener function to notice state changes.
@@ -87,5 +77,5 @@ export function clearAllNotices() {
     if (n.timerId) clearTimeout(n.timerId);
   });
   notices = [];
-  scheduleNotify();
+  notifyListeners();
 }
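The hunk above swaps the requestAnimationFrame-batched `scheduleNotify` back to the immediate `notifyListeners`. A minimal sketch of the batching idea being removed is shown below; the factory name and listener type are assumptions, not code from the repository.

```ts
// Sketch: many state changes within one frame collapse into a single listener call.
export function createBatchedNotifier<T>(getSnapshot: () => T) {
  const listeners = new Set<(value: T) => void>();
  let scheduled = false;

  const flush = () => {
    scheduled = false;
    const snapshot = getSnapshot();
    listeners.forEach((listener) => listener(snapshot));
  };

  return {
    subscribe(listener: (value: T) => void) {
      listeners.add(listener);
      return () => listeners.delete(listener);
    },
    notify() {
      if (scheduled) return;
      scheduled = true;
      requestAnimationFrame(flush);
    },
  };
}
```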
@@ -1,24 +0,0 @@
import { mutate } from "swr";

import { getAxios } from "@/services/api";

export const refreshClashData = async () => {
  try {
    await getAxios(true);
  } catch (error) {
    console.warn("[Refresh] getAxios failed during clash refresh:", error);
  }

  mutate("getProxies");
  mutate("getVersion");
  mutate("getClashConfig");
  mutate("getProxyProviders");
};

export const refreshVergeData = () => {
  mutate("getVergeConfig");
  mutate("getSystemProxy");
  mutate("getAutotemProxy");
  mutate("getRunningMode");
  mutate("isServiceAvailable");
};
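The deleted helper above revalidates SWR caches by key after backend changes. A short usage-style sketch of the same pattern follows; the function name is hypothetical, while the SWR keys mirror the file above.

```ts
import { mutate } from "swr";

// Sketch: revalidate the Clash-related SWR entries after a backend restart or reload.
export async function refreshAfterClashRestart() {
  await Promise.all([
    mutate("getProxies"),
    mutate("getClashConfig"),
    mutate("getProxyProviders"),
  ]);
}
```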
@@ -1,59 +0,0 @@
import { create } from "zustand";

import type { SwitchResultStatus } from "@/services/cmds";

interface ProfileStoreState {
  data: IProfilesConfig | null;
  optimisticCurrent: string | null;
  isHydrating: boolean;
  lastEventSeq: number;
  lastResult: SwitchResultStatus | null;
  applySwitchResult: (result: SwitchResultStatus) => void;
  commitHydrated: (data: IProfilesConfig) => void;
  setLastEventSeq: (sequence: number) => void;
}

export const useProfileStore = create<ProfileStoreState>((set) => ({
  data: null,
  optimisticCurrent: null,
  isHydrating: false,
  lastEventSeq: 0,
  lastResult: null,
  applySwitchResult(result) {
    // Record the optimistic switch outcome so the UI reflects the desired profile immediately.
    set((state) => ({
      lastResult: result,
      optimisticCurrent: result.success ? result.profileId : null,
      isHydrating: result.success ? true : state.isHydrating,
    }));
  },
  commitHydrated(data) {
    set({
      data,
      optimisticCurrent: null,
      isHydrating: false,
    });
  },
  setLastEventSeq(sequence) {
    set({ lastEventSeq: sequence });
  },
}));

export const selectEffectiveProfiles = (state: ProfileStoreState) => {
  if (!state.data) {
    return null;
  }
  // Prefer the optimistic selection while hydration is pending.
  const current = state.optimisticCurrent ?? state.data.current;
  if (
    state.optimisticCurrent &&
    state.optimisticCurrent !== state.data.current
  ) {
    return { ...state.data, current } as IProfilesConfig;
  }
  return state.data;
};

export const selectIsHydrating = (state: ProfileStoreState) =>
  state.isHydrating;
export const selectLastResult = (state: ProfileStoreState) => state.lastResult;
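The deleted store above exposes `selectEffectiveProfiles`, which overlays the optimistic switch target onto the hydrated profiles config. A hypothetical consumption sketch follows; the hook name and import path are assumptions, not repository code.

```ts
// Hypothetical usage of the store above (import path assumed for illustration).
import { useProfileStore, selectEffectiveProfiles } from "./profileStore";

export function useCurrentProfileId(): string | null {
  const profiles = useProfileStore(selectEffectiveProfiles);
  // While a switch is optimistic, `current` already points at the requested profile.
  return profiles?.current ?? null;
}
```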
@@ -1,298 +0,0 @@
import type { getProxies } from "tauri-plugin-mihomo-api";
import { create } from "zustand";

import {
  ProxiesView,
  ProxyProviderRecord,
  buildProxyView,
  calcuProxies,
  getCachedProxyProviders,
  setCachedProxyProviders,
} from "@/services/cmds";
import { AsyncEventQueue, nextTick } from "@/utils/asyncQueue";

type ProxyHydration = "none" | "snapshot" | "live";
type RawProxiesResponse = Awaited<ReturnType<typeof getProxies>>;

export interface ProxiesUpdatedPayload {
  proxies: RawProxiesResponse;
  providers?: Record<string, unknown> | null;
  emittedAt?: number;
  profileId?: string | null;
}

interface ProxyStoreState {
  data: ProxiesView | null;
  hydration: ProxyHydration;
  lastUpdated: number | null;
  lastProfileId: string | null;
  liveFetchRequestId: number;
  lastAppliedFetchId: number;
  pendingProfileId: string | null;
  pendingSnapshotFetchId: number | null;
  setSnapshot: (snapshot: ProxiesView, profileId: string) => void;
  setLive: (payload: ProxiesUpdatedPayload) => void;
  startLiveFetch: () => number;
  completeLiveFetch: (requestId: number, view: ProxiesView) => void;
  clearPendingProfile: () => void;
  reset: () => void;
}

const normalizeProviderPayload = (
  raw: ProxiesUpdatedPayload["providers"],
): ProxyProviderRecord | null => {
  if (!raw || typeof raw !== "object") return null;

  const rawRecord = raw as Record<string, any>;
  const source =
    rawRecord.providers && typeof rawRecord.providers === "object"
      ? (rawRecord.providers as Record<string, any>)
      : rawRecord;

  const entries = Object.entries(source)
    .sort(([a], [b]) => a.localeCompare(b))
    .filter(([, value]) => {
      if (!value || typeof value !== "object") {
        return false;
      }
      const vehicleType = value.vehicleType;
      return vehicleType === "HTTP" || vehicleType === "File";
    })
    .map(([name, value]) => {
      const normalized: IProxyProviderItem = {
        name: value.name ?? name,
        type: value.type ?? "",
        proxies: Array.isArray(value.proxies) ? value.proxies : [],
        updatedAt: value.updatedAt ?? "",
        vehicleType: value.vehicleType ?? "",
        subscriptionInfo:
          value.subscriptionInfo && typeof value.subscriptionInfo === "object"
            ? {
                Upload: Number(value.subscriptionInfo.Upload ?? 0),
                Download: Number(value.subscriptionInfo.Download ?? 0),
                Total: Number(value.subscriptionInfo.Total ?? 0),
                Expire: Number(value.subscriptionInfo.Expire ?? 0),
              }
            : undefined,
      };

      return [name, normalized] as const;
    });

  return Object.fromEntries(entries) as ProxyProviderRecord;
};

export const useProxyStore = create<ProxyStoreState>((set, get) => ({
  data: null,
  hydration: "none",
  lastUpdated: null,
  lastProfileId: null,
  liveFetchRequestId: 0,
  lastAppliedFetchId: 0,
  pendingProfileId: null,
  pendingSnapshotFetchId: null,
  setSnapshot(snapshot, profileId) {
    const stateBefore = get();

    set((state) => ({
      data: snapshot,
      hydration: "snapshot",
      lastUpdated: null,
      pendingProfileId: profileId,
      pendingSnapshotFetchId: state.liveFetchRequestId,
    }));

    const hasLiveHydration =
      stateBefore.hydration === "live" &&
      stateBefore.lastProfileId === profileId;

    if (profileId && !hasLiveHydration) {
      void fetchLiveProxies().catch((error) => {
        console.warn(
          "[ProxyStore] Failed to bootstrap live proxies from snapshot:",
          error,
        );
        scheduleBootstrapLiveFetch(800);
      });
    }
  },
  setLive(payload) {
    const state = get();
    const emittedAt = payload.emittedAt ?? Date.now();

    if (
      state.hydration === "live" &&
      state.lastUpdated !== null &&
      emittedAt <= state.lastUpdated
    ) {
      return;
    }

    const providersRecord =
      normalizeProviderPayload(payload.providers) ?? getCachedProxyProviders();

    if (providersRecord) {
      setCachedProxyProviders(providersRecord);
    }

    const view = buildProxyView(payload.proxies, providersRecord);
    const nextProfileId = payload.profileId ?? state.lastProfileId;

    set((current) => ({
      data: view,
      hydration: "live",
      lastUpdated: emittedAt,
      lastProfileId: nextProfileId ?? null,
      lastAppliedFetchId: current.liveFetchRequestId,
      pendingProfileId: null,
      pendingSnapshotFetchId: null,
    }));
  },
  startLiveFetch() {
    let nextRequestId = 0;
    set((state) => {
      nextRequestId = state.liveFetchRequestId + 1;
      return {
        liveFetchRequestId: nextRequestId,
      };
    });
    return nextRequestId;
  },
  completeLiveFetch(requestId, view) {
    const state = get();
    if (requestId <= state.lastAppliedFetchId) {
      return;
    }

    const shouldAdoptPending =
      state.pendingProfileId !== null &&
      requestId >= (state.pendingSnapshotFetchId ?? 0);

    set({
      data: view,
      hydration: "live",
      lastUpdated: Date.now(),
      lastProfileId: shouldAdoptPending
        ? state.pendingProfileId
        : state.lastProfileId,
      lastAppliedFetchId: requestId,
      pendingProfileId: shouldAdoptPending ? null : state.pendingProfileId,
      pendingSnapshotFetchId: shouldAdoptPending
        ? null
        : state.pendingSnapshotFetchId,
    });
  },
  clearPendingProfile() {
    set({
      pendingProfileId: null,
      pendingSnapshotFetchId: null,
    });
  },
  reset() {
    set({
      data: null,
      hydration: "none",
      lastUpdated: null,
      lastProfileId: null,
      liveFetchRequestId: 0,
      lastAppliedFetchId: 0,
      pendingProfileId: null,
      pendingSnapshotFetchId: null,
    });
    scheduleBootstrapLiveFetch(200);
  },
}));

const liveApplyQueue = new AsyncEventQueue();
let pendingLivePayload: ProxiesUpdatedPayload | null = null;
let liveApplyScheduled = false;

const scheduleLiveApply = () => {
  if (liveApplyScheduled) return;
  liveApplyScheduled = true;

  const dispatch = () => {
    liveApplyScheduled = false;
    const payload = pendingLivePayload;
    pendingLivePayload = null;
    if (!payload) return;

    liveApplyQueue.enqueue(async () => {
      await nextTick();
      useProxyStore.getState().setLive(payload);
    });
  };

  if (
    typeof window !== "undefined" &&
    typeof window.requestAnimationFrame === "function"
  ) {
    window.requestAnimationFrame(dispatch);
  } else {
    setTimeout(dispatch, 16);
  }
};

export const applyLiveProxyPayload = (payload: ProxiesUpdatedPayload) => {
  pendingLivePayload = payload;
  scheduleLiveApply();
};

export const fetchLiveProxies = async () => {
  const requestId = useProxyStore.getState().startLiveFetch();
  const view = await calcuProxies();
  useProxyStore.getState().completeLiveFetch(requestId, view);
};

const MAX_BOOTSTRAP_ATTEMPTS = 5;
const BOOTSTRAP_BASE_DELAY_MS = 600;
let bootstrapAttempts = 0;
let bootstrapTimer: number | null = null;

function attemptBootstrapLiveFetch() {
  const state = useProxyStore.getState();
  if (state.hydration === "live") {
    bootstrapAttempts = 0;
    return;
  }

  if (bootstrapAttempts >= MAX_BOOTSTRAP_ATTEMPTS) {
    return;
  }

  const attemptNumber = ++bootstrapAttempts;

  void fetchLiveProxies()
    .then(() => {
      bootstrapAttempts = 0;
    })
    .catch((error) => {
      console.warn(
        `[ProxyStore] Bootstrap live fetch attempt ${attemptNumber} failed:`,
        error,
      );
      if (attemptNumber < MAX_BOOTSTRAP_ATTEMPTS) {
        scheduleBootstrapLiveFetch(BOOTSTRAP_BASE_DELAY_MS * attemptNumber);
      }
    });
}

function scheduleBootstrapLiveFetch(delay = 0) {
  if (typeof window === "undefined") {
    return;
  }

  if (bootstrapTimer !== null) {
    window.clearTimeout(bootstrapTimer);
    bootstrapTimer = null;
  }

  bootstrapTimer = window.setTimeout(() => {
    bootstrapTimer = null;
    attemptBootstrapLiveFetch();
  }, delay);
}

if (typeof window !== "undefined") {
  void nextTick().then(() => scheduleBootstrapLiveFetch(0));
}
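The deleted store above coalesces bursts of `proxies-updated` payloads through `applyLiveProxyPayload` so only the latest one reaches the store per animation frame. A hedged wiring sketch follows; the import path is an assumption, while the event name appears in the removed code.

```ts
// Hypothetical wiring sketch: forward backend "proxies-updated" events into the store.
import { listen } from "@tauri-apps/api/event";

import { applyLiveProxyPayload, type ProxiesUpdatedPayload } from "./proxyStore";

export async function attachProxiesUpdatedListener() {
  // Rapid successive payloads are coalesced; only the newest is applied.
  return listen<ProxiesUpdatedPayload>("proxies-updated", (event) => {
    applyLiveProxyPayload(event.payload);
  });
}
```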
@@ -1,31 +0,0 @@
export class AsyncEventQueue {
  private tail: Promise<void> = Promise.resolve();

  enqueue(task: () => Promise<void> | void) {
    this.tail = this.tail
      .then(async () => {
        await task();
      })
      .catch((error) => {
        console.error("AsyncEventQueue task failed", error);
      });
  }

  clear() {
    this.tail = Promise.resolve();
  }
}

export const nextTick = () =>
  new Promise<void>((resolve) => {
    if (typeof queueMicrotask === "function") {
      queueMicrotask(resolve);
    } else {
      Promise.resolve().then(() => resolve());
    }
  });

export const afterPaint = () =>
  new Promise<void>((resolve) => {
    requestAnimationFrame(() => resolve());
  });
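The deleted `AsyncEventQueue` above chains tasks on a single promise so they run strictly in submission order and one failure does not break the chain. A short usage sketch follows; the import path is an assumption.

```ts
// Usage sketch for the queue above: tasks execute in order even when callers
// do not await them.
import { AsyncEventQueue, nextTick } from "./asyncQueue"; // path is an assumption

const queue = new AsyncEventQueue();

queue.enqueue(async () => {
  await nextTick();
  console.log("first");
});
queue.enqueue(() => {
  console.log("second"); // always logs after "first"
});
```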
@@ -1,205 +0,0 @@
import yaml from "js-yaml";

const createProxyItem = (
  name: string,
  partial: Partial<IProxyItem> = {},
): IProxyItem => ({
  name,
  type: partial.type ?? "unknown",
  udp: partial.udp ?? false,
  xudp: partial.xudp ?? false,
  tfo: partial.tfo ?? false,
  mptcp: partial.mptcp ?? false,
  smux: partial.smux ?? false,
  history: [],
  provider: partial.provider,
  testUrl: partial.testUrl,
  hidden: partial.hidden,
  icon: partial.icon,
  fixed: partial.fixed,
});

const createGroupItem = (
  name: string,
  all: IProxyItem[],
  partial: Partial<IProxyGroupItem> = {},
): IProxyGroupItem => {
  const rest = { ...partial } as Partial<IProxyItem>;
  delete (rest as Partial<IProxyGroupItem>).all;
  const base = createProxyItem(name, rest);
  return {
    ...base,
    all,
    now: partial.now ?? base.now,
  };
};

const ensureProxyItem = (
  map: Map<string, IProxyItem>,
  name: string,
  source?: Partial<IProxyItem>,
) => {
  const key = String(name);
  if (map.has(key)) return map.get(key)!;
  const item = createProxyItem(key, source);
  map.set(key, item);
  return item;
};

const parseProxyEntry = (entry: any): IProxyItem | null => {
  if (!entry || typeof entry !== "object") return null;
  const name = entry.name || entry.uid || entry.id;
  if (!name) return null;
  return createProxyItem(String(name), {
    type: entry.type ? String(entry.type) : undefined,
    udp: Boolean(entry.udp),
    xudp: Boolean(entry.xudp),
    tfo: Boolean(entry.tfo),
    mptcp: Boolean(entry.mptcp),
    smux: Boolean(entry.smux),
    testUrl: entry.test_url || entry.testUrl,
  });
};

const isNonEmptyString = (value: unknown): value is string =>
  typeof value === "string" && value.trim().length > 0;

const parseProxyGroup = (
  entry: any,
  proxyMap: Map<string, IProxyItem>,
): IProxyGroupItem | null => {
  if (!entry || typeof entry !== "object") return null;
  const name = entry.name;
  if (!name) return null;

  const rawProxies: unknown[] = Array.isArray(entry.proxies)
    ? entry.proxies
    : [];

  const proxyRefs: string[] = rawProxies
    .filter(isNonEmptyString)
    .map((item) => item.trim());

  const uniqueNames: string[] = Array.from(new Set(proxyRefs));

  const all = uniqueNames.map((proxyName) =>
    ensureProxyItem(proxyMap, proxyName),
  );

  return createGroupItem(String(name), all, {
    type: entry.type ? String(entry.type) : "Selector",
    provider: entry.provider,
    testUrl: entry.testUrl || entry.test_url,
    now: typeof entry.now === "string" ? entry.now : undefined,
  });
};

const mapRecords = (
  proxies: Map<string, IProxyItem>,
  groups: IProxyGroupItem[],
  extra: IProxyItem[] = [],
): Record<string, IProxyItem> => {
  const result: Record<string, IProxyItem> = {};
  proxies.forEach((item, key) => {
    result[key] = item;
  });
  groups.forEach((group) => {
    result[group.name] = group as unknown as IProxyItem;
  });
  extra.forEach((item) => {
    result[item.name] = item;
  });
  return result;
};

export const createProxySnapshotFromProfile = (
  yamlContent: string,
): {
  global: IProxyGroupItem;
  direct: IProxyItem;
  groups: IProxyGroupItem[];
  records: Record<string, IProxyItem>;
  proxies: IProxyItem[];
} | null => {
  let parsed: any;
  try {
    parsed = yaml.load(yamlContent);
  } catch (error) {
    console.warn("[ProxySnapshot] Failed to parse YAML:", error);
    return null;
  }

  if (!parsed || typeof parsed !== "object") {
    return null;
  }

  const proxyMap = new Map<string, IProxyItem>();

  if (Array.isArray((parsed as any).proxies)) {
    for (const entry of (parsed as any).proxies) {
      const item = parseProxyEntry(entry);
      if (item) {
        proxyMap.set(item.name, item);
      }
    }
  }

  const proxyProviders = (parsed as any)["proxy-providers"];
  if (proxyProviders && typeof proxyProviders === "object") {
    for (const key of Object.keys(proxyProviders)) {
      const provider = proxyProviders[key];
      if (provider && Array.isArray(provider.proxies)) {
        provider.proxies
          .filter(
            (proxyName: unknown): proxyName is string =>
              typeof proxyName === "string",
          )
          .forEach((proxyName: string) => ensureProxyItem(proxyMap, proxyName));
      }
    }
  }

  const groups: IProxyGroupItem[] = [];
  if (Array.isArray((parsed as any)["proxy-groups"])) {
    for (const entry of (parsed as any)["proxy-groups"]) {
      const groupItem = parseProxyGroup(entry, proxyMap);
      if (groupItem) {
        groups.push(groupItem);
      }
    }
  }

  const direct = createProxyItem("DIRECT", { type: "Direct" });
  const reject = createProxyItem("REJECT", { type: "Reject" });

  ensureProxyItem(proxyMap, direct.name, direct);
  ensureProxyItem(proxyMap, reject.name, reject);

  let global = groups.find((group) => group.name === "GLOBAL");
  if (!global) {
    const globalRefs = groups.flatMap((group) =>
      group.all.map((proxy) => proxy.name),
    );
    const unique = Array.from(new Set(globalRefs));
    const all = unique.map((name) => ensureProxyItem(proxyMap, name));
    global = createGroupItem("GLOBAL", all, {
      type: "Selector",
      hidden: true,
    });
    groups.unshift(global);
  }

  const proxies = Array.from(proxyMap.values()).filter(
    (item) => !groups.some((group) => group.name === item.name),
  );

  const records = mapRecords(proxyMap, groups, [direct, reject]);

  return {
    global,
    direct,
    groups,
    records,
    proxies,
  };
};
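The deleted module above builds an offline proxy view directly from profile YAML, injecting DIRECT/REJECT and a synthetic GLOBAL group so the UI can render before Mihomo responds. A small usage sketch follows; the import path and YAML literal are illustrative assumptions.

```ts
import { createProxySnapshotFromProfile } from "./proxySnapshot"; // path is an assumption

// Build a snapshot from a tiny illustrative profile; GLOBAL is synthesized up front.
const snapshot = createProxySnapshotFromProfile(`
proxies:
  - { name: node-a, type: ss }
proxy-groups:
  - { name: PROXY, type: select, proxies: [node-a, DIRECT] }
`);

if (snapshot) {
  console.log(snapshot.groups.map((group) => group.name)); // ["GLOBAL", "PROXY"]
}
```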