mirror of
https://github.com/clash-verge-rev/clash-verge-rev.git
synced 2026-01-29 00:35:38 +08:00
refactor: invoke mihomo api by using tauri-plugin-mihomo (#4926)
* feat: add tauri-plugin-mihomo
* refactor: invoke mihomo api by using tauri-plugin-mihomo
* chore: todo
* chore: update
* chore: update
* chore: update
* chore: update
* fix: incorrect delay status and update pretty config
* chore: update
* chore: remove cache
* chore: update
* chore: update
* fix: app freeze when changing group proxy
* chore: update
* chore: update
* chore: add rustfmt.toml to tauri-plugin-mihomo
* chore: happy clippy
* refactor: connect mihomo websocket
* chore: update
* chore: update
* fix: parse bigint to number
* chore: update
* Revert "fix: parse bigint to number"
This reverts commit 74c006522e.
* chore: use number instead of bigint
* chore: cleanup
* fix: rule data not refresh when switch profile
* chore: update
* chore: cleanup
* chore: update
* fix: traffic graph data display
* feat: add ipc connection pool
* chore: update
* chore: clippy
* fix: incorrect delay status
* fix: typo
* fix: empty proxies tray menu
* chore: clippy
* chore: import tauri-plugin-mihomo by using git repo
* chore: cleanup
* fix: mihomo api
* fix: incorrect delay status
* chore: update tauri-plugin-mihomo dep
chore: update
src-tauri/src/cache/mod.rs (vendored, 111 lines deleted)
@@ -1,111 +0,0 @@
use crate::singleton;
use anyhow::Result;
use dashmap::DashMap;
use serde_json::Value;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::OnceCell;

pub const SHORT_TERM_TTL: Duration = Duration::from_millis(4_250);

pub struct CacheEntry<T> {
    pub value: Arc<T>,
    pub expires_at: Instant,
}

pub struct Cache<T> {
    pub map: DashMap<String, Arc<OnceCell<Box<CacheEntry<T>>>>>,
}

impl<T> Cache<T> {
    fn new() -> Self {
        Cache {
            map: DashMap::new(),
        }
    }

    pub fn make_key(prefix: &str, id: &str) -> String {
        format!("{prefix}:{id}")
    }

    pub async fn get_or_fetch<F, Fut>(&self, key: String, ttl: Duration, fetch_fn: F) -> Arc<T>
    where
        F: Fn() -> Fut + Send + Sync + 'static,
        Fut: std::future::Future<Output = T> + Send + 'static,
        T: Send + Sync + 'static,
    {
        loop {
            let now = Instant::now();
            let key_cloned = key.clone();

            // Get or create the cell
            let cell = self
                .map
                .entry(key_cloned.clone())
                .or_insert_with(|| Arc::new(OnceCell::new()))
                .clone();

            // Check if we have a valid cached entry
            if let Some(entry) = cell.get() {
                if entry.expires_at > now {
                    return Arc::clone(&entry.value);
                }
                // Entry is expired, remove it
                self.map
                    .remove_if(&key_cloned, |_, v| Arc::ptr_eq(v, &cell));
                continue; // Retry with fresh cell
            }

            // Try to set a new value
            let value = fetch_fn().await;
            let entry = Box::new(CacheEntry {
                value: Arc::new(value),
                expires_at: Instant::now() + ttl,
            });

            match cell.set(entry) {
                Ok(_) => {
                    // Successfully set the value, it must exist now
                    if let Some(set_entry) = cell.get() {
                        return Arc::clone(&set_entry.value);
                    }
                }
                Err(_) => {
                    if let Some(existing_entry) = cell.get() {
                        if existing_entry.expires_at > Instant::now() {
                            return Arc::clone(&existing_entry.value);
                        }
                        self.map
                            .remove_if(&key_cloned, |_, v| Arc::ptr_eq(v, &cell));
                    }
                }
            }
        }
    }

    // pub fn clean_key(&self, key: &str) {
    //     self.map.remove(key);
    // }

    // TODO
    pub fn clean_default_keys(&self) {
        // logging!(info, Type::Cache, "Cleaning proxies keys");
        // let proxies_key = Self::make_key("proxies", "default");
        // self.map.remove(&proxies_key);

        // logging!(info, Type::Cache, "Cleaning providers keys");
        // let providers_key = Self::make_key("providers", "default");
        // self.map.remove(&providers_key);

        // !The frontend crashes if we clean the clash_config cache
        // logging!(info, Type::Cache, "Cleaning clash config keys");
        // let clash_config_key = Self::make_key("clash_config", "default");
        // self.map.remove(&clash_config_key);
    }
}

pub type CacheService = Cache<Result<String>>;
pub type CacheProxy = Cache<Value>;

singleton!(Cache<Value>, PROXY_INSTANCE);
singleton!(Cache<Result<String>>, SERVICE_INSTANCE);
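The deleted get_or_fetch used a double-checked pattern: each DashMap entry holds a OnceCell, so concurrent callers race to initialize it once per key, and an expired entry is evicted and retried in the loop. A minimal usage sketch (the TTL and the fetch closure are illustrative, not from the repository):

    // Hypothetical caller: all concurrent tasks share a single fetch per TTL window.
    let cache = CacheProxy::global();
    let key = CacheProxy::make_key("proxies", "default");
    let value = cache
        .get_or_fetch(key, Duration::from_secs(60), || async {
            // Runs at most once per key until the entry expires.
            serde_json::json!({ "proxies": {} })
        })
        .await;
    // `value` is an Arc<serde_json::Value> shared by every concurrent caller.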
@@ -1,21 +1,15 @@
use std::collections::VecDeque;

use super::CmdResult;
use crate::{
    cache::CacheProxy,
    config::Config,
    core::{CoreManager, handle},
};
use crate::{
    config::*,
    feat,
    ipc::{self, IpcManager},
    logging,
    utils::logging::Type,
    wrap_err,
    core::{self, CoreManager, RunningMode, handle, logger},
};
use crate::{config::*, feat, logging, utils::logging::Type, wrap_err};
use serde_yaml_ng::Mapping;
use std::time::Duration;
// use std::time::Duration;

const CONFIG_REFRESH_INTERVAL: Duration = Duration::from_secs(60);
// const CONFIG_REFRESH_INTERVAL: Duration = Duration::from_secs(60);

/// Copy the Clash environment variables
#[tauri::command]
@@ -112,20 +106,6 @@ pub async fn restart_core() -> CmdResult {
    result
}

/// Get proxy delay
#[tauri::command]
pub async fn clash_api_get_proxy_delay(
    name: String,
    url: Option<String>,
    timeout: i32,
) -> CmdResult<serde_json::Value> {
    wrap_err!(
        IpcManager::global()
            .test_proxy_delay(&name, url, timeout)
            .await
    )
}

/// Test URL delay
#[tauri::command]
pub async fn test_delay(url: String) -> CmdResult<u32> {
@@ -307,317 +287,13 @@ pub async fn validate_dns_config() -> CmdResult<(bool, String)> {
        }
    }

/// Get Clash version info
#[tauri::command]
pub async fn get_clash_version() -> CmdResult<serde_json::Value> {
    wrap_err!(IpcManager::global().get_version().await)
}

/// Get the Clash config
#[tauri::command]
pub async fn get_clash_config() -> CmdResult<serde_json::Value> {
    let manager = IpcManager::global();
    let cache = CacheProxy::global();
    let key = CacheProxy::make_key("clash_config", "default");
    let value = cache
        .get_or_fetch(key, CONFIG_REFRESH_INTERVAL, || async {
            manager.get_config().await.unwrap_or_else(|e| {
                logging!(error, Type::Cmd, "Failed to fetch clash config: {e}");
                serde_json::Value::Object(serde_json::Map::new())
            })
        })
        .await;
    Ok((*value).clone())
}

/// Force refresh the Clash config cache
#[tauri::command]
pub async fn force_refresh_clash_config() -> CmdResult<serde_json::Value> {
    let cache = CacheProxy::global();
    let key = CacheProxy::make_key("clash_config", "default");
    cache.map.remove(&key);
    get_clash_config().await
}

/// Update geo data
#[tauri::command]
pub async fn update_geo_data() -> CmdResult {
    wrap_err!(IpcManager::global().update_geo_data().await)
}

/// Upgrade the Clash core
#[tauri::command]
pub async fn upgrade_clash_core() -> CmdResult {
    wrap_err!(IpcManager::global().upgrade_core().await)
}

/// Get rules
#[tauri::command]
pub async fn get_clash_rules() -> CmdResult<serde_json::Value> {
    wrap_err!(IpcManager::global().get_rules().await)
}

/// Update proxy selection
#[tauri::command]
pub async fn update_proxy_choice(group: String, proxy: String) -> CmdResult {
    wrap_err!(IpcManager::global().update_proxy(&group, &proxy).await)
}

/// Get proxy providers
#[tauri::command]
pub async fn get_proxy_providers() -> CmdResult<serde_json::Value> {
    wrap_err!(IpcManager::global().get_providers_proxies().await)
}

/// Get rule providers
#[tauri::command]
pub async fn get_rule_providers() -> CmdResult<serde_json::Value> {
    wrap_err!(IpcManager::global().get_rule_providers().await)
}

/// Proxy provider health check
#[tauri::command]
pub async fn proxy_provider_health_check(name: String) -> CmdResult {
    wrap_err!(
        IpcManager::global()
            .proxy_provider_health_check(&name)
            .await
    )
}

/// Update proxy provider
#[tauri::command]
pub async fn update_proxy_provider(name: String) -> CmdResult {
    wrap_err!(IpcManager::global().update_proxy_provider(&name).await)
}

/// Update rule provider
#[tauri::command]
pub async fn update_rule_provider(name: String) -> CmdResult {
    wrap_err!(IpcManager::global().update_rule_provider(&name).await)
}

/// Get connections
#[tauri::command]
pub async fn get_clash_connections() -> CmdResult<serde_json::Value> {
    wrap_err!(IpcManager::global().get_connections().await)
}

/// Delete a connection
#[tauri::command]
pub async fn delete_clash_connection(id: String) -> CmdResult {
    wrap_err!(IpcManager::global().delete_connection(&id).await)
}

/// Close all connections
#[tauri::command]
pub async fn close_all_clash_connections() -> CmdResult {
    wrap_err!(IpcManager::global().close_all_connections().await)
}

/// Get traffic data (uses the new IPC streaming monitor)
#[tauri::command]
pub async fn get_traffic_data() -> CmdResult<serde_json::Value> {
    let traffic = crate::ipc::get_current_traffic().await;
    let result = serde_json::json!({
        "up": traffic.total_up,
        "down": traffic.total_down,
        "up_rate": traffic.up_rate,
        "down_rate": traffic.down_rate,
        "last_updated": traffic.last_updated.elapsed().as_secs()
    });
    Ok(result)
}

/// Get memory data (uses the new IPC streaming monitor)
#[tauri::command]
pub async fn get_memory_data() -> CmdResult<serde_json::Value> {
    let memory = crate::ipc::get_current_memory().await;
    let usage_percent = if memory.oslimit > 0 {
        (memory.inuse as f64 / memory.oslimit as f64) * 100.0
    } else {
        0.0
pub async fn get_clash_logs() -> CmdResult<VecDeque<String>> {
    let logs = match core::CoreManager::global().get_running_mode() {
        // TODO: log retrieval interface for service mode
        RunningMode::Service => VecDeque::new(),
        RunningMode::Sidecar => logger::Logger::global().get_logs().clone(),
        _ => VecDeque::new(),
    };
    let result = serde_json::json!({
        "inuse": memory.inuse,
        "oslimit": memory.oslimit,
        "usage_percent": usage_percent,
        "last_updated": memory.last_updated.elapsed().as_secs()
    });
    Ok(result)
}

/// Start the traffic monitoring service (the IPC streaming monitor starts automatically; kept for compatibility)
#[tauri::command]
pub async fn start_traffic_service() -> CmdResult {
    logging!(trace, Type::Ipc, "Starting traffic monitoring service (IPC streaming)");
    // The new IPC monitor starts automatically on first access
    // Trigger one access to make sure the monitor is initialized
    let _ = crate::ipc::get_current_traffic().await;
    let _ = crate::ipc::get_current_memory().await;
    logging!(info, Type::Ipc, "IPC streaming monitor activated");
    Ok(())
}

/// Stop the traffic monitoring service (the IPC streaming monitor needs no explicit stop; kept for compatibility)
#[tauri::command]
pub async fn stop_traffic_service() -> CmdResult {
    logging!(trace, Type::Ipc, "Stop traffic monitoring service requested (IPC streaming)");
    // The new IPC monitor is persistent and needs no explicit stop
    logging!(info, Type::Ipc, "IPC streaming monitor keeps running");
    Ok(())
}

/// Get formatted traffic data (with units, for frontend display)
#[tauri::command]
pub async fn get_formatted_traffic_data() -> CmdResult<serde_json::Value> {
    logging!(trace, Type::Ipc, "Fetching formatted traffic data");
    let (up_rate, down_rate, total_up, total_down, is_fresh) =
        crate::ipc::get_formatted_traffic().await;
    let result = serde_json::json!({
        "up_rate_formatted": up_rate,
        "down_rate_formatted": down_rate,
        "total_up_formatted": total_up,
        "total_down_formatted": total_down,
        "is_fresh": is_fresh
    });
    logging!(
        debug,
        Type::Ipc,
        "Formatted traffic data: ↑{up_rate}/s ↓{down_rate}/s (total: ↑{total_up} ↓{total_down})"
    );
    Ok(result)
}

/// Get formatted memory data (with units, for frontend display)
#[tauri::command]
pub async fn get_formatted_memory_data() -> CmdResult<serde_json::Value> {
    logging!(info, Type::Ipc, "Fetching formatted memory data");
    let (inuse, oslimit, usage_percent, is_fresh) = crate::ipc::get_formatted_memory().await;
    let result = serde_json::json!({
        "inuse_formatted": inuse,
        "oslimit_formatted": oslimit,
        "usage_percent": usage_percent,
        "is_fresh": is_fresh
    });
    logging!(
        debug,
        Type::Ipc,
        "Formatted memory data: {inuse} / {oslimit} ({usage_percent:.1}%)"
    );
    Ok(result)
}

/// Get the system monitor overview (traffic + memory, so the frontend can fetch all state at once)
#[tauri::command]
pub async fn get_system_monitor_overview() -> CmdResult<serde_json::Value> {
    logging!(debug, Type::Ipc, "Fetching system monitor overview");

    // Fetch traffic and memory data concurrently
    let (traffic, memory) = tokio::join!(
        crate::ipc::get_current_traffic(),
        crate::ipc::get_current_memory()
    );

    let (traffic_formatted, memory_formatted) = tokio::join!(
        crate::ipc::get_formatted_traffic(),
        crate::ipc::get_formatted_memory()
    );

    let traffic_is_fresh = traffic.last_updated.elapsed().as_secs() < 5;
    let memory_is_fresh = memory.last_updated.elapsed().as_secs() < 10;

    let result = serde_json::json!({
        "traffic": {
            "raw": {
                "up": traffic.total_up,
                "down": traffic.total_down,
                "up_rate": traffic.up_rate,
                "down_rate": traffic.down_rate
            },
            "formatted": {
                "up_rate": traffic_formatted.0,
                "down_rate": traffic_formatted.1,
                "total_up": traffic_formatted.2,
                "total_down": traffic_formatted.3
            },
            "is_fresh": traffic_is_fresh
        },
        "memory": {
            "raw": {
                "inuse": memory.inuse,
                "oslimit": memory.oslimit,
                "usage_percent": if memory.oslimit > 0 {
                    (memory.inuse as f64 / memory.oslimit as f64) * 100.0
                } else {
                    0.0
                }
            },
            "formatted": {
                "inuse": memory_formatted.0,
                "oslimit": memory_formatted.1,
                "usage_percent": memory_formatted.2
            },
            "is_fresh": memory_is_fresh
        },
        "overall_status": if traffic_is_fresh && memory_is_fresh { "healthy" } else { "stale" }
    });

    Ok(result)
}

/// Get proxy group delays
#[tauri::command]
pub async fn get_group_proxy_delays(
    group_name: String,
    url: Option<String>,
    timeout: Option<i32>,
) -> CmdResult<serde_json::Value> {
    wrap_err!(
        IpcManager::global()
            .get_group_proxy_delays(&group_name, url, timeout.unwrap_or(10000))
            .await
    )
}

/// Check whether debugging is enabled
#[tauri::command]
pub async fn is_clash_debug_enabled() -> CmdResult<bool> {
    match IpcManager::global().is_debug_enabled().await {
        Ok(enabled) => Ok(enabled),
        Err(_) => Ok(false),
    }
}

/// Garbage collection
#[tauri::command]
pub async fn clash_gc() -> CmdResult {
    wrap_err!(IpcManager::global().gc().await)
}

/// Get logs (uses the new streaming implementation)
#[tauri::command]
pub async fn get_clash_logs() -> CmdResult<serde_json::Value> {
    Ok(ipc::get_logs_json().await)
}

/// Start log monitoring
#[tauri::command]
pub async fn start_logs_monitoring(level: Option<String>) -> CmdResult {
    ipc::start_logs_monitoring(level).await;
    Ok(())
}

/// Stop log monitoring
#[tauri::command]
pub async fn stop_logs_monitoring() -> CmdResult {
    ipc::stop_logs_monitoring().await;
    Ok(())
}

/// Clear logs
#[tauri::command]
pub async fn clear_logs() -> CmdResult {
    ipc::clear_logs().await;
    Ok(())
    Ok(logs)
}
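These #[tauri::command] functions only become callable from the webview after they are registered with the app builder. A minimal registration sketch (the command list here is abbreviated and illustrative; the real registration lives elsewhere in the repository):

    // Standard Tauri wiring for the commands defined above.
    tauri::Builder::default()
        .invoke_handler(tauri::generate_handler![
            get_clash_version,
            get_clash_config,
            force_refresh_clash_config,
            // ...remaining commands
        ])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");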
@@ -503,11 +503,11 @@ pub async fn patch_profiles_config(profiles: IProfiles) -> CmdResult<bool> {
    handle::Handle::refresh_clash();

    // Force refresh the proxy cache so the latest node data is available right after a profile switch
    crate::process::AsyncHandler::spawn(|| async move {
        if let Err(e) = super::proxy::force_refresh_proxies().await {
            log::warn!(target: "app", "Force refresh of proxy cache failed: {e}");
        }
    });
    // crate::process::AsyncHandler::spawn(|| async move {
    //     if let Err(e) = super::proxy::force_refresh_proxies().await {
    //         log::warn!(target: "app", "Force refresh of proxy cache failed: {e}");
    //     }
    // });

    if let Err(e) = Tray::global().update_tooltip().await {
        log::warn!(target: "app", "Async tray tooltip update failed: {e}");
@@ -1,59 +1,7 @@
use tauri::Emitter;

use super::CmdResult;
use crate::{
    cache::CacheProxy,
    core::{handle::Handle, tray::Tray},
    ipc::IpcManager,
    logging,
    utils::logging::Type,
};
use std::time::Duration;

const PROXIES_REFRESH_INTERVAL: Duration = Duration::from_secs(60);
const PROVIDERS_REFRESH_INTERVAL: Duration = Duration::from_secs(60);

#[tauri::command]
pub async fn get_proxies() -> CmdResult<serde_json::Value> {
    let cache = CacheProxy::global();
    let key = CacheProxy::make_key("proxies", "default");
    let value = cache
        .get_or_fetch(key, PROXIES_REFRESH_INTERVAL, || async {
            let manager = IpcManager::global();
            manager.get_proxies().await.unwrap_or_else(|e| {
                logging!(error, Type::Cmd, "Failed to fetch proxies: {e}");
                serde_json::Value::Object(serde_json::Map::new())
            })
        })
        .await;
    Ok((*value).clone())
}

/// Force refresh the proxy cache for profile switches
#[tauri::command]
pub async fn force_refresh_proxies() -> CmdResult<serde_json::Value> {
    let cache = CacheProxy::global();
    let key = CacheProxy::make_key("proxies", "default");
    cache.map.remove(&key);
    get_proxies().await
}

#[tauri::command]
pub async fn get_providers_proxies() -> CmdResult<serde_json::Value> {
    let cache = CacheProxy::global();
    let key = CacheProxy::make_key("providers", "default");
    let value = cache
        .get_or_fetch(key, PROVIDERS_REFRESH_INTERVAL, || async {
            let manager = IpcManager::global();
            manager.get_providers_proxies().await.unwrap_or_else(|e| {
                logging!(error, Type::Cmd, "Failed to fetch provider proxies: {e}");
                serde_json::Value::Object(serde_json::Map::new())
            })
        })
        .await;
    Ok((*value).clone())
}
use crate::{logging, utils::logging::Type};

// TODO: the frontend sends update events via emit, and the tray listens for them
/// Sync the proxy selection state between the tray and the GUI
#[tauri::command]
pub async fn sync_tray_proxy_selection() -> CmdResult<()> {
@@ -70,54 +18,3 @@ pub async fn sync_tray_proxy_selection() -> CmdResult<()> {
        }
    }
}

/// Update the proxy selection and sync tray and GUI state
#[tauri::command]
pub async fn update_proxy_and_sync(group: String, proxy: String) -> CmdResult<()> {
    match IpcManager::global().update_proxy(&group, &proxy).await {
        Ok(_) => {
            // println!("Proxy updated successfully: {} -> {}", group,proxy);
            logging!(
                info,
                Type::Cmd,
                "Proxy updated successfully: {} -> {}",
                group,
                proxy
            );

            let cache = CacheProxy::global();
            let key = CacheProxy::make_key("proxies", "default");
            cache.map.remove(&key);

            if let Err(e) = Tray::global().update_menu().await {
                logging!(error, Type::Cmd, "Failed to sync tray menu: {}", e);
            }

            if let Some(app_handle) = Handle::global().app_handle() {
                let _ = app_handle.emit("verge://force-refresh-proxies", ());
                let _ = app_handle.emit("verge://refresh-proxy-config", ());
            }

            logging!(
                info,
                Type::Cmd,
                "Proxy and sync completed successfully: {} -> {}",
                group,
                proxy
            );
            Ok(())
        }
        Err(e) => {
            println!("1111111111111111");
            logging!(
                error,
                Type::Cmd,
                "Failed to update proxy: {} -> {}, error: {}",
                group,
                proxy,
                e
            );
            Err(e.to_string())
        }
    }
}
@@ -28,9 +28,7 @@ pub async fn export_diagnostic_info() -> CmdResult<()> {
    let sysinfo = PlatformSpecification::new_sync();
    let info = format!("{sysinfo:?}");

    let app_handle = handle::Handle::global()
        .app_handle()
        .ok_or("Failed to get app handle")?;
    let app_handle = handle::Handle::app_handle();
    let cliboard = app_handle.clipboard();
    if cliboard.write_text(info).is_err() {
        logging!(error, Type::System, "Failed to write to clipboard");
@@ -1,11 +1,11 @@
use crate::AsyncHandler;
use crate::core::logger::Logger;
use crate::{
    config::*,
    core::{
        handle,
        service::{self, SERVICE_MANAGER, ServiceStatus},
    },
    ipc::IpcManager,
    logging, logging_error, singleton_lazy,
    utils::{
        dirs,
@@ -25,6 +25,10 @@ use std::{
};
use tauri_plugin_shell::{ShellExt, process::CommandChild};

// TODO:
// - Refactor to speed up mode switching
// - Add IPC startup arguments (`-ext-ctl-unix` / `-ext-ctl-pipe`) to the core launch; the related entries then need to be removed from the runtime config

#[derive(Debug)]
pub struct CoreManager {
    running: Arc<Mutex<RunningMode>>,
@@ -250,11 +254,7 @@ impl CoreManager {
    let clash_core = Config::verge().await.latest_ref().get_valid_clash_core();
    logging!(info, Type::Config, true, "Using core: {}", clash_core);

    let app_handle = handle::Handle::global().app_handle().ok_or_else(|| {
        let msg = "Failed to get app handle";
        logging!(error, Type::Core, true, "{}", msg);
        anyhow::anyhow!(msg)
    })?;
    let app_handle = handle::Handle::app_handle();
    let app_dir = dirs::app_home_dir()?;
    let app_dir_str = dirs::path_to_str(&app_dir)?;
    logging!(info, Type::Config, true, "Validating directory: {}", app_dir_str);
@@ -414,7 +414,11 @@ impl CoreManager {
        logging_error!(Type::Core, true, "{}", msg);
        msg
    });
    match IpcManager::global().put_configs_force(run_path_str?).await {
    match handle::Handle::mihomo()
        .await
        .reload_config(true, run_path_str?)
        .await
    {
        Ok(_) => {
            Config::runtime().await.apply();
            logging!(info, Type::Core, true, "Configuration updated successfully");
@@ -733,9 +737,7 @@ impl CoreManager {
    logging!(info, Type::Core, true, "Running core by sidecar");

    let config_file = &Config::generate_file(ConfigType::Run).await?;
    let app_handle = handle::Handle::global()
        .app_handle()
        .ok_or(anyhow::anyhow!("failed to get app handle"))?;
    let app_handle = handle::Handle::app_handle();
    let clash_core = Config::verge().await.latest_ref().get_valid_clash_core();
    let config_dir = dirs::app_home_dir()?;

@@ -774,12 +776,16 @@ impl CoreManager {
    while let Some(event) = rx.recv().await {
        match event {
            tauri_plugin_shell::process::CommandEvent::Stdout(line) => {
                if let Err(e) = writeln!(log_file, "{}", String::from_utf8_lossy(&line)) {
                let line = String::from_utf8_lossy(&line);
                Logger::global().append_log(line.to_string());
                if let Err(e) = writeln!(log_file, "{}", line) {
                    eprintln!("[Sidecar] write stdout failed: {e}");
                }
            }
            tauri_plugin_shell::process::CommandEvent::Stderr(line) => {
                let _ = writeln!(log_file, "[stderr] {}", String::from_utf8_lossy(&line));
                let line = String::from_utf8_lossy(&line);
                Logger::global().append_log(line.to_string());
                let _ = writeln!(log_file, "[stderr] {}", line);
            }
            tauri_plugin_shell::process::CommandEvent::Terminated(term) => {
                let _ = writeln!(log_file, "[terminated] {:?}", term);
@@ -900,6 +906,7 @@ impl CoreManager {

    /// Stop the running core
    pub async fn stop_core(&self) -> Result<()> {
        Logger::global().clear_logs();
        match self.get_running_mode() {
            RunningMode::Service => self.stop_core_by_service().await,
            RunningMode::Sidecar => self.stop_core_by_sidecar(),
@@ -1,4 +1,4 @@
use crate::singleton;
use crate::{APP_HANDLE, singleton};
use parking_lot::RwLock;
use std::{
    sync::{
@@ -10,6 +8,8 @@ use std::{
    time::{Duration, Instant},
};
use tauri::{AppHandle, Emitter, Manager, WebviewWindow};
use tauri_plugin_mihomo::{Mihomo, MihomoExt};
use tokio::sync::{RwLockReadGuard, RwLockWriteGuard};

use crate::{logging, utils::logging::Type};

@@ -107,7 +109,7 @@ impl NotificationSystem {
                continue;
            }

            if let Some(window) = handle.get_window() {
            if let Some(window) = Handle::get_window() {
                *system.last_emit_time.write() = Instant::now();

                let (event_name_str, payload_result) = match event {
@@ -249,7 +251,6 @@ impl NotificationSystem {

#[derive(Debug, Clone)]
pub struct Handle {
    pub app_handle: Arc<RwLock<Option<AppHandle>>>,
    pub is_exiting: Arc<RwLock<bool>>,
    startup_errors: Arc<RwLock<Vec<ErrorMessage>>>,
    startup_completed: Arc<RwLock<bool>>,
@@ -259,7 +260,6 @@ pub struct Handle {
impl Default for Handle {
    fn default() -> Self {
        Self {
            app_handle: Arc::new(RwLock::new(None)),
            is_exiting: Arc::new(RwLock::new(false)),
            startup_errors: Arc::new(RwLock::new(Vec::new())),
            startup_completed: Arc::new(RwLock::new(false)),
@@ -276,18 +276,13 @@ impl Handle {
        Self::default()
    }

    pub fn init(&self, app_handle: AppHandle) {
    pub fn init(&self) {
        // Don't re-initialize if we are exiting
        if self.is_exiting() {
            log::debug!("Handle::init called while exiting, skipping initialization");
            return;
        }

        {
            let mut handle = self.app_handle.write();
            *handle = Some(app_handle);
        }

        let mut system_opt = self.notification_system.write();
        if let Some(system) = system_opt.as_mut() {
            // Only start it when not already running
@@ -300,12 +295,22 @@ impl Handle {
    }

    /// Get the AppHandle
    pub fn app_handle(&self) -> Option<AppHandle> {
        self.app_handle.read().clone()
    #[allow(clippy::expect_used)]
    pub fn app_handle() -> &'static AppHandle {
        APP_HANDLE.get().expect("failed to get global app handle")
    }

    pub fn get_window(&self) -> Option<WebviewWindow> {
        let app_handle = self.app_handle()?;
    pub async fn mihomo() -> RwLockReadGuard<'static, Mihomo> {
        Self::app_handle().mihomo().read().await
    }

    #[allow(unused)]
    pub async fn mihomo_mut() -> RwLockWriteGuard<'static, Mihomo> {
        Self::app_handle().mihomo().write().await
    }

    pub fn get_window() -> Option<WebviewWindow> {
        let app_handle = Self::app_handle();
        let window: Option<WebviewWindow> = app_handle.get_webview_window("main");
        if window.is_none() {
            log::debug!(target:"app", "main window not found");
@@ -520,14 +525,10 @@ impl Handle {
#[cfg(target_os = "macos")]
impl Handle {
    pub fn set_activation_policy(&self, policy: tauri::ActivationPolicy) -> Result<(), String> {
        let app_handle = self.app_handle();
        if let Some(app_handle) = app_handle.as_ref() {
            app_handle
                .set_activation_policy(policy)
                .map_err(|e| e.to_string())
        } else {
            Err("AppHandle not initialized".to_string())
        }
        let app_handle = Self::app_handle();
        app_handle
            .set_activation_policy(policy)
            .map_err(|e| e.to_string())
    }

    pub fn set_activation_policy_regular(&self) {
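With APP_HANDLE now a process-wide static, the new accessors hand out read/write guards on the plugin's shared Mihomo client. A minimal usage sketch, using only method names that appear elsewhere in this diff:

    // Read access is enough for queries:
    let proxies = handle::Handle::mihomo().await.get_proxies().await;
    // A write guard exists for the rare mutating case:
    // let mut client = handle::Handle::mihomo_mut().await;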
@@ -200,9 +200,7 @@ impl Hotkey {
        hotkey: &str,
        function: HotkeyFunction,
    ) -> Result<()> {
        let app_handle = handle::Handle::global()
            .app_handle()
            .ok_or_else(|| anyhow::anyhow!("Failed to get app handle for hotkey registration"))?;
        let app_handle = handle::Handle::app_handle();
        let manager = app_handle.global_shortcut();

        logging!(
@@ -375,9 +373,7 @@ impl Hotkey {
    }

    pub fn reset(&self) -> Result<()> {
        let app_handle = handle::Handle::global()
            .app_handle()
            .ok_or_else(|| anyhow::anyhow!("Failed to get app handle for hotkey registration"))?;
        let app_handle = handle::Handle::app_handle();
        let manager = app_handle.global_shortcut();
        manager.unregister_all()?;
        Ok(())
@@ -390,9 +386,7 @@ impl Hotkey {
    }

    pub fn unregister(&self, hotkey: &str) -> Result<()> {
        let app_handle = handle::Handle::global()
            .app_handle()
            .ok_or_else(|| anyhow::anyhow!("Failed to get app handle for hotkey registration"))?;
        let app_handle = handle::Handle::app_handle();
        let manager = app_handle.global_shortcut();
        manager.unregister(hotkey)?;
        logging!(debug, Type::Hotkey, "Unregister hotkey {}", hotkey);
@@ -468,17 +462,7 @@ impl Hotkey {

impl Drop for Hotkey {
    fn drop(&mut self) {
        let app_handle = match handle::Handle::global().app_handle() {
            Some(handle) => handle,
            None => {
                logging!(
                    error,
                    Type::Hotkey,
                    "Failed to get app handle during hotkey cleanup"
                );
                return;
            }
        };
        let app_handle = handle::Handle::app_handle();
        if let Err(e) = app_handle.global_shortcut().unregister_all() {
            logging!(
                error,
src-tauri/src/core/logger.rs (new file, 37 lines)
@@ -0,0 +1,37 @@
use std::{collections::VecDeque, sync::Arc};

use once_cell::sync::OnceCell;
use parking_lot::{RwLock, RwLockReadGuard};

const LOGS_QUEUE_LEN: usize = 100;

pub struct Logger {
    logs: Arc<RwLock<VecDeque<String>>>,
}

impl Logger {
    pub fn global() -> &'static Logger {
        static LOGGER: OnceCell<Logger> = OnceCell::new();

        LOGGER.get_or_init(|| Logger {
            logs: Arc::new(RwLock::new(VecDeque::with_capacity(LOGS_QUEUE_LEN + 10))),
        })
    }

    pub fn get_logs(&self) -> RwLockReadGuard<'_, VecDeque<String>> {
        self.logs.read()
    }

    pub fn append_log(&self, text: String) {
        let mut logs = self.logs.write();
        if logs.len() > LOGS_QUEUE_LEN {
            logs.pop_front();
        }
        logs.push_back(text);
    }

    pub fn clear_logs(&self) {
        let mut logs = self.logs.write();
        logs.clear();
    }
}
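A minimal usage sketch of the new in-memory ring buffer (illustrative; in this diff the producer is the sidecar stdout/stderr handler and the consumer is cmd::get_clash_logs):

    // Producer: append a line; the oldest entry is dropped once LOGS_QUEUE_LEN is exceeded.
    Logger::global().append_log("core started".to_string());
    // Consumer: clone the current queue out of the read guard.
    let snapshot = Logger::global().get_logs().clone();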
@@ -5,6 +5,7 @@ mod core;
pub mod event_driven_proxy;
pub mod handle;
pub mod hotkey;
pub mod logger;
pub mod service;
pub mod service_ipc;
pub mod sysopt;
@@ -1,5 +1,4 @@
use crate::{
    cache::{CacheService, SHORT_TERM_TTL},
    config::Config,
    core::service_ipc::{IpcCommand, send_ipc_request},
    logging, logging_error,
@@ -333,28 +332,24 @@ pub async fn force_reinstall_service() -> Result<()> {

/// Check the service version via IPC
async fn check_service_version() -> Result<String> {
    let cache = CacheService::global();
    let key = CacheService::make_key("service", "version");
    let version_arc = cache
        .get_or_fetch(key, SHORT_TERM_TTL, || async {
            logging!(info, Type::Service, true, "Start checking service version (IPC)");
            let payload = serde_json::json!({});
            let response = send_ipc_request(IpcCommand::GetVersion, payload).await?;
    let version_arc: Result<String> = {
        logging!(info, Type::Service, true, "Start checking service version (IPC)");
        let payload = serde_json::json!({});
        let response = send_ipc_request(IpcCommand::GetVersion, payload).await?;

        let data = response
            .data
            .ok_or_else(|| anyhow::anyhow!("no data in the service version response"))?;

        if let Some(nested_data) = data.get("data")
            && let Some(version) = nested_data.get("version").and_then(|v| v.as_str())
        {
            // logging!(info, Type::Service, true, "Got service version: {}", version);
            return Ok(version.to_string());
        }
        let data = response
            .data
            .ok_or_else(|| anyhow::anyhow!("no data in the service version response"))?;

        if let Some(nested_data) = data.get("data")
            && let Some(version) = nested_data.get("version").and_then(|v| v.as_str())
        {
            // logging!(info, Type::Service, true, "Got service version: {}", version);
            Ok(version.to_string())
        } else {
            Ok("unknown".to_string())
        })
        .await;
        }
    };

    match version_arc.as_ref() {
        Ok(v) => Ok(v.clone()),
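For illustration, a response shape that would satisfy the extraction above (values hypothetical):

    // IpcCommand::GetVersion response, as data.get("data").get("version") expects it:
    // { "data": { "data": { "version": "1.0.0" } } }
    // check_service_version() would then resolve to Ok("1.0.0").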
@@ -262,10 +262,7 @@ impl Sysopt {

    /// Try the original autostart method
    fn try_original_autostart_method(&self, is_enable: bool) {
        let Some(app_handle) = Handle::global().app_handle() else {
            log::error!(target: "app", "App handle not available for autostart");
            return;
        };
        let app_handle = Handle::app_handle();
        let autostart_manager = app_handle.autolaunch();

        if is_enable {
@@ -292,9 +289,7 @@ impl Sysopt {
    }

    // Fall back to the original method
    let app_handle = Handle::global()
        .app_handle()
        .ok_or_else(|| anyhow::anyhow!("App handle not available"))?;
    let app_handle = Handle::app_handle();
    let autostart_manager = app_handle.autolaunch();

    match autostart_manager.is_enabled() {
@@ -139,6 +139,27 @@ impl Timer {
        Ok(())
    }

    /// Update the system tray menu every 3 seconds, 3 times in total
    pub fn add_update_tray_menu_task(&self) -> Result<()> {
        let tid = self.timer_count.fetch_add(1, Ordering::SeqCst);
        let delay_timer = self.delay_timer.write();
        let task = TaskBuilder::default()
            .set_task_id(tid)
            .set_maximum_parallel_runnable_num(1)
            .set_frequency_count_down_by_seconds(3, 3)
            .spawn_async_routine(|| async move {
                logging!(info, Type::Timer, "Updating tray menu");
                crate::core::tray::Tray::global()
                    .update_tray_display()
                    .await
            })
            .context("failed to create update tray menu timer task")?;
        delay_timer
            .add_task(task)
            .context("failed to add update tray menu timer task")?;
        Ok(())
    }

    /// Refresh timer tasks with better error handling
    pub async fn refresh(&self) -> Result<()> {
        // Generate diff outside of lock to minimize lock contention
@@ -3,16 +3,13 @@ use tauri::Emitter;
use tauri::tray::TrayIconBuilder;
#[cfg(target_os = "macos")]
pub mod speed_rate;
use crate::ipc::Rate;
use crate::module::lightweight;
use crate::process::AsyncHandler;
use crate::utils::window_manager::WindowManager;
use crate::{
    Type, cmd,
    config::Config,
    feat,
    ipc::IpcManager,
    logging,
    feat, logging,
    module::lightweight::is_in_lightweight_mode,
    singleton_lazy,
    utils::{dirs::find_target_icons, i18n::t},
@@ -34,6 +31,8 @@ use tauri::{
    tray::{MouseButton, MouseButtonState, TrayIconEvent},
};

// TODO: consider extracting and storing the mutable menus so the corresponding menu instances can be updated directly, without recreating the whole menu (to be decided)

#[derive(Clone)]
struct TrayState {}

@@ -54,7 +53,7 @@ fn should_handle_tray_click() -> bool {
        *last_click = now;
        true
    } else {
        log::debug!(target: "app", "Tray click ignored by debounce; {:?}ms since the last click",
        log::debug!(target: "app", "Tray click ignored by debounce; {:?}ms since the last click",
            now.duration_since(*last_click).as_millis());
        false
    }
@@ -189,28 +188,25 @@ singleton_lazy!(Tray, TRAY, Tray::default);

impl Tray {
    pub async fn init(&self) -> Result<()> {
        let app_handle = handle::Handle::global()
            .app_handle()
            .ok_or_else(|| anyhow::anyhow!("Failed to get app handle for tray initialization"))?;
        let app_handle = handle::Handle::app_handle();

        match self.create_tray_from_handle(&app_handle).await {
        match self.create_tray_from_handle(app_handle).await {
            Ok(_) => {
                log::info!(target: "app", "System tray created successfully");
                Ok(())
            }
            Err(e) => {
                log::warn!(target: "app", "System tray creation failed: {}, Application will continue running without tray icon", e);
                // Don't return error, let application continue running without tray
                Ok(())
                log::warn!(target: "app", "System tray creation failed: {}, Application will continue running without tray icon", e);
            }
        }
        // TODO: during init, temporarily use this task to update the tray menu; it effectively avoids an empty proxy-node menu
        crate::core::timer::Timer::global().add_update_tray_menu_task()?;
        Ok(())
    }

    /// Update the tray click behavior
    pub async fn update_click_behavior(&self) -> Result<()> {
        let app_handle = handle::Handle::global()
            .app_handle()
            .ok_or_else(|| anyhow::anyhow!("Failed to get app handle for tray update"))?;
        let app_handle = handle::Handle::app_handle();
        let tray_event = { Config::verge().await.latest_ref().tray_event.clone() };
        let tray_event: String = tray_event.unwrap_or("main_window".into());
        let tray = app_handle
@@ -250,18 +246,12 @@ impl Tray {
            return Ok(());
        }

        let app_handle = match handle::Handle::global().app_handle() {
            Some(handle) => handle,
            None => {
                log::warn!(target: "app", "Failed to update tray menu: app_handle does not exist");
                return Ok(());
            }
        };
        let app_handle = handle::Handle::app_handle();

        // Set the updating flag
        self.menu_updating.store(true, Ordering::Release);

        let result = self.update_menu_internal(&app_handle).await;
        let result = self.update_menu_internal(app_handle).await;

        {
            let mut last_update = self.last_menu_update.lock();
@@ -318,14 +308,8 @@ impl Tray {

    /// Update the tray icon
    #[cfg(target_os = "macos")]
    pub async fn update_icon(&self, _rate: Option<Rate>) -> Result<()> {
        let app_handle = match handle::Handle::global().app_handle() {
            Some(handle) => handle,
            None => {
                log::warn!(target: "app", "Failed to update tray icon: app_handle does not exist");
                return Ok(());
            }
        };
    pub async fn update_icon(&self) -> Result<()> {
        let app_handle = handle::Handle::app_handle();

        let tray = match app_handle.tray_by_id("main") {
            Some(tray) => tray,
@@ -355,14 +339,8 @@ impl Tray {
    }

    #[cfg(not(target_os = "macos"))]
    pub async fn update_icon(&self, _rate: Option<Rate>) -> Result<()> {
        let app_handle = match handle::Handle::global().app_handle() {
            Some(handle) => handle,
            None => {
                log::warn!(target: "app", "Failed to update tray icon: app_handle does not exist");
                return Ok(());
            }
        };
    pub async fn update_icon(&self) -> Result<()> {
        let app_handle = handle::Handle::app_handle();

        let tray = match app_handle.tray_by_id("main") {
            Some(tray) => tray,
@@ -389,9 +367,7 @@ impl Tray {

    /// Update the tray display state
    pub async fn update_tray_display(&self) -> Result<()> {
        let app_handle = handle::Handle::global()
            .app_handle()
            .ok_or_else(|| anyhow::anyhow!("Failed to get app handle for tray update"))?;
        let app_handle = handle::Handle::app_handle();
        let _tray = app_handle
            .tray_by_id("main")
            .ok_or_else(|| anyhow::anyhow!("Failed to get main tray"))?;
@@ -404,13 +380,7 @@ impl Tray {

    /// Update the tray tooltip
    pub async fn update_tooltip(&self) -> Result<()> {
        let app_handle = match handle::Handle::global().app_handle() {
            Some(handle) => handle,
            None => {
                log::warn!(target: "app", "Failed to update tray tooltip: app_handle does not exist");
                return Ok(());
            }
        };
        let app_handle = handle::Handle::app_handle();

        let verge = Config::verge().await.latest_ref().clone();
        let system_proxy = verge.enable_system_proxy.as_ref().unwrap_or(&false);
@@ -464,7 +434,7 @@ impl Tray {
        // self.update_menu().await?;
        // Update the lightweight-mode display state
        self.update_tray_display().await?;
        self.update_icon(None).await?;
        self.update_icon().await?;
        self.update_tooltip().await?;
        Ok(())
    }
@@ -550,7 +520,7 @@ impl Tray {
        // Make sure all state updates are complete
        self.update_tray_display().await?;
        // self.update_menu().await?;
        self.update_icon(None).await?;
        self.update_icon().await?;
        self.update_tooltip().await?;

        Ok(())
@@ -578,14 +548,7 @@ async fn create_tray_menu(
        .unwrap_or_default()
    };

    let proxy_nodes_data = cmd::get_proxies().await.unwrap_or_else(|e| {
        logging!(
            error,
            Type::Cmd,
            "Failed to fetch proxies for tray menu: {e}"
        );
        serde_json::Value::Object(serde_json::Map::new())
    });
    let proxy_nodes_data = handle::Handle::mihomo().await.get_proxies().await;

    let version = env!("CARGO_PKG_VERSION");

@@ -638,46 +601,43 @@ async fn create_tray_menu(
    let mut submenus = Vec::new();
    let mut group_name_submenus_hash = HashMap::new();

    if let Some(proxies) = proxy_nodes_data.get("proxies").and_then(|v| v.as_object()) {
        for (group_name, group_data) in proxies.iter() {
    // TODO: at app startup the core is not fully up yet, so proxy node info cannot be fetched
    if let Ok(proxy_nodes_data) = proxy_nodes_data {
        for (group_name, group_data) in proxy_nodes_data.proxies.iter() {
            // Filter groups based on mode
            let should_show = match mode {
                "global" => group_name == "GLOBAL",
                _ => group_name != "GLOBAL",
            } &&
            // Check if the group is hidden
            !group_data.get("hidden").and_then(|v| v.as_bool()).unwrap_or(false);
            !group_data.hidden.unwrap_or_default();

            if !should_show {
                continue;
            }

            let Some(all_proxies) = group_data.get("all").and_then(|v| v.as_array()) else {
            let Some(all_proxies) = group_data.all.as_ref() else {
                continue;
            };

            let now_proxy = group_data.get("now").and_then(|v| v.as_str()).unwrap_or("");
            let now_proxy = group_data.now.as_deref().unwrap_or_default();

            // Create proxy items
            let group_items: Vec<CheckMenuItem<Wry>> = all_proxies
                .iter()
                .filter_map(|proxy_name| proxy_name.as_str())
                .filter_map(|proxy_str| {
                    let is_selected = proxy_str == now_proxy;
                    let is_selected = *proxy_str == now_proxy;
                    let item_id = format!("proxy_{}_{}", group_name, proxy_str);

                    // Get delay for display
                    let delay_text = proxies
                    let delay_text = proxy_nodes_data
                        .proxies
                        .get(proxy_str)
                        .and_then(|p| p.get("history"))
                        .and_then(|h| h.as_array())
                        .and_then(|h| h.last())
                        .and_then(|r| r.get("delay"))
                        .and_then(|d| d.as_i64())
                        .map(|delay| match delay {
                            -1 => "-ms".to_string(),
                        .and_then(|h| h.history.last())
                        .map(|h| match h.delay {
                            0 => "-ms".to_string(),
                            delay if delay >= 10000 => "-ms".to_string(),
                            _ => format!("{}ms", delay),
                            _ => format!("{}ms", h.delay),
                        })
                        .unwrap_or_else(|| "-ms".to_string());

@@ -1066,29 +1026,30 @@ fn on_menu_event(_: &AppHandle, event: MenuEvent) {
                let group_name = parts[1];
                let proxy_name = parts[2];

                match cmd::proxy::update_proxy_and_sync(
                    group_name.to_string(),
                    proxy_name.to_string(),
                )
                .await
                match handle::Handle::mihomo()
                    .await
                    .select_node_for_group(group_name, proxy_name)
                    .await
                {
                    Ok(_) => {
                        log::info!(target: "app", "Proxy switched successfully: {} -> {}", group_name, proxy_name);
                        let _ = handle::Handle::app_handle()
                            .emit("verge://refresh-proxy-config", ());
                    }
                    Err(e) => {
                        log::error!(target: "app", "Proxy switch failed: {} -> {}, error: {:?}", group_name, proxy_name, e);

                        // Fallback to IPC update
                        if (IpcManager::global()
                            .update_proxy(group_name, proxy_name)
                        if (handle::Handle::mihomo()
                            .await
                            .select_node_for_group(group_name, proxy_name)
                            .await)
                            .is_ok()
                        {
                            log::info!(target: "app", "Proxy switch fallback succeeded: {} -> {}", group_name, proxy_name);

                            if let Some(app_handle) = handle::Handle::global().app_handle() {
                                let _ = app_handle.emit("verge://force-refresh-proxies", ());
                            }
                            let app_handle = handle::Handle::app_handle();
                            let _ = app_handle.emit("verge://force-refresh-proxies", ());
                        }
                    }
                }
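For illustration (values hypothetical), the typed history mapping above renders a last-entry delay of 0 or anything at or above 10000 as a placeholder, and everything else literally:

    // h.delay == 0     -> "-ms"   (no measurement yet)
    // h.delay == 15000 -> "-ms"   (treated as a timeout)
    // h.delay == 245   -> "245ms"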
@@ -1,14 +1,11 @@
use crate::{
    config::Config,
    core::{CoreManager, handle, tray},
    ipc::IpcManager,
    logging_error,
    process::AsyncHandler,
    utils::{logging::Type, resolve},
};
use serde_yaml_ng::{Mapping, Value};
use std::env;
use std::process::{Command, exit};

/// Restart the Clash core
pub async fn restart_clash_core() {
@@ -35,55 +32,57 @@ pub async fn restart_app() {
        return;
    }

    handle::Handle::notice_message("restart_app::info", "Restarting application...");
    let app_handle = handle::Handle::app_handle();
    app_handle.restart();
    // TODO: PR Ref: https://github.com/clash-verge-rev/clash-verge-rev/pull/4960
    // handle::Handle::notice_message("restart_app::info", "Restarting application...");

    // Use the manual restart method consistently to ensure reliability across platforms
    // This addresses the issue where app_handle.restart() doesn't work properly on Windows
    let current_exe = match env::current_exe() {
        Ok(path) => path,
        Err(_) => {
            // If we can't get the current executable path, try to use the fallback method
            if let Some(app_handle) = handle::Handle::global().app_handle() {
                app_handle.restart();
            }
            exit(1); // If we reach here, either app_handle was None or restart() failed to restart
        }
    };
    // // Use the manual restart method consistently to ensure reliability across platforms
    // // This addresses the issue where app_handle.restart() doesn't work properly on Windows
    // let current_exe = match env::current_exe() {
    //     Ok(path) => path,
    //     Err(_) => {
    //         // If we can't get the current executable path, try to use the fallback method
    //         if let Some(app_handle) = handle::Handle::global().app_handle() {
    //             app_handle.restart();
    //         }
    //         exit(1); // If we reach here, either app_handle was None or restart() failed to restart
    //     }
    // };

    let mut cmd = Command::new(current_exe);
    cmd.args(env::args().skip(1));
    // let mut cmd = Command::new(current_exe);
    // cmd.args(env::args().skip(1));

    match cmd.spawn() {
        Ok(child) => {
            log::info!(target: "app", "New application instance started with PID: {}", child.id());
            // Successfully started new process, now exit current process
            if let Some(app_handle) = handle::Handle::global().app_handle() {
                app_handle.exit(0);
            } else {
                exit(0);
            }
        }
        Err(e) => {
            log::error!(target: "app", "Failed to start new application instance: {}", e);
            // If manual spawn fails, try the original restart method as a last resort
            if let Some(app_handle) = handle::Handle::global().app_handle() {
                app_handle.restart();
            } else {
                exit(1);
            }
        }
    }
    // match cmd.spawn() {
    //     Ok(child) => {
    //         log::info!(target: "app", "New application instance started with PID: {}", child.id());
    //         // Successfully started new process, now exit current process
    //         if let Some(app_handle) = handle::Handle::global().app_handle() {
    //             app_handle.exit(0);
    //         } else {
    //             exit(0);
    //         }
    //     }
    //     Err(e) => {
    //         log::error!(target: "app", "Failed to start new application instance: {}", e);
    //         // If manual spawn fails, try the original restart method as a last resort
    //         if let Some(app_handle) = handle::Handle::global().app_handle() {
    //             app_handle.restart();
    //         } else {
    //             exit(1);
    //         }
    //     }
    // }
}

fn after_change_clash_mode() {
    AsyncHandler::spawn(move || async {
        match IpcManager::global().get_connections().await {
        let mihomo = handle::Handle::mihomo().await;
        match mihomo.get_connections().await {
            Ok(connections) => {
                if let Some(connections_array) = connections["connections"].as_array() {
                if let Some(connections_array) = connections.connections {
                    for connection in connections_array {
                        if let Some(id) = connection["id"].as_str() {
                            let _ = IpcManager::global().delete_connection(id).await;
                        }
                        let _ = mihomo.close_connection(&connection.id).await;
                    }
                }
            }
@@ -103,7 +102,11 @@ pub async fn change_clash_mode(mode: String) {
        "mode": mode
    });
    log::debug!(target: "app", "change clash mode to {mode}");
    match IpcManager::global().patch_configs(json_value).await {
    match handle::Handle::mihomo()
        .await
        .patch_base_config(&json_value)
        .await
    {
        Ok(_) => {
            // Update the subscription config
            Config::clash().await.data_mut().patch_config(mapping);
@@ -113,11 +116,7 @@ pub async fn change_clash_mode(mode: String) {
            if clash_data.save_config().await.is_ok() {
                handle::Handle::refresh_clash();
                logging_error!(Type::Tray, true, tray::Tray::global().update_menu().await);
                logging_error!(
                    Type::Tray,
                    true,
                    tray::Tray::global().update_icon(None).await
                );
                logging_error!(Type::Tray, true, tray::Tray::global().update_icon().await);
            }

            let is_auto_close_connection = Config::verge()

@@ -23,11 +23,7 @@ pub async fn patch_clash(patch: Mapping) -> Result<()> {
    } else {
        if patch.get("mode").is_some() {
            logging_error!(Type::Tray, true, tray::Tray::global().update_menu().await);
            logging_error!(
                Type::Tray,
                true,
                tray::Tray::global().update_icon(None).await
            );
            logging_error!(Type::Tray, true, tray::Tray::global().update_icon().await);
        }
        Config::runtime().await.draft_mut().patch_config(patch);
        CoreManager::global().update_config().await?;
@@ -211,7 +207,7 @@ pub async fn patch_verge(patch: IVerge, not_save_file: bool) -> Result<()> {
        tray::Tray::global().update_menu().await?;
    }
    if (update_flags & (UpdateFlags::SystrayIcon as i32)) != 0 {
        tray::Tray::global().update_icon(None).await?;
        tray::Tray::global().update_icon().await?;
    }
    if (update_flags & (UpdateFlags::SystrayTooltip as i32)) != 0 {
        tray::Tray::global().update_tooltip().await?;

@@ -143,15 +143,15 @@ pub async fn update_profile(
    Ok(_) => {
        logging!(info, Type::Config, true, "[Profile update] update succeeded");
        handle::Handle::refresh_clash();
        if let Err(err) = cmd::proxy::force_refresh_proxies().await {
            logging!(
                error,
                Type::Config,
                true,
                "[Profile update] proxy group refresh failed: {}",
                err
            );
        }
        // if let Err(err) = cmd::proxy::force_refresh_proxies().await {
        //     logging!(
        //         error,
        //         Type::Config,
        //         true,
        //         "[Profile update] proxy group refresh failed: {}",
        //         err
        //     );
        // }
    }
    Err(err) => {
        logging!(error, Type::Config, true, "[Profile update] update failed: {}", err);
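A condensed sketch of the new mode-switch call path (method name as it appears in this diff; the mode value is illustrative):

    // e.g. mode = "rule" | "global" | "direct"
    let json_value = serde_json::json!({ "mode": "rule" });
    handle::Handle::mihomo()
        .await
        .patch_base_config(&json_value)
        .await?;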
@@ -1,9 +1,6 @@
use crate::{
    config::{Config, IVerge},
    core::handle,
    ipc::IpcManager,
    logging,
    utils::logging::Type,
};
use std::env;
use tauri_plugin_clipboard_manager::ClipboardExt;
@@ -26,7 +23,7 @@ pub async fn toggle_system_proxy() {
    // If the system proxy is about to be turned off and auto-close-connections is true, close all connections
    if enable
        && auto_close_connection
        && let Err(err) = IpcManager::global().close_all_connections().await
        && let Err(err) = handle::Handle::mihomo().await.close_all_connections().await
    {
        log::error!(target: "app", "Failed to close all connections: {err}");
    }
@@ -78,14 +75,7 @@ pub async fn copy_clash_env() {
        .unwrap_or_else(|| "127.0.0.1".to_string()),
    };

    let Some(app_handle) = handle::Handle::global().app_handle() else {
        logging!(
            error,
            Type::System,
            "Failed to get app handle for proxy operation"
        );
        return;
    };
    let app_handle = handle::Handle::app_handle();
    let port = {
        Config::verge()
            .await
@@ -2,7 +2,6 @@ use crate::utils::window_manager::WindowManager;
use crate::{
    config::Config,
    core::{CoreManager, handle, sysopt},
    ipc::IpcManager,
    logging,
    module::lightweight,
    utils::logging::Type,
@@ -23,17 +22,12 @@ async fn open_or_close_dashboard_internal() {
pub async fn quit() {
    logging!(debug, Type::System, true, "Starting the quit flow");

    let Some(app_handle) = handle::Handle::global().app_handle() else {
        logging!(
            error,
            Type::System,
            "Failed to get app handle for quit operation"
        );
        return;
    };
    // Get the app handle and set the exiting flag
    let app_handle = handle::Handle::app_handle();
    handle::Handle::global().set_is_exiting();

    if let Some(window) = handle::Handle::global().get_window() {
    // Hide the window first to give immediate feedback
    if let Some(window) = handle::Handle::get_window() {
        let _ = window.hide();
        log::info!(target: "app", "Window hidden");
    }
@@ -69,7 +63,14 @@ async fn clean_async() -> bool {
    #[cfg(not(target_os = "windows"))]
    let tun_timeout = Duration::from_secs(2);

    match timeout(tun_timeout, IpcManager::global().patch_configs(disable_tun)).await {
    match timeout(
        tun_timeout,
        handle::Handle::mihomo()
            .await
            .patch_base_config(&disable_tun),
    )
    .await
    {
        Ok(Ok(_)) => {
            log::info!(target: "app", "TUN mode disabled");
            tokio::time::sleep(Duration::from_millis(300)).await;
@@ -314,7 +315,7 @@ pub async fn hide() {
        add_light_weight_timer().await;
    }

    if let Some(window) = handle::Handle::global().get_window()
    if let Some(window) = handle::Handle::get_window()
        && window.is_visible().unwrap_or(false)
    {
        let _ = window.hide();
@@ -1,376 +0,0 @@
use std::time::Duration;

use kode_bridge::{
    ClientConfig, IpcHttpClient, LegacyResponse,
    errors::{AnyError, AnyResult},
};
use percent_encoding::{AsciiSet, CONTROLS, utf8_percent_encode};

use crate::{
    logging, singleton_with_logging,
    utils::{dirs::ipc_path, logging::Type},
};

// Encoding set for URL paths; only encodes the characters that truly need it
const URL_PATH_ENCODE_SET: &AsciiSet = &CONTROLS
    .add(b' ') // space
    .add(b'/') // slash
    .add(b'?') // question mark
    .add(b'#') // hash
    .add(b'&') // ampersand
    .add(b'%'); // percent sign
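For illustration of the encode set's effect (input hypothetical): a connection id containing reserved characters is escaped before it is placed in the URL path, so it cannot break the route:

    let encoded = utf8_percent_encode("ab/cd?1", URL_PATH_ENCODE_SET).to_string();
    assert_eq!(encoded, "ab%2Fcd%3F1");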
// Helper function to create AnyError from string
fn create_error(msg: impl Into<String>) -> AnyError {
    Box::new(std::io::Error::other(msg.into()))
}

pub struct IpcManager {
    client: IpcHttpClient,
}

impl IpcManager {
    pub fn new() -> Self {
        logging!(info, Type::Ipc, true, "Creating new IpcManager instance");
        let ipc_path_buf = ipc_path().unwrap_or_else(|e| {
            logging!(error, Type::Ipc, true, "Failed to get IPC path: {}", e);
            std::path::PathBuf::from("/tmp/clash-verge-ipc") // fallback path
        });
        let ipc_path = ipc_path_buf.to_str().unwrap_or_default();
        let config = ClientConfig {
            default_timeout: Duration::from_secs(5),
            enable_pooling: false,
            max_retries: 4,
            retry_delay: Duration::from_millis(125),
            max_concurrent_requests: 16,
            max_requests_per_second: Some(64.0),
            ..Default::default()
        };
        #[allow(clippy::unwrap_used)]
        let client = IpcHttpClient::with_config(ipc_path, config).unwrap();
        Self { client }
    }
}

impl IpcManager {
    pub async fn request(
        &self,
        method: &str,
        path: &str,
        body: Option<&serde_json::Value>,
    ) -> AnyResult<LegacyResponse> {
        self.client.request(method, path, body).await
    }
}

impl IpcManager {
    pub async fn send_request(
        &self,
        method: &str,
        path: &str,
        body: Option<&serde_json::Value>,
    ) -> AnyResult<serde_json::Value> {
        let response = IpcManager::global().request(method, path, body).await?;
        match method {
            "GET" => Ok(response.json()?),
            "PATCH" => {
                if response.status == 204 {
                    Ok(serde_json::json!({"code": 204}))
                } else {
                    Ok(response.json()?)
                }
            }
            "PUT" | "DELETE" => {
                if response.status == 204 {
                    Ok(serde_json::json!({"code": 204}))
                } else {
                    match response.json() {
                        Ok(json) => Ok(json),
                        Err(_) => Ok(serde_json::json!({
                            "code": response.status,
                            "message": response.body,
                            "error": "failed to parse response as JSON"
                        })),
                    }
                }
            }
            _ => match response.json() {
                Ok(json) => Ok(json),
                Err(_) => Ok(serde_json::json!({
                    "code": response.status,
                    "message": response.body,
                    "error": "failed to parse response as JSON"
                })),
            },
        }
    }

    // Basic proxy info retrieval
    pub async fn get_proxies(&self) -> AnyResult<serde_json::Value> {
        let url = "/proxies";
        self.send_request("GET", url, None).await
    }

    // Proxy provider info retrieval
    pub async fn get_providers_proxies(&self) -> AnyResult<serde_json::Value> {
        let url = "/providers/proxies";
        self.send_request("GET", url, None).await
    }

    // Connection management
    pub async fn get_connections(&self) -> AnyResult<serde_json::Value> {
        let url = "/connections";
        self.send_request("GET", url, None).await
    }

    pub async fn delete_connection(&self, id: &str) -> AnyResult<()> {
        let encoded_id = utf8_percent_encode(id, URL_PATH_ENCODE_SET).to_string();
        let url = format!("/connections/{encoded_id}");
        let response = self.send_request("DELETE", &url, None).await?;
        if response["code"] == 204 {
            Ok(())
        } else {
            Err(create_error(
                response["message"].as_str().unwrap_or("unknown error"),
            ))
        }
    }

    pub async fn close_all_connections(&self) -> AnyResult<()> {
        let url = "/connections";
        let response = self.send_request("DELETE", url, None).await?;
|
||||
if response["code"] == 204 {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(create_error(
|
||||
response["message"]
|
||||
.as_str()
|
||||
.unwrap_or("unknown error")
|
||||
.to_owned(),
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl IpcManager {
|
||||
#[allow(dead_code)]
|
||||
pub async fn is_mihomo_running(&self) -> AnyResult<()> {
|
||||
let url = "/version";
|
||||
let _response = self.send_request("GET", url, None).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn put_configs_force(&self, clash_config_path: &str) -> AnyResult<()> {
|
||||
let url = "/configs?force=true";
|
||||
let payload = serde_json::json!({
|
||||
"path": clash_config_path,
|
||||
});
|
||||
let _response = self.send_request("PUT", url, Some(&payload)).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn patch_configs(&self, config: serde_json::Value) -> AnyResult<()> {
|
||||
let url = "/configs";
|
||||
let response = self.send_request("PATCH", url, Some(&config)).await?;
|
||||
if response["code"] == 204 {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(create_error(
|
||||
response["message"]
|
||||
.as_str()
|
||||
.unwrap_or("unknown error")
|
||||
.to_owned(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn test_proxy_delay(
|
||||
&self,
|
||||
name: &str,
|
||||
test_url: Option<String>,
|
||||
timeout: i32,
|
||||
) -> AnyResult<serde_json::Value> {
|
||||
let test_url =
|
||||
test_url.unwrap_or_else(|| "https://cp.cloudflare.com/generate_204".to_string());
|
||||
|
||||
let encoded_name = utf8_percent_encode(name, URL_PATH_ENCODE_SET).to_string();
|
||||
// 测速URL不再编码,直接传递
|
||||
let url = format!("/proxies/{encoded_name}/delay?url={test_url}&timeout={timeout}");
|
||||
|
||||
self.send_request("GET", &url, None).await
|
||||
}
|
||||
|
||||
// 版本和配置相关
|
||||
pub async fn get_version(&self) -> AnyResult<serde_json::Value> {
|
||||
let url = "/version";
|
||||
self.send_request("GET", url, None).await
|
||||
}
|
||||
|
||||
pub async fn get_config(&self) -> AnyResult<serde_json::Value> {
|
||||
let url = "/configs";
|
||||
self.send_request("GET", url, None).await
|
||||
}
|
||||
|
||||
pub async fn update_geo_data(&self) -> AnyResult<()> {
|
||||
let url = "/configs/geo";
|
||||
let response = self.send_request("POST", url, None).await?;
|
||||
if response["code"] == 204 {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(create_error(
|
||||
response["message"]
|
||||
.as_str()
|
||||
.unwrap_or("unknown error")
|
||||
.to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn upgrade_core(&self) -> AnyResult<()> {
|
||||
let url = "/upgrade";
|
||||
let response = self.send_request("POST", url, None).await?;
|
||||
if response["code"] == 204 {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(create_error(
|
||||
response["message"]
|
||||
.as_str()
|
||||
.unwrap_or("unknown error")
|
||||
.to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
// 规则相关
|
||||
pub async fn get_rules(&self) -> AnyResult<serde_json::Value> {
|
||||
let url = "/rules";
|
||||
self.send_request("GET", url, None).await
|
||||
}
|
||||
|
||||
pub async fn get_rule_providers(&self) -> AnyResult<serde_json::Value> {
|
||||
let url = "/providers/rules";
|
||||
self.send_request("GET", url, None).await
|
||||
}
|
||||
|
||||
pub async fn update_rule_provider(&self, name: &str) -> AnyResult<()> {
|
||||
let encoded_name = utf8_percent_encode(name, URL_PATH_ENCODE_SET).to_string();
|
||||
let url = format!("/providers/rules/{encoded_name}");
|
||||
let response = self.send_request("PUT", &url, None).await?;
|
||||
if response["code"] == 204 {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(create_error(
|
||||
response["message"]
|
||||
.as_str()
|
||||
.unwrap_or("unknown error")
|
||||
.to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
// 代理相关
|
||||
pub async fn update_proxy(&self, group: &str, proxy: &str) -> AnyResult<()> {
|
||||
// 使用 percent-encoding 进行正确的 URL 编码
|
||||
let encoded_group = utf8_percent_encode(group, URL_PATH_ENCODE_SET).to_string();
|
||||
let url = format!("/proxies/{encoded_group}");
|
||||
let payload = serde_json::json!({
|
||||
"name": proxy
|
||||
});
|
||||
|
||||
// println!("group: {}, proxy: {}", group, proxy);
|
||||
match self.send_request("PUT", &url, Some(&payload)).await {
|
||||
Ok(_) => {
|
||||
// println!("updateProxy response: {:?}", response);
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
// println!("updateProxy encountered error: {}", e);
|
||||
logging!(
|
||||
error,
|
||||
crate::utils::logging::Type::Ipc,
|
||||
true,
|
||||
"IPC: updateProxy encountered error: {} (ignored, always returning true)",
|
||||
e
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn proxy_provider_health_check(&self, name: &str) -> AnyResult<()> {
|
||||
let encoded_name = utf8_percent_encode(name, URL_PATH_ENCODE_SET).to_string();
|
||||
let url = format!("/providers/proxies/{encoded_name}/healthcheck");
|
||||
let response = self.send_request("GET", &url, None).await?;
|
||||
if response["code"] == 204 {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(create_error(
|
||||
response["message"]
|
||||
.as_str()
|
||||
.unwrap_or("unknown error")
|
||||
.to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn update_proxy_provider(&self, name: &str) -> AnyResult<()> {
|
||||
let encoded_name = utf8_percent_encode(name, URL_PATH_ENCODE_SET).to_string();
|
||||
let url = format!("/providers/proxies/{encoded_name}");
|
||||
let response = self.send_request("PUT", &url, None).await?;
|
||||
if response["code"] == 204 {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(create_error(
|
||||
response["message"]
|
||||
.as_str()
|
||||
.unwrap_or("unknown error")
|
||||
.to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
// 延迟测试相关
|
||||
pub async fn get_group_proxy_delays(
|
||||
&self,
|
||||
group_name: &str,
|
||||
url: Option<String>,
|
||||
timeout: i32,
|
||||
) -> AnyResult<serde_json::Value> {
|
||||
let test_url = url.unwrap_or_else(|| "https://cp.cloudflare.com/generate_204".to_string());
|
||||
|
||||
let encoded_group_name = utf8_percent_encode(group_name, URL_PATH_ENCODE_SET).to_string();
|
||||
// 测速URL不再编码,直接传递
|
||||
let url = format!("/group/{encoded_group_name}/delay?url={test_url}&timeout={timeout}");
|
||||
|
||||
self.send_request("GET", &url, None).await
|
||||
}
|
||||
|
||||
// 调试相关
|
||||
pub async fn is_debug_enabled(&self) -> AnyResult<bool> {
|
||||
let url = "/debug/pprof";
|
||||
match self.send_request("GET", url, None).await {
|
||||
Ok(_) => Ok(true),
|
||||
Err(_) => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn gc(&self) -> AnyResult<()> {
|
||||
let url = "/debug/gc";
|
||||
let response = self.send_request("PUT", url, None).await?;
|
||||
if response["code"] == 204 {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(create_error(
|
||||
response["message"]
|
||||
.as_str()
|
||||
.unwrap_or("unknown error")
|
||||
.to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
// 日志相关功能已迁移到 logs.rs 模块,使用流式处理
|
||||
}
|
||||
|
||||
// Use singleton macro with logging
|
||||
singleton_with_logging!(IpcManager, INSTANCE, "IpcManager");
|
||||
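
Every mutating endpoint in the deleted file above repeats the same `response["code"] == 204` check before returning. A hedged sketch of a helper that would factor that pattern out (the helper is hypothetical and was not in the original file):

use serde_json::Value;

// Hypothetical helper: treat HTTP 204 as success, anything else as an error
// carrying the server-provided message.
fn ensure_no_content(response: &Value) -> Result<(), String> {
    if response["code"] == 204 {
        Ok(())
    } else {
        Err(response["message"]
            .as_str()
            .unwrap_or("unknown error")
            .to_string())
    }
}

fn main() {
    let ok = serde_json::json!({ "code": 204 });
    let err = serde_json::json!({ "code": 400, "message": "bad request" });
    assert!(ensure_no_content(&ok).is_ok());
    assert_eq!(ensure_no_content(&err).unwrap_err(), "bad request");
}
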
@@ -1,330 +0,0 @@
use serde::{Deserialize, Serialize};
use std::{collections::VecDeque, sync::Arc, time::Instant};
use tauri::async_runtime::JoinHandle;
use tokio::{sync::RwLock, time::Duration};

use crate::{
    ipc::monitor::MonitorData,
    logging,
    process::AsyncHandler,
    singleton_with_logging,
    utils::{dirs::ipc_path, logging::Type},
};

const MAX_LOGS: usize = 1000; // Maximum number of logs to keep in memory

#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct LogData {
    #[serde(rename = "type")]
    pub log_type: String,
    pub payload: String,
}

#[derive(Debug, Clone)]
pub struct LogItem {
    pub log_type: String,
    pub payload: String,
    pub time: String,
}

impl LogItem {
    fn new(log_type: String, payload: String) -> Self {
        use std::time::{SystemTime, UNIX_EPOCH};

        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_else(|_| std::time::Duration::from_secs(0))
            .as_secs();

        // Simple time formatting (HH:MM:SS)
        let hours = (now / 3600) % 24;
        let minutes = (now / 60) % 60;
        let seconds = now % 60;
        let time_str = format!("{hours:02}:{minutes:02}:{seconds:02}");

        Self {
            log_type,
            payload,
            time: time_str,
        }
    }
}

#[derive(Debug, Clone)]
pub struct CurrentLogs {
    pub logs: VecDeque<LogItem>,
    // pub level: String,
    pub last_updated: Instant,
}

impl Default for CurrentLogs {
    fn default() -> Self {
        Self {
            logs: VecDeque::with_capacity(MAX_LOGS),
            // level: "info".to_string(),
            last_updated: Instant::now(),
        }
    }
}

impl MonitorData for CurrentLogs {
    fn mark_fresh(&mut self) {
        self.last_updated = Instant::now();
    }

    fn is_fresh_within(&self, duration: Duration) -> bool {
        self.last_updated.elapsed() < duration
    }
}

// Logs monitor with streaming support
pub struct LogsMonitor {
    current: Arc<RwLock<CurrentLogs>>,
    task_handle: Arc<RwLock<Option<JoinHandle<()>>>>,
    current_monitoring_level: Arc<RwLock<Option<String>>>,
}

// Use singleton_with_logging macro
singleton_with_logging!(LogsMonitor, INSTANCE, "LogsMonitor");

impl LogsMonitor {
    fn new() -> Self {
        let current = Arc::new(RwLock::new(CurrentLogs::default()));

        Self {
            current,
            task_handle: Arc::new(RwLock::new(None)),
            current_monitoring_level: Arc::new(RwLock::new(None)),
        }
    }

    pub async fn start_monitoring(&self, level: Option<String>) {
        let filter_level = level.clone().unwrap_or_else(|| "info".to_string());

        // Check if we're already monitoring the same level
        // let level_changed = {
        //     let current_level = self.current_monitoring_level.read().await;
        //     if let Some(existing_level) = current_level.as_ref() {
        //         if existing_level == &filter_level {
        //             logging!(
        //                 info,
        //                 Type::Ipc,
        //                 true,
        //                 "LogsMonitor: Already monitoring level '{}', skipping duplicate request",
        //                 filter_level
        //             );
        //             return;
        //         }
        //         true // Level changed
        //     } else {
        //         true // First time or was stopped
        //     }
        // };

        // Stop existing monitoring task if level changed or first time
        {
            let mut handle = self.task_handle.write().await;
            if let Some(task) = handle.take() {
                task.abort();
                logging!(
                    info,
                    Type::Ipc,
                    true,
                    "LogsMonitor: Stopped previous monitoring task (level changed)"
                );
            }
        }

        // We want to keep the logs cache even if the level changes,
        // so we don't clear it here. The cache will be cleared only when the level changes
        // and a new task is started. This allows us to keep logs from previous levels
        // even if the level changes during monitoring.
        // Clear logs cache when level changes to ensure fresh data
        // if level_changed {
        //     let mut current = self.current.write().await;
        //     current.logs.clear();
        //     current.level = filter_level.clone();
        //     current.mark_fresh();
        //     logging!(
        //         info,
        //         Type::Ipc,
        //         true,
        //         "LogsMonitor: Cleared logs cache due to level change to '{}'",
        //         filter_level
        //     );
        // }

        // Update current monitoring level
        {
            let mut current_level = self.current_monitoring_level.write().await;
            *current_level = Some(filter_level.clone());
        }

        let monitor_current = Arc::clone(&self.current);

        let task = AsyncHandler::spawn(move || async move {
            loop {
                // Get fresh IPC path and client for each connection attempt
                let (_ipc_path_buf, client) = match Self::create_ipc_client() {
                    Ok((path, client)) => (path, client),
                    Err(e) => {
                        logging!(error, Type::Ipc, true, "Failed to create IPC client: {}", e);
                        tokio::time::sleep(Duration::from_secs(2)).await;
                        continue;
                    }
                };

                let url = if filter_level == "all" {
                    "/logs".to_string()
                } else {
                    format!("/logs?level={filter_level}")
                };

                logging!(
                    info,
                    Type::Ipc,
                    true,
                    "LogsMonitor: Starting stream for {}",
                    url
                );

                let _ = client
                    .get(&url)
                    .timeout(Duration::from_secs(30))
                    .process_lines(|line| {
                        Self::process_log_line(line, Arc::clone(&monitor_current))
                    })
                    .await;

                // Wait before retrying
                tokio::time::sleep(Duration::from_secs(2)).await;
            }
        });

        // Store the task handle
        {
            let mut handle = self.task_handle.write().await;
            *handle = Some(task);
        }

        logging!(
            info,
            Type::Ipc,
            true,
            "LogsMonitor: Started new monitoring task for level: {:?}",
            level
        );
    }

    pub async fn stop_monitoring(&self) {
        // Stop monitoring task but keep logs
        {
            let mut handle = self.task_handle.write().await;
            if let Some(task) = handle.take() {
                task.abort();
                logging!(
                    info,
                    Type::Ipc,
                    true,
                    "LogsMonitor: Stopped monitoring task"
                );
            }
        }

        // Reset monitoring level
        {
            let mut monitoring_level = self.current_monitoring_level.write().await;
            *monitoring_level = None;
        }
    }

    fn create_ipc_client() -> Result<
        (std::path::PathBuf, kode_bridge::IpcStreamClient),
        Box<dyn std::error::Error + Send + Sync>,
    > {
        use kode_bridge::IpcStreamClient;

        let ipc_path_buf = ipc_path()?;
        let ipc_path = ipc_path_buf.to_str().ok_or("Invalid IPC path")?;
        let client = IpcStreamClient::new(ipc_path)?;
        Ok((ipc_path_buf, client))
    }

    fn process_log_line(
        line: &str,
        current: Arc<RwLock<CurrentLogs>>,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        if let Ok(log_data) = serde_json::from_str::<LogData>(line.trim()) {
            // Server-side filtering via query parameters handles the level filtering
            // We only need to accept all logs since filtering is done at the endpoint level
            let log_item = LogItem::new(log_data.log_type, log_data.payload);

            AsyncHandler::spawn(move || async move {
                let mut logs = current.write().await;

                // Add new log
                logs.logs.push_back(log_item);

                // Keep only the last 1000 logs
                if logs.logs.len() > 1000 {
                    logs.logs.pop_front();
                }

                logs.mark_fresh();
            });
        }
        Ok(())
    }

    pub async fn current(&self) -> CurrentLogs {
        self.current.read().await.clone()
    }

    pub async fn clear_logs(&self) {
        let mut current = self.current.write().await;
        current.logs.clear();
        current.mark_fresh();
        logging!(
            info,
            Type::Ipc,
            true,
            "LogsMonitor: Cleared frontend logs (monitoring continues)"
        );
    }

    pub async fn get_logs_as_json(&self) -> serde_json::Value {
        let current = self.current().await;

        // Simply return all cached logs since filtering is handled by start_monitoring
        // and the cache is cleared when level changes
        let logs: Vec<serde_json::Value> = current
            .logs
            .iter()
            .map(|log| {
                serde_json::json!({
                    "type": log.log_type,
                    "payload": log.payload,
                    "time": log.time
                })
            })
            .collect();

        serde_json::Value::Array(logs)
    }
}

pub async fn start_logs_monitoring(level: Option<String>) {
    LogsMonitor::global().start_monitoring(level).await;
}

pub async fn stop_logs_monitoring() {
    LogsMonitor::global().stop_monitoring().await;
}

pub async fn clear_logs() {
    LogsMonitor::global().clear_logs().await;
}

pub async fn get_logs_json() -> serde_json::Value {
    LogsMonitor::global().get_logs_as_json().await
}
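
Worth noting about `LogItem::new` above: the HH:MM:SS string is derived arithmetically from the Unix timestamp, so it is a UTC wall-clock time, not local time. A quick standalone check of that arithmetic:

fn hms(epoch_secs: u64) -> String {
    let hours = (epoch_secs / 3600) % 24;
    let minutes = (epoch_secs / 60) % 60;
    let seconds = epoch_secs % 60;
    format!("{hours:02}:{minutes:02}:{seconds:02}")
}

fn main() {
    // 2021-01-01T00:00:00Z is 1_609_459_200 seconds after the epoch.
    assert_eq!(hms(1_609_459_200), "00:00:00");
    // One hour, one minute and one second later.
    assert_eq!(hms(1_609_459_200 + 3661), "01:01:01");
}
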
@@ -1,119 +0,0 @@
use serde::{Deserialize, Serialize};
use std::{sync::Arc, time::Instant};
use tokio::{sync::RwLock, time::Duration};

use crate::{
    ipc::monitor::{IpcStreamMonitor, MonitorData, StreamingParser},
    process::AsyncHandler,
    singleton_lazy_with_logging,
    utils::format::fmt_bytes,
};

#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct MemoryData {
    pub inuse: u64,
    pub oslimit: u64,
}

#[derive(Debug, Clone)]
pub struct CurrentMemory {
    pub inuse: u64,
    pub oslimit: u64,
    pub last_updated: Instant,
}

impl Default for CurrentMemory {
    fn default() -> Self {
        Self {
            inuse: 0,
            oslimit: 0,
            last_updated: Instant::now(),
        }
    }
}

impl MonitorData for CurrentMemory {
    fn mark_fresh(&mut self) {
        self.last_updated = Instant::now();
    }

    fn is_fresh_within(&self, duration: Duration) -> bool {
        self.last_updated.elapsed() < duration
    }
}

impl StreamingParser for CurrentMemory {
    fn parse_and_update(
        line: &str,
        current: Arc<RwLock<Self>>,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        if let Ok(memory) = serde_json::from_str::<MemoryData>(line.trim()) {
            AsyncHandler::spawn(move || async move {
                let mut current_guard = current.write().await;
                current_guard.inuse = memory.inuse;
                current_guard.oslimit = memory.oslimit;
                current_guard.mark_fresh();
            });
        }
        Ok(())
    }
}

// Minimal memory monitor using the new architecture
pub struct MemoryMonitor {
    monitor: IpcStreamMonitor<CurrentMemory>,
}

impl Default for MemoryMonitor {
    fn default() -> Self {
        MemoryMonitor {
            monitor: IpcStreamMonitor::new(
                "/memory".to_string(),
                Duration::from_secs(10),
                Duration::from_secs(2),
                Duration::from_secs(10),
            ),
        }
    }
}

// Use simplified singleton_lazy_with_logging macro
singleton_lazy_with_logging!(
    MemoryMonitor,
    INSTANCE,
    "MemoryMonitor",
    MemoryMonitor::default
);

impl MemoryMonitor {
    pub async fn current(&self) -> CurrentMemory {
        self.monitor.current().await
    }

    pub async fn is_fresh(&self) -> bool {
        self.monitor.is_fresh().await
    }
}

pub async fn get_current_memory() -> CurrentMemory {
    MemoryMonitor::global().current().await
}

pub async fn get_formatted_memory() -> (String, String, f64, bool) {
    let monitor = MemoryMonitor::global();
    let memory = monitor.current().await;
    let is_fresh = monitor.is_fresh().await;

    let usage_percent = if memory.oslimit > 0 {
        (memory.inuse as f64 / memory.oslimit as f64) * 100.0
    } else {
        0.0
    };

    (
        fmt_bytes(memory.inuse),
        fmt_bytes(memory.oslimit),
        usage_percent,
        is_fresh,
    )
}
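
The `usage_percent` computation in `get_formatted_memory` above guards against a zero `oslimit` (reported when the OS limit is unknown). The same calculation in isolation, as a tiny runnable check:

fn usage_percent(inuse: u64, oslimit: u64) -> f64 {
    if oslimit > 0 {
        (inuse as f64 / oslimit as f64) * 100.0
    } else {
        0.0 // avoid division by zero when the OS limit is unknown
    }
}

fn main() {
    assert_eq!(usage_percent(512, 1024), 50.0);
    assert_eq!(usage_percent(512, 0), 0.0);
}
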
@@ -1,15 +0,0 @@
pub mod general;
pub mod logs;
pub mod memory;
pub mod monitor;
pub mod traffic;

pub use general::IpcManager;
pub use logs::{clear_logs, get_logs_json, start_logs_monitoring, stop_logs_monitoring};
pub use memory::{get_current_memory, get_formatted_memory};
pub use traffic::{get_current_traffic, get_formatted_traffic};

pub struct Rate {
    // pub up: usize,
    // pub down: usize,
}
@@ -1,120 +0,0 @@
use kode_bridge::IpcStreamClient;
use std::sync::Arc;
use tokio::{sync::RwLock, time::Duration};

use crate::{
    logging,
    process::AsyncHandler,
    utils::{dirs::ipc_path, logging::Type},
};

/// Generic base structure for IPC monitoring data with freshness tracking
pub trait MonitorData: Clone + Send + Sync + 'static {
    /// Update the last_updated timestamp to now
    fn mark_fresh(&mut self);

    /// Check if data is fresh based on the given duration
    fn is_fresh_within(&self, duration: Duration) -> bool;
}

/// Trait for parsing streaming data and updating monitor state
pub trait StreamingParser: MonitorData {
    /// Parse a line of streaming data and update the current state
    fn parse_and_update(
        line: &str,
        current: Arc<RwLock<Self>>,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>>;
}

/// Generic IPC stream monitor that handles the common streaming pattern
pub struct IpcStreamMonitor<T>
where
    T: MonitorData + StreamingParser + Default,
{
    current: Arc<RwLock<T>>,
    #[allow(dead_code)]
    endpoint: String,
    #[allow(dead_code)]
    timeout: Duration,
    #[allow(dead_code)]
    retry_interval: Duration,
    freshness_duration: Duration,
}

impl<T> IpcStreamMonitor<T>
where
    T: MonitorData + StreamingParser + Default,
{
    pub fn new(
        endpoint: String,
        timeout: Duration,
        retry_interval: Duration,
        freshness_duration: Duration,
    ) -> Self {
        let current = Arc::new(RwLock::new(T::default()));
        let monitor_current = Arc::clone(&current);
        let endpoint_clone = endpoint.clone();

        // Start the monitoring task
        AsyncHandler::spawn(move || async move {
            Self::streaming_task(monitor_current, endpoint_clone, timeout, retry_interval).await;
        });

        Self {
            current,
            endpoint,
            timeout,
            retry_interval,
            freshness_duration,
        }
    }

    pub async fn current(&self) -> T {
        self.current.read().await.clone()
    }

    pub async fn is_fresh(&self) -> bool {
        self.current
            .read()
            .await
            .is_fresh_within(self.freshness_duration)
    }

    /// The core streaming task that can be specialized per monitor type
    async fn streaming_task(
        current: Arc<RwLock<T>>,
        endpoint: String,
        timeout: Duration,
        retry_interval: Duration,
    ) {
        loop {
            let ipc_path_buf = match ipc_path() {
                Ok(path) => path,
                Err(e) => {
                    logging!(error, Type::Ipc, true, "Failed to get IPC path: {}", e);
                    tokio::time::sleep(retry_interval).await;
                    continue;
                }
            };

            let ipc_path = ipc_path_buf.to_str().unwrap_or_default();

            let client = match IpcStreamClient::new(ipc_path) {
                Ok(client) => client,
                Err(e) => {
                    logging!(error, Type::Ipc, true, "Failed to create IPC client: {}", e);
                    tokio::time::sleep(retry_interval).await;
                    continue;
                }
            };

            let _ = client
                .get(&endpoint)
                .timeout(timeout)
                .process_lines(|line| T::parse_and_update(line, Arc::clone(&current)))
                .await;

            tokio::time::sleep(retry_interval).await;
        }
    }
}
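
Under this architecture, adding a new streamed endpoint means giving a `Default + MonitorData + StreamingParser` type to `IpcStreamMonitor`, exactly as the memory and traffic monitors do. A stripped-down, self-contained sketch of the same shape, with the crate's traits inlined as stand-ins so it compiles on its own (all names here are illustrative):

use std::sync::{Arc, Mutex};

// Inlined stand-ins for the crate's MonitorData/StreamingParser traits.
trait MonitorData: Clone + Default {
    fn mark_fresh(&mut self);
}

trait StreamingParser: MonitorData {
    fn parse_and_update(line: &str, current: &Arc<Mutex<Self>>);
}

#[derive(Clone, Default)]
struct CurrentVersion {
    version: String,
}

impl MonitorData for CurrentVersion {
    fn mark_fresh(&mut self) {}
}

impl StreamingParser for CurrentVersion {
    fn parse_and_update(line: &str, current: &Arc<Mutex<Self>>) {
        // One JSON object per streamed line, e.g. {"version":"1.18.0"}.
        if let Ok(v) = serde_json::from_str::<serde_json::Value>(line.trim()) {
            if let Some(ver) = v["version"].as_str() {
                let mut guard = current.lock().unwrap();
                guard.version = ver.to_string();
                guard.mark_fresh();
            }
        }
    }
}

fn main() {
    let current = Arc::new(Mutex::new(CurrentVersion::default()));
    CurrentVersion::parse_and_update(r#"{"version":"1.18.0"}"#, &current);
    assert_eq!(current.lock().unwrap().version, "1.18.0");
}
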
@@ -1,153 +0,0 @@
use serde::{Deserialize, Serialize};
use std::{sync::Arc, time::Instant};
use tokio::{sync::RwLock, time::Duration};

use crate::{
    ipc::monitor::{IpcStreamMonitor, MonitorData, StreamingParser},
    process::AsyncHandler,
    singleton_lazy_with_logging,
    utils::format::fmt_bytes,
};

#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct TrafficData {
    pub up: u64,
    pub down: u64,
}

#[derive(Debug, Clone)]
pub struct CurrentTraffic {
    pub up_rate: u64,
    pub down_rate: u64,
    pub total_up: u64,
    pub total_down: u64,
    pub last_updated: Instant,
}

impl Default for CurrentTraffic {
    fn default() -> Self {
        Self {
            up_rate: 0,
            down_rate: 0,
            total_up: 0,
            total_down: 0,
            last_updated: Instant::now(),
        }
    }
}

impl MonitorData for CurrentTraffic {
    fn mark_fresh(&mut self) {
        self.last_updated = Instant::now();
    }

    fn is_fresh_within(&self, duration: Duration) -> bool {
        self.last_updated.elapsed() < duration
    }
}

// Traffic monitoring state for calculating rates
#[derive(Debug, Clone, Default)]
pub struct TrafficMonitorState {
    pub current: CurrentTraffic,
    pub last_traffic: Option<TrafficData>,
}

impl MonitorData for TrafficMonitorState {
    fn mark_fresh(&mut self) {
        self.current.mark_fresh();
    }

    fn is_fresh_within(&self, duration: Duration) -> bool {
        self.current.is_fresh_within(duration)
    }
}

impl StreamingParser for TrafficMonitorState {
    fn parse_and_update(
        line: &str,
        current: Arc<RwLock<Self>>,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        if let Ok(traffic) = serde_json::from_str::<TrafficData>(line.trim()) {
            AsyncHandler::spawn(move || async move {
                let mut state_guard = current.write().await;

                let (up_rate, down_rate) = state_guard
                    .last_traffic
                    .as_ref()
                    .map(|l| {
                        (
                            traffic.up.saturating_sub(l.up),
                            traffic.down.saturating_sub(l.down),
                        )
                    })
                    .unwrap_or((0, 0));

                state_guard.current = CurrentTraffic {
                    up_rate,
                    down_rate,
                    total_up: traffic.up,
                    total_down: traffic.down,
                    last_updated: Instant::now(),
                };

                state_guard.last_traffic = Some(traffic);
            });
        }
        Ok(())
    }
}

// Minimal traffic monitor using the new architecture
pub struct TrafficMonitor {
    monitor: IpcStreamMonitor<TrafficMonitorState>,
}

impl Default for TrafficMonitor {
    fn default() -> Self {
        TrafficMonitor {
            monitor: IpcStreamMonitor::new(
                "/traffic".to_string(),
                Duration::from_secs(10),
                Duration::from_secs(1),
                Duration::from_secs(5),
            ),
        }
    }
}

// Use simplified singleton_lazy_with_logging macro
singleton_lazy_with_logging!(
    TrafficMonitor,
    INSTANCE,
    "TrafficMonitor",
    TrafficMonitor::default
);

impl TrafficMonitor {
    pub async fn current(&self) -> CurrentTraffic {
        self.monitor.current().await.current
    }

    pub async fn is_fresh(&self) -> bool {
        self.monitor.is_fresh().await
    }
}

pub async fn get_current_traffic() -> CurrentTraffic {
    TrafficMonitor::global().current().await
}

pub async fn get_formatted_traffic() -> (String, String, String, String, bool) {
    let monitor = TrafficMonitor::global();
    let traffic = monitor.current().await;
    let is_fresh = monitor.is_fresh().await;

    (
        fmt_bytes(traffic.up_rate),
        fmt_bytes(traffic.down_rate),
        fmt_bytes(traffic.total_up),
        fmt_bytes(traffic.total_down),
        is_fresh,
    )
}
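
The rate calculation in `parse_and_update` above subtracts the previous cumulative counters with `saturating_sub`, so a counter reset (for example, a core restart) yields a rate of 0 instead of an integer underflow. The same logic in isolation, as a runnable check:

fn rates(prev: Option<(u64, u64)>, now: (u64, u64)) -> (u64, u64) {
    prev.map(|(pu, pd)| (now.0.saturating_sub(pu), now.1.saturating_sub(pd)))
        .unwrap_or((0, 0))
}

fn main() {
    // Normal case: counters grew by 100 up / 500 down since the last sample.
    assert_eq!(rates(Some((1000, 2000)), (1100, 2500)), (100, 500));
    // Counter reset: new totals are smaller, saturating_sub clamps to 0.
    assert_eq!(rates(Some((1000, 2000)), (10, 20)), (0, 0));
    // First sample: no previous data, so report 0.
    assert_eq!(rates(None, (10, 20)), (0, 0));
}
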
@@ -1,34 +1,32 @@
#![allow(non_snake_case)]
#![recursion_limit = "512"]

mod cache;
mod cmd;
pub mod config;
mod core;
mod enhance;
mod feat;
mod ipc;
mod module;
mod process;
mod utils;
#[cfg(target_os = "macos")]
use crate::utils::window_manager::WindowManager;
use crate::{
    core::handle,
    core::hotkey,
    core::{handle, hotkey},
    process::AsyncHandler,
    utils::{resolve, server},
};
use config::Config;
use tauri::AppHandle;
#[cfg(target_os = "macos")]
use tauri::Manager;
use once_cell::sync::OnceCell;
use tauri::{AppHandle, Manager};
#[cfg(target_os = "macos")]
use tauri_plugin_autostart::MacosLauncher;
use tauri_plugin_deep_link::DeepLinkExt;
use tokio::time::{Duration, timeout};
use utils::logging::Type;

pub static APP_HANDLE: OnceCell<AppHandle> = OnceCell::new();

/// Application initialization helper functions
mod app_init {
    use super::*;
@@ -41,7 +39,7 @@ mod app_init {
            Ok(result) => {
                if result.is_err() {
                    logging!(info, Type::Setup, true, "Detected an already running app instance");
                    if let Some(app_handle) = handle::Handle::global().app_handle() {
                    if let Some(app_handle) = APP_HANDLE.get() {
                        app_handle.exit(0);
                    } else {
                        std::process::exit(0);
@@ -75,7 +73,13 @@ mod app_init {
        .plugin(tauri_plugin_dialog::init())
        .plugin(tauri_plugin_shell::init())
        .plugin(tauri_plugin_deep_link::init())
        .plugin(tauri_plugin_http::init());
        .plugin(tauri_plugin_http::init())
        .plugin(
            tauri_plugin_mihomo::Builder::new()
                .protocol(tauri_plugin_mihomo::models::Protocol::LocalSocket)
                .socket_path(crate::config::IClashTemp::guard_external_controller_ipc())
                .build(),
        );

    // Devtools plugin only in debug mode with feature tauri-dev
    // to avoid duplicated registering of logger since the devtools plugin also registers a logger
@@ -184,46 +188,13 @@ mod app_init {
            cmd::update_proxy_chain_config_in_runtime,
            cmd::invoke_uwp_tool,
            cmd::copy_clash_env,
            cmd::get_proxies,
            cmd::force_refresh_proxies,
            cmd::get_providers_proxies,
            cmd::sync_tray_proxy_selection,
            cmd::update_proxy_and_sync,
            cmd::save_dns_config,
            cmd::apply_dns_config,
            cmd::check_dns_config_exists,
            cmd::get_dns_config_content,
            cmd::validate_dns_config,
            cmd::get_clash_version,
            cmd::get_clash_config,
            cmd::force_refresh_clash_config,
            cmd::update_geo_data,
            cmd::upgrade_clash_core,
            cmd::get_clash_rules,
            cmd::update_proxy_choice,
            cmd::get_proxy_providers,
            cmd::get_rule_providers,
            cmd::proxy_provider_health_check,
            cmd::update_proxy_provider,
            cmd::update_rule_provider,
            cmd::get_clash_connections,
            cmd::delete_clash_connection,
            cmd::close_all_clash_connections,
            cmd::get_group_proxy_delays,
            cmd::is_clash_debug_enabled,
            cmd::clash_gc,
            // Logging and monitoring
            cmd::get_clash_logs,
            cmd::start_logs_monitoring,
            cmd::stop_logs_monitoring,
            cmd::clear_logs,
            cmd::get_traffic_data,
            cmd::get_memory_data,
            cmd::get_formatted_traffic_data,
            cmd::get_formatted_memory_data,
            cmd::get_system_monitor_overview,
            cmd::start_traffic_service,
            cmd::stop_traffic_service,
            // Verge configuration
            cmd::get_verge_config,
            cmd::patch_verge_config,
@@ -251,8 +222,6 @@ mod app_init {
            // Script validation
            cmd::script_validate_notice,
            cmd::validate_script_file,
            // Clash API
            cmd::clash_api_get_proxy_delay,
            // Backup and WebDAV
            cmd::create_webdav_backup,
            cmd::save_webdav_config,
@@ -321,6 +290,11 @@ pub fn run() {
        .setup(|app| {
            logging!(info, Type::Setup, true, "Starting app initialization...");

            #[allow(clippy::expect_used)]
            APP_HANDLE
                .set(app.app_handle().clone())
                .expect("failed to set global app handle");

            // Setup autostart plugin
            if let Err(e) = app_init::setup_autostart(app) {
                logging!(error, Type::Setup, true, "Failed to setup autostart: {}", e);
@@ -348,11 +322,9 @@ pub fn run() {
                );
            }

            let app_handle = app.handle().clone();

            logging!(info, Type::Setup, true, "Running main setup steps...");

            resolve::resolve_setup_handle(app_handle);
            resolve::resolve_setup_handle();
            resolve::resolve_setup_async();
            resolve::resolve_setup_sync();

@@ -368,7 +340,7 @@ pub fn run() {
        use super::*;

        /// Handle application ready/resumed events
        pub fn handle_ready_resumed(app_handle: &AppHandle) {
        pub fn handle_ready_resumed(_app_handle: &AppHandle) {
            // Double-check: make sure we are not already exiting
            if handle::Handle::global().is_exiting() {
                logging!(
@@ -381,11 +353,11 @@ pub fn run() {
            }

            logging!(info, Type::System, true, "App ready or resumed");
            handle::Handle::global().init(app_handle.clone());
            handle::Handle::global().init();

            #[cfg(target_os = "macos")]
            {
                if let Some(window) = app_handle.get_webview_window("main") {
                if let Some(window) = _app_handle.get_webview_window("main") {
                    logging!(info, Type::Window, true, "Setting the macOS window title");
                    let _ = window.set_title("Clash Verge");
                }
@@ -394,7 +366,7 @@ pub fn run() {

        /// Handle application reopen events (macOS)
        #[cfg(target_os = "macos")]
        pub async fn handle_reopen(app_handle: &AppHandle, has_visible_windows: bool) {
        pub async fn handle_reopen(has_visible_windows: bool) {
            logging!(
                info,
                Type::System,
@@ -403,7 +375,7 @@ pub fn run() {
                has_visible_windows
            );

            handle::Handle::global().init(app_handle.clone());
            handle::Handle::global().init();

            if !has_visible_windows {
                // When no window is visible, switch to regular mode and show the main window
@@ -436,7 +408,7 @@ pub fn run() {
            log::info!(target: "app", "closing window...");
            if let tauri::WindowEvent::CloseRequested { api, .. } = api {
                api.prevent_close();
                if let Some(window) = core::handle::Handle::global().get_window() {
                if let Some(window) = core::handle::Handle::get_window() {
                    let _ = window.hide();
                } else {
                    logging!(warn, Type::Window, true, "Tried to hide the window, but no window exists");
@@ -583,12 +555,17 @@ pub fn run() {
            logging!(debug, Type::System, true, "Ignoring Reopen event; the app is exiting");
            return;
        }
        let app_handle = app_handle.clone();
        AsyncHandler::spawn(move || async move {
            event_handlers::handle_reopen(&app_handle, has_visible_windows).await;
            event_handlers::handle_reopen(has_visible_windows).await;
        });
    }
    tauri::RunEvent::ExitRequested { api, code, .. } => {
        tauri::async_runtime::block_on(async {
            let _ = handle::Handle::mihomo()
                .await
                .clear_all_ws_connections()
                .await;
        });
        // If we are already in the quit flow, do not block the exit
        if core::handle::Handle::global().is_exiting() {
            logging!(
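
The new `APP_HANDLE: OnceCell<AppHandle>` global above is set exactly once in `setup` and read elsewhere. The same write-once semantics with `once_cell`, shown with a plain `String` instead of a `tauri::AppHandle` so the sketch runs on its own:

use once_cell::sync::OnceCell;

// Same shape as APP_HANDLE, but holding a String for the sake of the example.
static GLOBAL: OnceCell<String> = OnceCell::new();

fn main() {
    // The first set wins; a second set returns Err instead of overwriting.
    GLOBAL.set("handle".to_string()).expect("set once");
    assert!(GLOBAL.set("again".to_string()).is_err());
    // get() returns None until set, Some(&T) afterwards.
    assert_eq!(GLOBAL.get().map(String::as_str), Some("handle"));
}
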
@@ -1,5 +1,4 @@
use crate::{
    cache::CacheProxy,
    config::Config,
    core::{handle, timer::Timer, tray::Tray},
    log_err, logging,
@@ -176,7 +175,6 @@ pub async fn entry_lightweight_mode() -> bool {
    // Back to the In state
    set_state(LightweightState::In);

    CacheProxy::global().clean_default_keys();
    true
}

@@ -219,7 +217,7 @@ pub async fn add_light_weight_timer() {
}

fn setup_window_close_listener() {
    if let Some(window) = handle::Handle::global().get_window() {
    if let Some(window) = handle::Handle::get_window() {
        let handler = window.listen("tauri://close-requested", move |_event| {
            std::mem::drop(AsyncHandler::spawn(|| async {
                if let Err(e) = setup_light_weight_timer().await {
@@ -239,7 +237,7 @@ fn setup_window_close_listener() {
}

fn cancel_window_close_listener() {
    if let Some(window) = handle::Handle::global().get_window() {
    if let Some(window) = handle::Handle::get_window() {
        let handler = WINDOW_CLOSE_HANDLER.swap(0, Ordering::AcqRel);
        if handler != 0 {
            window.unlisten(handler);
@@ -249,7 +247,7 @@ fn cancel_window_close_listener() {
}

fn setup_webview_focus_listener() {
    if let Some(window) = handle::Handle::global().get_window() {
    if let Some(window) = handle::Handle::get_window() {
        let handler = window.listen("tauri://focus", move |_event| {
            log_err!(cancel_light_weight_timer());
            logging!(
@@ -264,7 +262,7 @@ fn setup_webview_focus_listener() {
}

fn cancel_webview_focus_listener() {
    if let Some(window) = handle::Handle::global().get_window() {
    if let Some(window) = handle::Handle::get_window() {
        let handler = WEBVIEW_FOCUS_HANDLER.swap(0, Ordering::AcqRel);
        if handler != 0 {
            window.unlisten(handler);
|
||||
let system_kernel_version = System::kernel_version().unwrap_or("Null".into());
|
||||
let system_arch = System::cpu_arch();
|
||||
|
||||
let Some(handler) = handle::Handle::global().app_handle() else {
|
||||
return Self {
|
||||
system_name,
|
||||
system_version,
|
||||
system_kernel_version,
|
||||
system_arch,
|
||||
verge_version: "unknown".into(),
|
||||
running_mode: "NotRunning".to_string(),
|
||||
is_admin: false,
|
||||
};
|
||||
};
|
||||
let handler = handle::Handle::app_handle();
|
||||
let verge_version = handler.package_info().version.to_string();
|
||||
|
||||
// 使用默认值避免在同步上下文中执行异步操作
|
||||
|
||||
@@ -51,53 +51,7 @@ pub fn app_home_dir() -> Result<PathBuf> {
    }

    // Avoid crashing when the Handle has not been initialized yet
    let app_handle = match handle::Handle::global().app_handle() {
        Some(handle) => handle,
        None => {
            log::warn!(target: "app", "app_handle not initialized, using default path");
            // Fall back to the executable's directory
            let exe_path = tauri::utils::platform::current_exe()?;
            let exe_dir = exe_path
                .parent()
                .ok_or(anyhow::anyhow!("failed to get executable directory"))?;

            // Use the per-user system data directory + app ID
            #[cfg(target_os = "windows")]
            {
                if let Some(local_app_data) = std::env::var_os("LOCALAPPDATA") {
                    let path = PathBuf::from(local_app_data).join(APP_ID);
                    return Ok(path);
                }
            }

            #[cfg(target_os = "macos")]
            {
                if let Some(home) = std::env::var_os("HOME") {
                    let path = PathBuf::from(home)
                        .join("Library")
                        .join("Application Support")
                        .join(APP_ID);
                    return Ok(path);
                }
            }

            #[cfg(target_os = "linux")]
            {
                if let Some(home) = std::env::var_os("HOME") {
                    let path = PathBuf::from(home)
                        .join(".local")
                        .join("share")
                        .join(APP_ID);
                    return Ok(path);
                }
            }

            // If the system directory is unavailable, fall back to the executable's directory
            let fallback_dir = PathBuf::from(exe_dir).join(".config").join(APP_ID);
            log::warn!(target: "app", "Using fallback data directory: {fallback_dir:?}");
            return Ok(fallback_dir);
        }
    };
    let app_handle = handle::Handle::app_handle();

    match app_handle.path().data_dir() {
        Ok(dir) => Ok(dir.join(APP_ID)),
@@ -111,18 +65,7 @@ pub fn app_home_dir() -> Result<PathBuf> {
/// get the resources dir
pub fn app_resources_dir() -> Result<PathBuf> {
    // Avoid crashing when the Handle has not been initialized yet
    let app_handle = match handle::Handle::global().app_handle() {
        Some(handle) => handle,
        None => {
            log::warn!(target: "app", "app_handle not initialized in app_resources_dir, using fallback");
            // Fall back to the executable's directory
            let exe_dir = tauri::utils::platform::current_exe()?
                .parent()
                .ok_or(anyhow::anyhow!("failed to get executable directory"))?
                .to_path_buf();
            return Ok(exe_dir.join("resources"));
        }
    };
    let app_handle = handle::Handle::app_handle();

    match app_handle.path().resource_dir() {
        Ok(dir) => Ok(dir.join("resources")),
@@ -1,4 +1,5 @@
/// Format bytes into human readable string (B, KB, MB, GB)
#[allow(unused)]
pub fn fmt_bytes(bytes: u64) -> String {
    const UNITS: &[&str] = &["B", "KB", "MB", "GB"];
    let (mut val, mut unit) = (bytes as f64, 0);
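
The hunk above cuts off after the first line of `fmt_bytes`'s body, so the rest is not shown in this excerpt. A plausible completion of the unit-scaling loop, offered only as a hedged sketch consistent with the declared `UNITS` and `(val, unit)` pair, not as the repository's actual implementation:

/// Format bytes into a human readable string (B, KB, MB, GB).
pub fn fmt_bytes(bytes: u64) -> String {
    const UNITS: &[&str] = &["B", "KB", "MB", "GB"];
    let (mut val, mut unit) = (bytes as f64, 0);
    // Divide by 1024 until the value fits the unit or we run out of units.
    while val >= 1024.0 && unit < UNITS.len() - 1 {
        val /= 1024.0;
        unit += 1;
    }
    format!("{:.1} {}", val, UNITS[unit])
}

fn main() {
    assert_eq!(fmt_bytes(512), "512.0 B");
    assert_eq!(fmt_bytes(2048), "2.0 KB");
}
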
@@ -494,15 +494,7 @@ pub fn init_scheme() -> Result<()> {
}

pub async fn startup_script() -> Result<()> {
    let app_handle = match handle::Handle::global().app_handle() {
        Some(handle) => handle,
        None => {
            return Err(anyhow::anyhow!(
                "app_handle not available for startup script execution"
            ));
        }
    };

    let app_handle = handle::Handle::app_handle();
    let script_path = {
        let verge = Config::verge().await;
        let verge = verge.latest_ref();
@@ -28,7 +28,7 @@ pub enum Type {
    Lightweight,
    Network,
    ProxyMode,
    Ipc,
    // Ipc,
    // Cache,
    ClashVergeRev,
}
@@ -51,7 +51,7 @@ impl fmt::Display for Type {
        Type::Lightweight => write!(f, "[Lightweight]"),
        Type::Network => write!(f, "[Network]"),
        Type::ProxyMode => write!(f, "[ProxyMode]"),
        Type::Ipc => write!(f, "[IPC]"),
        // Type::Ipc => write!(f, "[IPC]"),
        // Type::Cache => write!(f, "[Cache]"),
        Type::ClashVergeRev => write!(f, "[ClashVergeRev]"),
    }

|
||||
pub async fn set_public_dns(dns_server: String) {
|
||||
use crate::{core::handle, utils::dirs};
|
||||
use tauri_plugin_shell::ShellExt;
|
||||
let app_handle = match handle::Handle::global().app_handle() {
|
||||
Some(handle) => handle,
|
||||
None => {
|
||||
log::error!(target: "app", "app_handle not available for DNS configuration");
|
||||
return;
|
||||
}
|
||||
};
|
||||
let app_handle = handle::Handle::app_handle();
|
||||
|
||||
log::info!(target: "app", "try to set system dns");
|
||||
let resource_dir = match dirs::app_resources_dir() {
|
||||
@@ -50,13 +44,7 @@ pub async fn set_public_dns(dns_server: String) {
|
||||
pub async fn restore_public_dns() {
|
||||
use crate::{core::handle, utils::dirs};
|
||||
use tauri_plugin_shell::ShellExt;
|
||||
let app_handle = match handle::Handle::global().app_handle() {
|
||||
Some(handle) => handle,
|
||||
None => {
|
||||
log::error!(target: "app", "app_handle not available for DNS restoration");
|
||||
return;
|
||||
}
|
||||
};
|
||||
let app_handle = handle::Handle::app_handle();
|
||||
log::info!(target: "app", "try to unset system dns");
|
||||
let resource_dir = match dirs::app_resources_dir() {
|
||||
Ok(dir) => dir,
|
||||
|
||||
@@ -1,5 +1,4 @@
use anyhow::Result;
use tauri::AppHandle;

use crate::{
    config::Config,
@@ -18,8 +17,8 @@ pub mod ui;
pub mod window;
pub mod window_script;

pub fn resolve_setup_handle(app_handle: AppHandle) {
    init_handle(app_handle);
pub fn resolve_setup_handle() {
    init_handle();
}

pub fn resolve_setup_sync() {
@@ -121,9 +120,9 @@ pub async fn resolve_reset_async() -> Result<(), anyhow::Error> {
    Ok(())
}

pub fn init_handle(app_handle: AppHandle) {
pub fn init_handle() {
    logging!(info, Type::Setup, true, "Initializing app handle...");
    handle::Handle::global().init(app_handle);
    handle::Handle::global().init();
}

pub(super) fn init_scheme() {
@@ -2,7 +2,7 @@ use tauri::WebviewWindow;

use crate::{
    core::handle,
    logging, logging_error,
    logging_error,
    utils::{
        logging::Type,
        resolve::window_script::{INITIAL_LOADING_OVERLAY, WINDOW_INITIAL_SCRIPT},
@@ -18,18 +18,10 @@ const MINIMAL_HEIGHT: f64 = 520.0;

/// Build a new WebView window
pub fn build_new_window() -> Result<WebviewWindow, String> {
    let app_handle = handle::Handle::global().app_handle().ok_or_else(|| {
        logging!(
            error,
            Type::Window,
            true,
            "Could not get app_handle; window creation failed"
        );
        "could not get app_handle".to_string()
    })?;
    let app_handle = handle::Handle::app_handle();

    match tauri::WebviewWindowBuilder::new(
        &app_handle,
        app_handle,
        "main", /* the unique window label */
        tauri::WebviewUrl::App("index.html".into()),
    )
@@ -67,7 +67,7 @@ fn should_handle_window_operation() -> bool {
    let now = Instant::now();
    let elapsed = now.duration_since(*last_operation);

    log::debug!(target: "app", "[debounce] checking window-operation interval: {}ms (requires >= {}ms)",
        elapsed.as_millis(), WINDOW_OPERATION_DEBOUNCE_MS);

    if elapsed >= Duration::from_millis(WINDOW_OPERATION_DEBOUNCE_MS) {
@@ -76,7 +76,7 @@ fn should_handle_window_operation() -> bool {
        log::info!(target: "app", "[debounce] window operation allowed");
        true
    } else {
        log::warn!(target: "app", "[debounce] window operation ignored by debounce; {}ms since the last operation < {}ms",
            elapsed.as_millis(), WINDOW_OPERATION_DEBOUNCE_MS);
        false
    }
@@ -117,9 +117,8 @@ impl WindowManager {

    /// Get the main window instance
    pub fn get_main_window() -> Option<WebviewWindow<Wry>> {
        handle::Handle::global()
            .app_handle()
            .and_then(|app| app.get_webview_window("main"))
        let app_handle = handle::Handle::app_handle();
        app_handle.get_webview_window("main")
    }

    /// Smartly show the main window

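
`should_handle_window_operation` above implements a simple time-based debounce: compare `Instant::now()` against the last accepted operation and proceed only if at least the debounce interval has elapsed. A self-contained sketch of the same guard (the static and constant names are hypothetical stand-ins for the module's state):

use std::sync::Mutex;
use std::time::{Duration, Instant};

// Hypothetical stand-in for the module's debounce state.
static LAST_OP: Mutex<Option<Instant>> = Mutex::new(None);
const DEBOUNCE: Duration = Duration::from_millis(500);

fn should_handle() -> bool {
    let mut last = LAST_OP.lock().unwrap();
    match *last {
        Some(t) if t.elapsed() < DEBOUNCE => false, // too soon, ignore
        _ => {
            *last = Some(Instant::now()); // accept and record the time
            true
        }
    }
}

fn main() {
    assert!(should_handle()); // first call goes through
    assert!(!should_handle()); // an immediate second call is debounced
    std::thread::sleep(DEBOUNCE);
    assert!(should_handle()); // allowed again after the debounce window
}
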