feat(auto-backup): implement centralized auto-backup manager and UI (#5374)

* feat(auto-backup): implement centralized auto-backup manager and UI

- Introduced AutoBackupManager to read verge settings, run a background scheduler, debounce change-driven backups (at most one auto backup per 60 seconds), and trim auto-labeled archives (keeping the 20 most recent); wired into startup and config refresh hooks
  (src-tauri/src/module/auto_backup.rs:28-209, src-tauri/src/utils/resolve/mod.rs:64-136, src-tauri/src/feat/config.rs:102-238)
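
  Call sites only fire a trigger; the manager decides whether a backup actually runs (change/schedule toggles, the 60-second debounce, serialized execution). A minimal standalone sketch of that debounce rule, mirroring the check behind should_run_now in auto_backup.rs (illustrative code, not the project's module):

    use std::sync::atomic::{AtomicI64, Ordering};

    const MIN_BACKUP_INTERVAL_SECS: i64 = 60;

    /// Run when no auto backup has happened yet, or when the last one
    /// finished at least MIN_BACKUP_INTERVAL_SECS ago.
    fn should_run_now(last_backup: &AtomicI64, now: i64) -> bool {
        let last = last_backup.load(Ordering::Acquire);
        last == 0 || now.saturating_sub(last) >= MIN_BACKUP_INTERVAL_SECS
    }

    fn main() {
        let last = AtomicI64::new(0);
        assert!(should_run_now(&last, 1_000));  // nothing recorded yet -> run
        last.store(1_000, Ordering::Release);
        assert!(!should_run_now(&last, 1_030)); // 30 s later -> debounced
        assert!(should_run_now(&last, 1_060));  // 60 s later -> run again
    }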

- Extended verge schema and backup helpers so scheduled/change-based settings persist, create_local_backup can rename archives, and profile/global-extend mutations now trigger backups
  (src-tauri/src/config/verge.rs:162-536, src/types/types.d.ts:857-859, src-tauri/src/feat/backup.rs:125-189, src-tauri/src/cmd/profile.rs:66-476, src-tauri/src/cmd/save_profile.rs:21-82)
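
  The renaming goes through a namer closure passed to create_local_backup_with_namer; auto backups use one that inserts an "-auto-<trigger>" marker before the extension, which is also what the cleanup pass matches when trimming old archives. A standalone sketch of that naming rule, mirroring append_auto_suffix from auto_backup.rs (the sample archive name is illustrative only, not the real backup filename format):

    const AUTO_MARKER: &str = "-auto-";

    /// Insert the auto marker and trigger slug before the file extension,
    /// falling back to a plain suffix when there is no extension.
    fn append_auto_suffix(file_name: &str, slug: &str) -> String {
        match file_name.rsplit_once('.') {
            Some((stem, ext)) => format!("{stem}{AUTO_MARKER}{slug}.{ext}"),
            None => format!("{file_name}{AUTO_MARKER}{slug}"),
        }
    }

    fn main() {
        // Hypothetical archive name, purely for illustration.
        assert_eq!(
            append_auto_suffix("backup-20251110.zip", "profile"),
            "backup-20251110-auto-profile.zip"
        );
        assert_eq!(append_auto_suffix("backup", "merge"), "backup-auto-merge");
    }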

- Added an Auto Backup settings panel to the backup dialog, with toggles for scheduled and change-triggered backups plus an interval selector; localized the new strings across all locales
  (src/components/setting/mods/auto-backup-settings.tsx:1-138, src/components/setting/mods/backup-viewer.tsx:28-309, src/locales/en/settings.json:312-326 and mirrored entries)
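
  The interval selector works in hours; before the scheduler uses the value it is clamped to a sane range, so out-of-range input degrades gracefully instead of failing. A standalone sketch of that clamping (constants mirror auto_backup.rs):

    const DEFAULT_INTERVAL_HOURS: u64 = 24;
    const MIN_INTERVAL_HOURS: u64 = 1;
    const MAX_INTERVAL_HOURS: u64 = 168; // one week

    /// Resolve the effective scheduler interval from the optional verge setting.
    fn effective_interval_hours(configured: Option<u64>) -> u64 {
        configured
            .unwrap_or(DEFAULT_INTERVAL_HOURS)
            .clamp(MIN_INTERVAL_HOURS, MAX_INTERVAL_HOURS)
    }

    fn main() {
        assert_eq!(effective_interval_hours(None), 24);       // unset -> default
        assert_eq!(effective_interval_hours(Some(0)), 1);     // too small -> min
        assert_eq!(effective_interval_hours(Some(500)), 168); // too large -> max
    }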

- Regenerated typed i18n resources for strong typing in React
  (src/types/generated/i18n-keys.ts, src/types/generated/i18n-resources.ts)

* refactor(setting/backup): restructure backup dialog for consistent layout

* refactor(ui): unify settings dialog style

* fix(backup): only trigger auto-backup on valid saves; restore now restarts the app safely

* fix(backup): scrub console.log leak and rewire WebDAV dialog to actually probe the server

* refactor: rename SubscriptionChange to ProfileChange

* chore: update i18n

* chore: WebDAV i18n improvements

* refactor(backup): error handling

* refactor(auto-backup): wrap scheduler startup with maybe_start_runner

* refactor: remove the redundant throw in handleExport

* feat(backup-history-viewer): improve WebDAV handling and UI fallback

* feat(auto-backup): trigger backups on all profile edits & improve interval input UX

* refactor: use InputAdornment

* docs: Changelog.md
Sline authored 2025-11-10 13:49:14 +08:00 · committed by GitHub
parent 78d5cb5eca · commit 838e401796
45 changed files with 1714 additions and 794 deletions

View File

@@ -11,6 +11,7 @@ use crate::{
},
core::{CoreManager, handle, timer::Timer, tray::Tray},
feat, logging,
module::auto_backup::{AutoBackupManager, AutoBackupTrigger},
process::AsyncHandler,
ret_err,
utils::{dirs, help, logging::Type},
@@ -90,6 +91,7 @@ pub async fn import_profile(url: std::string::String, option: Option<PrfOption>)
}
logging!(info, Type::Cmd, "[导入订阅] 导入完成: {}", url);
AutoBackupManager::trigger_backup(AutoBackupTrigger::ProfileChange);
Ok(())
}
@@ -122,6 +124,7 @@ pub async fn create_profile(item: PrfItem, file_data: Option<String>) -> CmdResu
handle::Handle::notify_profile_changed(uid.clone());
}
Config::profiles().await.apply();
AutoBackupManager::trigger_backup(AutoBackupTrigger::ProfileChange);
Ok(())
}
Err(err) => {
@@ -164,6 +167,7 @@ pub async fn delete_profile(index: String) -> CmdResult {
// 发送配置变更通知
logging!(info, Type::Cmd, "[删除订阅] 发送配置变更通知: {}", index);
handle::Handle::notify_profile_changed(index);
AutoBackupManager::trigger_backup(AutoBackupTrigger::ProfileChange);
}
Err(e) => {
logging!(error, Type::Cmd, "{}", e);
@@ -460,6 +464,7 @@ pub async fn patch_profile(index: String, profile: PrfItem) -> CmdResult {
});
}
AutoBackupManager::trigger_backup(AutoBackupTrigger::ProfileChange);
Ok(())
}

View File

@@ -4,6 +4,7 @@ use crate::{
config::{Config, PrfItem},
core::{CoreManager, handle, validate::CoreConfigValidator},
logging,
module::auto_backup::{AutoBackupManager, AutoBackupTrigger},
utils::{dirs, logging::Type},
};
use smartstring::alias::String;
@@ -17,6 +18,12 @@ pub async fn save_profile_file(index: String, file_data: Option<String>) -> CmdR
None => return Ok(()),
};
let backup_trigger = match index.as_str() {
"Merge" => Some(AutoBackupTrigger::GlobalMerge),
"Script" => Some(AutoBackupTrigger::GlobalScript),
_ => Some(AutoBackupTrigger::ProfileChange),
};
// 在异步操作前获取必要元数据并释放锁
let (rel_path, is_merge_file) = {
let profiles = Config::profiles().await;
@@ -51,11 +58,17 @@ pub async fn save_profile_file(index: String, file_data: Option<String>) -> CmdR
is_merge_file
);
- if is_merge_file {
- return handle_merge_file(&file_path_str, &file_path, &original_content).await;
+ let changes_applied = if is_merge_file {
+ handle_merge_file(&file_path_str, &file_path, &original_content).await?
+ } else {
+ handle_full_validation(&file_path_str, &file_path, &original_content).await?
+ };
+ if changes_applied && let Some(trigger) = backup_trigger {
+ AutoBackupManager::trigger_backup(trigger);
+ }
- handle_full_validation(&file_path_str, &file_path, &original_content).await
+ Ok(())
}
async fn restore_original(
@@ -76,7 +89,7 @@ async fn handle_merge_file(
file_path_str: &str,
file_path: &std::path::Path,
original_content: &str,
- ) -> CmdResult {
+ ) -> CmdResult<bool> {
logging!(
info,
Type::Config,
@@ -96,7 +109,7 @@ async fn handle_merge_file(
} else {
handle::Handle::refresh_clash();
}
- Ok(())
+ Ok(true)
}
Ok((false, error_msg)) => {
logging!(
@@ -108,7 +121,7 @@ async fn handle_merge_file(
restore_original(file_path, original_content).await?;
let result = (false, error_msg.clone());
crate::cmd::validate::handle_yaml_validation_notice(&result, "合并配置文件");
- Ok(())
+ Ok(false)
}
Err(e) => {
logging!(error, Type::Config, "[cmd配置save] 验证过程发生错误: {}", e);
@@ -122,11 +135,11 @@ async fn handle_full_validation(
file_path_str: &str,
file_path: &std::path::Path,
original_content: &str,
- ) -> CmdResult {
+ ) -> CmdResult<bool> {
match CoreConfigValidator::validate_config_file(file_path_str, None).await {
Ok((true, _)) => {
logging!(info, Type::Config, "[cmd配置save] 验证成功");
- Ok(())
+ Ok(true)
}
Ok((false, error_msg)) => {
logging!(warn, Type::Config, "[cmd配置save] 验证失败: {}", error_msg);
@@ -160,7 +173,7 @@ async fn handle_full_validation(
handle::Handle::notice_message("config_validate::error", error_msg.to_owned());
}
- Ok(())
+ Ok(false)
}
Err(e) => {
logging!(error, Type::Config, "[cmd配置save] 验证过程发生错误: {}", e);

View File

@@ -158,6 +158,15 @@ pub struct IVerge {
/// 0: 不清理; 1: 1天; 2: 7天; 3: 30天; 4: 90天
pub auto_log_clean: Option<i32>,
/// Enable scheduled automatic backups
pub enable_auto_backup_schedule: Option<bool>,
/// Automatic backup interval in hours
pub auto_backup_interval_hours: Option<u64>,
/// Create backups automatically when critical configs change
pub auto_backup_on_change: Option<bool>,
/// verge 的各种 port 用于覆盖 clash 的各种 port
#[cfg(not(target_os = "windows"))]
pub verge_redir_port: Option<u16>,
@@ -422,6 +431,9 @@ impl IVerge {
auto_check_update: Some(true),
enable_builtin_enhanced: Some(true),
auto_log_clean: Some(2), // 1: 1天, 2: 7天, 3: 30天, 4: 90天
enable_auto_backup_schedule: Some(false),
auto_backup_interval_hours: Some(24),
auto_backup_on_change: Some(true),
webdav_url: None,
webdav_username: None,
webdav_password: None,
@@ -517,6 +529,9 @@ impl IVerge {
patch!(proxy_layout_column);
patch!(test_list);
patch!(auto_log_clean);
patch!(enable_auto_backup_schedule);
patch!(auto_backup_interval_hours);
patch!(auto_backup_on_change);
patch!(webdav_url);
patch!(webdav_username);

View File

@@ -123,6 +123,15 @@ pub async fn restore_webdav_backup(filename: String) -> Result<()> {
/// Create a backup and save to local storage
pub async fn create_local_backup() -> Result<()> {
create_local_backup_with_namer(|name| name.to_string().into())
.await
.map(|_| ())
}
pub async fn create_local_backup_with_namer<F>(namer: F) -> Result<String>
where
F: FnOnce(&str) -> String,
{
let (file_name, temp_file_path) = backup::create_backup().await.map_err(|err| {
logging!(
error,
@@ -133,7 +142,8 @@ pub async fn create_local_backup() -> Result<()> {
})?;
let backup_dir = local_backup_dir()?;
- let target_path = backup_dir.join(file_name.as_str());
+ let final_name = namer(file_name.as_str());
+ let target_path = backup_dir.join(final_name.as_str());
if let Err(err) = move_file(temp_file_path.clone(), target_path.clone()).await {
logging!(
@@ -152,7 +162,7 @@ pub async fn create_local_backup() -> Result<()> {
return Err(err);
}
- Ok(())
+ Ok(final_name)
}
async fn move_file(from: PathBuf, to: PathBuf) -> Result<()> {

View File

@@ -2,7 +2,7 @@ use crate::{
config::{Config, IVerge},
core::{CoreManager, handle, hotkey, sysopt, tray},
logging_error,
- module::lightweight,
+ module::{auto_backup::AutoBackupManager, lightweight},
utils::{draft::SharedBox, logging::Type},
};
use anyhow::Result;
@@ -243,6 +243,10 @@ pub async fn patch_verge(patch: &IVerge, not_save_file: bool) -> Result<()> {
return Err(err);
}
Config::verge().await.apply();
logging_error!(
Type::Backup,
AutoBackupManager::global().refresh_settings().await
);
if !not_save_file {
// 分离数据获取和异步调用
let verge_data = Config::verge().await.data_arc();

View File

@@ -0,0 +1,332 @@
use crate::{
config::{Config, IVerge},
feat::create_local_backup_with_namer,
logging,
process::AsyncHandler,
utils::{dirs::local_backup_dir, logging::Type},
};
use anyhow::Result;
use chrono::Local;
use once_cell::sync::OnceCell;
use parking_lot::RwLock;
use std::{
path::PathBuf,
sync::{
Arc,
atomic::{AtomicBool, AtomicI64, Ordering},
},
time::{Duration, UNIX_EPOCH},
};
use tokio::{
fs,
sync::{Mutex, watch},
};
const DEFAULT_INTERVAL_HOURS: u64 = 24;
const MIN_INTERVAL_HOURS: u64 = 1;
const MAX_INTERVAL_HOURS: u64 = 168;
const MIN_BACKUP_INTERVAL_SECS: i64 = 60;
const AUTO_BACKUP_KEEP: usize = 20;
const AUTO_MARKER: &str = "-auto-";
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AutoBackupTrigger {
Scheduled,
GlobalMerge,
GlobalScript,
ProfileChange,
}
impl AutoBackupTrigger {
const fn slug(self) -> &'static str {
match self {
Self::Scheduled => "scheduled",
Self::GlobalMerge => "merge",
Self::GlobalScript => "script",
Self::ProfileChange => "profile",
}
}
const fn is_schedule(self) -> bool {
matches!(self, Self::Scheduled)
}
}
#[derive(Clone, Copy, Debug)]
struct AutoBackupSettings {
schedule_enabled: bool,
interval_hours: u64,
change_enabled: bool,
}
impl AutoBackupSettings {
fn from_verge(verge: &IVerge) -> Self {
let interval = verge
.auto_backup_interval_hours
.unwrap_or(DEFAULT_INTERVAL_HOURS)
.clamp(MIN_INTERVAL_HOURS, MAX_INTERVAL_HOURS);
Self {
schedule_enabled: verge.enable_auto_backup_schedule.unwrap_or(false),
interval_hours: interval,
change_enabled: verge.auto_backup_on_change.unwrap_or(true),
}
}
}
impl Default for AutoBackupSettings {
fn default() -> Self {
Self {
schedule_enabled: false,
interval_hours: DEFAULT_INTERVAL_HOURS,
change_enabled: true,
}
}
}
pub struct AutoBackupManager {
settings: Arc<RwLock<AutoBackupSettings>>,
settings_tx: watch::Sender<AutoBackupSettings>,
runner_started: AtomicBool,
exec_lock: Mutex<()>,
last_backup: AtomicI64,
}
impl AutoBackupManager {
pub fn global() -> &'static Self {
static INSTANCE: OnceCell<AutoBackupManager> = OnceCell::new();
INSTANCE.get_or_init(|| {
let (tx, _rx) = watch::channel(AutoBackupSettings::default());
Self {
settings: Arc::new(RwLock::new(AutoBackupSettings::default())),
settings_tx: tx,
runner_started: AtomicBool::new(false),
exec_lock: Mutex::new(()),
last_backup: AtomicI64::new(0),
}
})
}
pub async fn init(&self) -> Result<()> {
let settings = Self::load_settings().await;
{
*self.settings.write() = settings;
}
let _ = self.settings_tx.send(settings);
self.maybe_start_runner(settings);
Ok(())
}
pub async fn refresh_settings(&self) -> Result<()> {
let settings = Self::load_settings().await;
{
*self.settings.write() = settings;
}
let _ = self.settings_tx.send(settings);
self.maybe_start_runner(settings);
Ok(())
}
pub fn trigger_backup(trigger: AutoBackupTrigger) {
AsyncHandler::spawn(move || async move {
if let Err(err) = Self::global().execute_trigger(trigger).await {
logging!(
warn,
Type::Backup,
"Auto backup execution failed ({:?}): {err:#?}",
trigger
);
}
});
}
fn maybe_start_runner(&self, settings: AutoBackupSettings) {
if settings.schedule_enabled {
self.ensure_runner();
}
}
fn ensure_runner(&self) {
if self.runner_started.swap(true, Ordering::SeqCst) {
return;
}
let mut rx = self.settings_tx.subscribe();
AsyncHandler::spawn(move || async move {
Self::run_scheduler(&mut rx).await;
});
}
async fn run_scheduler(rx: &mut watch::Receiver<AutoBackupSettings>) {
let mut current = *rx.borrow();
loop {
if !current.schedule_enabled {
if rx.changed().await.is_err() {
break;
}
current = *rx.borrow();
continue;
}
let duration = Duration::from_secs(current.interval_hours.saturating_mul(3600));
let sleeper = tokio::time::sleep(duration);
tokio::pin!(sleeper);
tokio::select! {
_ = &mut sleeper => {
if let Err(err) = Self::global()
.execute_trigger(AutoBackupTrigger::Scheduled)
.await
{
logging!(
warn,
Type::Backup,
"Scheduled auto backup failed: {err:#?}"
);
}
}
changed = rx.changed() => {
if changed.is_err() {
break;
}
current = *rx.borrow();
}
}
}
}
async fn execute_trigger(&self, trigger: AutoBackupTrigger) -> Result<()> {
let snapshot = *self.settings.read();
if trigger.is_schedule() && !snapshot.schedule_enabled {
return Ok(());
}
if !trigger.is_schedule() && !snapshot.change_enabled {
return Ok(());
}
if !self.should_run_now() {
return Ok(());
}
let _guard = self.exec_lock.lock().await;
if !self.should_run_now() {
return Ok(());
}
let file_name =
create_local_backup_with_namer(|name| append_auto_suffix(name, trigger.slug()).into())
.await?;
self.last_backup
.store(Local::now().timestamp(), Ordering::Release);
if let Err(err) = cleanup_auto_backups().await {
logging!(
warn,
Type::Backup,
"Failed to cleanup old auto backups: {err:#?}"
);
}
logging!(
info,
Type::Backup,
"Auto backup created ({:?}): {}",
trigger,
file_name
);
Ok(())
}
fn should_run_now(&self) -> bool {
let last = self.last_backup.load(Ordering::Acquire);
if last == 0 {
return true;
}
let now = Local::now().timestamp();
now.saturating_sub(last) >= MIN_BACKUP_INTERVAL_SECS
}
async fn load_settings() -> AutoBackupSettings {
let verge = Config::verge().await;
AutoBackupSettings::from_verge(&verge.latest_arc())
}
}
fn append_auto_suffix(file_name: &str, slug: &str) -> String {
match file_name.rsplit_once('.') {
Some((stem, ext)) => format!("{stem}{AUTO_MARKER}{slug}.{ext}"),
None => format!("{file_name}{AUTO_MARKER}{slug}"),
}
}
async fn cleanup_auto_backups() -> Result<()> {
if AUTO_BACKUP_KEEP == 0 {
return Ok(());
}
let backup_dir = local_backup_dir()?;
if !backup_dir.exists() {
return Ok(());
}
let mut entries = match fs::read_dir(&backup_dir).await {
Ok(dir) => dir,
Err(err) => {
logging!(
warn,
Type::Backup,
"Failed to read backup directory: {err:#?}"
);
return Ok(());
}
};
let mut files: Vec<(PathBuf, u64)> = Vec::new();
while let Some(entry) = entries.next_entry().await? {
let path = entry.path();
if !path.is_file() {
continue;
}
let file_name = match entry.file_name().into_string() {
Ok(name) => name,
Err(_) => continue,
};
if !file_name.contains(AUTO_MARKER) {
continue;
}
let modified = entry
.metadata()
.await
.and_then(|meta| meta.modified())
.ok()
.and_then(|time| time.duration_since(UNIX_EPOCH).ok())
.map(|dur| dur.as_secs())
.unwrap_or(0);
files.push((path, modified));
}
if files.len() <= AUTO_BACKUP_KEEP {
return Ok(());
}
files.sort_by_key(|(_, ts)| *ts);
let remove_count = files.len() - AUTO_BACKUP_KEEP;
for (path, _) in files.into_iter().take(remove_count) {
if let Err(err) = fs::remove_file(&path).await {
logging!(
warn,
Type::Backup,
"Failed to remove auto backup {}: {err:#?}",
path.display()
);
}
}
Ok(())
}

View File

@@ -1,3 +1,4 @@
pub mod auto_backup;
pub mod lightweight;
pub mod signal;
pub mod sysinfo;

View File

@@ -10,7 +10,7 @@ use crate::{
tray::Tray,
},
logging, logging_error,
- module::{lightweight::auto_lightweight_boot, signal},
+ module::{auto_backup::AutoBackupManager, lightweight::auto_lightweight_boot, signal},
process::AsyncHandler,
utils::{init, logging::Type, server, window_manager::WindowManager},
};
@@ -68,6 +68,7 @@ pub fn resolve_setup_async() {
init_timer(),
init_hotkey(),
init_auto_lightweight_boot(),
init_auto_backup(),
);
});
}
@@ -127,6 +128,10 @@ pub(super) async fn init_auto_lightweight_boot() {
logging_error!(Type::Setup, auto_lightweight_boot().await);
}
pub(super) async fn init_auto_backup() {
logging_error!(Type::Setup, AutoBackupManager::global().init().await);
}
pub(super) fn init_signal() {
logging!(info, Type::Setup, "Initializing signal handlers...");
signal::register();