feat: migrate logs API from REST to IPC streaming (#4277)

* feat: migrate logs API from REST to IPC streaming

- Replace REST API `/logs` calls with an IPC streaming implementation
- Add new `src-tauri/src/ipc/logs.rs` with `LogsMonitor` for real-time log streaming
- Implement duplicate stream prevention with level tracking
- Add frontend-backend communication via Tauri commands for log management
- Remove WebSocket compatibility; run in IPC-only mode
- Fix duplicate monitoring-task startup when toggling the log service
- Add proper task lifecycle management with JoinHandle cleanup (a minimal sketch of the pattern follows this list)
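As a minimal sketch of the last two bullets — assuming a Tokio runtime, with names simplified from the actual LogsMonitor in src-tauri/src/ipc/logs.rs below:

use std::sync::Arc;
use tokio::{sync::RwLock, task::JoinHandle};

// Simplified from LogsMonitor: track the active level and the task handle so a
// repeated start request for the same level becomes a no-op, and a level change
// aborts the old task before spawning its replacement.
struct Monitor {
    level: Arc<RwLock<Option<String>>>,
    task: Arc<RwLock<Option<JoinHandle<()>>>>,
}

impl Monitor {
    async fn start(&self, requested: String) {
        // Duplicate-stream prevention: skip if this level is already streaming.
        if self.level.read().await.as_deref() == Some(requested.as_str()) {
            return;
        }
        // Task lifecycle cleanup: abort the previous stream so only one runs.
        if let Some(old) = self.task.write().await.take() {
            old.abort();
        }
        *self.level.write().await = Some(requested.clone());
        let handle = tokio::spawn(async move {
            let _level = requested; // placeholder for the real /logs stream loop
        });
        *self.task.write().await = Some(handle);
    }
}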

* refactor: remove dead code from logs.rs to fix clippy warnings

- Remove unused `timestamp` field from LogItem struct
- Remove unused `client` field from LogsMonitor struct
- Remove unused methods: `is_fresh`, `get_current_monitoring_level`, `get_current_logs`
- Simplify LogsMonitor initialization by removing client dependency
- All clippy warnings under `-D warnings` are now resolved

* refactor: extract duplicate fmt_bytes function to utils module

- Create new utils/format.rs module with fmt_bytes function
- Remove duplicate fmt_bytes implementations from traffic.rs and memory.rs
- Update imports to use shared utils::format::fmt_bytes
- Add comprehensive unit tests for fmt_bytes function
- Keep the code DRY and maintainable (a call-site sketch follows this list)
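A minimal call-site sketch, assuming the module layout this commit introduces (the helper itself and its tests appear in the src-tauri/src/utils/format.rs diff below); the function name render_rate is hypothetical:

use crate::utils::format::fmt_bytes;

// traffic.rs and memory.rs now call the shared helper rather than
// keeping private copies of fmt_bytes.
fn render_rate(up: u64, down: u64) -> String {
    format!("up {}/s, down {}/s", fmt_bytes(up), fmt_bytes(down))
}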
Tunglies
2025-07-30 23:11:21 +08:00
committed by GitHub
parent e2a548f6a5
commit 1f78d576a3
16 changed files with 526 additions and 161 deletions

View File

@@ -44,6 +44,7 @@
- Fix incorrect argument usage in the `Windows` installer
- Fix proxy node latency testing broken after the `IPC` migration
- Fix connection upload/download rate calculation broken after the `IPC` migration
- Fix core log functionality broken after the `IPC` migration
- Fix the missing prerequisites that prevented `External-Controller-Cors` from being saved
- Fix inconsistent port display on the home page

View File

@@ -1,7 +1,12 @@
use super::CmdResult;
use crate::{
config::*, core::*, feat, ipc::IpcManager, process::AsyncHandler,
state::proxy::ProxyRequestCache, wrap_err,
config::*,
core::*,
feat,
ipc::{self, IpcManager},
process::AsyncHandler,
state::proxy::ProxyRequestCache,
wrap_err,
};
use serde_yaml::Mapping;
use std::time::Duration;
@@ -572,3 +577,23 @@ pub async fn is_clash_debug_enabled() -> CmdResult<bool> {
pub async fn clash_gc() -> CmdResult {
wrap_err!(IpcManager::global().gc().await)
}
/// Get logs (uses the new streaming implementation)
#[tauri::command]
pub async fn get_clash_logs(level: Option<String>) -> CmdResult<serde_json::Value> {
Ok(ipc::get_logs_json(level).await)
}
/// Start logs monitoring
#[tauri::command]
pub async fn start_logs_monitoring(level: Option<String>) -> CmdResult {
ipc::start_logs_monitoring(level).await;
Ok(())
}
/// Clear logs
#[tauri::command]
pub async fn clear_logs() -> CmdResult {
ipc::clear_logs().await;
Ok(())
}

View File

@@ -382,29 +382,5 @@ impl IpcManager {
}
}
// Traffic data
#[allow(dead_code)]
pub async fn get_traffic(&self) -> AnyResult<serde_json::Value> {
let url = "/traffic";
logging!(info, Type::Ipc, true, "IPC: sending GET request to {}", url);
let result = self.send_request("GET", url, None).await;
logging!(
info,
Type::Ipc,
true,
"IPC: /traffic 请求结果: {:?}",
result
);
result
}
// Memory
#[allow(dead_code)]
pub async fn get_memory(&self) -> AnyResult<serde_json::Value> {
let url = "/memory";
logging!(info, Type::Ipc, true, "IPC: sending GET request to {}", url);
let result = self.send_request("GET", url, None).await;
logging!(info, Type::Ipc, true, "IPC: /memory request result: {:?}", result);
result
}
// Log functionality has been migrated to the logs.rs module, which uses stream processing
}

src-tauri/src/ipc/logs.rs (new file, 295 lines)
View File

@@ -0,0 +1,295 @@
use kode_bridge::IpcStreamClient;
use serde::{Deserialize, Serialize};
use std::{
collections::VecDeque,
sync::{Arc, OnceLock},
time::Instant,
};
use tokio::{sync::RwLock, task::JoinHandle, time::Duration};
use crate::{
logging,
utils::{dirs::ipc_path, logging::Type},
};
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct LogData {
#[serde(rename = "type")]
pub log_type: String,
pub payload: String,
}
#[derive(Debug, Clone)]
pub struct LogItem {
pub log_type: String,
pub payload: String,
pub time: String,
}
impl LogItem {
fn new(log_type: String, payload: String) -> Self {
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
// Simple time formatting (HH:MM:SS); derived from epoch seconds, so this is UTC
let hours = (now / 3600) % 24;
let minutes = (now / 60) % 60;
let seconds = now % 60;
let time_str = format!("{hours:02}:{minutes:02}:{seconds:02}");
Self {
log_type,
payload,
time: time_str,
}
}
}
#[derive(Debug, Clone)]
pub struct CurrentLogs {
pub logs: VecDeque<LogItem>,
pub level: String,
pub last_updated: Instant,
}
impl Default for CurrentLogs {
fn default() -> Self {
Self {
logs: VecDeque::with_capacity(1000),
level: "info".to_string(),
last_updated: Instant::now(),
}
}
}
// Logs monitor with streaming support
pub struct LogsMonitor {
current: Arc<RwLock<CurrentLogs>>,
task_handle: Arc<RwLock<Option<JoinHandle<()>>>>,
current_monitoring_level: Arc<RwLock<Option<String>>>,
}
static INSTANCE: OnceLock<LogsMonitor> = OnceLock::new();
impl LogsMonitor {
pub fn global() -> &'static LogsMonitor {
INSTANCE.get_or_init(|| {
let instance = LogsMonitor::new();
logging!(info, Type::Ipc, true, "LogsMonitor initialized");
instance
})
}
fn new() -> Self {
let current = Arc::new(RwLock::new(CurrentLogs::default()));
Self {
current,
task_handle: Arc::new(RwLock::new(None)),
current_monitoring_level: Arc::new(RwLock::new(None)),
}
}
pub async fn start_monitoring(&self, level: Option<String>) {
let filter_level = level.clone().unwrap_or_else(|| "info".to_string());
// Check if we're already monitoring the same level
{
let current_level = self.current_monitoring_level.read().await;
if let Some(existing_level) = current_level.as_ref() {
if existing_level == &filter_level {
logging!(
info,
Type::Ipc,
true,
"LogsMonitor: Already monitoring level '{}', skipping duplicate request",
filter_level
);
return;
}
}
}
// Stop existing monitoring task if level changed or first time
{
let mut handle = self.task_handle.write().await;
if let Some(task) = handle.take() {
task.abort();
logging!(
info,
Type::Ipc,
true,
"LogsMonitor: Stopped previous monitoring task (level changed)"
);
}
}
// Update current monitoring level
{
let mut current_level = self.current_monitoring_level.write().await;
*current_level = Some(filter_level.clone());
}
let monitor_current = self.current.clone();
let ipc_path_buf = ipc_path().unwrap();
let ipc_path = ipc_path_buf.to_str().unwrap_or_default();
let client = IpcStreamClient::new(ipc_path).unwrap();
// Update current level in data structure
{
let mut current = monitor_current.write().await;
current.level = filter_level.clone();
}
let task = tokio::spawn(async move {
loop {
let url = if filter_level == "info" {
"/logs".to_string()
} else {
let level_param = if filter_level == "all" {
"debug"
} else {
&filter_level
};
format!("/logs?level={level_param}")
};
logging!(
info,
Type::Ipc,
true,
"LogsMonitor: Starting stream for {}",
url
);
let _ = client
.get(&url)
.timeout(Duration::from_secs(30))
.process_lines(|line| {
if let Ok(log_data) = serde_json::from_str::<LogData>(line.trim()) {
// Filter logs based on level if needed
let should_include = match filter_level.as_str() {
"all" => true,
level => log_data.log_type.to_lowercase() == level.to_lowercase(),
};
if should_include {
let log_item = LogItem::new(log_data.log_type, log_data.payload);
tokio::spawn({
let current = monitor_current.clone();
async move {
let mut logs = current.write().await;
// Add new log
logs.logs.push_back(log_item);
// Keep only the last 1000 logs
if logs.logs.len() > 1000 {
logs.logs.pop_front();
}
logs.last_updated = Instant::now();
}
});
}
}
Ok(())
})
.await;
// Wait before retrying
tokio::time::sleep(Duration::from_secs(2)).await;
}
});
// Store the task handle
{
let mut handle = self.task_handle.write().await;
*handle = Some(task);
}
logging!(
info,
Type::Ipc,
true,
"LogsMonitor: Started new monitoring task for level: {:?}",
level
);
}
pub async fn current(&self) -> CurrentLogs {
self.current.read().await.clone()
}
pub async fn clear_logs(&self) {
let mut current = self.current.write().await;
current.logs.clear();
current.last_updated = Instant::now();
// Also reset monitoring level when clearing logs
{
let mut monitoring_level = self.current_monitoring_level.write().await;
*monitoring_level = None;
}
// Abort current monitoring task
{
let mut handle = self.task_handle.write().await;
if let Some(task) = handle.take() {
task.abort();
logging!(
info,
Type::Ipc,
true,
"LogsMonitor: Stopped monitoring task due to clear_logs"
);
}
}
}
pub async fn get_logs_as_json(&self, level: Option<String>) -> serde_json::Value {
let current = self.current().await;
let filtered_logs: Vec<serde_json::Value> = current
.logs
.iter()
.filter(|log| {
if let Some(ref filter_level) = level {
if filter_level == "all" {
true
} else {
log.log_type.to_lowercase() == filter_level.to_lowercase()
}
} else {
true
}
})
.map(|log| {
serde_json::json!({
"type": log.log_type,
"payload": log.payload,
"time": log.time
})
})
.collect();
serde_json::Value::Array(filtered_logs)
}
}
pub async fn start_logs_monitoring(level: Option<String>) {
LogsMonitor::global().start_monitoring(level).await;
}
pub async fn clear_logs() {
LogsMonitor::global().clear_logs().await;
}
pub async fn get_logs_json(level: Option<String>) -> serde_json::Value {
LogsMonitor::global().get_logs_as_json(level).await
}
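
For reference, a hypothetical async caller of this module's public API might look like the following (not part of the diff; the one-second sleep just gives the stream time to populate the cache):

async fn dump_debug_logs() {
    // Start streaming at "debug", then read the buffered logs back as JSON.
    crate::ipc::start_logs_monitoring(Some("debug".into())).await;
    tokio::time::sleep(std::time::Duration::from_secs(1)).await;
    let logs = crate::ipc::get_logs_json(Some("debug".into())).await;
    println!("{logs}");
}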

View File

@@ -8,7 +8,7 @@ use tokio::{sync::RwLock, time::Duration};
use crate::{
logging,
utils::{dirs::ipc_path, logging::Type},
utils::{dirs::ipc_path, format::fmt_bytes, logging::Type},
};
#[derive(Debug, Clone, Deserialize, Serialize)]
@@ -101,16 +101,6 @@ impl MemoryMonitor {
}
}
fn fmt_bytes(bytes: u64) -> String {
const UNITS: &[&str] = &["B", "KB", "MB", "GB"];
let (mut val, mut unit) = (bytes as f64, 0);
while val >= 1024.0 && unit < 3 {
val /= 1024.0;
unit += 1;
}
format!("{:.1}{}", val, UNITS[unit])
}
pub async fn get_current_memory() -> CurrentMemory {
MemoryMonitor::global().current().await
}

View File

@@ -1,8 +1,10 @@
pub mod general;
pub mod logs;
pub mod memory;
pub mod traffic;
pub use general::IpcManager;
pub use logs::{clear_logs, get_logs_json, start_logs_monitoring};
pub use memory::{get_current_memory, get_formatted_memory};
pub use traffic::{get_current_traffic, get_formatted_traffic};

View File

@@ -8,7 +8,7 @@ use tokio::{sync::RwLock, time::Duration};
use crate::{
logging,
utils::{dirs::ipc_path, logging::Type},
utils::{dirs::ipc_path, format::fmt_bytes, logging::Type},
};
#[derive(Debug, Clone, Deserialize, Serialize)]
@@ -119,16 +119,6 @@ impl TrafficMonitor {
}
}
fn fmt_bytes(bytes: u64) -> String {
const UNITS: &[&str] = &["B", "KB", "MB", "GB"];
let (mut val, mut unit) = (bytes as f64, 0);
while val >= 1024.0 && unit < 3 {
val /= 1024.0;
unit += 1;
}
format!("{:.1}{}", val, UNITS[unit])
}
pub async fn get_current_traffic() -> CurrentTraffic {
TrafficMonitor::global().current().await
}

View File

@@ -289,6 +289,9 @@ pub fn run() {
cmd::get_group_proxy_delays,
cmd::is_clash_debug_enabled,
cmd::clash_gc,
cmd::get_clash_logs,
cmd::start_logs_monitoring,
cmd::clear_logs,
cmd::get_traffic_data,
cmd::get_memory_data,
cmd::get_formatted_traffic_data,

View File

@@ -0,0 +1,25 @@
/// Format bytes into human readable string (B, KB, MB, GB)
pub fn fmt_bytes(bytes: u64) -> String {
const UNITS: &[&str] = &["B", "KB", "MB", "GB"];
let (mut val, mut unit) = (bytes as f64, 0);
while val >= 1024.0 && unit < 3 {
val /= 1024.0;
unit += 1;
}
format!("{:.1}{}", val, UNITS[unit])
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_fmt_bytes() {
assert_eq!(fmt_bytes(0), "0.0B");
assert_eq!(fmt_bytes(512), "512.0B");
assert_eq!(fmt_bytes(1024), "1.0KB");
assert_eq!(fmt_bytes(1536), "1.5KB");
assert_eq!(fmt_bytes(1024 * 1024), "1.0MB");
assert_eq!(fmt_bytes(1024 * 1024 * 1024), "1.0GB");
}
}

View File

@@ -1,5 +1,6 @@
pub mod autostart;
pub mod dirs;
pub mod format;
pub mod help;
pub mod i18n;
pub mod init;

View File

@@ -12,18 +12,6 @@ export type { ILogItem };
const MAX_LOG_NUM = 1000;
const buildWSUrl = (server: string, logLevel: LogLevel) => {
let baseUrl = `${server}/logs`;
// Only handle the log level parameter
if (logLevel && logLevel !== "info") {
const level = logLevel === "all" ? "debug" : logLevel;
baseUrl += `?level=${level}`;
}
return baseUrl;
};
interface LogStore {
logs: ILogItem[];
clearLogs: () => void;

View File

@@ -184,7 +184,7 @@ const Layout = () => {
useEffect(() => {
if (clashInfo) {
const { server = "", secret = "" } = clashInfo;
initGlobalLogService(server, secret, enableLog, "info");
initGlobalLogService(enableLog, "info");
}
}, [clashInfo, enableLog]);

View File

@@ -71,18 +71,12 @@ const LogPage = () => {
const handleLogLevelChange = (newLevel: LogLevel) => {
setLogLevel(newLevel);
if (clashInfo) {
const { server = "", secret = "" } = clashInfo;
changeLogLevel(newLevel, server, secret);
}
changeLogLevel(newLevel);
};
const handleToggleLog = () => {
if (clashInfo) {
const { server = "", secret = "" } = clashInfo;
toggleLogEnabled(server, secret);
setEnableLog(!enableLog);
}
toggleLogEnabled();
setEnableLog(!enableLog);
};
return (

View File

@@ -412,6 +412,18 @@ export async function gc() {
return invoke<void>("clash_gc");
}
export async function getClashLogs(level?: string) {
return invoke<any>("get_clash_logs", { level });
}
export async function startLogsMonitoring(level?: string) {
return invoke<void>("start_logs_monitoring", { level });
}
export async function clearLogs() {
return invoke<void>("clear_logs");
}
export async function getVergeConfig() {
return invoke<IVergeConfig>("get_verge_config");
}

View File

@@ -1,6 +1,10 @@
// Global log service so the app can collect logs from any page
import { create } from "zustand";
import { createAuthSockette } from "@/utils/websocket";
import {
fetchLogsViaIPC,
startLogsStreaming,
clearLogs as clearLogsIPC,
} from "@/services/ipc-log-service";
import dayjs from "dayjs";
// Maximum number of logs
@@ -24,6 +28,7 @@ interface GlobalLogStore {
setCurrentLevel: (level: LogLevel) => void;
clearLogs: () => void;
appendLog: (log: ILogItem) => void;
setLogs: (logs: ILogItem[]) => void;
}
// Create the global state store
@@ -43,124 +48,117 @@ export const useGlobalLogStore = create<GlobalLogStore>((set) => ({
: [...state.logs, log];
return { logs: newLogs };
}),
setLogs: (logs: ILogItem[]) => set({ logs }),
}));
// Build the WebSocket URL
const buildWSUrl = (server: string, logLevel: LogLevel) => {
let baseUrl = `${server}/logs`;
// Only handle the log level parameter
if (logLevel && logLevel !== "info") {
const level = logLevel === "all" ? "debug" : logLevel;
baseUrl += `?level=${level}`;
// IPC log fetch function
export const fetchLogsViaIPCPeriodically = async (
logLevel: LogLevel = "info",
) => {
try {
const logs = await fetchLogsViaIPC(logLevel);
useGlobalLogStore.getState().setLogs(logs);
console.log(`[GlobalLog-IPC] Fetched ${logs.length} logs`);
} catch (error) {
console.error("[GlobalLog-IPC] Failed to fetch logs:", error);
}
return baseUrl;
};
// Initialize the global log service
let globalLogSocket: any = null;
// Initialize the global log service (IPC-only mode)
let ipcPollingInterval: number | null = null;
let isInitializing = false; // initialization guard flag
export const initGlobalLogService = (
server: string,
secret: string,
enabled: boolean = false,
logLevel: LogLevel = "info",
) => {
const { appendLog, setEnabled } = useGlobalLogStore.getState();
// Prevent duplicate initialization
if (isInitializing) {
console.log("[GlobalLog-IPC] Initializing, skipping duplicate call");
return;
}
const { setEnabled, setCurrentLevel } = useGlobalLogStore.getState();
// Update the enabled state
setEnabled(enabled);
setCurrentLevel(logLevel);
// Do not initialize if disabled or the server info is missing
if (!enabled || !server) {
closeGlobalLogConnection();
return;
}
// Close the existing connection
closeGlobalLogConnection();
// Create a new WebSocket connection using the new auth method
const wsUrl = buildWSUrl(server, logLevel);
console.log(`[GlobalLog] Connecting to the log service: ${wsUrl}`);
if (!server) {
console.warn("[GlobalLog] Server address is empty; cannot connect");
return;
}
globalLogSocket = createAuthSockette(wsUrl, secret, {
timeout: 8000, // 8-second timeout
onmessage(event) {
try {
const data = JSON.parse(event.data) as ILogItem;
const time = dayjs().format("MM-DD HH:mm:ss");
appendLog({ ...data, time });
} catch (error) {
console.error("[GlobalLog] 解析日志数据失败:", error);
}
},
onerror(event) {
console.error("[GlobalLog] WebSocket连接错误", event);
// 记录错误状态但不关闭连接,让重连机制起作用
useGlobalLogStore.setState({ isConnected: false });
// 只有在重试彻底失败后才关闭连接
if (
event &&
typeof event === "object" &&
"type" in event &&
event.type === "error"
) {
console.error("[GlobalLog] 连接已彻底失败,关闭连接");
closeGlobalLogConnection();
}
},
onclose(event) {
console.log("[GlobalLog] WebSocket连接关闭", event);
useGlobalLogStore.setState({ isConnected: false });
},
onopen(event) {
console.log("[GlobalLog] WebSocket连接已建立", event);
useGlobalLogStore.setState({ isConnected: true });
},
});
};
// Close the global log connection
export const closeGlobalLogConnection = () => {
if (globalLogSocket) {
globalLogSocket.close();
globalLogSocket = null;
// Do not initialize if disabled
if (!enabled) {
clearIpcPolling();
useGlobalLogStore.setState({ isConnected: false });
return;
}
isInitializing = true;
// Use IPC streaming mode
console.log("[GlobalLog-IPC] Enabling the IPC streaming log service");
// Start streaming monitoring
startLogsStreaming(logLevel);
// Fetch the logs once immediately
fetchLogsViaIPCPeriodically(logLevel);
// Set up periodic polling to sync data from the streaming cache
clearIpcPolling();
ipcPollingInterval = setInterval(() => {
fetchLogsViaIPCPeriodically(logLevel);
}, 1000); // sync the streaming cache every second
// Set the connection state
useGlobalLogStore.setState({ isConnected: true });
isInitializing = false;
};
// Clear the IPC polling timer
const clearIpcPolling = () => {
if (ipcPollingInterval) {
clearInterval(ipcPollingInterval);
ipcPollingInterval = null;
console.log("[GlobalLog-IPC] Polling stopped");
}
};
// Change the log level
export const changeLogLevel = (
level: LogLevel,
server: string,
secret: string,
) => {
// Close the global log connection (IPC-only mode)
export const closeGlobalLogConnection = () => {
clearIpcPolling();
isInitializing = false; // reset the initialization flag
useGlobalLogStore.setState({ isConnected: false });
console.log("[GlobalLog-IPC] Log service closed");
};
// Change the log level (IPC-only mode)
export const changeLogLevel = (level: LogLevel) => {
const { enabled } = useGlobalLogStore.getState();
useGlobalLogStore.setState({ currentLevel: level });
if (enabled && server) {
initGlobalLogService(server, secret, enabled, level);
// Skip while initializing to avoid starting the stream twice
if (isInitializing) {
console.log("[GlobalLog-IPC] Initializing, skipping level-change stream start");
return;
}
if (enabled) {
// Restart monitoring in IPC streaming mode
startLogsStreaming(level);
fetchLogsViaIPCPeriodically(level);
}
};
// Toggle the enabled state
export const toggleLogEnabled = (server: string, secret: string) => {
// Toggle the enabled state (IPC-only mode)
export const toggleLogEnabled = () => {
const { enabled, currentLevel } = useGlobalLogStore.getState();
const newEnabled = !enabled;
useGlobalLogStore.setState({ enabled: newEnabled });
if (newEnabled && server) {
initGlobalLogService(server, secret, newEnabled, currentLevel);
if (newEnabled) {
// Start directly in IPC mode
initGlobalLogService(newEnabled, currentLevel);
} else {
closeGlobalLogConnection();
}
@@ -169,6 +167,8 @@ export const toggleLogEnabled = (server: string, secret: string) => {
// Log clearing function
export const clearGlobalLogs = () => {
useGlobalLogStore.getState().clearLogs();
// Also clear the backend streaming cache
clearLogsIPC();
};
// Custom hook for reading the filtered log data

View File

@@ -0,0 +1,63 @@
// IPC-based log service using Tauri commands with streaming support
import {
getClashLogs,
startLogsMonitoring,
clearLogs as clearLogsCmd,
} from "@/services/cmds";
import dayjs from "dayjs";
export type LogLevel = "warning" | "info" | "debug" | "error" | "all";
export interface ILogItem {
time?: string;
type: string;
payload: string;
[key: string]: any;
}
// Start logs monitoring with specified level
export const startLogsStreaming = async (logLevel: LogLevel = "info") => {
try {
const level = logLevel === "all" ? undefined : logLevel;
await startLogsMonitoring(level);
console.log(
`[IPC-LogService] Started logs monitoring with level: ${logLevel}`,
);
} catch (error) {
console.error("[IPC-LogService] Failed to start logs monitoring:", error);
}
};
// Fetch logs using IPC command (now from streaming cache)
export const fetchLogsViaIPC = async (
logLevel: LogLevel = "info",
): Promise<ILogItem[]> => {
try {
const level = logLevel === "all" ? undefined : logLevel;
const response = await getClashLogs(level);
// The response should be in the format expected by the frontend
// Transform the logs to match the expected format
if (Array.isArray(response)) {
return response.map((log: any) => ({
...log,
time: log.time || dayjs().format("HH:mm:ss"),
}));
}
return [];
} catch (error) {
console.error("[IPC-LogService] Failed to fetch logs:", error);
return [];
}
};
// Clear logs
export const clearLogs = async () => {
try {
await clearLogsCmd();
console.log("[IPC-LogService] Logs cleared");
} catch (error) {
console.error("[IPC-LogService] Failed to clear logs:", error);
}
};