feat: optimize system monitor with async I/O and traffic since reboot
- Replace synchronous file operations with async aiofiles
- Implement concurrent data gathering using asyncio.gather()
- Move API calls to thread pool executor
- Add LRU cache for config file reads
- Optimize parsing functions for single-pass processing
- Reduce measurement intervals for faster response times
- Add system uptime and boot time display
- Track total network traffic since last reboot from /proc/net/dev
- Separate system traffic (since reboot) from user traffic (all-time)
- Add human-readable uptime formatting
- Enhance output with clear traffic categorization
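A minimal sketch of the concurrency pattern this commit describes (illustrative names only, not the project's actual functions): async /proc reads are awaited together with asyncio.gather(), while a blocking API call is pushed onto a thread pool executor so it does not stall the event loop.

import asyncio
from concurrent.futures import ThreadPoolExecutor

import aiofiles


async def read_proc(path: str) -> str:
    # Non-blocking read of a /proc pseudo-file; empty string if it is missing.
    try:
        async with aiofiles.open(path, "r") as f:
            return await f.read()
    except FileNotFoundError:
        return ""


def blocking_api_call() -> int:
    # Stand-in for a synchronous client call (e.g. an HTTP API client)
    # that must not block the event loop.
    return 0


async def main() -> None:
    loop = asyncio.get_running_loop()
    with ThreadPoolExecutor() as pool:
        # Concurrent gathering: both file reads and the thread-pool call
        # run at the same time instead of back to back.
        stat, meminfo, users = await asyncio.gather(
            read_proc("/proc/stat"),
            read_proc("/proc/meminfo"),
            loop.run_in_executor(pool, blocking_api_call),
        )
    print(len(stat), len(meminfo), users)


if __name__ == "__main__":
    asyncio.run(main())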
@@ -2,12 +2,17 @@
import sys
import json
from hysteria2_api import Hysteria2Client
import asyncio
import aiofiles
import time
from concurrent.futures import ThreadPoolExecutor
from functools import lru_cache
from hysteria2_api import Hysteria2Client
from init_paths import *
from paths import *


@lru_cache(maxsize=1)
def get_secret() -> str:
    if not CONFIG_FILE.exists():
        print("Error: config.json file not found!", file=sys.stderr)
@@ -25,24 +30,74 @@ def get_secret() -> str:
def convert_bytes(bytes_val: int) -> str:
    units = [("TB", 1 << 40), ("GB", 1 << 30), ("MB", 1 << 20), ("KB", 1 << 10)]
    for unit, factor in units:
        if bytes_val >= factor:
            return f"{bytes_val / factor:.2f} {unit}"
    if bytes_val >= (1 << 40):
        return f"{bytes_val / (1 << 40):.2f} TB"
    elif bytes_val >= (1 << 30):
        return f"{bytes_val / (1 << 30):.2f} GB"
    elif bytes_val >= (1 << 20):
        return f"{bytes_val / (1 << 20):.2f} MB"
    elif bytes_val >= (1 << 10):
        return f"{bytes_val / (1 << 10):.2f} KB"
    return f"{bytes_val} B"

def get_cpu_usage(interval: float = 0.1) -> float:
    def read_cpu_times():
        with open("/proc/stat") as f:
            line = f.readline()
        fields = list(map(int, line.strip().split()[1:]))
        idle, total = fields[3], sum(fields)
        return idle, total

def convert_speed(bytes_per_second: int) -> str:
    if bytes_per_second >= (1 << 40):
        return f"{bytes_per_second / (1 << 40):.2f} TB/s"
    elif bytes_per_second >= (1 << 30):
        return f"{bytes_per_second / (1 << 30):.2f} GB/s"
    elif bytes_per_second >= (1 << 20):
        return f"{bytes_per_second / (1 << 20):.2f} MB/s"
    elif bytes_per_second >= (1 << 10):
        return f"{bytes_per_second / (1 << 10):.2f} KB/s"
    return f"{int(bytes_per_second)} B/s"

    idle1, total1 = read_cpu_times()
    time.sleep(interval)
    idle2, total2 = read_cpu_times()

async def read_file_async(filepath: str) -> str:
    try:
        async with aiofiles.open(filepath, 'r') as f:
            return await f.read()
    except FileNotFoundError:
        return ""

def format_uptime(seconds: float) -> str:
    seconds = int(seconds)
    days, remainder = divmod(seconds, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, _ = divmod(remainder, 60)
    return f"{days}d {hours}h {minutes}m"

async def get_uptime_and_boottime() -> tuple[str, str]:
    try:
        content = await read_file_async("/proc/uptime")
        uptime_seconds = float(content.split()[0])
        boot_time_epoch = time.time() - uptime_seconds
        boot_time_str = time.strftime("%Y-%m-%d %H:%M", time.localtime(boot_time_epoch))
        uptime_str = format_uptime(uptime_seconds)
        return uptime_str, boot_time_str
    except (FileNotFoundError, IndexError, ValueError):
        return "N/A", "N/A"

def parse_cpu_stats(content: str) -> tuple[int, int]:
    if not content:
        return 0, 0
    line = content.split('\n')[0]
    fields = list(map(int, line.strip().split()[1:]))
    idle, total = fields[3], sum(fields)
    return idle, total

async def get_cpu_usage(interval: float = 0.1) -> float:
    content1 = await read_file_async("/proc/stat")
    idle1, total1 = parse_cpu_stats(content1)

    await asyncio.sleep(interval)

    content2 = await read_file_async("/proc/stat")
    idle2, total2 = parse_cpu_stats(content2)

    idle_delta = idle2 - idle1
    total_delta = total2 - total1
@@ -50,23 +105,18 @@ def get_cpu_usage(interval: float = 0.1) -> float:
    return round(cpu_usage, 1)

def parse_meminfo(content: str) -> tuple[int, int]:
    if not content:
        return 0, 0

def get_memory_usage() -> tuple[int, int]:
    mem_info = {}
    try:
        with open("/proc/meminfo", "r") as f:
            for line in f:
                parts = line.split()
                if len(parts) >= 2:
                    key = parts[0].rstrip(':')
                    if parts[1].isdigit():
                        mem_info[key] = int(parts[1])
    except FileNotFoundError:
        print("Error: /proc/meminfo not found.", file=sys.stderr)
        return 0, 0
    except Exception as e:
        print(f"Error reading /proc/meminfo: {e}", file=sys.stderr)
        return 0, 0
    for line in content.split('\n'):
        if ':' in line:
            parts = line.split()
            if len(parts) >= 2:
                key = parts[0].rstrip(':')
                if parts[1].isdigit():
                    mem_info[key] = int(parts[1])

    mem_total_kb = mem_info.get("MemTotal", 0)
    mem_free_kb = mem_info.get("MemFree", 0)
@@ -76,19 +126,69 @@ def get_memory_usage() -> tuple[int, int]:
    used_kb = mem_total_kb - mem_free_kb - buffers_kb - cached_kb - sreclaimable_kb

    if used_kb < 0:
        used_kb = mem_total_kb - mem_info.get("MemAvailable", mem_total_kb)
    used_kb = max(0, used_kb)
    used_kb = max(0, used_kb)
    return mem_total_kb // 1024, used_kb // 1024

    total_mb = mem_total_kb // 1024
    used_mb = used_kb // 1024

    return total_mb, used_mb

async def get_memory_usage() -> tuple[int, int]:
    content = await read_file_async("/proc/meminfo")
    return parse_meminfo(content)

def parse_network_stats(content: str) -> tuple[int, int]:
    if not content:
        return 0, 0

def get_online_user_count(secret: str) -> int:
    rx_bytes, tx_bytes = 0, 0
    lines = content.split('\n')

    for line in lines[2:]:
        if not line.strip():
            continue
        parts = line.split()
        if len(parts) < 10:
            continue
        iface = parts[0].strip().replace(':', '')
        if iface == 'lo':
            continue
        try:
            rx_bytes += int(parts[1])
            tx_bytes += int(parts[9])
        except (IndexError, ValueError):
            continue

    return rx_bytes, tx_bytes

async def get_network_stats() -> tuple[int, int]:
    content = await read_file_async('/proc/net/dev')
    return parse_network_stats(content)


async def get_network_speed(interval: float = 0.5) -> tuple[int, int]:
    rx1, tx1 = await get_network_stats()
    await asyncio.sleep(interval)
    rx2, tx2 = await get_network_stats()

    rx_speed = (rx2 - rx1) / interval
    tx_speed = (tx2 - tx1) / interval
    return int(rx_speed), int(tx_speed)


def parse_connection_counts(tcp_content: str, udp_content: str) -> tuple[int, int]:
    tcp_count = len(tcp_content.split('\n')) - 2 if tcp_content else 0
    udp_count = len(udp_content.split('\n')) - 2 if udp_content else 0
    return max(0, tcp_count), max(0, udp_count)


async def get_connection_counts() -> tuple[int, int]:
    tcp_task = read_file_async('/proc/net/tcp')
    udp_task = read_file_async('/proc/net/udp')
    tcp_content, udp_content = await asyncio.gather(tcp_task, udp_task)
    return parse_connection_counts(tcp_content, udp_content)

def get_online_user_count_sync(secret: str) -> int:
    try:
        client = Hysteria2Client(
            base_url=API_BASE_URL,
@@ -101,47 +201,83 @@ def get_online_user_count(secret: str) -> int:
        return 0


def get_total_traffic() -> tuple[int, int]:
async def get_online_user_count(secret: str) -> int:
    loop = asyncio.get_event_loop()
    with ThreadPoolExecutor() as executor:
        return await loop.run_in_executor(executor, get_online_user_count_sync, secret)


def parse_total_traffic(content: str) -> tuple[int, int]:
    if not content:
        return 0, 0

    try:
        users = json.loads(content)
        total_upload = sum(int(user_data.get("upload_bytes", 0) or 0) for user_data in users.values())
        total_download = sum(int(user_data.get("download_bytes", 0) or 0) for user_data in users.values())
        return total_upload, total_download
    except (json.JSONDecodeError, ValueError, AttributeError):
        return 0, 0

async def get_user_traffic() -> tuple[int, int]:
    if not USERS_FILE.exists():
        return 0, 0

    try:
        with USERS_FILE.open() as f:
            users = json.load(f)

        total_upload = 0
        total_download = 0

        for user_data in users.values():
            total_upload += int(user_data.get("upload_bytes", 0) or 0)
            total_download += int(user_data.get("download_bytes", 0) or 0)

        return total_upload, total_download
        async with aiofiles.open(USERS_FILE, 'r') as f:
            content = await f.read()
        return parse_total_traffic(content)
    except Exception as e:
        print(f"Error parsing traffic data: {e}", file=sys.stderr)
        return 0, 0

def main():
async def main():
    secret = get_secret()

    cpu_usage = get_cpu_usage()
    mem_total, mem_used = get_memory_usage()
    online_users = get_online_user_count(secret)
    tasks = [
        get_uptime_and_boottime(),
        get_memory_usage(),
        get_connection_counts(),
        get_online_user_count(secret),
        get_user_traffic(),
        get_cpu_usage(0.1),
        get_network_speed(0.3),
        get_network_stats()
    ]

    print(f"📈 CPU Usage: {cpu_usage}")
    print(f"📋 Total RAM: {mem_total}MB")
    print(f"💻 Used RAM: {mem_used}MB")
    results = await asyncio.gather(*tasks)

    uptime_str, boot_time_str = results[0]
    mem_total, mem_used = results[1]
    tcp_connections, udp_connections = results[2]
    online_users = results[3]
    user_upload, user_download = results[4]
    cpu_usage = results[5]
    download_speed, upload_speed = results[6]
    reboot_rx, reboot_tx = results[7]

    print(f"🕒 Uptime: {uptime_str} (since {boot_time_str})")
    print(f"📈 CPU Usage: {cpu_usage}%")
    print(f"💻 Used RAM: {mem_used}MB / {mem_total}MB")
    print(f"👥 Online Users: {online_users}")
    print()

    total_upload, total_download = get_total_traffic()

    print(f"🔼 Uploaded Traffic: {convert_bytes(total_upload)}")
    print(f"🔽 Downloaded Traffic: {convert_bytes(total_download)}")
    print(f"📊 Total Traffic: {convert_bytes(total_upload + total_download)}")
    print(f"🔼 Upload Speed: {convert_speed(upload_speed)}")
    print(f"🔽 Download Speed: {convert_speed(download_speed)}")
    print(f"📡 TCP Connections: {tcp_connections}")
    print(f"📡 UDP Connections: {udp_connections}")
    print()
    print("📊 Traffic Since Last Reboot:")
    print(f" 🔼 Total Uploaded: {convert_bytes(reboot_tx)}")
    print(f" 🔽 Total Downloaded: {convert_bytes(reboot_rx)}")
    print(f" 📈 Combined Traffic: {convert_bytes(reboot_tx + reboot_rx)}")
    print()
    print("📊 User Traffic (All Time):")
    print(f" 🔼 Uploaded Traffic: {convert_bytes(user_upload)}")
    print(f" 🔽 Downloaded Traffic: {convert_bytes(user_download)}")
    print(f" 📈 Total Traffic: {convert_bytes(user_upload + user_download)}")

if __name__ == "__main__":
    main()
    asyncio.run(main())
@@ -22,6 +22,7 @@ urllib3==2.5.0
yarl==1.20.1
hysteria2-api==0.1.3
schedule==1.2.2
aiofiles==24.1.0

# webpanel
annotated-types==0.7.0