Merge pull request from cppla/dev

Add real-time IO statistics
cppla 2022-04-01 16:29:53 +08:00 committed by GitHub
commit 4c0f4b94e8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 209 additions and 56 deletions

@@ -6,7 +6,7 @@
[![Python Support](https://img.shields.io/badge/python-2.7%2B%20-blue.svg)](https://github.com/cppla/ServerStatus)
[![C++ Compiler](http://img.shields.io/badge/C++-GNU-blue.svg?style=flat&logo=cplusplus)](https://github.com/cppla/ServerStatus)
[![License](https://img.shields.io/badge/license-MIT-4EB1BA.svg?style=flat-square)](https://github.com/cppla/ServerStatus)
[![Version](https://img.shields.io/badge/Version-Beta%201.0.7-red)](https://github.com/cppla/ServerStatus)
[![Version](https://img.shields.io/badge/Version-Beta%201.0.8-red)](https://github.com/cppla/ServerStatus)
![Latest Version](http://dl.cpp.la/Archive/serverstatus-1.0.2.png)

@@ -1,7 +1,7 @@
#!/usr/bin/env python3
# coding: utf-8
# Update by : https://github.com/cppla/ServerStatus, Update date: 20211009
# Version 1.0.2, supports Python 2.7 to 3.9
# Update by : https://github.com/cppla/ServerStatus, Update date: 20220323
# Version 1.0.3, supports Python 2.7 to 3.9
# Supported OS: Linux, OSX, FreeBSD, OpenBSD and NetBSD, both 32-bit and 64-bit architectures
# Note: in most cases only SERVER and USER need to be changed. The packet-loss probe targets can be customized, e.g. CU = "www.facebook.com".
@@ -9,16 +9,15 @@ SERVER = "127.0.0.1"
USER = "s01"
PORT = 35601
PASSWORD = "USER_DEFAULT_PASSWORD"
INTERVAL = 1
PROBEPORT = 80
PROBE_PROTOCOL_PREFER = "ipv4" # ipv4, ipv6
PING_PACKET_HISTORY_LEN = 100
PORT = 35601
CU = "cu.tz.cloudcpp.com"
CT = "ct.tz.cloudcpp.com"
CM = "cm.tz.cloudcpp.com"
PROBEPORT = 80
PROBE_PROTOCOL_PREFER = "ipv4" # ipv4, ipv6
PING_PACKET_HISTORY_LEN = 100
INTERVAL = 1
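These defaults can also be overridden on the command line; the argv parsing near the end of the script accepts KEY=VALUE pairs. An illustrative invocation (the client-linux.py script name follows the stock repo layout; values are examples only):

python3 client-linux.py SERVER=127.0.0.1 USER=s01 INTERVAL=2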
import socket
import time
@@ -147,6 +146,10 @@ netSpeed = {
    'avgrx': 0,
    'avgtx': 0
}
diskIO = {
    'read': 0,
    'write': 0
}
def _ping_thread(host, mark, port):
    lostPacket = 0
@@ -210,7 +213,72 @@ def _net_speed():
        netSpeed["avgtx"] = avgtx
        time.sleep(INTERVAL)
def get_realtime_date():
def _disk_io():
    '''
    good luck for opensource! by: cpp.la
    Disk IO: because of IOPS, how much IO hurts performance on SSDs, HDDs, RAID cards,
    ZFS and other array technologies has to be judged against your own server.
    For example, this machine has mechanical disks doing heavy random small-file IO,
    so even a very low read/write rate can keep the disks waiting for long stretches.
    With sequential IO, an ordinary mechanical disk writing at 100MB/s can likewise
    keep the disk waiting for long stretches.
    Disk read/write totals can be off by 4k/8k: https://stackoverflow.com/questions/34413926/psutil-vs-dd-monitoring-disk-i-o
    :return:
    '''
    while True:
        # pre pid snapshot
        snapshot_first = {}
        # next pid snapshot
        snapshot_second = {}
        # read count snapshot
        snapshot_read = 0
        # write count snapshot
        snapshot_write = 0
        # process snapshot
        pid_snapshot = [str(i) for i in os.listdir("/proc") if i.isdigit() is True]
        for pid in pid_snapshot:
            try:
                with open("/proc/{}/io".format(pid)) as f:
                    pid_io = {}
                    for line in f.readlines():
                        if "read_bytes" in line:
                            pid_io["read"] = int(line.split("read_bytes:")[-1].strip())
                        elif "write_bytes" in line and "cancelled_write_bytes" not in line:
                            pid_io["write"] = int(line.split("write_bytes:")[-1].strip())
                    pid_io["name"] = open("/proc/{}/comm".format(pid), "r").read().strip()
                    snapshot_first[pid] = pid_io
            except:
                if pid in snapshot_first:
                    snapshot_first.pop(pid)
        time.sleep(INTERVAL)
        for pid in pid_snapshot:
            try:
                with open("/proc/{}/io".format(pid)) as f:
                    pid_io = {}
                    for line in f.readlines():
                        if "read_bytes" in line:
                            pid_io["read"] = int(line.split("read_bytes:")[-1].strip())
                        elif "write_bytes" in line and "cancelled_write_bytes" not in line:
                            pid_io["write"] = int(line.split("write_bytes:")[-1].strip())
                    pid_io["name"] = open("/proc/{}/comm".format(pid), "r").read().strip()
                    snapshot_second[pid] = pid_io
            except:
                if pid in snapshot_first:
                    snapshot_first.pop(pid)
                if pid in snapshot_second:
                    snapshot_second.pop(pid)
        for k, v in snapshot_first.items():
            if snapshot_first[k]["name"] == snapshot_second[k]["name"] and snapshot_first[k]["name"] != "bash":
                snapshot_read += (snapshot_second[k]["read"] - snapshot_first[k]["read"])
                snapshot_write += (snapshot_second[k]["write"] - snapshot_first[k]["write"])
        diskIO["read"] = snapshot_read
        diskIO["write"] = snapshot_write
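For a quick sanity check of the fields parsed above, the same counters can be read for the current process; a minimal standalone sketch (Linux-only, separate from the client code):

# Print this process's cumulative IO counters, i.e. the same
# read_bytes/write_bytes fields that _disk_io() parses per pid.
with open("/proc/self/io") as f:
    counters = dict(line.split(":", 1) for line in f.read().splitlines())
print("read_bytes:", int(counters["read_bytes"]))
print("write_bytes:", int(counters["write_bytes"]))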
def get_realtime_data():
    '''
    real time get system data
    :return:
    '''
    t1 = threading.Thread(
        target=_ping_thread,
        kwargs={
@@ -238,14 +306,12 @@ def get_realtime_date():
    t4 = threading.Thread(
        target=_net_speed,
    )
    t1.setDaemon(True)
    t2.setDaemon(True)
    t3.setDaemon(True)
    t4.setDaemon(True)
    t1.start()
    t2.start()
    t3.start()
    t4.start()
    t5 = threading.Thread(
        target=_disk_io,
    )
    for ti in [t1, t2, t3, t4, t5]:
        ti.setDaemon(True)
        ti.start()
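A side note on this loop: setDaemon(True) still works but has been deprecated since Python 3.10; on current interpreters the equivalent spelling is the attribute form:

        ti.daemon = True  # preferred over ti.setDaemon(True) on Python 3.10+
        ti.start()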
def byte_str(object):
    '''
@@ -273,7 +339,7 @@ if __name__ == '__main__':
        elif 'INTERVAL' in argc:
            INTERVAL = int(argc.split('INTERVAL=')[-1])
    socket.setdefaulttimeout(30)
    get_realtime_date()
    get_realtime_data()
    while True:
        try:
            print("Connecting...")
@@ -343,6 +409,8 @@ if __name__ == '__main__':
                array['time_189'] = pingTime.get('189')
                array['time_10086'] = pingTime.get('10086')
                array['tcp'], array['udp'], array['process'], array['thread'] = tupd()
                array['io_read'] = diskIO.get("read")
                array['io_write'] = diskIO.get("write")
                s.send(byte_str("update " + json.dumps(array) + "\n"))
        except KeyboardInterrupt:

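With the two new fields, each report the client sends is a single line: the literal prefix "update " followed by the JSON payload. A trimmed, illustrative example (values invented, most fields omitted):

update {"uptime": 86400, "cpu": 3.2, "io_read": 1048576, "io_write": 524288}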
@@ -1,8 +1,8 @@
#!/usr/bin/env python3
# coding: utf-8
# Update by : https://github.com/cppla/ServerStatus, Update date: 20211009
# Update by : https://github.com/cppla/ServerStatus, Update date: 20220323
# Depends on the cross-platform psutil library
# Version 1.0.2, supports Python 2.7 to 3.9
# Version 1.0.3, supports Python 2.7 to 3.9
# Supported OS: Linux, Windows, OSX, Sun Solaris, FreeBSD, OpenBSD and NetBSD, both 32-bit and 64-bit architectures
# Note: in most cases only SERVER and USER need to be changed. The packet-loss probe targets can be customized, e.g. CU = "www.facebook.com".
@@ -10,16 +10,15 @@ SERVER = "127.0.0.1"
USER = "s01"
PORT = 35601
PASSWORD = "USER_DEFAULT_PASSWORD"
INTERVAL = 1
PROBEPORT = 80
PROBE_PROTOCOL_PREFER = "ipv4" # ipv4, ipv6
PING_PACKET_HISTORY_LEN = 100
PORT = 35601
CU = "cu.tz.cloudcpp.com"
CT = "ct.tz.cloudcpp.com"
CM = "cm.tz.cloudcpp.com"
PROBEPORT = 80
PROBE_PROTOCOL_PREFER = "ipv4" # ipv4, ipv6
PING_PACKET_HISTORY_LEN = 100
INTERVAL = 1
import socket
import time
@@ -131,6 +130,10 @@ netSpeed = {
    'avgrx': 0,
    'avgtx': 0
}
diskIO = {
    'read': 0,
    'write': 0
}
def _ping_thread(host, mark, port):
    lostPacket = 0
@@ -190,7 +193,62 @@ def _net_speed():
        netSpeed["avgtx"] = avgtx
        time.sleep(INTERVAL)
def get_realtime_date():
def _disk_io():
    """
    the code is by: https://github.com/giampaolo/psutil/blob/master/scripts/iotop.py
    good luck for opensource! modify: cpp.la
    Calculate IO usage by comparing IO statistics before and
    after the interval.
    Return a tuple including all currently running processes
    sorted by IO activity and total disks I/O activity.
    Disk IO: because of IOPS, how much IO hurts performance on SSDs, HDDs, RAID cards,
    ZFS and the like has to be judged against your own server.
    For example, this machine has mechanical disks doing heavy random small-file IO,
    so even a very low read/write rate can keep the disks waiting for long stretches.
    With sequential IO, an ordinary mechanical disk writing at 100MB/s can likewise
    keep the disk waiting for long stretches.
    Disk read/write totals can be off by 4k/8k: https://stackoverflow.com/questions/34413926/psutil-vs-dd-monitoring-disk-i-o
    """
    while True:
        # first get a list of all processes and disk io counters
        procs = [p for p in psutil.process_iter()]
        for p in procs[:]:
            try:
                p._before = p.io_counters()
            except psutil.Error:
                procs.remove(p)
                continue
        disks_before = psutil.disk_io_counters()
        # sleep some time; when INTERVAL == 1 this yields io read/write per second,
        # when INTERVAL > 1, io read/write per INTERVAL
        time.sleep(INTERVAL)
        # then retrieve the same info again
        for p in procs[:]:
            with p.oneshot():
                try:
                    p._after = p.io_counters()
                    p._cmdline = ' '.join(p.cmdline())
                    if not p._cmdline:
                        p._cmdline = p.name()
                    p._username = p.username()
                except (psutil.NoSuchProcess, psutil.ZombieProcess):
                    procs.remove(p)
        disks_after = psutil.disk_io_counters()
        # finally calculate results by comparing data before and
        # after the interval
        for p in procs:
            p._read_per_sec = p._after.read_bytes - p._before.read_bytes
            p._write_per_sec = p._after.write_bytes - p._before.write_bytes
            p._total = p._read_per_sec + p._write_per_sec
        diskIO["read"] = disks_after.read_bytes - disks_before.read_bytes
        diskIO["write"] = disks_after.write_bytes - disks_before.write_bytes
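Worth noting: only the machine-wide psutil.disk_io_counters() delta feeds diskIO; the per-process _before/_after bookkeeping is inherited from iotop.py and is never reported. The part that matters reduces to a short standalone sketch (assumes psutil is installed):

import time
import psutil

# Machine-wide disk counter delta over one interval; with INTERVAL = 1 this is bytes per second.
INTERVAL = 1
before = psutil.disk_io_counters()
time.sleep(INTERVAL)
after = psutil.disk_io_counters()
print("read:", after.read_bytes - before.read_bytes)
print("write:", after.write_bytes - before.write_bytes)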
def get_realtime_data():
    '''
    real time get system data
    :return:
    '''
    t1 = threading.Thread(
        target=_ping_thread,
        kwargs={
@@ -218,14 +276,12 @@ def get_realtime_date():
    t4 = threading.Thread(
        target=_net_speed,
    )
    t1.setDaemon(True)
    t2.setDaemon(True)
    t3.setDaemon(True)
    t4.setDaemon(True)
    t1.start()
    t2.start()
    t3.start()
    t4.start()
    t5 = threading.Thread(
        target=_disk_io,
    )
    for ti in [t1, t2, t3, t4, t5]:
        ti.setDaemon(True)
        ti.start()
def byte_str(object):
    '''
@@ -253,7 +309,7 @@ if __name__ == '__main__':
        elif 'INTERVAL' in argc:
            INTERVAL = int(argc.split('INTERVAL=')[-1])
    socket.setdefaulttimeout(30)
    get_realtime_date()
    get_realtime_data()
    while 1:
        try:
            print("Connecting...")
@@ -324,6 +380,8 @@ if __name__ == '__main__':
                array['time_189'] = pingTime.get('189')
                array['time_10086'] = pingTime.get('10086')
                array['tcp'], array['udp'], array['process'], array['thread'] = tupd()
                array['io_read'] = diskIO.get("read")
                array['io_write'] = diskIO.get("write")
                s.send(byte_str("update " + json.dumps(array) + "\n"))
        except KeyboardInterrupt:

@@ -178,6 +178,10 @@ int CMain::HandleMessage(int ClientNetID, char *pMessage)
pClient->m_Stats.m_HDDTotal = rStart["hdd_total"].u.integer;
if(rStart["hdd_used"].type)
pClient->m_Stats.m_HDDUsed = rStart["hdd_used"].u.integer;
if(rStart["io_read"].type)
pClient->m_Stats.m_IORead = rStart["io_read"].u.integer;
if(rStart["io_write"].type)
pClient->m_Stats.m_IOWrite = rStart["io_write"].u.integer;
if(rStart["cpu"].type)
pClient->m_Stats.m_CPU = rStart["cpu"].u.dbl;
if(rStart["online4"].type && pClient->m_ClientNetType == NETTYPE_IPV6)
@@ -190,19 +194,19 @@ int CMain::HandleMessage(int ClientNetID, char *pMessage)
if(m_Config.m_Verbose)
{
if(rStart["online4"].type)
dbg_msg("main", "Online4: %s\nUptime: %" PRId64 "\nLoad_1: %f\nLoad_5: %f\nLoad_15: %f\nPing_10010: %f\nPing_189: %f\nPing_10086: %f\nTime_10010: %" PRId64 "\nTime_189: %" PRId64 "\nTime_10086: %" PRId64 "\nTcp_count: %" PRId64 "\nUdp_count: %" PRId64 "\nprocess_count: %" PRId64 "\nthread_count: %" PRId64 "\nNetworkRx: %" PRId64 "\nNetworkTx: %" PRId64 "\nNetworkIN: %" PRId64 "\nNetworkOUT: %" PRId64 "\nMemTotal: %" PRId64 "\nMemUsed: %" PRId64 "\nSwapTotal: %" PRId64 "\nSwapUsed: %" PRId64 "\nHDDTotal: %" PRId64 "\nHDDUsed: %" PRId64 "\nCPU: %f\n",
dbg_msg("main", "Online4: %s\nUptime: %" PRId64 "\nLoad_1: %f\nLoad_5: %f\nLoad_15: %f\nPing_10010: %f\nPing_189: %f\nPing_10086: %f\nTime_10010: %" PRId64 "\nTime_189: %" PRId64 "\nTime_10086: %" PRId64 "\nTcp_count: %" PRId64 "\nUdp_count: %" PRId64 "\nprocess_count: %" PRId64 "\nthread_count: %" PRId64 "\nNetworkRx: %" PRId64 "\nNetworkTx: %" PRId64 "\nNetworkIN: %" PRId64 "\nNetworkOUT: %" PRId64 "\nMemTotal: %" PRId64 "\nMemUsed: %" PRId64 "\nSwapTotal: %" PRId64 "\nSwapUsed: %" PRId64 "\nHDDTotal: %" PRId64 "\nHDDUsed: %" PRId64 "\nCPU: %f\nIORead: %" PRId64 "\nIOWrite: %" PRId64 "\n",
rStart["online4"].u.boolean ? "true" : "false",
pClient->m_Stats.m_Uptime,
pClient->m_Stats.m_Load_1, pClient->m_Stats.m_Load_5, pClient->m_Stats.m_Load_15, pClient->m_Stats.m_ping_10010, pClient->m_Stats.m_ping_189, pClient->m_Stats.m_ping_10086, pClient->m_Stats.m_time_10010, pClient->m_Stats.m_time_189, pClient->m_Stats.m_time_10086,pClient->m_Stats.m_tcpCount,pClient->m_Stats.m_udpCount,pClient->m_Stats.m_processCount,pClient->m_Stats.m_threadCount,pClient->m_Stats.m_NetworkRx, pClient->m_Stats.m_NetworkTx, pClient->m_Stats.m_NetworkIN, pClient->m_Stats.m_NetworkOUT, pClient->m_Stats.m_MemTotal, pClient->m_Stats.m_MemUsed, pClient->m_Stats.m_SwapTotal, pClient->m_Stats.m_SwapUsed, pClient->m_Stats.m_HDDTotal, pClient->m_Stats.m_HDDUsed, pClient->m_Stats.m_CPU);
pClient->m_Stats.m_Load_1, pClient->m_Stats.m_Load_5, pClient->m_Stats.m_Load_15, pClient->m_Stats.m_ping_10010, pClient->m_Stats.m_ping_189, pClient->m_Stats.m_ping_10086, pClient->m_Stats.m_time_10010, pClient->m_Stats.m_time_189, pClient->m_Stats.m_time_10086,pClient->m_Stats.m_tcpCount,pClient->m_Stats.m_udpCount,pClient->m_Stats.m_processCount,pClient->m_Stats.m_threadCount,pClient->m_Stats.m_NetworkRx, pClient->m_Stats.m_NetworkTx, pClient->m_Stats.m_NetworkIN, pClient->m_Stats.m_NetworkOUT, pClient->m_Stats.m_MemTotal, pClient->m_Stats.m_MemUsed, pClient->m_Stats.m_SwapTotal, pClient->m_Stats.m_SwapUsed, pClient->m_Stats.m_HDDTotal, pClient->m_Stats.m_HDDUsed, pClient->m_Stats.m_CPU, pClient->m_Stats.m_IORead, pClient->m_Stats.m_IOWrite);
else if(rStart["online6"].type)
dbg_msg("main", "Online6: %s\nUptime: %" PRId64 "\nLoad_1: %f\nLoad_5: %f\nLoad_15: %f\nPing_10010: %f\nPing_189: %f\nPing_10086: %f\nTime_10010: %" PRId64 "\nTime_189: %" PRId64 "\nTime_10086: %" PRId64 "\nTcp_count: %" PRId64 "\nUdp_count: %" PRId64 "\nprocess_count: %" PRId64 "\nthread_count: %" PRId64 "\nNetworkRx: %" PRId64 "\nNetworkTx: %" PRId64 "\nNetworkIN: %" PRId64 "\nNetworkOUT: %" PRId64 "\nMemTotal: %" PRId64 "\nMemUsed: %" PRId64 "\nSwapTotal: %" PRId64 "\nSwapUsed: %" PRId64 "\nHDDTotal: %" PRId64 "\nHDDUsed: %" PRId64 "\nCPU: %f\n",
dbg_msg("main", "Online6: %s\nUptime: %" PRId64 "\nLoad_1: %f\nLoad_5: %f\nLoad_15: %f\nPing_10010: %f\nPing_189: %f\nPing_10086: %f\nTime_10010: %" PRId64 "\nTime_189: %" PRId64 "\nTime_10086: %" PRId64 "\nTcp_count: %" PRId64 "\nUdp_count: %" PRId64 "\nprocess_count: %" PRId64 "\nthread_count: %" PRId64 "\nNetworkRx: %" PRId64 "\nNetworkTx: %" PRId64 "\nNetworkIN: %" PRId64 "\nNetworkOUT: %" PRId64 "\nMemTotal: %" PRId64 "\nMemUsed: %" PRId64 "\nSwapTotal: %" PRId64 "\nSwapUsed: %" PRId64 "\nHDDTotal: %" PRId64 "\nHDDUsed: %" PRId64 "\nCPU: %f\nIORead: %" PRId64 "\nIOWrite: %" PRId64 "\n",
rStart["online6"].u.boolean ? "true" : "false",
pClient->m_Stats.m_Uptime,
pClient->m_Stats.m_Load_1, pClient->m_Stats.m_Load_5, pClient->m_Stats.m_Load_15, pClient->m_Stats.m_ping_10010, pClient->m_Stats.m_ping_189, pClient->m_Stats.m_ping_10086, pClient->m_Stats.m_time_10010, pClient->m_Stats.m_time_189, pClient->m_Stats.m_time_10086,pClient->m_Stats.m_tcpCount,pClient->m_Stats.m_udpCount,pClient->m_Stats.m_processCount,pClient->m_Stats.m_threadCount,pClient->m_Stats.m_NetworkRx, pClient->m_Stats.m_NetworkTx, pClient->m_Stats.m_NetworkIN, pClient->m_Stats.m_NetworkOUT, pClient->m_Stats.m_MemTotal, pClient->m_Stats.m_MemUsed, pClient->m_Stats.m_SwapTotal, pClient->m_Stats.m_SwapUsed, pClient->m_Stats.m_HDDTotal, pClient->m_Stats.m_HDDUsed, pClient->m_Stats.m_CPU);
pClient->m_Stats.m_Load_1, pClient->m_Stats.m_Load_5, pClient->m_Stats.m_Load_15, pClient->m_Stats.m_ping_10010, pClient->m_Stats.m_ping_189, pClient->m_Stats.m_ping_10086, pClient->m_Stats.m_time_10010, pClient->m_Stats.m_time_189, pClient->m_Stats.m_time_10086,pClient->m_Stats.m_tcpCount,pClient->m_Stats.m_udpCount,pClient->m_Stats.m_processCount,pClient->m_Stats.m_threadCount,pClient->m_Stats.m_NetworkRx, pClient->m_Stats.m_NetworkTx, pClient->m_Stats.m_NetworkIN, pClient->m_Stats.m_NetworkOUT, pClient->m_Stats.m_MemTotal, pClient->m_Stats.m_MemUsed, pClient->m_Stats.m_SwapTotal, pClient->m_Stats.m_SwapUsed, pClient->m_Stats.m_HDDTotal, pClient->m_Stats.m_HDDUsed, pClient->m_Stats.m_CPU, pClient->m_Stats.m_IORead, pClient->m_Stats.m_IOWrite);
else
dbg_msg("main", "Uptime: %" PRId64 "\nLoad_1: %f\nLoad_5: %f\nLoad_15: %f\nPing_10010: %f\nPing_189: %f\nPing_10086: %f\nTime_10010: %" PRId64 "\nTime_189: %" PRId64 "\nTime_10086: %" PRId64 "\nTcp_count: %" PRId64 "\nUdp_count: %" PRId64 "\nprocess_count: %" PRId64 "\nthread_count: %" PRId64 "\nNetworkRx: %" PRId64 "\nNetworkTx: %" PRId64 "\nNetworkIN: %" PRId64 "\nNetworkOUT: %" PRId64 "\nMemTotal: %" PRId64 "\nMemUsed: %" PRId64 "\nSwapTotal: %" PRId64 "\nSwapUsed: %" PRId64 "\nHDDTotal: %" PRId64 "\nHDDUsed: %" PRId64 "\nCPU: %f\n",
dbg_msg("main", "Uptime: %" PRId64 "\nLoad_1: %f\nLoad_5: %f\nLoad_15: %f\nPing_10010: %f\nPing_189: %f\nPing_10086: %f\nTime_10010: %" PRId64 "\nTime_189: %" PRId64 "\nTime_10086: %" PRId64 "\nTcp_count: %" PRId64 "\nUdp_count: %" PRId64 "\nprocess_count: %" PRId64 "\nthread_count: %" PRId64 "\nNetworkRx: %" PRId64 "\nNetworkTx: %" PRId64 "\nNetworkIN: %" PRId64 "\nNetworkOUT: %" PRId64 "\nMemTotal: %" PRId64 "\nMemUsed: %" PRId64 "\nSwapTotal: %" PRId64 "\nSwapUsed: %" PRId64 "\nHDDTotal: %" PRId64 "\nHDDUsed: %" PRId64 "\nCPU: %f\nIORead: %" PRId64 "\nIOWrite: %" PRId64 "\n",
pClient->m_Stats.m_Uptime,
pClient->m_Stats.m_Load_1, pClient->m_Stats.m_Load_5, pClient->m_Stats.m_Load_15, pClient->m_Stats.m_ping_10010, pClient->m_Stats.m_ping_189, pClient->m_Stats.m_ping_10086, pClient->m_Stats.m_time_10010, pClient->m_Stats.m_time_189, pClient->m_Stats.m_time_10086,pClient->m_Stats.m_tcpCount,pClient->m_Stats.m_udpCount,pClient->m_Stats.m_processCount,pClient->m_Stats.m_threadCount,pClient->m_Stats.m_NetworkRx, pClient->m_Stats.m_NetworkTx, pClient->m_Stats.m_NetworkIN, pClient->m_Stats.m_NetworkOUT, pClient->m_Stats.m_MemTotal, pClient->m_Stats.m_MemUsed, pClient->m_Stats.m_SwapTotal, pClient->m_Stats.m_SwapUsed, pClient->m_Stats.m_HDDTotal, pClient->m_Stats.m_HDDUsed, pClient->m_Stats.m_CPU);
pClient->m_Stats.m_Load_1, pClient->m_Stats.m_Load_5, pClient->m_Stats.m_Load_15, pClient->m_Stats.m_ping_10010, pClient->m_Stats.m_ping_189, pClient->m_Stats.m_ping_10086, pClient->m_Stats.m_time_10010, pClient->m_Stats.m_time_189, pClient->m_Stats.m_time_10086,pClient->m_Stats.m_tcpCount,pClient->m_Stats.m_udpCount,pClient->m_Stats.m_processCount,pClient->m_Stats.m_threadCount,pClient->m_Stats.m_NetworkRx, pClient->m_Stats.m_NetworkTx, pClient->m_Stats.m_NetworkIN, pClient->m_Stats.m_NetworkOUT, pClient->m_Stats.m_MemTotal, pClient->m_Stats.m_MemUsed, pClient->m_Stats.m_SwapTotal, pClient->m_Stats.m_SwapUsed, pClient->m_Stats.m_HDDTotal, pClient->m_Stats.m_HDDUsed, pClient->m_Stats.m_CPU, pClient->m_Stats.m_IORead, pClient->m_Stats.m_IOWrite);
}
// clean up
@@ -274,7 +278,7 @@ void CMain::JSONUpdateThread(void *pUser)
}
str_format(pBuf, sizeof(aFileBuf) - (pBuf - aFileBuf),
"{ \"name\": \"%s\",\"type\": \"%s\",\"host\": \"%s\",\"location\": \"%s\",\"online4\": %s, \"online6\": %s, \"uptime\": \"%s\",\"load_1\": %.2f, \"load_5\": %.2f, \"load_15\": %.2f,\"ping_10010\": %.2f, \"ping_189\": %.2f, \"ping_10086\": %.2f,\"time_10010\": %" PRId64 ", \"time_189\": %" PRId64 ", \"time_10086\": %" PRId64 ", \"tcp_count\": %" PRId64 ", \"udp_count\": %" PRId64 ", \"process_count\": %" PRId64 ", \"thread_count\": %" PRId64 ", \"network_rx\": %" PRId64 ", \"network_tx\": %" PRId64 ", \"network_in\": %" PRId64 ", \"network_out\": %" PRId64 ", \"cpu\": %d, \"memory_total\": %" PRId64 ", \"memory_used\": %" PRId64 ", \"swap_total\": %" PRId64 ", \"swap_used\": %" PRId64 ", \"hdd_total\": %" PRId64 ", \"hdd_used\": %" PRId64 ", \"last_network_in\": %" PRId64 ", \"last_network_out\": %" PRId64 ",\"custom\": \"%s\" },\n",
"{ \"name\": \"%s\",\"type\": \"%s\",\"host\": \"%s\",\"location\": \"%s\",\"online4\": %s, \"online6\": %s, \"uptime\": \"%s\",\"load_1\": %.2f, \"load_5\": %.2f, \"load_15\": %.2f,\"ping_10010\": %.2f, \"ping_189\": %.2f, \"ping_10086\": %.2f,\"time_10010\": %" PRId64 ", \"time_189\": %" PRId64 ", \"time_10086\": %" PRId64 ", \"tcp_count\": %" PRId64 ", \"udp_count\": %" PRId64 ", \"process_count\": %" PRId64 ", \"thread_count\": %" PRId64 ", \"network_rx\": %" PRId64 ", \"network_tx\": %" PRId64 ", \"network_in\": %" PRId64 ", \"network_out\": %" PRId64 ", \"cpu\": %d, \"memory_total\": %" PRId64 ", \"memory_used\": %" PRId64 ", \"swap_total\": %" PRId64 ", \"swap_used\": %" PRId64 ", \"hdd_total\": %" PRId64 ", \"hdd_used\": %" PRId64 ", \"last_network_in\": %" PRId64 ", \"last_network_out\": %" PRId64 ",\"io_read\": %" PRId64 ", \"io_write\": %" PRId64 ",\"custom\": \"%s\" },\n",
pClients[i].m_aName,pClients[i].m_aType,pClients[i].m_aHost,pClients[i].m_aLocation,
pClients[i].m_Stats.m_Online4 ? "true" : "false",pClients[i].m_Stats.m_Online6 ? "true" : "false",
aUptime, pClients[i].m_Stats.m_Load_1, pClients[i].m_Stats.m_Load_5, pClients[i].m_Stats.m_Load_15, pClients[i].m_Stats.m_ping_10010, pClients[i].m_Stats.m_ping_189, pClients[i].m_Stats.m_ping_10086,
@@ -283,6 +287,7 @@ void CMain::JSONUpdateThread(void *pUser)
pClients[i].m_Stats.m_SwapTotal, pClients[i].m_Stats.m_SwapUsed, pClients[i].m_Stats.m_HDDTotal, pClients[i].m_Stats.m_HDDUsed,
pClients[i].m_Stats.m_NetworkIN == 0 || pClients[i].m_LastNetworkIN == 0 ? pClients[i].m_Stats.m_NetworkIN : pClients[i].m_LastNetworkIN,
pClients[i].m_Stats.m_NetworkOUT == 0 || pClients[i].m_LastNetworkOUT == 0 ? pClients[i].m_Stats.m_NetworkOUT : pClients[i].m_LastNetworkOUT,
pClients[i].m_Stats.m_IORead, pClients[i].m_Stats.m_IOWrite,
pClients[i].m_Stats.m_aCustom);
pBuf += strlen(pBuf);
}

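On the consuming side, the io_read/io_write values written by JSONUpdateThread surface per host in the served JSON; the front-end below reads them as result.servers[i].io_read. A minimal Python sketch of another consumer (the stats.json path is an assumption, adjust to your deployment):

import json

# ServerStatus serves a {"servers": [...]} document, per the format string above.
with open("stats.json") as f:
    stats = json.load(f)
for s in stats["servers"]:
    print(s["name"], "io_read:", s.get("io_read"), "io_write:", s.get("io_write"))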
@@ -73,6 +73,8 @@ class CMain
int64_t m_udpCount;
int64_t m_processCount;
int64_t m_threadCount;
int64_t m_IORead;
int64_t m_IOWrite;
double m_CPU;
char m_aCustom[512];
// Options

@@ -19,14 +19,15 @@ tr.odd.expandRow > :hover { background: #212e36 !important; }
.expandRow > td { padding: 0 !important; border-top: 0px !important; }
#month_traffic { min-width: 85px; max-width: 95px;}
#network { min-width: 115px; }
#cpu, #ram, #hdd { min-width: 45px; max-width: 90px; }
#cpu, #ram, #hdd { min-width: 20px; max-width: 60px; }
#io { width: 115px; }
#ping { max-width: 95px; }
@media only screen and (max-width: 1080px) {
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
#ping, tr td:nth-child(14) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 720px) {
body { font-size: 10px; }
@@ -34,7 +35,7 @@ tr.odd.expandRow > :hover { background: #212e36 !important; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
#ping, tr td:nth-child(14) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 620px) {
body { font-size: 10px; }
@@ -44,7 +45,7 @@ tr.odd.expandRow > :hover { background: #212e36 !important; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
#ping, tr td:nth-child(14) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 533px) {
body { font-size: 10px; }
@@ -54,7 +55,7 @@ tr.odd.expandRow > :hover { background: #212e36 !important; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
#ping, tr td:nth-child(14) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 450px) {
body { font-size: 10px; }
@@ -66,5 +67,5 @@ tr.odd.expandRow > :hover { background: #212e36 !important; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#cpu, #ram, #hdd { min-width: 25px; max-width: 50px; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
#ping, tr td:nth-child(14) { display:none; visibility:hidden; }
}

@@ -16,14 +16,15 @@ tr.odd.expandRow > :hover { background: #FFF !important; }
.expandRow > td { padding: 0 !important; border-top: 0px !important; }
#month_traffic { min-width: 85px; max-width: 95px;}
#network { min-width: 115px; }
#cpu, #ram, #hdd { min-width: 45px; max-width: 90px; }
#cpu, #ram, #hdd { min-width: 20px; max-width: 60px; }
#io { width: 115px; }
#ping { max-width: 95px; }
@media only screen and (max-width: 1080px) {
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
#ping, tr td:nth-child(14) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 720px) {
body { font-size: 10px; }
@@ -31,7 +32,7 @@ tr.odd.expandRow > :hover { background: #FFF !important; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
#ping, tr td:nth-child(14) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 620px) {
body { font-size: 10px; }
@@ -41,7 +42,7 @@ tr.odd.expandRow > :hover { background: #FFF !important; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
#ping, tr td:nth-child(14) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 533px) {
body { font-size: 10px; }
@@ -51,7 +52,7 @@ tr.odd.expandRow > :hover { background: #FFF !important; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
#ping, tr td:nth-child(14) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 450px) {
body { font-size: 10px; }
@@ -63,5 +64,5 @@ tr.odd.expandRow > :hover { background: #FFF !important; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#cpu, #ram, #hdd { min-width: 25px; max-width: 50px; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
#ping, tr td:nth-child(14) { display:none; visibility:hidden; }
}

@@ -87,6 +87,7 @@
<th id="cpu">核芯</th>
<th id="ram">内存</th>
<th id="hdd">硬盘</th>
<th id="io">IO(r/w)</th>
<th id="ping">联通|电信|移动</th>
</tr>
</thead>

@@ -74,6 +74,7 @@ function uptime() {
"<td id=\"cpu\"><div class=\"progress\"><div style=\"width: 100%;\" class=\"progress-bar progress-bar-warning\"><small>加载中</small></div></div></td>" +
"<td id=\"memory\"><div class=\"progress\"><div style=\"width: 100%;\" class=\"progress-bar progress-bar-warning\"><small>加载中</small></div></div></td>" +
"<td id=\"hdd\"><div class=\"progress\"><div style=\"width: 100%;\" class=\"progress-bar progress-bar-warning\"><small>加载中</small></div></div></td>" +
"<td id=\"io\">加载中</td>" +
"<td id=\"ping\"><div class=\"progress\"><div style=\"width: 100%;\" class=\"progress-bar progress-bar-warning\"><small>加载中</small></div></div></td>" +
"</tr>" +
"<tr class=\"expandRow " + hack + "\"><td colspan=\"16\"><div class=\"accordian-body collapse\" id=\"rt" + i + "\">" +
@@ -135,6 +136,7 @@ function uptime() {
TableRow.children["hdd"].children[0].children[0].className = "progress-bar progress-bar-danger";
TableRow.children["hdd"].children[0].children[0].style.width = "100%";
TableRow.children["hdd"].children[0].children[0].innerHTML = "<small>关闭</small>";
TableRow.children["io"].innerHTML = "";
TableRow.children["ping"].children[0].children[0].className = "progress-bar progress-bar-danger";
TableRow.children["ping"].children[0].children[0].style.width = "100%";
TableRow.children["ping"].children[0].children[0].innerHTML = "<small>关闭</small>";
@@ -238,6 +240,19 @@ function uptime() {
TableRow.children["hdd"].children[0].children[0].innerHTML = HDD + "%";
ExpandRow[0].children["expand_hdd"].innerHTML = "硬盘: " + bytesToSize(result.servers[i].hdd_used*1024*1024, 2) + " / " + bytesToSize(result.servers[i].hdd_total*1024*1024, 2);
// IO: a plain byte (B) unit is too small to be meaningful here
var io = "";
if(result.servers[i].io_read < 1024*1024)
io += (result.servers[i].io_read/1024).toFixed(1) + "K";
else
io += (result.servers[i].io_read/1024/1024).toFixed(1) + "M";
io += "💿";
if(result.servers[i].io_write < 1024*1024)
io += (result.servers[i].io_write/1024).toFixed(1) + "K";
else
io += (result.servers[i].io_write/1024/1024).toFixed(1) + "M";
TableRow.children["io"].innerHTML = io;
// delay time
// tcp, udp, process, thread count
@@ -291,6 +306,8 @@ function uptime() {
TableRow.children["hdd"].children[0].children[0].className = "progress-bar progress-bar-error";
TableRow.children["hdd"].children[0].children[0].style.width = "100%";
TableRow.children["hdd"].children[0].children[0].innerHTML = "<small>错误</small>";
TableRow.children["io"].innerHTML = "<small>错误</small>";
TableRow.children["ping"].children[0].children[0].className = "progress-bar progress-bar-error";
TableRow.children["ping"].children[0].children[0].style.width = "100%";
TableRow.children["ping"].children[0].children[0].innerHTML = "<small>错误</small>";