67 Commits
1.0.7 ... 1.0.9

Author SHA1 Message Date
cppla
d5a047c781 Update README.md 2022-08-25 16:23:02 +08:00
cppla
10872059c5 build 1.0.9
2022-08-25 16:14:46 +08:00
cppla
44656e565f Update the watchdog callback documentation
2022-08-25 16:13:24 +08:00
cppla
78c7da7361 Testing shows email and WeChat notifications cannot be delivered reliably
2022-08-25 16:03:46 +08:00
cppla
b5edeea057 Update README.md
Add an email service
2022-08-25 14:40:56 +08:00
cppla
c2259f347d Update README.md 2022-08-23 19:39:54 +08:00
cppla
bb5f03047d Update README.md 2022-08-23 19:39:10 +08:00
cppla
6ab6e16d0f 1080 to 1200 2022-08-01 19:37:26 +08:00
cppla
615cec88c2 300s 2022-07-21 10:53:08 +08:00
cppla
94896bac80 change docker to beijing time 2022-07-18 10:36:36 +08:00
cppla
a788b5da90 . 2022-07-17 10:11:11 +08:00
cppla
de0bc9dd74 Update README.md 2022-07-17 10:08:04 +08:00
cppla
ec16fc1fac Update README.md 2022-07-16 19:47:33 +08:00
cppla
cbd803b686 Create README.md
update tips
2022-07-16 19:46:59 +08:00
cppla
3eddb27d51 update 2022-07-16 19:30:29 +08:00
cppla
522809483e Compatible with older versions config.json 2022-07-16 19:11:29 +08:00
cppla
9d706f4da8 Update README.md
update watchdog documentation
2022-07-16 18:44:09 +08:00
cppla
a0fe8ee33c Update README.md
callback documentation
2022-07-16 18:35:08 +08:00
cppla
c5eed8e4fa Update README.md
add watchdog documentation
2022-07-16 18:00:10 +08:00
cppla
32d302580e update 2022-07-16 17:53:17 +08:00
cppla
b03d090a5c update readme 2022-07-16 17:47:57 +08:00
cppla
6151806141 c++ eval build use c++11 2022-07-16 14:42:32 +08:00
cppla
2703993272 add timeout for libcurl 2022-07-16 13:45:37 +08:00
cppla
022e5edb28 format 2022-07-16 00:41:25 +08:00
cppla
135eb180c8 escape 2022-07-16 00:32:33 +08:00
cppla
2ead43a0d8 update 2022-07-15 23:14:53 +08:00
cppla
baae11de3b tg sms 2022-07-15 22:34:44 +08:00
cppla
2e62ffa593 link libcurl static lib 2022-07-15 21:14:39 +08:00
cppla
f70705d872 next add callback 2022-07-13 17:11:22 +08:00
cppla
7316dbdddb update 2022-07-13 16:55:38 +08:00
cppla
a1689acbf0 trigger msg for watchdog 2022-07-12 16:58:30 +08:00
cppla
e2a59a5465 eval 2022-07-12 15:59:24 +08:00
cppla
962d564c80 copy msg 2022-07-12 13:50:57 +08:00
cppla
77973a5309 0 and 1 2022-07-11 19:03:29 +08:00
cppla
a9128b137d read config. 2022-07-07 18:30:31 +08:00
cppla
0a2a007e24 support for python 3.10 2022-05-30 13:08:34 +08:00
cppla
ab5d6f7d2d Update serverstatus.js
change style for io speed
2022-05-10 15:20:58 +08:00
cppla
dae7c07772 1.0.8 build 2022-05-10 11:56:38 +08:00
cppla
14447399a0 Update README.md 2022-05-06 10:26:46 +08:00
cppla
3327c5fd25 Fix monthly-traffic matching when nodes are added or removed 2022-05-05 10:15:06 +08:00
cppla
217b14bf55 old style 2022-04-29 16:07:56 +08:00
cppla
d7b8f27082 IO Speed as Tips 2022-04-29 13:58:38 +08:00
cppla
8f3736e4fc Add open-source support
just do it.
2022-04-13 15:40:46 +08:00
windows11
22c1905acb style 2022-04-02 15:55:58 +08:00
cppla
f73970262b test 2022-04-02 15:39:41 +08:00
cppla
f45d9be9fc ui 2022-04-02 15:34:01 +08:00
cppla
cb1313f5b5 test 2022-04-02 15:25:10 +08:00
cppla
b3f7ab45c5 blue 2022-04-02 14:47:23 +08:00
cppla
54451cab85 no float for IO 2022-04-02 11:39:37 +08:00
cppla
0f26eb502a 1080p change 2022-04-01 18:55:41 +08:00
cppla
472196d1ea 1.0.8 beta 2022-04-01 17:42:43 +08:00
cppla
7e84c230e6 Merge branch 'dev' 2022-04-01 17:36:22 +08:00
cppla
76cc15e84f css 2022-04-01 17:35:58 +08:00
cppla
4c0f4b94e8 Merge pull request #149 from cppla/dev
Add real-time IO statistics
2022-04-01 16:29:53 +08:00
cppla
36040be11e add 2022-04-01 16:28:22 +08:00
cppla
776ad68392 io css style 2022-04-01 16:24:52 +08:00
cppla
39c92f4788 bug 2022-04-01 16:07:30 +08:00
cppla
6a15832966 bug test 2022-04-01 16:04:08 +08:00
cppla
654ecd7a3e update disk io style 2022-04-01 15:50:09 +08:00
cppla
47cf7a1818 Continuously collect IO 2022-04-01 15:37:43 +08:00
cppla
5176cb0340 table control 2022-04-01 15:19:06 +08:00
cppla
1f47bcdb6f beta 2022-04-01 14:55:19 +08:00
cppla
7698ce0e4b Open source makes programming better 2022-03-31 19:53:17 +08:00
cppla
2d457c66ad push disk io for client-linux.py 2022-03-31 19:48:26 +08:00
cppla
d8c8d8fd3f add todo 2022-03-24 17:24:01 +08:00
cppla
d64beb7ba8 psutil add io total 2022-03-23 11:45:16 +08:00
cppla
30f9999fc6 rename 2022-03-23 11:13:18 +08:00
13 changed files with 41301 additions and 196 deletions

View File

@@ -3,7 +3,7 @@ FROM debian:buster as builder
MAINTAINER cppla https://cpp.la
RUN apt-get update -y && apt-get -y install gcc g++ make
RUN apt-get update -y && apt-get -y install gcc g++ make libcurl4-openssl-dev
COPY . .
@@ -20,6 +20,10 @@ RUN mkdir -p /ServerStatus/server/
COPY --from=builder server /ServerStatus/server/
COPY --from=builder web /usr/share/nginx/html/
# china time
ENV TZ=Asia/Shanghai
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
EXPOSE 80 35601
CMD nohup sh -c '/etc/init.d/nginx start && /ServerStatus/server/sergate --config=/ServerStatus/server/config.json --web-dir=/usr/share/nginx/html'

102
README.md
View File

@@ -3,14 +3,14 @@
* ServerStatus (Chinese edition) is a sleek cloud probe / server monitoring panel for multiple servers.
* Online demo: https://tz.cloudcpp.com
[![Python Support](https://img.shields.io/badge/python-2.7%2B%20-blue.svg)](https://github.com/cppla/ServerStatus)
[![Python Support](https://img.shields.io/badge/python-3.6%2B%20-blue.svg)](https://github.com/cppla/ServerStatus)
[![C++ Compiler](http://img.shields.io/badge/C++-GNU-blue.svg?style=flat&logo=cplusplus)](https://github.com/cppla/ServerStatus)
[![License](https://img.shields.io/badge/license-MIT-4EB1BA.svg?style=flat-square)](https://github.com/cppla/ServerStatus)
[![Version](https://img.shields.io/badge/Version-Beta%201.0.7-red)](https://github.com/cppla/ServerStatus)
[![Version](https://img.shields.io/badge/Version-Build%201.0.9-red)](https://github.com/cppla/ServerStatus)
![Latest Version](http://dl.cpp.la/Archive/serverstatus-1.0.2.png)
![Latest Version](http://dl.cpp.la/Archive/serverstatus_1.0.9.png)
`curl -sSL https://get.docker.com/ | sh && apt -y install docker-compose`
`Watchdog 🐶 now provides trigger-based alerts. interval only limits how often alert messages are sent, to avoid being spammed; it is not the probe interval.`
# Directory overview:
@@ -26,15 +26,12 @@
[Server]:
```bash
`OneTouch`:
`Docker`:
wget --no-check-certificate -qO ~/serverstatus-config.json https://raw.githubusercontent.com/cppla/ServerStatus/master/server/config.json && mkdir ~/serverstatus-monthtraffic
docker run -d --restart=always --name=serverstatus -v ~/serverstatus-config.json:/ServerStatus/server/config.json -v ~/serverstatus-monthtraffic:/usr/share/nginx/html/json -p 80:80 -p 35601:35601 cppla/serverstatus:latest
`ServerStatus`: docker-compose up -d
`ServerStatus with tgbot`: TG_CHAT_ID=yourTelegramChatID TG_BOT_TOKEN=yourTelegramBotToken docker-compose -f docker-compose-telegram.yml up -d
`Docker-compose`: docker-compose up -d
```
[Client]:
@@ -47,25 +44,32 @@ wget --no-check-certificate -qO client-linux.py 'https://raw.githubusercontent.c
# Manual installation:
[Clone the code]:
```
git clone https://github.com/cppla/ServerStatus.git
```
**[Server configuration]**
[Server configuration]:
1. Build the server binary
#### 1. Build the server binary
```
cd ServerStatus/server
make
`Debian/Ubuntu`: apt-get -y install gcc g++ make libcurl4-openssl-dev
`Centos/Redhat`: yum -y install gcc gcc-c++ make libcurl-devel
cd ServerStatus/server && make
./sergate
```
If there are no errors, it works; stop it with Ctrl+C. If there are errors, check whether port 35601 is already in use.
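A quick way to check the port (a minimal sketch, not part of the project scripts):

```python
# Minimal sketch: test whether something is already listening on port 35601.
import socket

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    busy = s.connect_ex(("127.0.0.1", 35601)) == 0
print("port 35601 is already in use" if busy else "port 35601 is free")
```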
2. Edit the configuration file
Edit config.json; note that the username and password values must match those configured in the clients.
#### 2. Edit the configuration file
```diff
! watchdog rule: may be any expression over the known fields.
! watchdog interval: the minimum interval between notifications.
! watchdog callback: may be any GET-style URL; the alert text is appended to it and the callback is then invoked.
! watchdog callback, Telegram: https://api.telegram.org/botYOUR_BOT_TOKEN/sendMessage?parse_mode=HTML&disable_web_page_preview=true&chat_id=YOUR_CHAT_ID&text=
! watchdog callback, Server酱: https://sctapi.ftqq.com/YOUR_KEY.send?title=ServerStatus&desp=
! watchdog callback, PushDeer: https://api2.pushdeer.com/message/push?pushkey=YOUR_KEY&text=
```
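As a rough illustration of the callback contract described above (the alert text is URL-encoded, appended to the configured callback URL, and requested with a plain GET), here is a minimal Python sketch; the token, chat id and alert text are placeholders:

```python
# Sketch of the watchdog callback contract; YOUR_BOT_TOKEN / YOUR_CHAT_ID are placeholders.
import urllib.parse
import urllib.request

callback = "https://api.telegram.org/botYOUR_BOT_TOKEN/sendMessage?chat_id=YOUR_CHAT_ID&text="
alert_text = "ServerStatus watchdog: rule cpu>90&load_5>3 fired on node s01"

url = callback + urllib.parse.quote(alert_text)
with urllib.request.urlopen(url, timeout=6) as resp:   # the server itself uses a 6 s libcurl timeout
    print(resp.status)
```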
{"servers":
```
{
"servers":
[
{
"username": "s01",
@@ -76,60 +80,68 @@ make
"password": "USER_DEFAULT_PASSWORD",
"monthstart": 1
},
],
"watchdog":
[
{
"name": "服务器负载高监控",
"rule": "cpu>90&load_5>3",
"interval": 600,
"callback": "https://yourSMSurl"
},
{
"name": "你可以组合任何已知字段的表达式",
"rule": "(hdd_used/hdd_total)*100>95",
"interval": 1800,
"callback": "https://yourSMSurl"
}
]
}
```
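The rule strings above are ordinary arithmetic and comparison expressions over the known fields; the server evaluates them with the exprtk expression library added in this release. A rough Python approximation of the idea (illustration only, not the server's actual code path):

```python
# Rough approximation of watchdog rule evaluation; the real server compiles
# each rule with the bundled exprtk C++ library. '&' acts as logical AND.
stats = {"cpu": 95.0, "load_5": 3.5, "hdd_used": 960.0, "hdd_total": 1000.0}

def rule_fires(rule, fields):
    expr = rule.replace("&", " and ").replace("|", " or ")
    return bool(eval(expr, {"__builtins__": {}}, dict(fields)))  # sketch only

print(rule_fires("cpu>90&load_5>3", stats))              # True
print(rule_fires("(hdd_used/hdd_total)*100>95", stats))  # True (96% used)
```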
3. Copy ServerStatus/web to your website directory
#### 3. Copy ServerStatus/web to your website directory
For example:
```
sudo cp -r ServerStatus/web/* /home/wwwroot/default
```
4. Run the server:
#### 4. Run the server:
The web-dir argument is the website root directory from the previous step; be sure to change it to your own site's path.
```
./sergate --config=config.json --web-dir=/home/wwwroot/default
```
[Client configuration]:
**[Client configuration]**
There are two client versions: client-linux for plain Linux and client-psutil as a cross-platform build; if the plain version does not work, switch to the cross-platform one.
1. client-linux configuration
#### 1. client-linux configuration
1. vim client-linux.py and set SERVER (server address), USER (username) and PASSWORD (password).
2. Run it with python3 client-linux.py.
2. client-psutil configuration:
#### 2. client-psutil configuration:
1. Install the cross-platform psutil dependency:
```
`Debian/Ubuntu`: apt -y install python3-pip && pip3 install psutil
`Centos/Redhat`: yum -y install python3-pip gcc python3-devel && pip3 install psutil
`Windows`: https://pypi.org/project/psutil/
```
2. vim client-psutil.py and set SERVER (server address), USER (username) and PASSWORD (password); the defaults are sketched below.
3. Run it with python3 client-psutil.py.
```
### for Centos
sudo yum -y install epel-release
sudo yum -y install python3-pip
sudo yum clean all
sudo yum -y install gcc
sudo yum -y install python3-devel
sudo pip3 install psutil
### for Ubuntu/Debian:
sudo apt -y install python3-pip
sudo pip3 install psutil
### for Windows:
URL: https://pypi.org/project/psutil/
Download psutil for Windows and install it.
```
Open the status page and everything should be monitored normally. Then add the server and client scripts to system startup, a process supervisor, or simply run them in the background, e.g.: nohup python3 client-linux.py &
Add the server and clients to system startup, a process supervisor, or run them in the background. For example: nohup python3 client-linux.py &
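For reference, the connection settings edited in the client steps above sit at the top of both client scripts; the values below are the defaults shown in this diff:

```python
# Defaults at the top of client-linux.py / client-psutil.py (from this diff).
# Point SERVER at the host running sergate and keep USER/PASSWORD in sync with
# the matching entry in the server's config.json.
SERVER = "127.0.0.1"
USER = "s01"
PORT = 35601
PASSWORD = "USER_DEFAULT_PASSWORD"
INTERVAL = 1   # reporting interval in seconds
```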
`extra scene (run web/ssview.py)`
![Shell View](http://dl.cpp.la/Archive/serverstatus-shell.png)
# Related open-source projects:
# Make Better
* BotoX: https://github.com/BotoX/ServerStatus
* mojeda: https://github.com/mojeda
* mojeda's ServerStatus: https://github.com/mojeda/ServerStatus
* BlueVM's project: http://www.lowendtalk.com/discussion/comment/169690#Comment_169690
# Jetbrains
<a href="https://www.jetbrains.com/?from=ServerStatus"><img src="https://resources.jetbrains.com/storage/products/company/brand/logos/jb_square.png" width="100px"></a>

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env python3
# coding: utf-8
# Update by : https://github.com/cppla/ServerStatus, Update date: 20211009
# Version: 1.0.2, supports Python 2.7 to 3.9
# Update by : https://github.com/cppla/ServerStatus, Update date: 20220530
# Version: 1.0.3, supports Python 2.7 to 3.10
# Supported OS: Linux, OSX, FreeBSD, OpenBSD and NetBSD, both 32-bit and 64-bit architectures
# Note: in most cases only SERVER and USER need to be changed. The packet-loss probe targets can be customized, e.g. CU = "www.facebook.com".
@@ -9,16 +9,15 @@ SERVER = "127.0.0.1"
USER = "s01"
PORT = 35601
PASSWORD = "USER_DEFAULT_PASSWORD"
INTERVAL = 1
PROBEPORT = 80
PROBE_PROTOCOL_PREFER = "ipv4" # ipv4, ipv6
PING_PACKET_HISTORY_LEN = 100
PORT = 35601
CU = "cu.tz.cloudcpp.com"
CT = "ct.tz.cloudcpp.com"
CM = "cm.tz.cloudcpp.com"
PROBEPORT = 80
PROBE_PROTOCOL_PREFER = "ipv4" # ipv4, ipv6
PING_PACKET_HISTORY_LEN = 100
INTERVAL = 1
import socket
import time
@@ -147,6 +146,10 @@ netSpeed = {
'avgrx': 0,
'avgtx': 0
}
diskIO = {
'read': 0,
'write': 0
}
def _ping_thread(host, mark, port):
lostPacket = 0
@@ -210,7 +213,72 @@ def _net_speed():
netSpeed["avgtx"] = avgtx
time.sleep(INTERVAL)
def get_realtime_date():
def _disk_io():
'''
good luck for opensource! by: cpp.la
Disk IO: because of IOPS differences across SSDs, HDDs, RAID cards, ZFS and other array setups, the impact of IO on performance has to be judged against your own server.
For example, on a mechanical disk doing lots of small random reads and writes, even a very low throughput can keep the disk waiting for long periods.
With sequential IO, an ordinary mechanical disk writing at around 100 MB/s can likewise keep the disk waiting for long periods.
Disk read/write figures have a 4k/8k margin of error: https://stackoverflow.com/questions/34413926/psutil-vs-dd-monitoring-disk-i-o
:return:
'''
while True:
# pre pid snapshot
snapshot_first = {}
# next pid snapshot
snapshot_second = {}
# read count snapshot
snapshot_read = 0
# write count snapshot
snapshot_write = 0
# process snapshot
pid_snapshot = [str(i) for i in os.listdir("/proc") if i.isdigit() is True]
for pid in pid_snapshot:
try:
with open("/proc/{}/io".format(pid)) as f:
pid_io = {}
for line in f.readlines():
if "read_bytes" in line:
pid_io["read"] = int(line.split("read_bytes:")[-1].strip())
elif "write_bytes" in line and "cancelled_write_bytes" not in line:
pid_io["write"] = int(line.split("write_bytes:")[-1].strip())
pid_io["name"] = open("/proc/{}/comm".format(pid), "r").read().strip()
snapshot_first[pid] = pid_io
except:
if pid in snapshot_first:
snapshot_first.pop(pid)
time.sleep(INTERVAL)
for pid in pid_snapshot:
try:
with open("/proc/{}/io".format(pid)) as f:
pid_io = {}
for line in f.readlines():
if "read_bytes" in line:
pid_io["read"] = int(line.split("read_bytes:")[-1].strip())
elif "write_bytes" in line and "cancelled_write_bytes" not in line:
pid_io["write"] = int(line.split("write_bytes:")[-1].strip())
pid_io["name"] = open("/proc/{}/comm".format(pid), "r").read().strip()
snapshot_second[pid] = pid_io
except:
if pid in snapshot_first:
snapshot_first.pop(pid)
if pid in snapshot_second:
snapshot_second.pop(pid)
for k, v in snapshot_first.items():
if snapshot_first[k]["name"] == snapshot_second[k]["name"] and snapshot_first[k]["name"] != "bash":
snapshot_read += (snapshot_second[k]["read"] - snapshot_first[k]["read"])
snapshot_write += (snapshot_second[k]["write"] - snapshot_first[k]["write"])
diskIO["read"] = snapshot_read
diskIO["write"] = snapshot_write
def get_realtime_data():
'''
real time get system data
:return:
'''
t1 = threading.Thread(
target=_ping_thread,
kwargs={
@@ -238,14 +306,12 @@ def get_realtime_date():
t4 = threading.Thread(
target=_net_speed,
)
t1.setDaemon(True)
t2.setDaemon(True)
t3.setDaemon(True)
t4.setDaemon(True)
t1.start()
t2.start()
t3.start()
t4.start()
t5 = threading.Thread(
target=_disk_io,
)
for ti in [t1, t2, t3, t4, t5]:
ti.daemon = True
ti.start()
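# Note: Thread.setDaemon() is deprecated since Python 3.10; assigning the
# daemon attribute directly, as above, keeps the client working on newer interpreters.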
def byte_str(object):
'''
@@ -273,7 +339,7 @@ if __name__ == '__main__':
elif 'INTERVAL' in argc:
INTERVAL = int(argc.split('INTERVAL=')[-1])
socket.setdefaulttimeout(30)
get_realtime_date()
get_realtime_data()
while True:
try:
print("Connecting...")
@@ -343,6 +409,8 @@ if __name__ == '__main__':
array['time_189'] = pingTime.get('189')
array['time_10086'] = pingTime.get('10086')
array['tcp'], array['udp'], array['process'], array['thread'] = tupd()
array['io_read'] = diskIO.get("read")
array['io_write'] = diskIO.get("write")
s.send(byte_str("update " + json.dumps(array) + "\n"))
except KeyboardInterrupt:

View File

@@ -1,8 +1,8 @@
#!/usr/bin/env python3
# coding: utf-8
# Update by : https://github.com/cppla/ServerStatus, Update date: 20211009
# Update by : https://github.com/cppla/ServerStatus, Update date: 20220530
# Depends on the cross-platform psutil library
# Version: 1.0.2, supports Python 2.7 to 3.9
# Version: 1.0.3, supports Python 2.7 to 3.10
# Supported OS: Linux, Windows, OSX, Sun Solaris, FreeBSD, OpenBSD and NetBSD, both 32-bit and 64-bit architectures
# Note: in most cases only SERVER and USER need to be changed. The packet-loss probe targets can be customized, e.g. CU = "www.facebook.com".
@@ -10,16 +10,15 @@ SERVER = "127.0.0.1"
USER = "s01"
PORT = 35601
PASSWORD = "USER_DEFAULT_PASSWORD"
INTERVAL = 1
PROBEPORT = 80
PROBE_PROTOCOL_PREFER = "ipv4" # ipv4, ipv6
PING_PACKET_HISTORY_LEN = 100
PORT = 35601
CU = "cu.tz.cloudcpp.com"
CT = "ct.tz.cloudcpp.com"
CM = "cm.tz.cloudcpp.com"
PROBEPORT = 80
PROBE_PROTOCOL_PREFER = "ipv4" # ipv4, ipv6
PING_PACKET_HISTORY_LEN = 100
INTERVAL = 1
import socket
import time
@@ -131,6 +130,10 @@ netSpeed = {
'avgrx': 0,
'avgtx': 0
}
diskIO = {
'read': 0,
'write': 0
}
def _ping_thread(host, mark, port):
lostPacket = 0
@@ -190,7 +193,62 @@ def _net_speed():
netSpeed["avgtx"] = avgtx
time.sleep(INTERVAL)
def get_realtime_date():
def _disk_io():
"""
the code is by: https://github.com/giampaolo/psutil/blob/master/scripts/iotop.py
good luck for opensource! modify: cpp.la
Calculate IO usage by comparing IO statics before and
after the interval.
Return a tuple including all currently running processes
sorted by IO activity and total disks I/O activity.
Disk IO: because of IOPS differences across SSDs, HDDs, RAID cards, ZFS and other arrays, the impact of IO on performance has to be judged against your own server.
For example, on a mechanical disk doing lots of small random reads and writes, even a very low throughput can keep the disk waiting for long periods.
With sequential IO, an ordinary mechanical disk writing at around 100 MB/s can likewise keep the disk waiting for long periods.
Disk read/write figures have a 4k/8k margin of error: https://stackoverflow.com/questions/34413926/psutil-vs-dd-monitoring-disk-i-o
"""
while True:
# first get a list of all processes and disk io counters
procs = [p for p in psutil.process_iter()]
for p in procs[:]:
try:
p._before = p.io_counters()
except psutil.Error:
procs.remove(p)
continue
disks_before = psutil.disk_io_counters()
# sleep some time, only when INTERVAL==1 , io read/write per_sec.
# when INTERVAL > 1, io read/write per_INTERVAL
time.sleep(INTERVAL)
# then retrieve the same info again
for p in procs[:]:
with p.oneshot():
try:
p._after = p.io_counters()
p._cmdline = ' '.join(p.cmdline())
if not p._cmdline:
p._cmdline = p.name()
p._username = p.username()
except (psutil.NoSuchProcess, psutil.ZombieProcess):
procs.remove(p)
disks_after = psutil.disk_io_counters()
# finally calculate results by comparing data before and
# after the interval
for p in procs:
p._read_per_sec = p._after.read_bytes - p._before.read_bytes
p._write_per_sec = p._after.write_bytes - p._before.write_bytes
p._total = p._read_per_sec + p._write_per_sec
diskIO["read"] = disks_after.read_bytes - disks_before.read_bytes
diskIO["write"] = disks_after.write_bytes - disks_before.write_bytes
def get_realtime_data():
'''
real time get system data
:return:
'''
t1 = threading.Thread(
target=_ping_thread,
kwargs={
@@ -218,14 +276,12 @@ def get_realtime_date():
t4 = threading.Thread(
target=_net_speed,
)
t1.setDaemon(True)
t2.setDaemon(True)
t3.setDaemon(True)
t4.setDaemon(True)
t1.start()
t2.start()
t3.start()
t4.start()
t5 = threading.Thread(
target=_disk_io,
)
for ti in [t1, t2, t3, t4, t5]:
ti.daemon = True
ti.start()
def byte_str(object):
'''
@@ -253,7 +309,7 @@ if __name__ == '__main__':
elif 'INTERVAL' in argc:
INTERVAL = int(argc.split('INTERVAL=')[-1])
socket.setdefaulttimeout(30)
get_realtime_date()
get_realtime_data()
while 1:
try:
print("Connecting...")
@@ -324,6 +380,8 @@ if __name__ == '__main__':
array['time_189'] = pingTime.get('189')
array['time_10086'] = pingTime.get('10086')
array['tcp'], array['udp'], array['process'], array['thread'] = tupd()
array['io_read'] = diskIO.get("read")
array['io_write'] = diskIO.get("write")
s.send(byte_str("update " + json.dumps(array) + "\n"))
except KeyboardInterrupt:

View File

@@ -6,7 +6,7 @@ CFLAGS = -Wall -O2
#CXX = clang++
CXX = g++
CXXFLAGS = -Wall -O2
CXXFLAGS = -Wall -O2 -std=c++11
ODIR = obj
SDIR = src
@@ -26,7 +26,7 @@ $(ODIR)/%.o: $(SDIR)/%.cpp
$(CXX) -c $(INC) $(CXXFLAGS) $< -o $@
$(OUT): $(OBJS)
$(CXX) $(LIBS) $^ -o $(OUT)
$(CXX) $(LIBS) $^ -o $(OUT) -lcurl
.PHONY: clean

View File

@@ -1,5 +1,5 @@
{"servers":
[
{
"servers": [
{
"username": "s01",
"name": "node1",
@@ -37,5 +37,31 @@
"password": "USER_DEFAULT_PASSWORD",
"monthstart": 1
}
],
"watchdog": [
{
"name": "cpu high warning",
"rule": "cpu>98",
"interval": 600,
"callback": "https://yourSMSurl"
},
{
"name": "memory high warning",
"rule": "(memory_used/memory_total)*100>90",
"interval": 600,
"callback": "https://yourSMSurl"
},
{
"name": "offline warning",
"rule": "online4=0&online6=0",
"interval": 300,
"callback": "https://yourSMSurl"
},
{
"name": "you can parse an expression combining any known field",
"rule": "load_5>10",
"interval": 1800,
"callback": "https://yourSMSurl"
}
]
}

40746
server/src/exprtk.hpp Normal file

File diff suppressed because it is too large.

View File

@@ -7,6 +7,8 @@
#include <json.h>
#include "server.h"
#include "main.h"
#include "exprtk.hpp"
#include "curl/curl.h"
#if defined(CONF_FAMILY_UNIX)
#include <signal.h>
@@ -96,6 +98,13 @@ void CMain::OnDelClient(int ClientNetID)
{
int ClientID = ClientNetToClient(ClientNetID);
dbg_msg("main", "OnDelClient(ncid=%d, cid=%d)", ClientNetID, ClientID);
//copy offline message for watchdog
WatchdogMessage(ClientNetID,
0, 0, 0, 0, 0, 0,
0, 0, 0,0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0,0, 0, 0,
0, 0, 0, 0);
if(ClientID >= 0 && ClientID < NET_MAX_CLIENTS)
{
Client(ClientID)->m_Connected = false;
@@ -178,6 +187,10 @@ int CMain::HandleMessage(int ClientNetID, char *pMessage)
pClient->m_Stats.m_HDDTotal = rStart["hdd_total"].u.integer;
if(rStart["hdd_used"].type)
pClient->m_Stats.m_HDDUsed = rStart["hdd_used"].u.integer;
if(rStart["io_read"].type)
pClient->m_Stats.m_IORead = rStart["io_read"].u.integer;
if(rStart["io_write"].type)
pClient->m_Stats.m_IOWrite = rStart["io_write"].u.integer;
if(rStart["cpu"].type)
pClient->m_Stats.m_CPU = rStart["cpu"].u.dbl;
if(rStart["online4"].type && pClient->m_ClientNetType == NETTYPE_IPV6)
@@ -187,22 +200,35 @@ int CMain::HandleMessage(int ClientNetID, char *pMessage)
if(rStart["custom"].type == json_string)
str_copy(pClient->m_Stats.m_aCustom, rStart["custom"].u.string.ptr, sizeof(pClient->m_Stats.m_aCustom));
//copy message for the watchdog to analyze
WatchdogMessage(ClientNetID,
pClient->m_Stats.m_Load_1, pClient->m_Stats.m_Load_5, pClient->m_Stats.m_Load_15,
pClient->m_Stats.m_ping_10010, pClient->m_Stats.m_ping_189, pClient->m_Stats.m_ping_10086,
pClient->m_Stats.m_time_10010, pClient->m_Stats.m_time_189, pClient->m_Stats.m_time_10086,
pClient->m_Stats.m_tcpCount, pClient->m_Stats.m_udpCount, pClient->m_Stats.m_processCount,
pClient->m_Stats.m_threadCount, pClient->m_Stats.m_NetworkRx, pClient->m_Stats.m_NetworkTx,
pClient->m_Stats.m_NetworkIN, pClient->m_Stats.m_NetworkOUT,pClient->m_Stats.m_MemTotal,
pClient->m_Stats.m_MemUsed, pClient->m_Stats.m_SwapTotal, pClient->m_Stats.m_SwapUsed,
pClient->m_Stats.m_HDDTotal, pClient->m_Stats.m_HDDUsed, pClient->m_Stats.m_IORead,
pClient->m_Stats.m_IOWrite, pClient->m_Stats.m_CPU, pClient->m_Stats.m_Online4,
pClient->m_Stats.m_Online6);
if(m_Config.m_Verbose)
{
if(rStart["online4"].type)
dbg_msg("main", "Online4: %s\nUptime: %" PRId64 "\nLoad_1: %f\nLoad_5: %f\nLoad_15: %f\nPing_10010: %f\nPing_189: %f\nPing_10086: %f\nTime_10010: %" PRId64 "\nTime_189: %" PRId64 "\nTime_10086: %" PRId64 "\nTcp_count: %" PRId64 "\nUdp_count: %" PRId64 "\nprocess_count: %" PRId64 "\nthread_count: %" PRId64 "\nNetworkRx: %" PRId64 "\nNetworkTx: %" PRId64 "\nNetworkIN: %" PRId64 "\nNetworkOUT: %" PRId64 "\nMemTotal: %" PRId64 "\nMemUsed: %" PRId64 "\nSwapTotal: %" PRId64 "\nSwapUsed: %" PRId64 "\nHDDTotal: %" PRId64 "\nHDDUsed: %" PRId64 "\nCPU: %f\n",
dbg_msg("main", "Online4: %s\nUptime: %" PRId64 "\nLoad_1: %f\nLoad_5: %f\nLoad_15: %f\nPing_10010: %f\nPing_189: %f\nPing_10086: %f\nTime_10010: %" PRId64 "\nTime_189: %" PRId64 "\nTime_10086: %" PRId64 "\nTcp_count: %" PRId64 "\nUdp_count: %" PRId64 "\nprocess_count: %" PRId64 "\nthread_count: %" PRId64 "\nNetworkRx: %" PRId64 "\nNetworkTx: %" PRId64 "\nNetworkIN: %" PRId64 "\nNetworkOUT: %" PRId64 "\nMemTotal: %" PRId64 "\nMemUsed: %" PRId64 "\nSwapTotal: %" PRId64 "\nSwapUsed: %" PRId64 "\nHDDTotal: %" PRId64 "\nHDDUsed: %" PRId64 "\nCPU: %f\nIORead: %" PRId64 "\nIOWrite: %" PRId64 "\n",
rStart["online4"].u.boolean ? "true" : "false",
pClient->m_Stats.m_Uptime,
pClient->m_Stats.m_Load_1, pClient->m_Stats.m_Load_5, pClient->m_Stats.m_Load_15, pClient->m_Stats.m_ping_10010, pClient->m_Stats.m_ping_189, pClient->m_Stats.m_ping_10086, pClient->m_Stats.m_time_10010, pClient->m_Stats.m_time_189, pClient->m_Stats.m_time_10086,pClient->m_Stats.m_tcpCount,pClient->m_Stats.m_udpCount,pClient->m_Stats.m_processCount,pClient->m_Stats.m_threadCount,pClient->m_Stats.m_NetworkRx, pClient->m_Stats.m_NetworkTx, pClient->m_Stats.m_NetworkIN, pClient->m_Stats.m_NetworkOUT, pClient->m_Stats.m_MemTotal, pClient->m_Stats.m_MemUsed, pClient->m_Stats.m_SwapTotal, pClient->m_Stats.m_SwapUsed, pClient->m_Stats.m_HDDTotal, pClient->m_Stats.m_HDDUsed, pClient->m_Stats.m_CPU);
pClient->m_Stats.m_Load_1, pClient->m_Stats.m_Load_5, pClient->m_Stats.m_Load_15, pClient->m_Stats.m_ping_10010, pClient->m_Stats.m_ping_189, pClient->m_Stats.m_ping_10086, pClient->m_Stats.m_time_10010, pClient->m_Stats.m_time_189, pClient->m_Stats.m_time_10086,pClient->m_Stats.m_tcpCount,pClient->m_Stats.m_udpCount,pClient->m_Stats.m_processCount,pClient->m_Stats.m_threadCount,pClient->m_Stats.m_NetworkRx, pClient->m_Stats.m_NetworkTx, pClient->m_Stats.m_NetworkIN, pClient->m_Stats.m_NetworkOUT, pClient->m_Stats.m_MemTotal, pClient->m_Stats.m_MemUsed, pClient->m_Stats.m_SwapTotal, pClient->m_Stats.m_SwapUsed, pClient->m_Stats.m_HDDTotal, pClient->m_Stats.m_HDDUsed, pClient->m_Stats.m_CPU, pClient->m_Stats.m_IORead, pClient->m_Stats.m_IOWrite);
else if(rStart["online6"].type)
dbg_msg("main", "Online6: %s\nUptime: %" PRId64 "\nLoad_1: %f\nLoad_5: %f\nLoad_15: %f\nPing_10010: %f\nPing_189: %f\nPing_10086: %f\nTime_10010: %" PRId64 "\nTime_189: %" PRId64 "\nTime_10086: %" PRId64 "\nTcp_count: %" PRId64 "\nUdp_count: %" PRId64 "\nprocess_count: %" PRId64 "\nthread_count: %" PRId64 "\nNetworkRx: %" PRId64 "\nNetworkTx: %" PRId64 "\nNetworkIN: %" PRId64 "\nNetworkOUT: %" PRId64 "\nMemTotal: %" PRId64 "\nMemUsed: %" PRId64 "\nSwapTotal: %" PRId64 "\nSwapUsed: %" PRId64 "\nHDDTotal: %" PRId64 "\nHDDUsed: %" PRId64 "\nCPU: %f\n",
dbg_msg("main", "Online6: %s\nUptime: %" PRId64 "\nLoad_1: %f\nLoad_5: %f\nLoad_15: %f\nPing_10010: %f\nPing_189: %f\nPing_10086: %f\nTime_10010: %" PRId64 "\nTime_189: %" PRId64 "\nTime_10086: %" PRId64 "\nTcp_count: %" PRId64 "\nUdp_count: %" PRId64 "\nprocess_count: %" PRId64 "\nthread_count: %" PRId64 "\nNetworkRx: %" PRId64 "\nNetworkTx: %" PRId64 "\nNetworkIN: %" PRId64 "\nNetworkOUT: %" PRId64 "\nMemTotal: %" PRId64 "\nMemUsed: %" PRId64 "\nSwapTotal: %" PRId64 "\nSwapUsed: %" PRId64 "\nHDDTotal: %" PRId64 "\nHDDUsed: %" PRId64 "\nCPU: %f\nIORead: %" PRId64 "\nIOWrite: %" PRId64 "\n",
rStart["online6"].u.boolean ? "true" : "false",
pClient->m_Stats.m_Uptime,
pClient->m_Stats.m_Load_1, pClient->m_Stats.m_Load_5, pClient->m_Stats.m_Load_15, pClient->m_Stats.m_ping_10010, pClient->m_Stats.m_ping_189, pClient->m_Stats.m_ping_10086, pClient->m_Stats.m_time_10010, pClient->m_Stats.m_time_189, pClient->m_Stats.m_time_10086,pClient->m_Stats.m_tcpCount,pClient->m_Stats.m_udpCount,pClient->m_Stats.m_processCount,pClient->m_Stats.m_threadCount,pClient->m_Stats.m_NetworkRx, pClient->m_Stats.m_NetworkTx, pClient->m_Stats.m_NetworkIN, pClient->m_Stats.m_NetworkOUT, pClient->m_Stats.m_MemTotal, pClient->m_Stats.m_MemUsed, pClient->m_Stats.m_SwapTotal, pClient->m_Stats.m_SwapUsed, pClient->m_Stats.m_HDDTotal, pClient->m_Stats.m_HDDUsed, pClient->m_Stats.m_CPU);
pClient->m_Stats.m_Load_1, pClient->m_Stats.m_Load_5, pClient->m_Stats.m_Load_15, pClient->m_Stats.m_ping_10010, pClient->m_Stats.m_ping_189, pClient->m_Stats.m_ping_10086, pClient->m_Stats.m_time_10010, pClient->m_Stats.m_time_189, pClient->m_Stats.m_time_10086,pClient->m_Stats.m_tcpCount,pClient->m_Stats.m_udpCount,pClient->m_Stats.m_processCount,pClient->m_Stats.m_threadCount,pClient->m_Stats.m_NetworkRx, pClient->m_Stats.m_NetworkTx, pClient->m_Stats.m_NetworkIN, pClient->m_Stats.m_NetworkOUT, pClient->m_Stats.m_MemTotal, pClient->m_Stats.m_MemUsed, pClient->m_Stats.m_SwapTotal, pClient->m_Stats.m_SwapUsed, pClient->m_Stats.m_HDDTotal, pClient->m_Stats.m_HDDUsed, pClient->m_Stats.m_CPU, pClient->m_Stats.m_IORead, pClient->m_Stats.m_IOWrite);
else
dbg_msg("main", "Uptime: %" PRId64 "\nLoad_1: %f\nLoad_5: %f\nLoad_15: %f\nPing_10010: %f\nPing_189: %f\nPing_10086: %f\nTime_10010: %" PRId64 "\nTime_189: %" PRId64 "\nTime_10086: %" PRId64 "\nTcp_count: %" PRId64 "\nUdp_count: %" PRId64 "\nprocess_count: %" PRId64 "\nthread_count: %" PRId64 "\nNetworkRx: %" PRId64 "\nNetworkTx: %" PRId64 "\nNetworkIN: %" PRId64 "\nNetworkOUT: %" PRId64 "\nMemTotal: %" PRId64 "\nMemUsed: %" PRId64 "\nSwapTotal: %" PRId64 "\nSwapUsed: %" PRId64 "\nHDDTotal: %" PRId64 "\nHDDUsed: %" PRId64 "\nCPU: %f\n",
dbg_msg("main", "Uptime: %" PRId64 "\nLoad_1: %f\nLoad_5: %f\nLoad_15: %f\nPing_10010: %f\nPing_189: %f\nPing_10086: %f\nTime_10010: %" PRId64 "\nTime_189: %" PRId64 "\nTime_10086: %" PRId64 "\nTcp_count: %" PRId64 "\nUdp_count: %" PRId64 "\nprocess_count: %" PRId64 "\nthread_count: %" PRId64 "\nNetworkRx: %" PRId64 "\nNetworkTx: %" PRId64 "\nNetworkIN: %" PRId64 "\nNetworkOUT: %" PRId64 "\nMemTotal: %" PRId64 "\nMemUsed: %" PRId64 "\nSwapTotal: %" PRId64 "\nSwapUsed: %" PRId64 "\nHDDTotal: %" PRId64 "\nHDDUsed: %" PRId64 "\nCPU: %f\nIORead: %" PRId64 "\nIOWrite: %" PRId64 "\n",
pClient->m_Stats.m_Uptime,
pClient->m_Stats.m_Load_1, pClient->m_Stats.m_Load_5, pClient->m_Stats.m_Load_15, pClient->m_Stats.m_ping_10010, pClient->m_Stats.m_ping_189, pClient->m_Stats.m_ping_10086, pClient->m_Stats.m_time_10010, pClient->m_Stats.m_time_189, pClient->m_Stats.m_time_10086,pClient->m_Stats.m_tcpCount,pClient->m_Stats.m_udpCount,pClient->m_Stats.m_processCount,pClient->m_Stats.m_threadCount,pClient->m_Stats.m_NetworkRx, pClient->m_Stats.m_NetworkTx, pClient->m_Stats.m_NetworkIN, pClient->m_Stats.m_NetworkOUT, pClient->m_Stats.m_MemTotal, pClient->m_Stats.m_MemUsed, pClient->m_Stats.m_SwapTotal, pClient->m_Stats.m_SwapUsed, pClient->m_Stats.m_HDDTotal, pClient->m_Stats.m_HDDUsed, pClient->m_Stats.m_CPU);
pClient->m_Stats.m_Load_1, pClient->m_Stats.m_Load_5, pClient->m_Stats.m_Load_15, pClient->m_Stats.m_ping_10010, pClient->m_Stats.m_ping_189, pClient->m_Stats.m_ping_10086, pClient->m_Stats.m_time_10010, pClient->m_Stats.m_time_189, pClient->m_Stats.m_time_10086,pClient->m_Stats.m_tcpCount,pClient->m_Stats.m_udpCount,pClient->m_Stats.m_processCount,pClient->m_Stats.m_threadCount,pClient->m_Stats.m_NetworkRx, pClient->m_Stats.m_NetworkTx, pClient->m_Stats.m_NetworkIN, pClient->m_Stats.m_NetworkOUT, pClient->m_Stats.m_MemTotal, pClient->m_Stats.m_MemUsed, pClient->m_Stats.m_SwapTotal, pClient->m_Stats.m_SwapUsed, pClient->m_Stats.m_HDDTotal, pClient->m_Stats.m_HDDUsed, pClient->m_Stats.m_CPU, pClient->m_Stats.m_IORead, pClient->m_Stats.m_IOWrite);
}
// clean up
@@ -230,6 +256,111 @@ int CMain::HandleMessage(int ClientNetID, char *pMessage)
return 1;
}
void CMain::WatchdogMessage(int ClientNetID, double load_1, double load_5, double load_15, double ping_10010, double ping_189, double ping_10086,
double time_10010, double time_189, double time_10086, double tcp, double udp, double process, double thread,
double network_rx, double network_tx, double network_in, double network_out, double memory_total, double memory_used,
double swap_total, double swap_used, double hdd_total, double hdd_used, double io_read, double io_write, double cpu,
double online4, double online6)
{
int ID = 0;
while (strcmp(Watchdog(ID)->m_aName, "NULL"))
{
typedef exprtk::symbol_table<double> symbol_table_t;
typedef exprtk::expression<double> expression_t;
typedef exprtk::parser<double> parser_t;
const std::string expression_string = Watchdog(ID)->m_aRule;
symbol_table_t symbol_table;
symbol_table.add_variable("load_1",load_1);
symbol_table.add_variable("load_5",load_5);
symbol_table.add_variable("load_15",load_15);
symbol_table.add_variable("ping_10010",ping_10010);
symbol_table.add_variable("ping_189",ping_189);
symbol_table.add_variable("ping_10086",ping_10086);
symbol_table.add_variable("time_10010",time_10010);
symbol_table.add_variable("time_189",time_189);
symbol_table.add_variable("time_10086",time_10086);
symbol_table.add_variable("tcp",tcp);
symbol_table.add_variable("udp",udp);
symbol_table.add_variable("process",process);
symbol_table.add_variable("thread",thread);
symbol_table.add_variable("network_rx",network_rx);
symbol_table.add_variable("network_tx",network_tx);
symbol_table.add_variable("network_in",network_in);
symbol_table.add_variable("network_out",network_out);
symbol_table.add_variable("memory_total",memory_total);
symbol_table.add_variable("memory_used",memory_used);
symbol_table.add_variable("swap_total",swap_total);
symbol_table.add_variable("swap_used",swap_used);
symbol_table.add_variable("hdd_total",hdd_total);
symbol_table.add_variable("hdd_used",hdd_used);
symbol_table.add_variable("io_read",io_read);
symbol_table.add_variable("io_write",io_write);
symbol_table.add_variable("cpu",cpu);
symbol_table.add_variable("online4",online4);
symbol_table.add_variable("online6",online6);
symbol_table.add_constants();
expression_t expression;
expression.register_symbol_table(symbol_table);
parser_t parser;
parser.compile(expression_string,expression);
if (expression.value() > 0)
{
int ClientID = ClientNetToClient(ClientNetID);
time_t currentStamp = (long long)time(/*ago*/0);
if ((currentStamp-Client(ClientID)->m_AlarmLastTime) > Watchdog(ID)->m_aInterval)
{
//todo: this should be moved into a worker thread
Client(ClientID)->m_AlarmLastTime = currentStamp;
CURL *curl;
CURLcode res;
curl_global_init(CURL_GLOBAL_ALL);
curl = curl_easy_init();
if(curl) {
//standard time
char standardTime[32]= { 0 };
strftime(standardTime, sizeof(standardTime), "%Y-%m-%d %H:%M:%S",localtime(&currentStamp));
//url encode
char encodeBuffer[2048] = { 0 };
sprintf(encodeBuffer, " \n\n【告警名称】 %s \n\n【告警规则】 %s \n\n【告警时间】 %s \n\n ---------------- \n\n【用户名】 %s \n\n【节点名】 %s \n\n【虚拟化】 %s \n\n【主机名】 %s \n\n【位 置】 %s",
Watchdog(ID)->m_aName,
Watchdog(ID)->m_aRule,
standardTime,
Client(ClientID)->m_aUsername,
Client(ClientID)->m_aName,
Client(ClientID)->m_aType,
Client(ClientID)->m_aHost,
Client(ClientID)->m_aLocation);
char *encodeUrl = curl_easy_escape(curl, encodeBuffer, strlen(encodeBuffer));
//standard url
char urlBuffer[2048] = { 0 };
sprintf(urlBuffer, "%s%s",Watchdog(ID)->m_aCallback, encodeUrl);
curl_easy_setopt(curl, CURLOPT_URL, urlBuffer);
curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 3L);
curl_easy_setopt(curl, CURLOPT_TIMEOUT, 6L);
res = curl_easy_perform(curl);
if(res != CURLE_OK)
fprintf(stderr, "watchdog failed: %s\n", curl_easy_strerror(res));
if(encodeUrl)
curl_free(encodeUrl);
curl_easy_cleanup(curl);
}
curl_global_cleanup();
}
}
ID++;
}
}
void CMain::JSONUpdateThread(void *pUser)
{
CJSONUpdateThreadData *m_pJSONUpdateThreadData = (CJSONUpdateThreadData *)pUser;
@@ -274,7 +405,7 @@ void CMain::JSONUpdateThread(void *pUser)
}
str_format(pBuf, sizeof(aFileBuf) - (pBuf - aFileBuf),
"{ \"name\": \"%s\",\"type\": \"%s\",\"host\": \"%s\",\"location\": \"%s\",\"online4\": %s, \"online6\": %s, \"uptime\": \"%s\",\"load_1\": %.2f, \"load_5\": %.2f, \"load_15\": %.2f,\"ping_10010\": %.2f, \"ping_189\": %.2f, \"ping_10086\": %.2f,\"time_10010\": %" PRId64 ", \"time_189\": %" PRId64 ", \"time_10086\": %" PRId64 ", \"tcp_count\": %" PRId64 ", \"udp_count\": %" PRId64 ", \"process_count\": %" PRId64 ", \"thread_count\": %" PRId64 ", \"network_rx\": %" PRId64 ", \"network_tx\": %" PRId64 ", \"network_in\": %" PRId64 ", \"network_out\": %" PRId64 ", \"cpu\": %d, \"memory_total\": %" PRId64 ", \"memory_used\": %" PRId64 ", \"swap_total\": %" PRId64 ", \"swap_used\": %" PRId64 ", \"hdd_total\": %" PRId64 ", \"hdd_used\": %" PRId64 ", \"last_network_in\": %" PRId64 ", \"last_network_out\": %" PRId64 ",\"custom\": \"%s\" },\n",
"{ \"name\": \"%s\",\"type\": \"%s\",\"host\": \"%s\",\"location\": \"%s\",\"online4\": %s, \"online6\": %s, \"uptime\": \"%s\",\"load_1\": %.2f, \"load_5\": %.2f, \"load_15\": %.2f,\"ping_10010\": %.2f, \"ping_189\": %.2f, \"ping_10086\": %.2f,\"time_10010\": %" PRId64 ", \"time_189\": %" PRId64 ", \"time_10086\": %" PRId64 ", \"tcp_count\": %" PRId64 ", \"udp_count\": %" PRId64 ", \"process_count\": %" PRId64 ", \"thread_count\": %" PRId64 ", \"network_rx\": %" PRId64 ", \"network_tx\": %" PRId64 ", \"network_in\": %" PRId64 ", \"network_out\": %" PRId64 ", \"cpu\": %d, \"memory_total\": %" PRId64 ", \"memory_used\": %" PRId64 ", \"swap_total\": %" PRId64 ", \"swap_used\": %" PRId64 ", \"hdd_total\": %" PRId64 ", \"hdd_used\": %" PRId64 ", \"last_network_in\": %" PRId64 ", \"last_network_out\": %" PRId64 ",\"io_read\": %" PRId64 ", \"io_write\": %" PRId64 ",\"custom\": \"%s\" },\n",
pClients[i].m_aName,pClients[i].m_aType,pClients[i].m_aHost,pClients[i].m_aLocation,
pClients[i].m_Stats.m_Online4 ? "true" : "false",pClients[i].m_Stats.m_Online6 ? "true" : "false",
aUptime, pClients[i].m_Stats.m_Load_1, pClients[i].m_Stats.m_Load_5, pClients[i].m_Stats.m_Load_15, pClients[i].m_Stats.m_ping_10010, pClients[i].m_Stats.m_ping_189, pClients[i].m_Stats.m_ping_10086,
@@ -283,6 +414,7 @@ void CMain::JSONUpdateThread(void *pUser)
pClients[i].m_Stats.m_SwapTotal, pClients[i].m_Stats.m_SwapUsed, pClients[i].m_Stats.m_HDDTotal, pClients[i].m_Stats.m_HDDUsed,
pClients[i].m_Stats.m_NetworkIN == 0 || pClients[i].m_LastNetworkIN == 0 ? pClients[i].m_Stats.m_NetworkIN : pClients[i].m_LastNetworkIN,
pClients[i].m_Stats.m_NetworkOUT == 0 || pClients[i].m_LastNetworkOUT == 0 ? pClients[i].m_Stats.m_NetworkOUT : pClients[i].m_LastNetworkOUT,
pClients[i].m_Stats.m_IORead, pClients[i].m_Stats.m_IOWrite,
pClients[i].m_Stats.m_aCustom);
pBuf += strlen(pBuf);
}
@@ -406,6 +538,28 @@ int CMain::ReadConfig()
}
}
// watch dog
// support by: https://cpp.la
ID = 0;
const json_value &jStart = (*pJsonData)["watchdog"];
if(jStart.type == json_array)
{
for(unsigned i = 0; i < jStart.u.array.length; i++)
{
if(ID < 0 || ID >= NET_MAX_CLIENTS)
continue;
str_copy(Watchdog(ID)->m_aName, jStart[i]["name"].u.string.ptr, sizeof(Watchdog(ID)->m_aName));
str_copy(Watchdog(ID)->m_aRule, jStart[i]["rule"].u.string.ptr, sizeof(Watchdog(ID)->m_aRule));
Watchdog(ID)->m_aInterval = jStart[i]["interval"].u.integer;
str_copy(Watchdog(ID)->m_aCallback, jStart[i]["callback"].u.string.ptr, sizeof(Watchdog(ID)->m_aCallback));
ID++;
}
str_copy(Watchdog(ID)->m_aName, "NULL", sizeof(Watchdog(ID)->m_aName));
} else
str_copy(Watchdog(ID)->m_aName, "NULL", sizeof(Watchdog(ID)->m_aName));
// if the file exists, read the last network traffic record and reset m_LastNetworkIN and m_LastNetworkOUT
// support by: https://cpp.la
IOHANDLE nFile = io_open(m_Config.m_aJSONFile, IOFLAG_READ);
@@ -429,7 +583,7 @@ int CMain::ReadConfig()
json_value *pNJsonData = json_parse_ex(&nJsonSettings, pNFileData, strlen(pNFileData), aError);
if(pNJsonData)
{
const json_value &rStart = (*pNJsonData)["servers"];
const json_value &cStart = (*pNJsonData)["servers"];
if(rStart.type == json_array)
{
int ID = 0;
@@ -437,14 +591,18 @@ int CMain::ReadConfig()
{
if(ID < 0 || ID >= NET_MAX_CLIENTS)
continue;
// check name and host for match , when ServerStatus reload month traffic.
if(strcmp(Client(ID)->m_aName, rStart[i]["name"].u.string.ptr)==0 && strcmp(Client(ID)->m_aHost, rStart[i]["host"].u.string.ptr)==0)
for(unsigned j = 0; j < cStart.u.array.length; j++)
{
Client(ID)->m_LastNetworkIN = rStart[i]["last_network_in"].u.integer;
Client(ID)->m_LastNetworkOUT = rStart[i]["last_network_out"].u.integer;
if(strcmp(Client(ID)->m_aName, cStart[j]["name"].u.string.ptr)==0 &&
strcmp(Client(ID)->m_aType, cStart[j]["type"].u.string.ptr)==0 &&
strcmp(Client(ID)->m_aHost, cStart[j]["host"].u.string.ptr)==0 &&
strcmp(Client(ID)->m_aLocation, cStart[j]["location"].u.string.ptr)==0)
{
Client(ID)->m_LastNetworkIN = cStart[j]["last_network_in"].u.integer;
Client(ID)->m_LastNetworkOUT = cStart[j]["last_network_out"].u.integer;
break;
}
}
ID++;
}
}
@@ -557,3 +715,4 @@ int main(int argc, const char *argv[])
return RetVal;
}

View File

@@ -38,10 +38,11 @@ class CMain
char m_aPassword[128];
int m_aMonthStart; //track month network traffic. by: https://cpp.la
int64_t m_LastNetworkIN;
int64_t m_LastNetworkOUT;
int64 m_TimeConnected;
int64 m_LastUpdate;
int64_t m_LastNetworkIN; //restore month traffic info.
int64_t m_LastNetworkOUT; //restore month traffic info.
int64_t m_TimeConnected;
int64_t m_LastUpdate;
int64_t m_AlarmLastTime; //record last alarm time.
struct CStats
{
@@ -73,6 +74,8 @@ class CMain
int64_t m_udpCount;
int64_t m_processCount;
int64_t m_threadCount;
int64_t m_IORead;
int64_t m_IOWrite;
double m_CPU;
char m_aCustom[512];
// Options
@@ -80,6 +83,13 @@ class CMain
} m_Stats;
} m_aClients[NET_MAX_CLIENTS];
struct CWatchDog{
char m_aName[128];
char m_aRule[128];
int m_aInterval;
char m_aCallback[1024];
} m_aCWatchDogs[NET_MAX_CLIENTS];
struct CJSONUpdateThreadData
{
CClient *pClients;
@@ -97,6 +107,14 @@ public:
int ReadConfig();
int Run();
CWatchDog *Watchdog(int ruleID) { return &m_aCWatchDogs[ruleID]; }
void WatchdogMessage(int ClientNetID,
double load_1, double load_5, double load_15, double ping_10010, double ping_189, double ping_10086,
double time_10010, double time_189, double time_10086, double tcp, double udp, double process, double thread,
double network_rx, double network_tx, double network_in, double network_out,double memory_total,
double memory_used,double swap_total, double swap_used, double hdd_total,
double hdd_used, double io_read, double io_write, double cpu,double online4, double online6);
CClient *Client(int ClientID) { return &m_aClients[ClientID]; }
CClient *ClientNet(int ClientNetID);
const CConfig *Config() const { return &m_Config; }

View File

@@ -22,49 +22,49 @@ tr.odd.expandRow > :hover { background: #212e36 !important; }
#cpu, #ram, #hdd { min-width: 45px; max-width: 90px; }
#ping { max-width: 95px; }
@media only screen and (max-width: 1080px) {
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
@media only screen and (max-width: 1200px) {
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 720px) {
body { font-size: 10px; }
.content { padding: 0; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
body { font-size: 10px; }
.content { padding: 0; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 620px) {
body { font-size: 10px; }
.content { padding: 0; }
#month_traffic, tr td:nth-child(2) { display:none; visibility:hidden; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
body { font-size: 10px; }
.content { padding: 0; }
#month_traffic, tr td:nth-child(2) { display:none; visibility:hidden; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 533px) {
body { font-size: 10px; }
.content { padding: 0; }
#month_traffic, tr td:nth-child(2) { display:none; visibility:hidden; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
body { font-size: 10px; }
.content { padding: 0; }
#month_traffic, tr td:nth-child(2) { display:none; visibility:hidden; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 450px) {
body { font-size: 10px; }
.content { padding: 0; }
#month_traffic, tr td:nth-child(2) { display:none; visibility:hidden; }
#name, tr td:nth-child(3) { min-width: 55px; max-width: 85px; text-overflow: ellipsis; white-space: nowrap; overflow: hidden; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#cpu, #ram, #hdd { min-width: 25px; max-width: 50px; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
body { font-size: 10px; }
.content { padding: 0; }
#month_traffic, tr td:nth-child(2) { display:none; visibility:hidden; }
#name, tr td:nth-child(3) { min-width: 55px; max-width: 85px; text-overflow: ellipsis; white-space: nowrap; overflow: hidden; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#cpu, #ram, #hdd { min-width: 25px; max-width: 50px; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
}

View File

@@ -19,49 +19,49 @@ tr.odd.expandRow > :hover { background: #FFF !important; }
#cpu, #ram, #hdd { min-width: 45px; max-width: 90px; }
#ping { max-width: 95px; }
@media only screen and (max-width: 1080px) {
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
@media only screen and (max-width: 1200px) {
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 720px) {
body { font-size: 10px; }
.content { padding: 0; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
body { font-size: 10px; }
.content { padding: 0; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 620px) {
body { font-size: 10px; }
.content { padding: 0; }
#month_traffic, tr td:nth-child(2) { display:none; visibility:hidden; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
body { font-size: 10px; }
.content { padding: 0; }
#month_traffic, tr td:nth-child(2) { display:none; visibility:hidden; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 533px) {
body { font-size: 10px; }
.content { padding: 0; }
#month_traffic, tr td:nth-child(2) { display:none; visibility:hidden; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
body { font-size: 10px; }
.content { padding: 0; }
#month_traffic, tr td:nth-child(2) { display:none; visibility:hidden; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
}
@media only screen and (max-width: 450px) {
body { font-size: 10px; }
.content { padding: 0; }
#month_traffic, tr td:nth-child(2) { display:none; visibility:hidden; }
#name, tr td:nth-child(3) { min-width: 55px; max-width: 85px; text-overflow: ellipsis; white-space: nowrap; overflow: hidden; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#cpu, #ram, #hdd { min-width: 25px; max-width: 50px; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
body { font-size: 10px; }
.content { padding: 0; }
#month_traffic, tr td:nth-child(2) { display:none; visibility:hidden; }
#name, tr td:nth-child(3) { min-width: 55px; max-width: 85px; text-overflow: ellipsis; white-space: nowrap; overflow: hidden; }
#type, tr td:nth-child(4) { display:none; visibility:hidden; }
#location, tr td:nth-child(5) { display:none; visibility:hidden; }
#uptime, tr td:nth-child(6) { display:none; visibility:hidden; }
#traffic, tr td:nth-child(9) { display:none; visibility:hidden; }
#cpu, #ram, #hdd { min-width: 25px; max-width: 50px; }
#ping, tr td:nth-child(13) { display:none; visibility:hidden; }
}

View File

@@ -75,19 +75,19 @@
<table class="table table-striped table-condensed table-hover">
<thead>
<tr>
<th id="online_status" style="text-align: center;">协议</th>
<th id="month_traffic" style="text-align: center;">月流量 ↓|↑</th>
<th id="name">节点</th>
<th id="type">虚拟化</th>
<th id="location">位置</th>
<th id="uptime">在线</th>
<th id="online_status" style="text-align: center;">🔗协议</th>
<th id="month_traffic" style="text-align: center;">📊月流量↓|↑</th>
<th id="name">📌节点</th>
<th id="type">🗂️虚拟化</th>
<th id="location">🌍位置</th>
<th id="uptime">⏱️在线</th>
<th id="load">负载</th>
<th id="network">网络 ↓|↑</th>
<th id="traffic">总流量 ↓|↑</th>
<th id="cpu">核芯</th>
<th id="ram">内存</th>
<th id="hdd">硬盘</th>
<th id="ping">联通|电信|移动</th>
<th id="network">🚦网络↓|↑</th>
<th id="traffic">📋总流量↓|↑</th>
<th id="cpu">🎯核芯</th>
<th id="ram">⚡️内存</th>
<th id="hdd">💾硬盘</th>
<th id="ping">🌐CU|CT|CM</th>
</tr>
</thead>
<tbody id="servers">

View File

@@ -41,11 +41,12 @@ function bytesToSize(bytes, precision, si)
} else {
return bytes + ' B';
}
if(si != 0) {
return ret;
/*if(si != 0) {
return ret + 'B';
} else {
return ret + 'iB';
}
}*/
}
function uptime() {
@@ -236,7 +237,20 @@ function uptime() {
TableRow.children["hdd"].children[0].children[0].className = "progress-bar progress-bar-success";
TableRow.children["hdd"].children[0].children[0].style.width = HDD + "%";
TableRow.children["hdd"].children[0].children[0].innerHTML = HDD + "%";
ExpandRow[0].children["expand_hdd"].innerHTML = "硬盘: " + bytesToSize(result.servers[i].hdd_used*1024*1024, 2) + " / " + bytesToSize(result.servers[i].hdd_total*1024*1024, 2);
// IO Speed for HDD.
// Byte-scale IO values are too small to be meaningful, so the smallest unit shown is K
var io = "";
if(result.servers[i].io_read < 1024*1024)
io += parseInt(result.servers[i].io_read/1024) + "K";
else
io += parseInt(result.servers[i].io_read/1024/1024) + "M";
io += " / "
if(result.servers[i].io_write < 1024*1024)
io += parseInt(result.servers[i].io_write/1024) + "K";
else
io += parseInt(result.servers[i].io_write/1024/1024) + "M";
// Expand for HDD.
ExpandRow[0].children["expand_hdd"].innerHTML = "硬盘|读写: " + bytesToSize(result.servers[i].hdd_used*1024*1024, 2) + " / " + bytesToSize(result.servers[i].hdd_total*1024*1024, 2) + " | " + io;
// delay time