Compare commits

...

507 Commits

Author SHA1 Message Date
164cd8cb3b Automatic commit of package [zabbix-agent-addons] release [0.2.172-1].
Created by command:

/usr/bin/tito tag
2023-12-21 15:58:47 +01:00
ff4a05ae59 Add Zabbix template for storageDevices 2023-12-21 15:58:31 +01:00
c1e46207da Read raw value for SSD_Life_Left 2023-12-21 15:00:07 +01:00
f72910cc6b Read SSD_Life_Left if available 2023-12-21 14:47:28 +01:00
62b8de1b05 /dev/bus/0 might not exist but can be queried 2023-12-21 14:23:58 +01:00
066f622888 Report more info from some NVMe 2023-12-21 14:16:31 +01:00
12c8396be3 Adjust default values for stor dev 2023-12-21 14:09:42 +01:00
f7835e1b90 Fix UserParam 2023-12-20 16:22:15 +01:00
a5ad4081f0 Add new script for smart monitoring 2023-12-20 16:19:21 +01:00
00790af9bf Automatic commit of package [zabbix-agent-addons] release [0.2.171-1].
Created by command:

/usr/bin/tito tag
2023-09-19 12:03:17 +02:00
55c878cf24 Ignore samba NT_STATUS_PROTOCOL_UNREACHABLE errors 2023-09-19 12:03:04 +02:00
2cec18b4a5 Automatic commit of package [zabbix-agent-addons] release [0.2.170-1].
Created by command:

/usr/bin/tito tag
2023-06-29 14:06:00 +02:00
1ddf903568 Typo 2023-06-29 14:04:38 +02:00
e5047e7b92 Fix + discover NVMe 2023-06-29 14:02:30 +01:00
34d19c8622 Automatic commit of package [zabbix-agent-addons] release [0.2.169-1].
Created by command:

/usr/bin/tito tag
2023-06-29 12:12:23 +02:00
2c29e4ecaa Better sensor output parsing 2023-06-29 12:12:07 +02:00
995bd50151 Automatic commit of package [zabbix-agent-addons] release [0.2.168-1].
Created by command:

/usr/bin/tito tag
2023-06-29 11:15:27 +02:00
7b42d3f2a9 Drop ipmitool stderr and simplify output parsing 2023-06-29 11:15:10 +02:00
313e022894 Automatic commit of package [zabbix-agent-addons] release [0.2.167-1].
Created by command:

/usr/bin/tito tag
2023-06-29 09:31:14 +02:00
3159f43ced Fix fan detection on some BMC boards 2023-06-29 09:29:15 +02:00
b0958e6fba Update ZFS template 2022-05-06 17:05:06 +02:00
ed5c8b745f Automatic commit of package [zabbix-agent-addons] release [0.2.166-1].
Created by command:

/usr/bin/tito tag
2022-03-26 18:30:53 +01:00
5acc49a55c Fix counting samba computers auth tries 2022-03-26 18:30:16 +01:00
7cfbd64eb4 Automatic commit of package [zabbix-agent-addons] release [0.2.165-1].
Created by command:

/usr/bin/tito tag
2022-03-21 11:30:00 +01:00
221a0afe5d last_seen might not be defined in check_unifi 2022-03-21 11:29:41 +01:00
e0be2c506f Automatic commit of package [zabbix-agent-addons] release [0.2.164-1].
Created by command:

/usr/bin/tito tag
2022-03-21 10:49:24 +01:00
4e50e2b2d4 Use JSON bool in unifi scripts 2022-03-21 10:48:50 +01:00
f46a580f90 Automatic commit of package [zabbix-agent-addons] release [0.2.163-1].
Created by command:

/usr/bin/tito tag
2022-01-24 11:01:15 +01:00
e0d2825f21 Fix check_zimbra_sudo 2022-01-24 11:01:03 +01:00
56fcaf7f6c Automatic commit of package [zabbix-agent-addons] release [0.2.162-1].
Created by command:

/usr/bin/tito tag
2022-01-21 13:46:24 +01:00
5afabbfaad Add alloc_ct for LVM VG when missing 2022-01-21 13:46:06 +01:00
849170c10b Automatic commit of package [zabbix-agent-addons] release [0.2.161-1].
Created by command:

/usr/bin/tito tag
2022-01-21 12:19:45 +01:00
31ccccd2e0 Fix Zimbra discovery and check scripts 2022-01-21 12:19:29 +01:00
1950abbc0f Automatic commit of package [zabbix-agent-addons] release [0.2.160-1].
Created by command:

/usr/bin/tito tag
2022-01-20 17:28:52 +01:00
51fa9f602a Add a {#DOCKER_CONTAINER_STATUS} LLD macro 2022-01-20 17:28:37 +01:00
607204a150 Automatic commit of package [zabbix-agent-addons] release [0.2.159-1].
Created by command:

/usr/bin/tito tag
2022-01-20 14:55:55 +01:00
5e7e22d311 Update Docker template 2022-01-20 14:55:34 +01:00
80bfaee714 Don't query state in docker discovery
As it's not supported on some older Docker versions, and not used anyway
2022-01-20 14:54:19 +01:00
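A minimal Perl sketch of the discovery behaviour this commit describes, assuming hypothetical {#DOCKER_CONTAINER_ID} and {#DOCKER_CONTAINER_NAME} LLD macros: containers are listed with `docker ps` without asking for the State field, which some older Docker releases do not expose.

    use strict;
    use warnings;
    use JSON;

    # Hypothetical macro names; the real template may differ.
    my @lines = `docker ps --all --format '{{.ID}};{{.Names}}' 2>/dev/null`;
    chomp @lines;
    my @containers;
    for my $line (@lines) {
        my ($id, $name) = split /;/, $line, 2;
        next unless defined $name;
        push @containers, { '{#DOCKER_CONTAINER_ID}' => $id, '{#DOCKER_CONTAINER_NAME}' => $name };
    }
    # Zabbix LLD expects a JSON object with a "data" array; note there is no State query.
    print to_json({ data => \@containers });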
8140dcb7cf Automatic commit of package [zabbix-agent-addons] release [0.2.158-1].
Created by command:

/usr/bin/tito tag
2022-01-13 13:59:21 +01:00
820d12a682 Small fixes for Docker check script and template 2022-01-13 13:58:58 +01:00
2d88e6fe34 Automatic commit of package [zabbix-agent-addons] release [0.2.157-1].
Created by command:

/usr/bin/tito tag
2022-01-13 09:44:00 +01:00
cd48caa24c Enhancements in the Docker template 2022-01-13 09:43:45 +01:00
7c42540a66 Automatic commit of package [zabbix-agent-addons] release [0.2.156-1].
Created by command:

/usr/bin/tito tag
2022-01-12 16:24:07 +01:00
a24f4adb81 Add Docker scripts 2022-01-12 16:23:50 +01:00
a73021a2d5 Automatic commit of package [zabbix-agent-addons] release [0.2.155-1].
Created by command:

/usr/bin/tito tag
2022-01-11 16:29:51 +01:00
8df27d15d7 Automatic commit of package [zabbix-agent-addons] release [0.2.154-1].
Created by command:

/usr/bin/tito tag
2021-12-16 16:47:35 +01:00
cfdd92b9c6 Fix zpool iostat as /proc/spl/kstat/zfs/pool/io doesn't exist anymore 2021-12-16 16:46:43 +01:00
Daniel Berteaud
72682f9bad Add nodata triggers for Elasticsearch 2021-10-19 14:31:56 +02:00
Daniel Berteaud
1272a06771 Include Zabbix template for Elasticsearch 2021-10-19 10:27:16 +02:00
Daniel Berteaud
c17260c519 Automatic commit of package [zabbix-agent-addons] release [0.2.153-1].
Created by command:

/usr/bin/tito tag
2021-10-19 10:19:04 +02:00
Daniel Berteaud
5387ae53b8 Tweak elasticsearch monitoring scripts 2021-10-19 10:18:44 +02:00
Daniel Berteaud
415c608252 Automatic commit of package [zabbix-agent-addons] release [0.2.152-1].
Created by command:

/usr/bin/tito tag
2021-10-18 12:18:55 +02:00
Daniel Berteaud
9ad6d8b1b3 Small fixes in elasticsearch scripts 2021-10-18 12:18:37 +02:00
Daniel Berteaud
6dc46b819f Automatic commit of package [zabbix-agent-addons] release [0.2.151-1].
Created by command:

/usr/bin/tito tag
2021-10-18 11:35:14 +02:00
Daniel Berteaud
74b3ba5928 Add Elasticsearch monitoring scripts 2021-10-18 11:34:53 +02:00
Daniel Berteaud
ad9b9b569a Updates and fixes in Zabbix templates 2021-09-22 18:06:08 +02:00
Daniel Berteaud
0ac983e410 Automatic commit of package [zabbix-agent-addons] release [0.2.150-1].
Created by command:

/usr/bin/tito tag
2021-07-16 10:15:36 +02:00
Daniel Berteaud
f0e704ce95 Do not count Unconfigured(good) drives as an error 2021-07-16 10:15:01 +02:00
Daniel Berteaud
1f7b7e86d8 Remove duplicated templates 2021-06-24 08:35:37 +02:00
Daniel Berteaud
9eb1706e7d Typo in template filename 2021-06-24 08:30:39 +02:00
Daniel Berteaud
02cec2e7e8 Update and provide more templates 2021-06-19 14:22:10 +02:00
Daniel Berteaud
6b2c293acd Update and add more Zabbix templates 2021-06-19 14:03:49 +02:00
Daniel Berteaud
360895affb Remove health and capacity sanoid checks from discovery
This is handled by check_zfs anyway
2021-06-09 18:22:32 +02:00
Daniel Berteaud
2c0ea77e90 Automatic commit of package [zabbix-agent-addons] release [0.2.149-1].
Created by command:

/usr/bin/tito tag
2021-05-27 19:44:45 +02:00
Daniel Berteaud
ca549543c8 Support Debian lib path for BackupPC 2021-05-27 19:44:26 +02:00
Daniel Berteaud
4a97f44925 Automatic commit of package [zabbix-agent-addons] release [0.2.148-1].
Created by command:

/usr/bin/tito tag
2021-02-17 10:31:59 +01:00
Daniel Berteaud
c1f20d9388 Fix zfs pool monitoring when a pool has errors
We must drop stderr so messages like permission errors when getting error counts do not pollute the JSON output
2021-02-17 10:30:59 +01:00
Daniel Berteaud
0a6c9cdd62 Alert only if not samba monitoring for 25min (instead of 15) 2021-01-14 19:29:52 +01:00
Daniel Berteaud
b99e6ca69d Automatic commit of package [zabbix-agent-addons] release [0.2.147-1].
Created by command:

/usr/bin/tito tag
2021-01-14 19:14:10 +01:00
Daniel Berteaud
5ff7e2214c [check_samba_dc_sudo] Fix typo with GPO listing 2021-01-14 19:13:53 +01:00
Daniel Berteaud
91ee1f7648 Automatic commit of package [zabbix-agent-addons] release [0.2.146-1].
Created by command:

/usr/bin/tito tag
2021-01-14 18:55:49 +01:00
Daniel Berteaud
b780d464dd [check_samba_sudo] Update default audit log file path, and drop errors from samba-tool 2021-01-14 18:55:16 +01:00
Daniel Berteaud
0a32289a83 Automatic commit of package [zabbix-agent-addons] release [0.2.145-1].
Created by command:

/usr/bin/tito tag
2021-01-14 16:00:17 +01:00
Daniel Berteaud
a0b003c219 Add perl(File::ReadBackwards) dependency 2021-01-14 15:59:53 +01:00
Daniel Berteaud
b0804d6963 Automatic commit of package [zabbix-agent-addons] release [0.2.144-1].
Created by command:

/usr/bin/tito tag
2021-01-14 15:42:02 +01:00
Daniel Berteaud
acd74aa1db Optimize samba audit_auth log parsing by reading from the tail of the file
GLPI #47700
2021-01-14 15:40:22 +01:00
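A minimal Perl sketch of the tail-reading approach this commit describes (the perl(File::ReadBackwards) dependency is added in the commit above); the log path, time window and line format are illustrative only.

    use strict;
    use warnings;
    use File::ReadBackwards;

    my $log    = '/var/log/samba/audit_auth.log';   # illustrative path
    my $oldest = time() - 300;                      # only look at the last 5 minutes
    my $ok     = 0;

    my $bw = File::ReadBackwards->new($log) or die "Cannot open $log: $!";
    while (defined(my $line = $bw->readline)) {
        # hypothetical "<epoch> <status> ..." format; adapt to the real audit_auth entries
        my ($ts, $status) = split /\s+/, $line;
        last if !defined $ts or $ts !~ /^\d+$/ or $ts < $oldest;   # older than the window: stop reading
        $ok++ if defined $status and $status eq 'NT_STATUS_OK';
    }
    print "$ok\n";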
Daniel Berteaud
d0ee0ee54c Automatic commit of package [zabbix-agent-addons] release [0.2.143-1].
Created by command:

/usr/bin/tito tag
2021-01-13 19:31:38 +01:00
Daniel Berteaud
f40e85abee Update BackupPC template 2021-01-13 19:31:27 +01:00
Daniel Berteaud
13e4669fe7 Automatic commit of package [zabbix-agent-addons] release [0.2.142-1].
Created by command:

/usr/bin/tito tag
2021-01-13 17:48:56 +01:00
Daniel Berteaud
bf190465bf Modernize lvm monitoring scripts
support getting all the values at once in a JSON structure, support VG monitoring, and various other cleanups

GLPI #47654
2021-01-13 17:48:01 +01:00
Daniel Berteaud
b95cca848c Don't catch stderr for vgdisplay commands
Prevent useless messages from polluting the output
2021-01-13 17:41:55 +01:00
Daniel Berteaud
6fbfb70ae0 Automatic commit of package [zabbix-agent-addons] release [0.2.141-1].
Created by command:

/usr/bin/tito tag
2021-01-12 19:24:45 +01:00
Daniel Berteaud
33c03ebe45 Small fixes in check_samba_dc (skip unparsable logs, and handle messages with NT_STATUS_NO_SUCH_USER) 2021-01-12 19:23:28 +01:00
Daniel Berteaud
67a3da6404 Automatic commit of package [zabbix-agent-addons] release [0.2.140-1].
Created by command:

/usr/bin/tito tag
2021-01-11 22:54:17 +01:00
Daniel Berteaud
628f2a5e2e Add general stats to BackupPC monitoring script 2021-01-11 22:53:49 +01:00
Daniel Berteaud
eaad6bd516 Automatic commit of package [zabbix-agent-addons] release [0.2.139-1].
Created by command:

/usr/bin/tito tag
2021-01-11 16:09:34 +01:00
Daniel Berteaud
1c2c4e8377 Add OU discovery to samba monitoring
GLPI #47623
2021-01-11 16:09:09 +01:00
Daniel Berteaud
4dedf3dc30 Automatic commit of package [zabbix-agent-addons] release [0.2.138-1].
Created by command:

/usr/bin/tito tag
2021-01-11 09:38:56 +01:00
Daniel Berteaud
089b418416 Add missing Samba application name for aggregated items 2021-01-11 09:38:36 +01:00
Daniel Berteaud
19ec29e577 Minor fixes for samba script and template 2021-01-11 09:28:29 +01:00
Daniel Berteaud
2c0eb2e854 Automatic commit of package [zabbix-agent-addons] release [0.2.137-1].
Created by command:

/usr/bin/tito tag
2021-01-09 17:21:14 +01:00
Daniel Berteaud
7563ab8655 Add scripts and template to monitor Samba 4 DC
GLPI #47603
2021-01-09 17:20:43 +01:00
Daniel Berteaud
174f9a5cf5 Automatic commit of package [zabbix-agent-addons] release [0.2.136-1].
Created by command:

/usr/bin/tito tag
2021-01-08 13:05:06 +01:00
Daniel Berteaud
bc0edccfa0 Add guest counter for PVE cluster and node
Also update the Zabbix template to handle those new counters

GLPI #47604
2021-01-08 13:04:22 +01:00
Daniel Berteaud
43914cd61d Automatic commit of package [zabbix-agent-addons] release [0.2.135-1].
Created by command:

/usr/bin/tito tag
2020-12-17 16:30:37 +01:00
Daniel Berteaud
ebfff4b43d Update Template_App_MySQL 2020-12-17 16:30:22 +01:00
Daniel Berteaud
8d8c90e31a Update Template_App_ZFS 2020-12-17 16:28:57 +01:00
Daniel Berteaud
831b6d9be1 Automatic commit of package [zabbix-agent-addons] release [0.2.134-1].
Created by command:

/usr/bin/tito tag
2020-12-01 11:39:54 +01:00
Daniel Berteaud
10660aedf6 Possibility to check certificate
with an available --no-cert-check option to disable it. Auto-disabled if connecting on localhost
2020-12-01 11:38:58 +01:00
Daniel Berteaud
b2076e496f Automatic commit of package [zabbix-agent-addons] release [0.2.133-1].
Created by command:

/usr/bin/tito tag
2020-11-07 21:09:19 +01:00
Daniel Berteaud
dd8fe69af8 Add perl in BuildReq for el8 2020-11-07 19:36:12 +01:00
Daniel Berteaud
be7b717c12 Automatic commit of package [zabbix-agent-addons] release [0.2.132-1].
Created by command:

/usr/bin/tito tag
2020-10-26 19:15:37 +01:00
Daniel Berteaud
ecb831f05a Run upsc commands with 2>/dev/null
With default conf, it'll print "Init SSL without certificate database" which we're not interested in in this context (and would break the Zabbix item)
2020-10-26 19:12:49 +01:00
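A minimal Perl sketch of the redirection this commit adds: with the default NUT configuration upsc prints "Init SSL without certificate database" on stderr, so stderr is discarded to keep the item value clean (UPS name and item are illustrative).

    use strict;
    use warnings;

    my $ups   = 'myups@localhost';                    # illustrative UPS name
    my $value = `upsc $ups ups.load 2>/dev/null`;     # drop the SSL warning printed on stderr
    chomp $value;
    print "$value\n";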
Daniel Berteaud
cf1dbfbba0 IPMI sensors can have / and - in their name 2020-10-26 18:22:54 +01:00
Daniel Berteaud
e8f9e75886 Automatic commit of package [zabbix-agent-addons] release [0.2.131-1].
Created by command:

/usr/bin/tito tag
2020-10-22 20:42:53 +02:00
Daniel Berteaud
cd790587e6 Don't return garbage in mpath discovery if command failed 2020-10-22 20:42:26 +02:00
Daniel Berteaud
975e8f1ccc Automatic commit of package [zabbix-agent-addons] release [0.2.130-1].
Created by command:

/usr/bin/tito tag
2020-10-20 18:44:26 +02:00
Daniel Berteaud
e76a5f4333 Add App_Multipath template 2020-10-20 18:44:14 +02:00
Daniel Berteaud
9130c512ea Add Linux_Server template 2020-10-20 18:42:43 +02:00
Daniel Berteaud
6034c7073f Automatic commit of package [zabbix-agent-addons] release [0.2.129-1].
Created by command:

/usr/bin/tito tag
2020-10-20 18:18:41 +02:00
Daniel Berteaud
c0e1aa14ac Add scripts to discover and check multipath devices 2020-10-20 18:18:17 +02:00
Daniel Berteaud
cdbbc17ce6 Automatic commit of package [zabbix-agent-addons] release [0.2.128-1].
Created by command:

/usr/bin/tito tag
2020-09-29 15:25:09 +02:00
Daniel Berteaud
53b83f2d0e Use MAC of device if no name is defined in Unifi device discovery 2020-09-29 15:24:46 +02:00
Daniel Berteaud
78a6b1a2e4 Automatic commit of package [zabbix-agent-addons] release [0.2.127-1].
Created by command:

/usr/bin/tito tag
2020-09-23 20:01:16 +02:00
Daniel Berteaud
4777b8274f Update scripts to work with ssacli (in addition to hpacucli) 2020-09-23 20:00:46 +02:00
Daniel Berteaud
4eb376b929 Automatic commit of package [zabbix-agent-addons] release [0.2.126-1].
Created by command:

/usr/bin/tito tag
2020-09-04 12:38:21 +02:00
Daniel Berteaud
6467da92c4 Add some compatibility for older MySQL servers 2020-09-04 12:38:07 +02:00
Daniel Berteaud
31c859df0c Automatic commit of package [zabbix-agent-addons] release [0.2.125-1].
Created by command:

/usr/bin/tito tag
2020-09-01 17:52:51 +02:00
Daniel Berteaud
ddff2419a2 Allow empty --defaults opt for check_mysql_sudo 2020-09-01 17:52:24 +02:00
Daniel Berteaud
5bac8a2296 Automatic commit of package [zabbix-agent-addons] release [0.2.124-1].
Created by command:

/usr/bin/tito tag
2020-08-31 17:38:24 +02:00
Daniel Berteaud
6633658811 Update Template_App_MySQL 2020-08-31 17:38:06 +02:00
Daniel Berteaud
4b051b87e8 Automatic commit of package [zabbix-agent-addons] release [0.2.123-1].
Created by command:

/usr/bin/tito tag
2020-08-31 15:25:38 +02:00
Daniel Berteaud
0b4fa6425c check_mysql needs sudo permissions 2020-08-31 15:25:25 +02:00
Daniel Berteaud
8ac978a1d5 Automatic commit of package [zabbix-agent-addons] release [0.2.122-1].
Created by command:

/usr/bin/tito tag
2020-08-31 15:01:16 +02:00
Daniel Berteaud
f20de75884 Add MySQL monitoring script and template 2020-08-31 15:00:48 +02:00
Daniel Berteaud
eaf262cb24 Add Template_Vhost 2020-06-29 23:22:21 +02:00
Daniel Berteaud
40b92cc061 Add templates for Windows (minimal and server) 2020-06-08 09:31:45 +02:00
Daniel Berteaud
a02b5c780b Add /usr/local/BackupPC/lib as lib dir for BackupPC scripts
Needed for manual install, from source
2020-05-27 16:50:50 +02:00
Daniel Berteaud
844c2e3a3e Automatic commit of package [zabbix-agent-addons] release [0.2.121-1].
Created by command:

/usr/bin/tito tag
2020-05-20 16:29:53 +02:00
Daniel Berteaud
f6b45619ec Do not rely on distrib version to check if --output-format is needed for check_pve_sudo 2020-05-20 16:29:22 +02:00
Daniel Berteaud
24ae926c23 Automatic commit of package [zabbix-agent-addons] release [0.2.120-1].
Created by command:

/usr/bin/tito tag
2020-04-03 14:38:39 +02:00
Daniel Berteaud
b8f7e1ae6d Fix mdadm when we have spares
Spares should be counted as active devices
2020-04-03 14:38:12 +02:00
Daniel Berteaud
8a22c0bccc Automatic commit of package [zabbix-agent-addons] release [0.2.119-1].
Created by command:

/usr/bin/tito tag
2020-03-03 15:30:19 +01:00
Daniel Berteaud
ed2884b74f Better detection of smart capable drives 2020-03-03 15:29:57 +01:00
Daniel Berteaud
f50e4f34c0 Automatic commit of package [zabbix-agent-addons] release [0.2.118-1].
Created by command:

/usr/bin/tito tag
2020-03-02 16:29:51 +01:00
Daniel Berteaud
cc9a8bf320 Update Template_App_PVE_Cluster 2020-03-02 16:29:35 +01:00
Daniel Berteaud
bcf3303f6d Automatic commit of package [zabbix-agent-addons] release [0.2.117-1].
Created by command:

/usr/bin/tito tag
2020-03-02 15:25:41 +01:00
Daniel Berteaud
bbcef9d0cf Add basic SNMP templates 2020-03-02 15:25:09 +01:00
Daniel Berteaud
a8a6bf8960 Add Template_App_Unifi 2020-03-02 15:24:44 +01:00
Daniel Berteaud
a9b2143ae5 Add Template_OS_PfSense2 2020-03-02 15:09:13 +01:00
Daniel Berteaud
c70d1027da Add Template_Ping 2020-03-02 15:08:30 +01:00
Daniel Berteaud
0805013d6a Fix cache when the same resource is queried with different options 2020-03-02 15:05:47 +01:00
Daniel Berteaud
4c8ada96a8 Remove debug statement in util_populate_pve_cache 2020-03-02 15:04:46 +01:00
Daniel Berteaud
f252fabc78 Automatic commit of package [zabbix-agent-addons] release [0.2.116-1].
Created by command:

/usr/bin/tito tag
2020-03-02 12:27:32 +01:00
Daniel Berteaud
786bbf05ea Default to accept cached value up to 5 min old for check_pve_sudo 2020-03-02 12:26:49 +01:00
Daniel Berteaud
58ba6e2624 Automatic commit of package [zabbix-agent-addons] release [0.2.115-1].
Created by command:

/usr/bin/tito tag
2020-03-02 12:24:28 +01:00
Daniel Berteaud
25f165ecc9 Add a script to populate check_pve_sudo cache 2020-03-02 12:23:56 +01:00
Daniel Berteaud
c799f083f5 Enhance check_pve_sudo with a local cache support to speed up monitoring 2020-03-02 12:20:32 +01:00
Daniel Berteaud
574a37684b Automatic commit of package [zabbix-agent-addons] release [0.2.114-1].
Created by command:

/usr/bin/tito tag
2020-02-25 18:35:21 +01:00
Daniel Berteaud
3cb67b1b67 Merge branch 'master' of ssh://gitea.fws.fr:3222/fws/zabbix-agent-addons 2020-02-25 18:35:07 +01:00
Daniel Berteaud
661290e473 Automatic commit of package [zabbix-agent-addons] release [0.2.112-1].
Created by command:

/usr/bin/tito tag
2020-02-25 18:33:50 +01:00
Daniel Berteaud
30a4e4a15d Skip Core X temp sensors
No need to monitor them, as long as we already monitor the CPU temp
2020-02-25 18:33:17 +01:00
Daniel Berteaud
3af623b088 Automatic commit of package [zabbix-agent-addons] release [0.2.112-1].
Created by command:

/usr/bin/tito tag
2020-02-19 11:34:34 +01:00
Daniel Berteaud
b19b1dfa96 Drop stderr for upsc commands
On Debian-based systems, upsc prints "Init SSL without certificate database" which prevents Zabbix from getting the value
2020-02-19 11:33:35 +01:00
Daniel Berteaud
35f1623c5f Automatic commit of package [zabbix-agent-addons] release [0.2.111-1].
Created by command:

/usr/bin/tito tag
2020-02-17 08:58:41 +01:00
Daniel Berteaud
93ba949e65 Update ZFS and BackupPC templates 2020-02-17 08:58:26 +01:00
Daniel Berteaud
4def77e07f Automatic commit of package [zabbix-agent-addons] release [0.2.110-1].
Created by command:

/usr/bin/tito tag
2020-02-10 23:11:41 +01:00
Daniel Berteaud
b91713591b Fix a typo in ZabbixSizeTooSmallFactor conf 2020-02-10 23:11:19 +01:00
Daniel Berteaud
bceb5a6a4f Automatic commit of package [zabbix-agent-addons] release [0.2.109-1].
Created by command:

/usr/bin/tito tag
2020-02-05 23:00:02 +01:00
Daniel Berteaud
31a443aa7e Don't skip local node in PVE nodes discovery 2020-02-05 22:59:24 +01:00
Daniel Berteaud
493df17e76 Automatic commit of package [zabbix-agent-addons] release [0.2.108-1].
Created by command:

/usr/bin/tito tag
2020-01-22 17:55:19 +01:00
Daniel Berteaud
eb817e2f7b Only skip RAID volume checks when in HBA mode, not physical disk checks 2020-01-22 17:17:53 +01:00
Daniel Berteaud
a38b7ab4b2 Declare variable in the correct scope for HBA mode detection 2020-01-22 16:23:26 +01:00
Daniel Berteaud
0135771c7a Handle MegaRAID controllers in HBA/JBOD mode (skip RAID checks) 2020-01-22 16:18:49 +01:00
Daniel Berteaud
376db5167c Use head -1 to be sure to get a single value for sensors
Some can report 2 values, and Zabbix won't be able to parse it
2020-01-22 14:27:57 +01:00
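A minimal Perl sketch of the "single value" safeguard this commit describes, assuming an lm_sensors style label; sensors that report two lines for the same label would otherwise give Zabbix an unparsable value.

    use strict;
    use warnings;

    my $label = 'CPU Temperature';                                        # illustrative label
    my $line  = `sensors 2>/dev/null | grep -F '$label' | head -1` // ''; # keep only the first match
    my ($temp) = $line =~ /([+-]?\d+(?:\.\d+)?)/;                         # first numeric value on the line
    print defined $temp ? "$temp\n" : "ZBX_NOTSUPPORTED\n";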
Daniel Berteaud
8180aa2897 Automatic commit of package [zabbix-agent-addons] release [0.2.107-1].
Created by command:

/usr/bin/tito tag
2020-01-16 18:34:15 +01:00
Daniel Berteaud
7955b547c5 Add Zabbix template for Squid 2020-01-16 18:34:03 +01:00
Daniel Berteaud
cf4d45c0dc Automatic commit of package [zabbix-agent-addons] release [0.2.106-1].
Created by command:

/usr/bin/tito tag
2020-01-16 18:08:28 +01:00
Daniel Berteaud
adc4b5dc23 Remove URI from UserParam args for squid
passing a URI would require allowing special characters in key args
2020-01-16 18:07:44 +01:00
Daniel Berteaud
657c2a0ff0 Automatic commit of package [zabbix-agent-addons] release [0.2.105-1].
Created by command:

/usr/bin/tito tag
2019-12-17 08:52:14 +01:00
Daniel Berteaud
e080780248 Fix reading sizeNew from last backup (except when link hasn't run yet) 2019-12-17 08:51:45 +01:00
Daniel Berteaud
058aad1264 Automatic commit of package [zabbix-agent-addons] release [0.2.104-1].
Created by command:

/usr/bin/tito tag
2019-12-15 20:09:35 +01:00
Daniel Berteaud
8c25777182 Disable vfs.dev.discovery in default conf 2019-12-15 20:09:14 +01:00
Daniel Berteaud
f190314a9a Automatic commit of package [zabbix-agent-addons] release [0.2.103-1].
Created by command:

/usr/bin/tito tag
2019-12-15 19:14:33 +01:00
Daniel Berteaud
d6e5240cf7 Set min backup size to 0 in template 2019-12-15 19:14:04 +01:00
Daniel Berteaud
7a02253d56 Automatic commit of package [zabbix-agent-addons] release [0.2.102-1].
Created by command:

/usr/bin/tito tag
2019-12-15 17:20:59 +01:00
Daniel Berteaud
abfa7433e1 Fix key name for enabled value 2019-12-15 17:20:34 +01:00
Daniel Berteaud
7f06d7b2f5 Automatic commit of package [zabbix-agent-addons] release [0.2.101-1].
Created by command:

/usr/bin/tito tag
2019-12-15 17:11:16 +01:00
Daniel Berteaud
d49e7816d5 Init complete JSON objects with default values in check_backuppc_sudo 2019-12-15 17:10:46 +01:00
Daniel Berteaud
7ded61c8af Remove unused variables 2019-12-15 17:08:15 +01:00
Daniel Berteaud
14853e81b4 Automatic commit of package [zabbix-agent-addons] release [0.2.100-1].
Created by command:

/usr/bin/tito tag
2019-12-15 16:59:49 +01:00
Daniel Berteaud
eb0ada1a69 Only subtract $new_size_of_last_full once 2019-12-15 16:59:26 +01:00
Daniel Berteaud
345cc4d6f3 Automatic commit of package [zabbix-agent-addons] release [0.2.99-1].
Created by command:

/usr/bin/tito tag
2019-12-13 23:46:22 +01:00
Daniel Berteaud
ca2f76e842 Fix when a host has a single backup with 0 new file size
Which can happen if you deleted backups manually
2019-12-13 23:45:37 +01:00
Daniel Berteaud
a83b7efb96 Automatic commit of package [zabbix-agent-addons] release [0.2.98-1].
Created by command:

/usr/bin/tito tag
2019-12-13 23:28:25 +01:00
Daniel Berteaud
810f192184 Fix backups total size computation when there's only one full 2019-12-13 23:27:39 +01:00
Daniel Berteaud
bcf313009b Automatic commit of package [zabbix-agent-addons] release [0.2.97-1].
Created by command:

/usr/bin/tito tag
2019-12-13 18:30:34 +01:00
Daniel Berteaud
55642ebede Include Zabbix template to monitor BackupPC 2019-12-13 18:30:14 +01:00
Daniel Berteaud
5603e24330 Automatic commit of package [zabbix-agent-addons] release [0.2.96-1].
Created by command:

/usr/bin/tito tag
2019-12-13 16:43:41 +01:00
Daniel Berteaud
08f7decaf2 Enhanced stats for BackupPC's entity 2019-12-13 16:43:15 +01:00
Daniel Berteaud
ef992661a4 Automatic commit of package [zabbix-agent-addons] release [0.2.95-1].
Created by command:

/usr/bin/tito tag
2019-12-11 19:10:40 +01:00
Daniel Berteaud
ba0a868fd4 Wait for BackupPC_link to run before we take new sizes in our stat
So it won't send wrong comp_ratio or size values
2019-12-11 19:09:50 +01:00
Daniel Berteaud
847efa1243 Automatic commit of package [zabbix-agent-addons] release [0.2.94-1].
Created by command:

/usr/bin/tito tag
2019-12-11 18:42:49 +01:00
Daniel Berteaud
f51a6a930d Fix BackupPC script when BackupPC_link is waiting for the nightly cleanup to finish 2019-12-11 18:42:20 +01:00
Daniel Berteaud
692517ba87 Automatic commit of package [zabbix-agent-addons] release [0.2.93-1].
Created by command:

/usr/bin/tito tag
2019-11-29 18:41:46 +01:00
Daniel Berteaud
096253e5dc Don't use autoloader in our forked Linux::LVM 2019-11-29 18:37:36 +01:00
Daniel Berteaud
5be6520118 Don't require Linux::LVM anymore 2019-11-29 18:35:09 +01:00
Daniel Berteaud
1b89210fec Replace Linux::LVM occurrences with Zabbix::Agent::Addons::LVM 2019-11-29 18:34:10 +01:00
Daniel Berteaud
b108ac7a82 Bundle a fork of Linux::LVM with support for LVM thin pools
GLPI#39889
2019-11-29 18:31:45 +01:00
Daniel Berteaud
bf787e3670 Automatic commit of package [zabbix-agent-addons] release [0.2.92-1].
Created by command:

/usr/bin/tito tag
2019-11-27 09:34:25 +01:00
Daniel Berteaud
e30dcda6da Better compat with 4.4 vfs.dev.discovery (and use lsblk to get the list of dev if available) 2019-11-27 09:29:10 +01:00
Daniel Berteaud
db939d3e92 Automatic commit of package [zabbix-agent-addons] release [0.2.91-1].
Created by command:

/usr/bin/tito tag
2019-11-26 18:14:26 +01:00
Daniel Berteaud
85c1cbc105 Add DEVNAME macro for vfs.dev.discovery to ease transition to 4.4
4.4 includes vfs.dev.discovery but with different macro names (DEVNAME instead of BLOCKDEV)
2019-11-26 18:13:29 +01:00
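A minimal Perl sketch of the compatibility change this commit describes: the discovery output carries both the legacy {#BLOCKDEV} macro and the {#DEVNAME} macro used by Zabbix 4.4's built-in vfs.dev.discovery (the device list is illustrative).

    use strict;
    use warnings;
    use JSON;

    my @devices = ('sda', 'sdb', 'nvme0n1');   # illustrative; the script gets these from lsblk or /sys/block
    my @data    = map { { '{#BLOCKDEV}' => $_, '{#DEVNAME}' => $_ } } @devices;
    print to_json({ data => \@data });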
Daniel Berteaud
772770bfc5 Minor update in ZFS template 2019-10-29 10:48:23 +01:00
Daniel Berteaud
0adedc6b69 Automatic commit of package [zabbix-agent-addons] release [0.2.90-1].
Created by command:

/usr/bin/tito tag
2019-10-20 18:17:09 +02:00
Daniel Berteaud
05624ee2e1 Fix some unifi stats for uap/usw in recent unifi versions 2019-10-20 18:16:04 +02:00
Daniel Berteaud
331090fe0c Automatic commit of package [zabbix-agent-addons] release [0.2.89-1].
Created by command:

/usr/bin/tito tag
2019-10-14 09:24:53 +02:00
Daniel Berteaud
2251656bfc Add Zabbix template for GlusterFS 2019-10-11 18:47:58 +02:00
Daniel Berteaud
82a528dfe0 Add Zabbix template for DRBD 2019-10-11 18:40:58 +02:00
Daniel Berteaud
87d6541c89 Add Zabbix template for Proxmox Mail Gateway 2019-10-11 18:36:44 +02:00
Daniel Berteaud
1775ef9ae3 Add template to monitor a PVE cluster 2019-10-11 18:07:35 +02:00
Daniel Berteaud
eef0d74995 ZFS ARC low hit ratio for data and global are calculated for 1h 2019-10-11 18:05:36 +02:00
Daniel Berteaud
4584d988f3 Automatic commit of package [zabbix-agent-addons] release [0.2.88-1].
Created by command:

/usr/bin/tito tag
2019-10-11 17:12:06 +02:00
Daniel Berteaud
963427dc60 Add Zabbix template for ZFS 2019-10-11 17:11:49 +02:00
Daniel Berteaud
3cee80e348 Automatic commit of package [zabbix-agent-addons] release [0.2.87-1].
Created by command:

/usr/bin/tito tag
2019-10-11 11:41:36 +02:00
Daniel Berteaud
21fcd4949e Enhance ZFS monitoring scripts to retrieve ARC stats 2019-10-11 11:41:05 +02:00
Daniel Berteaud
020e7a2818 Send an empty data array when Zimbra is not installed 2019-10-08 11:14:44 +02:00
Daniel Berteaud
d64a4954cd Automatic commit of package [zabbix-agent-addons] release [0.2.86-1].
Created by command:

/usr/bin/tito tag
2019-10-01 08:13:24 +02:00
Daniel Berteaud
28d794aba6 Fix pve script when no net or disk stats are available 2019-10-01 08:12:52 +02:00
Daniel Berteaud
36c0a38922 Automatic commit of package [zabbix-agent-addons] release [0.2.85-1].
Created by command:

/usr/bin/tito tag
2019-09-21 16:33:46 +02:00
Daniel Berteaud
b5315cea86 Check $sanoidmon is defined before checking its value 2019-09-21 16:33:28 +02:00
Daniel Berteaud
5493504e77 Automatic commit of package [zabbix-agent-addons] release [0.2.84-1].
Created by command:

/usr/bin/tito tag
2019-09-21 16:24:13 +02:00
Daniel Berteaud
c7a2c5d8ce Fix var name in disco_zfs 2019-09-21 16:24:01 +02:00
Daniel Berteaud
2971c73063 Automatic commit of package [zabbix-agent-addons] release [0.2.83-1].
Created by command:

/usr/bin/tito tag
2019-09-21 16:10:59 +02:00
Daniel Berteaud
dc8bef75ed Better sanoid monitoring integration 2019-09-21 16:10:40 +02:00
Daniel Berteaud
cd9ee20c02 Automatic commit of package [zabbix-agent-addons] release [0.2.82-1].
Created by command:

/usr/bin/tito tag
2019-09-20 12:54:26 +02:00
Daniel Berteaud
2a0c6b2ad9 Remove trailing x for compressratio with ZoL < 0.8 2019-09-20 12:54:01 +02:00
Daniel Berteaud
e1090b125b Automatic commit of package [zabbix-agent-addons] release [0.2.81-1].
Created by command:

/usr/bin/tito tag
2019-09-20 12:41:06 +02:00
Daniel Berteaud
ad7c36b6de Revert to suffix conversion for ZFS error count
zpool status -p is only supported since ZoL 0.8.0, so, to monitor older servers, we need to do this without -p
2019-09-20 12:39:51 +02:00
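A minimal Perl sketch of the suffix conversion this commit reverts to, assuming decimal multipliers: without `zpool status -p` (ZoL >= 0.8 only), error counters may come back as "1.2K" or "3M" and must be expanded to plain integers before being handed to Zabbix.

    use strict;
    use warnings;

    my %mult = ( K => 1e3, M => 1e6, G => 1e9 );   # assumed decimal multipliers

    sub errors_to_int {
        my $val = shift;
        return int($1 * $mult{$2}) if $val =~ /^([\d.]+)([KMG])$/;
        return $val;
    }

    print errors_to_int('1.2K'), "\n";   # 1200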
Daniel Berteaud
ea789a986c Automatic commit of package [zabbix-agent-addons] release [0.2.80-1].
Created by command:

/usr/bin/tito tag
2019-09-20 10:47:10 +02:00
Daniel Berteaud
f58df33f3b Rewrite ZFS monitoring from scratch
Now support discovery of datasets, a lot more metrics and sanoid snapshots
2019-09-20 10:39:07 +02:00
Daniel Berteaud
285d61c51c Set info in the data element for Zimbra discovery
Because when the response is empty, Zabbix will mark the item as unsupported if it can't find the data element
2019-09-19 18:38:39 +02:00
Daniel Berteaud
dd98d6d2b6 Automatic commit of package [zabbix-agent-addons] release [0.2.79-1].
Created by command:

/usr/bin/tito tag --use-version 0.2.79 --use-release 1%{?dist} --no-auto-changelog
2019-09-13 15:36:42 +02:00
Daniel Berteaud
f4dda266d6 Automatic commit of package [zabbix-agent-addons] release [0.2.79-0.beta3].
Created by command:

/usr/bin/tito tag --use-version 0.2.79 --use-release 0.beta3%{?dist} --no-auto-changelog
2019-09-13 15:34:05 +02:00
Daniel Berteaud
61f56ef30e Check zmconfigd service status 2019-09-13 15:33:52 +02:00
Daniel Berteaud
c9851a2b9d Automatic commit of package [zabbix-agent-addons] release [0.2.79-0.beta2].
Created by command:

/usr/bin/tito tag --use-version 0.2.79 --use-release 0.beta2%{?dist} --no-auto-changelog
2019-09-13 15:20:21 +02:00
Daniel Berteaud
bb9a07d0c0 Fix var name in check_zimbra_sudo 2019-09-13 15:19:49 +02:00
Daniel Berteaud
007b2f5ea5 Automatic commit of package [zabbix-agent-addons] release [0.2.79-0.beta1].
Created by command:

/usr/bin/tito tag --use-release 0.beta1%{?dist} --no-auto-changelog
2019-09-13 15:10:18 +02:00
Daniel Berteaud
9383ed9fb0 Fix passing arg to zimbra.status 2019-09-13 15:09:48 +02:00
Daniel Berteaud
b275bf549a Automatic commit of package [zabbix-agent-addons] release [0.2.78-0.beta1].
Created by command:

/usr/bin/tito tag --use-release 0.beta1%{?dist}
2019-09-13 15:07:14 +02:00
Daniel Berteaud
ec110483dd Add simple Zabbix service status scripts 2019-09-13 15:06:35 +02:00
Daniel Berteaud
9de059cdd2 Automatic commit of package [zabbix-agent-addons] release [0.2.77-1].
Created by command:

/usr/bin/tito tag
2019-09-03 12:23:58 +02:00
Daniel Berteaud
c1b19f1651 Skip self PVE node 2019-09-03 12:06:52 +02:00
Daniel Berteaud
e383ffe0fb Automatic commit of package [zabbix-agent-addons] release [0.2.76-1].
Created by command:

/usr/bin/tito tag
2019-07-30 14:02:06 +02:00
Daniel Berteaud
1c8c99e9c9 Add support for some NVMe temp sensors
Found on OVH's Advanced servers for example
2019-07-30 14:01:35 +02:00
Daniel Berteaud
a66e9bb827 Fix when running on Debian buster
Which fails with RC 25 when using File::Spec devnull
2019-07-30 13:59:42 +02:00
Daniel Berteaud
93e207423a Automatic commit of package [zabbix-agent-addons] release [0.2.75-1].
Created by command:

/usr/bin/tito tag --use-version 0.2.75 --use-release 1%{?dist} --no-auto-changelog
2019-05-21 15:38:03 +02:00
Daniel Berteaud
b85a2a653f vdo scripts must use sudo 2019-05-21 15:03:46 +02:00
Daniel Berteaud
7f89a211ea Automatic commit of package [zabbix-agent-addons] release [0.2.75-0.beta1].
Created by command:

/usr/bin/tito tag --use-release 0.beta1%{?dist}
2019-05-21 12:11:23 +02:00
Daniel Berteaud
6c012aece0 Add basic scripts to monitor VDO volumes 2019-05-21 11:57:45 +02:00
Daniel Berteaud
8f69d132e8 Automatic commit of package [zabbix-agent-addons] release [0.2.74-1].
Created by command:

/usr/bin/tito tag
2019-04-16 12:06:40 +02:00
Daniel Berteaud
a9e468b2e5 Don't fail if Statistics::Descriptive doesn't support quantile 2019-04-16 12:06:12 +02:00
Daniel Berteaud
8d9a64713c Automatic commit of package [zabbix-agent-addons] release [0.2.73-1].
Created by command:

/usr/bin/tito tag --use-release 1%{?dist}
2019-04-15 16:44:42 +02:00
Daniel Berteaud
14164177cb More work on BackupPC's monitoring scripts 2019-04-15 16:43:56 +02:00
Daniel Berteaud
2545758742 Automatic commit of package [zabbix-agent-addons] release [0.2.72-1].
Created by command:

/usr/bin/tito tag
2019-04-04 19:20:47 +02:00
Daniel Berteaud
5e01f9ee2d Fix reporting MaxXferError 2019-04-04 19:20:32 +02:00
Daniel Berteaud
9668c8eff2 Automatic commit of package [zabbix-agent-addons] release [0.2.71-1].
Created by command:

/usr/bin/tito tag
2019-04-04 19:15:29 +02:00
Daniel Berteaud
58a1442b67 Fix a typo in check_backuppc_sudo 2019-04-04 19:15:19 +02:00
Daniel Berteaud
8cd2c874c7 Automatic commit of package [zabbix-agent-addons] release [0.2.70-1].
Created by command:

/usr/bin/tito tag
2019-04-04 19:09:13 +02:00
Daniel Berteaud
c3f9b4d919 Fix counting entity size 2019-04-04 19:08:55 +02:00
Daniel Berteaud
ca9bab4d12 Automatic commit of package [zabbix-agent-addons] release [0.2.69-1].
Created by command:

/usr/bin/tito tag
2019-04-04 17:13:48 +02:00
Daniel Berteaud
e51f0065a0 Don't count vm as an entity in BackupPC's entities discovery 2019-04-04 17:13:27 +02:00
Daniel Berteaud
ce3946344f Automatic commit of package [zabbix-agent-addons] release [0.2.68-1].
Created by command:

/usr/bin/tito tag
2019-04-04 15:32:38 +02:00
Daniel Berteaud
3e464cb4f7 Update BackupPC's discovery and monitoring scripts 2019-04-04 15:32:19 +02:00
Daniel Berteaud
d64fc82d3e Automatic commit of package [zabbix-agent-addons] release [0.2.67-1].
Created by command:

/usr/bin/tito tag
2019-04-03 17:45:08 +02:00
Daniel Berteaud
b180cd8a27 Add last_errors in backuppc JSON info 2019-04-03 17:44:47 +02:00
Daniel Berteaud
4a28023967 Update conf for BackupPC 2019-04-03 17:21:20 +02:00
Daniel Berteaud
27af572c34 Automatic commit of package [zabbix-agent-addons] release [0.2.66-1].
Created by command:

/usr/bin/tito tag
2019-04-03 16:58:02 +02:00
Daniel Berteaud
b32897cc3a Remove crazy and useless regex to exclude hosts from BackupPC 2019-04-03 16:57:32 +02:00
Daniel Berteaud
36a0844194 Automatic commit of package [zabbix-agent-addons] release [0.2.65-1].
Created by command:

/usr/bin/tito tag
2019-04-03 16:50:11 +02:00
Daniel Berteaud
26b5624421 Enhance backuppc reporting script
Including reporting the new file size, and sending all the info at once in JSON format
2019-04-03 16:49:20 +02:00
Daniel Berteaud
02d58e3e53 Some coding style updates 2019-04-03 16:28:30 +02:00
Daniel Berteaud
49f17304ad More compact BPCSTATUS (1/0 instead of enabled/disabled) 2019-04-03 16:26:14 +02:00
Daniel Berteaud
f85315bb30 Automatic commit of package [zabbix-agent-addons] release [0.2.64-1].
Created by command:

/usr/bin/tito tag --use-release 1%{?dist}
2019-02-20 19:54:55 +01:00
Daniel Berteaud
d381cb5d31 Also report the number in the deferred queue 2019-02-20 19:54:34 +01:00
Daniel Berteaud
263253fe87 Automatic commit of package [zabbix-agent-addons] release [0.2.63-1].
Created by command:

/usr/bin/tito tag --use-release 1%{?dist}
2019-02-20 19:42:04 +01:00
Daniel Berteaud
0a5f2ccce9 Report number of emails in the active and hold queues 2019-02-20 19:38:40 +01:00
Daniel Berteaud
93446b2dad Automatic commit of package [zabbix-agent-addons] release [0.2.62-1].
Created by command:

/usr/bin/tito tag
2019-01-19 12:13:23 +01:00
Daniel Berteaud
3bec374bb2 Add scripts to ping other hosts 2019-01-19 12:12:57 +01:00
Daniel Berteaud
499a46a8ba Automatic commit of package [zabbix-agent-addons] release [0.2.61-1].
Created by command:

/usr/bin/tito tag
2018-12-10 11:58:04 +01:00
Daniel Berteaud
54f98a5f27 Save cookies to a file so we don't have to login at every invocation
GLPI #34449
2018-12-10 11:57:23 +01:00
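A minimal Perl sketch of the cookie persistence this commit describes, with illustrative URL, credentials and file path: the session cookies are kept in a file between invocations so the controller login only happens when the saved session is missing or expired.

    use strict;
    use warnings;
    use LWP::UserAgent;
    use HTTP::Cookies;

    my $base = 'https://unifi.example.com:8443';                 # illustrative controller URL
    my $jar  = HTTP::Cookies->new(
        file     => '/tmp/zabbix_unifi_cookies.txt',             # illustrative cookie file
        autosave => 1,
    );
    my $ua = LWP::UserAgent->new(
        cookie_jar => $jar,
        ssl_opts   => { verify_hostname => 0, SSL_verify_mode => 0 },   # cf. the later --no-cert-check option
    );

    sub login {
        $ua->post("$base/api/login",
            Content_Type => 'application/json',
            Content      => '{"username":"monitor","password":"secret"}');   # illustrative credentials
    }

    my $resp = $ua->get("$base/api/self/sites");
    if (!$resp->is_success) {       # saved session expired or missing: log in once and retry
        login();
        $resp = $ua->get("$base/api/self/sites");
    }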
Daniel Berteaud
0862bfa4bc Automatic commit of package [zabbix-agent-addons] release [0.2.60-1].
Created by command:

/usr/bin/tito tag
2018-12-09 12:50:09 +01:00
Daniel Berteaud
8c062d526c Print ZBX_NOTSUPPORTED in case of API error
Prevent tons of error messages in Zabbix Server's logs
2018-12-09 12:48:58 +01:00
Daniel Berteaud
f9504c35e6 Automatic commit of package [zabbix-agent-addons] release [0.2.59-1].
Created by command:

/usr/bin/tito tag
2018-12-09 11:49:29 +01:00
Daniel Berteaud
d782fc24ab Fix ZBX_NOTSUPPORTED string in several scripts 2018-12-09 11:49:09 +01:00
Daniel Berteaud
62069067a1 Automatic commit of package [zabbix-agent-addons] release [0.2.58-1].
Created by command:

/usr/bin/tito tag --use-release 1%{?dist} --no-auto-changelog
2018-11-16 09:01:10 +01:00
Daniel Berteaud
9dfa189fe2 Add a param for squid discovery 2018-11-16 09:00:35 +01:00
Daniel Berteaud
6653922195 Automatic commit of package [zabbix-agent-addons] release [0.2.57-0.beta1].
Created by command:

/usr/bin/tito tag --use-release 0.beta1%{?dist}
2018-11-15 15:08:53 +01:00
Daniel Berteaud
ddab13d46b Add enhanced squid monitoring support 2018-11-15 15:08:17 +01:00
Daniel Berteaud
1e9d7a353d Automatic commit of package [zabbix-agent-addons] release [0.2.56-1].
Created by command:

/usr/bin/tito tag
2018-11-09 13:06:37 +01:00
Daniel Berteaud
ef7dbfa6b6 Add simple script for nginx (similar to httpd) 2018-11-09 13:06:18 +01:00
Daniel Berteaud
cf40711197 Automatic commit of package [zabbix-agent-addons] release [0.2.55-1].
Created by command:

/usr/bin/tito tag
2018-10-26 08:40:06 +02:00
Daniel Berteaud
511ccab917 Fix PVE storage monitoring
GLPI #33910
2018-10-26 08:39:45 +02:00
Daniel Berteaud
8f9d165906 Automatic commit of package [zabbix-agent-addons] release [0.2.54-1].
Created by command:

/usr/bin/tito tag --use-release 1%{?dist} --no-auto-changelog
2018-10-24 09:43:19 +02:00
Daniel Berteaud
2b67439739 Automatic commit of package [zabbix-agent-addons] release [0.2.53-1.beta1].
Created by command:

/usr/bin/tito tag
2018-10-24 09:43:02 +02:00
Daniel Berteaud
f3be79e879 Rework PMG monitoring scripts 2018-10-24 09:42:46 +02:00
Daniel Berteaud
6dc3060f97 Automatic commit of package [zabbix-agent-addons] release [0.2.52-0.beta1].
Created by command:

/usr/bin/tito tag --use-release 0.beta1%{?dist}
2018-10-18 11:42:48 +02:00
Daniel Berteaud
a806da0b58 Add very basic script for PMG monitoring 2018-10-18 11:31:58 +02:00
Daniel Berteaud
10498eee04 Automatic commit of package [zabbix-agent-addons] release [0.2.51-1].
Created by command:

/usr/bin/tito tag
2018-09-18 12:24:16 +02:00
Daniel Berteaud
5d12647141 check_unifi: also output satisfaction for stations 2018-09-18 12:23:52 +02:00
Daniel Berteaud
b057750202 Automatic commit of package [zabbix-agent-addons] release [0.2.50-1].
Created by command:

/usr/bin/tito tag
2018-09-17 08:18:48 +02:00
Daniel Berteaud
21e090cd27 Fix comparison with uninitialized value in check_unifi 2018-09-17 08:18:22 +02:00
Daniel Berteaud
5337376b54 Automatic commit of package [zabbix-agent-addons] release [0.2.49-1].
Created by command:

/usr/bin/tito tag
2018-09-15 14:06:02 +02:00
Daniel Berteaud
e0b80742cc Report number of unarchived alarms in check_unifi --unifi 2018-09-15 14:05:33 +02:00
Daniel Berteaud
c793235413 Automatic commit of package [zabbix-agent-addons] release [0.2.48-1].
Created by command:

/usr/bin/tito tag
2018-09-15 11:40:55 +02:00
Daniel Berteaud
a6ff020641 More fixes for AP monitoring in check_unifi 2018-09-15 11:40:40 +02:00
Daniel Berteaud
20a239fb4e Automatic commit of package [zabbix-agent-addons] release [0.2.47-1].
Created by command:

/usr/bin/tito tag
2018-09-15 11:30:38 +02:00
Daniel Berteaud
ce36e44074 Several fixes in check_unifi 2018-09-15 11:30:19 +02:00
Daniel Berteaud
bc4c22dbe5 Automatic commit of package [zabbix-agent-addons] release [0.2.46-1].
Created by command:

/usr/bin/tito tag --use-release 1%{?dist}
2018-09-14 12:40:28 +02:00
Daniel Berteaud
2c77d2a7c8 Enhance Unifi discovery and monitoring
Adding support for station monitoring
2018-09-14 12:39:55 +02:00
Daniel Berteaud
dc66533d42 Automatic commit of package [zabbix-agent-addons] release [0.2.45-0.beta2].
Created by command:

/usr/bin/tito tag --use-release 0.beta2%{?dist}
2018-09-13 18:45:25 +02:00
Daniel Berteaud
07efb8e1ef Fix check_unifi when value is defined but false 2018-09-13 18:44:48 +02:00
Daniel Berteaud
e2e617bc89 Automatic commit of package [zabbix-agent-addons] release [0.2.44-0.beta1].
Created by command:

/usr/bin/tito tag --use-release 0.beta1%{?dist}
2018-09-13 16:29:56 +02:00
Daniel Berteaud
30b6fdad55 Add scripts to monitor Unifi sites 2018-09-13 16:29:15 +02:00
Daniel Berteaud
b41cc0b2d6 Automatic commit of package [zabbix-agent-addons] release [0.2.43-1].
Created by command:

/usr/bin/tito tag
2018-08-21 13:04:04 +02:00
Daniel Berteaud
28b3ec4c26 Fix PVE scripts to work with new pvesh version 2018-08-21 13:03:33 +02:00
Daniel Berteaud
a991e64c64 Automatic commit of package [zabbix-agent-addons] release [0.2.42-1].
Created by command:

/usr/bin/tito tag
2018-07-23 19:53:20 +02:00
Daniel Berteaud
3f53c3a0fa Initialize an empty json object 2018-07-23 19:53:03 +02:00
Daniel Berteaud
d75c9abc81 Automatic commit of package [zabbix-agent-addons] release [0.2.41-1].
Created by command:

/usr/bin/tito tag
2018-07-09 09:05:27 +02:00
Daniel Berteaud
dc26a547cb Don't log sudo usage for Zabbix 2018-07-09 09:05:11 +02:00
Daniel Berteaud
bebd1cd6ac Automatic commit of package [zabbix-agent-addons] release [0.2.40-1].
Created by command:

/usr/bin/tito tag
2018-07-04 08:12:01 +02:00
Daniel Berteaud
7d27a931f3 Fix ZFS pool stats retrieval 2018-07-04 08:11:39 +02:00
Daniel Berteaud
a7b6a4aaf5 Automatic commit of package [zabbix-agent-addons] release [0.2.39-1].
Created by command:

/usr/bin/tito tag
2018-06-13 23:47:19 +02:00
Daniel Berteaud
7d882a4778 Fix computing pool CPU usage in check_pve 2018-06-13 23:46:53 +02:00
Daniel Berteaud
4556b3e555 Automatic commit of package [zabbix-agent-addons] release [0.2.38-1].
Created by command:

/usr/bin/tito tag
2018-06-07 14:05:02 +02:00
Daniel Berteaud
8b457ecc69 Add global net and disk stats for the cluster in check_pve_sudo 2018-06-07 14:04:27 +02:00
Daniel Berteaud
334a5f1a46 Automatic commit of package [zabbix-agent-addons] release [0.2.37-1].
Created by command:

/usr/bin/tito tag
2018-06-05 10:03:49 +02:00
Daniel Berteaud
37405cd71d Fix check_pve_sudo for single node monitoring 2018-06-05 10:03:34 +02:00
Daniel Berteaud
4e94735f0e Automatic commit of package [zabbix-agent-addons] release [0.2.36-1].
Created by command:

/usr/bin/tito tag
2018-06-05 09:05:57 +02:00
Daniel Berteaud
a5804359db Remove redundant condition 2018-06-05 00:21:48 +02:00
Daniel Berteaud
159247fb00 Fix {#PVE_STOR_STATUS} macro 2018-06-05 00:11:34 +02:00
Daniel Berteaud
ffe8c6b0a5 Only gather info about online nodes 2018-06-04 23:29:27 +02:00
Daniel Berteaud
c146554291 Add some global cluster stats for PVE 2018-06-04 23:08:27 +02:00
Daniel Berteaud
5d84a64595 Automatic commit of package [zabbix-agent-addons] release [0.2.35-1].
Created by command:

/usr/bin/tito tag
2018-06-03 23:06:28 +02:00
Daniel Berteaud
a9bc0f7f54 Enhance PVE scripts and conf 2018-06-03 23:06:08 +02:00
Daniel Berteaud
29fe3a445d Add basic scripts for PVE monitoring 2018-06-01 10:34:40 +02:00
Daniel Berteaud
792943bc0a Automatic commit of package [zabbix-agent-addons] release [0.2.34-1].
Created by command:

/usr/bin/tito tag
2018-05-30 17:18:56 +02:00
Daniel Berteaud
662d48584e Add stats for ZFS zpools 2018-05-30 17:07:20 +02:00
Daniel Berteaud
0aef2ec8ab Automatic commit of package [zabbix-agent-addons] release [0.2.33-1].
Created by command:

/usr/bin/tito tag
2018-05-29 12:38:42 +02:00
Daniel Berteaud
e72efa4415 Ensure we always return a value for scan action status errors in check_zfs 2018-05-29 12:38:25 +02:00
Daniel Berteaud
5db4a536d5 Automatic commit of package [zabbix-agent-addons] release [0.2.32-1].
Created by command:

/usr/bin/tito tag
2018-05-29 12:20:46 +02:00
Daniel Berteaud
420bb936b3 Handle situations where there's more than 1000 errors on an item in ZFS pools 2018-05-29 12:20:07 +02:00
Daniel Berteaud
aa38dad33d Automatic commit of package [zabbix-agent-addons] release [0.2.31-1].
Created by command:

/usr/bin/tito tag
2018-05-29 12:04:17 +02:00
Daniel Berteaud
65e1f006a7 Various enhancements in check_zfs 2018-05-29 12:03:25 +02:00
Daniel Berteaud
dec361914d Fix macro name for zfs zpool discovery 2018-05-28 17:51:31 +02:00
Daniel Berteaud
b6c3464cef Automatic commit of package [zabbix-agent-addons] release [0.2.30-1].
Created by command:

/usr/bin/tito tag
2018-05-28 17:32:08 +02:00
Daniel Berteaud
e28762debd Rename vfs.zfs.discovery to vfs.zfs.zpool.discovery
So later we'll be able to add other discovery rules for, say, datasets
2018-05-28 17:31:23 +02:00
Daniel Berteaud
1c610c2b57 Automatic commit of package [zabbix-agent-addons] release [0.2.29-1].
Created by command:

/usr/bin/tito tag
2018-05-28 17:27:17 +02:00
Daniel Berteaud
d7f456d2ea Add scripts to discover and check ZFS zpools 2018-05-28 17:26:57 +02:00
Daniel Berteaud
e409620384 Automatic commit of package [zabbix-agent-addons] release [0.2.28-1].
Created by command:

/usr/bin/tito tag
2018-03-06 09:30:17 +01:00
Daniel Berteaud
df2da12d82 Use "all" key to get all httpd stats in JSON format 2018-03-06 09:29:50 +01:00
Daniel Berteaud
347ca02674 Automatic commit of package [zabbix-agent-addons] release [0.2.27-1].
Created by command:

/usr/bin/tito tag
2018-03-06 08:52:48 +01:00
Daniel Berteaud
ce67a8f719 Respond with all stats as a JSON structure if no --what given 2018-03-06 08:52:12 +01:00
Daniel Berteaud
12f723222c Automatic commit of package [zabbix-agent-addons] release [0.2.26-1].
Created by command:

/usr/bin/tito tag
2018-03-06 08:43:34 +01:00
Daniel Berteaud
0ca02b4974 Support space in httpd status key
So total_accesses and total_kbytes are available again
2018-03-06 08:43:05 +01:00
Daniel Berteaud
3a9ec69a56 Automatic commit of package [zabbix-agent-addons] release [0.2.25-1].
Created by command:

/usr/bin/tito tag
2018-02-06 10:53:31 +01:00
Daniel Berteaud
381db5eb17 Fix mdadm RAID discovery condition 2018-02-06 10:53:15 +01:00
Daniel Berteaud
cd262c249b Automatic commit of package [zabbix-agent-addons] release [0.2.24-1].
Created by command:

/usr/bin/tito tag
2018-01-09 17:52:33 +01:00
Daniel Berteaud
b4f79b94e4 Don't WARN when device is being checked, only when it's rebuilding 2018-01-09 17:51:53 +01:00
Daniel Berteaud
0c9fad57bd Don't detect mdadm RAID in containers 2018-01-09 17:23:35 +01:00
Daniel Berteaud
5b50016e1f Automatic commit of package [zabbix-agent-addons] release [0.2.23-1].
Created by command:

/usr/bin/tito tag
2017-11-30 10:07:38 +01:00
Daniel Berteaud
f714617bec Check line format in check_httpd
Instead of spitting errors in case server-status redirects to somewhere else
2017-11-30 10:06:39 +01:00
Daniel Berteaud
a548d599bf Automatic commit of package [zabbix-agent-addons] release [0.2.22-1].
Created by command:

/usr/bin/tito tag
2017-11-20 23:51:13 +01:00
Daniel Berteaud
98c9297292 Add script to monitor spamassassin's bayes database stats 2017-11-20 23:50:37 +01:00
Daniel Berteaud
7b6c8bd1d5 Symlink releasers.conf to the global one 2017-11-20 23:46:56 +01:00
Daniel Berteaud
e1a50ab3b0 Automatic commit of package [zabbix-agent-addons] release [0.2.21-1].
Created by command:

/usr/bin/tito tag
2017-11-14 00:33:45 +01:00
Daniel Berteaud
93bf9cd68d Remove now non existing CHANGELOG.git file 2017-11-14 00:33:28 +01:00
Daniel Berteaud
e176f0f799 Automatic commit of package [zabbix-agent-addons] release [0.2.20-1].
Created by command:

/usr/bin/tito tag
2017-11-14 00:32:03 +01:00
Daniel Berteaud
40efd730f2 Configure tito 2017-11-14 00:31:53 +01:00
Daniel Berteaud
f172075029 Add disttag to release 2017-11-14 00:30:32 +01:00
Daniel Berteaud
c465c3e469 Initialized to use tito. 2017-11-14 00:25:24 +01:00
Daniel Berteaud
6793adbe89 Spec file update 2017-10-12 16:54:07 +02:00
Daniel Berteaud
3318a48195 Correctly handle Partially Degraded state
which can happen with multi level RAID arrays, like RAID 60
2017-10-12 16:02:28 +02:00
Daniel Berteaud
2903b025af Only provide the SELinux policy for el7 2017-08-24 12:10:30 +02:00
Daniel Berteaud
6c1bdb43ee Spec file update 2017-08-23 18:30:46 +02:00
Daniel Berteaud
a3ecef0f9e Add a SELinux policy module 2017-08-23 17:42:51 +02:00
Daniel Berteaud
282a4abffb Spec file update 2017-06-14 16:04:35 +02:00
Daniel Berteaud
0283ab87ec Add a userparam to check opened file descriptors 2017-06-14 15:22:31 +02:00
Daniel Berteaud
9296ff3b2e Spec file update 2016-11-24 16:42:33 +01:00
Daniel Berteaud
6416d60cab Fix disco_nu_ups when there's no upsd daemon running 2016-11-24 16:25:49 +01:00
Daniel Berteaud
c99874dd2d Fix disco_lvm_sudo when there's no VG at all
It was returning an empty string, which is not expected by Zabbix Server, so the item was marked as unsupported. It's now returning an empty JSON list
2016-11-24 16:21:37 +01:00
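A minimal Perl sketch of the fix this commit describes, with an assumed {#VGNAME} macro name: when there is no VG at all, a valid empty discovery structure is printed instead of an empty string, so the Zabbix server does not mark the item as unsupported.

    use strict;
    use warnings;
    use JSON;

    my @vgs  = grep { length } map { s/^\s+|\s+$//gr } `vgs --noheadings -o vg_name 2>/dev/null`;
    my @data = map { { '{#VGNAME}' => $_ } } @vgs;    # macro name is an assumption
    print to_json({ data => \@data });                # prints {"data":[]} when no VG exists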
Daniel Berteaud
a30233bf33 Force ini syntax
Prevent error when the config is empty
2016-11-23 19:18:10 +01:00
Daniel Berteaud
6c9239a5fd Spec file update 2016-11-09 18:51:49 +01:00
Daniel Berteaud
593da4265e Add 0 before . for httpd stats 2016-11-09 17:41:55 +01:00
Daniel Berteaud
ab8ce667b4 Use the uri passed in $1 for server-status 2016-11-09 17:15:49 +01:00
Daniel Berteaud
6db86ee0c5 Return the URI of server-status
Will make it easier later to support alternative URIs
2016-11-09 17:14:15 +01:00
Daniel Berteaud
9058167a9e Add scripts to monitor httpd 2016-11-09 16:26:34 +01:00
Daniel Berteaud
c04a3cfcd7 Spec file update 2016-10-31 15:06:35 +01:00
Daniel Berteaud
2393ed113e Fix handling Airflow_Temperature_Cel label 2016-10-30 10:31:09 +01:00
Daniel Berteaud
65b6e43f97 Spec file update 2016-10-28 12:54:31 +02:00
Daniel Berteaud
68899dc3eb Alternative temp label for smartctl 2016-10-28 12:52:44 +02:00
Daniel Berteaud
71cf068d67 Remove debug line 2016-09-01 19:54:10 +02:00
Daniel Berteaud
2ccb17f53c Spec file update 2016-09-01 18:37:43 +02:00
Daniel Berteaud
4d03342c5d Add support for lm_sensors 2016-09-01 18:34:49 +02:00
Daniel Berteaud
f8b1f0f4cb Spec file update 2016-08-25 11:06:51 +02:00
Daniel Berteaud
91f2db6f71 Add items to monitor number of FD for squid 2016-08-25 11:02:17 +02:00
Daniel Berteaud
754f75600a Spec file update 2016-04-06 13:58:21 +02:00
Daniel Berteaud
81e5f414c8 Detect HDD temp sensors with sat+megaraid 2016-04-06 13:52:41 +02:00
Daniel Berteaud
0a30e8c9f2 Spec file update 2016-03-21 10:26:51 +01:00
Daniel Berteaud
c43bdff1a9 Use flock to make sure only one gluster command runs at a time 2016-03-21 10:16:28 +01:00
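A minimal Perl sketch of the locking this commit describes, with an illustrative lock file path: an exclusive flock serializes gluster CLI invocations so concurrent Zabbix checks don't step on each other.

    use strict;
    use warnings;
    use Fcntl qw(:flock);

    my $lockfile = '/var/lock/zabbix_gluster.lock';               # illustrative path
    open(my $lock, '>', $lockfile) or die "Cannot open $lockfile: $!";
    flock($lock, LOCK_EX) or die "Cannot lock $lockfile: $!";     # wait until we're the only runner

    my @status = `gluster volume status 2>/dev/null`;
    # ... parse @status ...

    flock($lock, LOCK_UN);
    close($lock);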
Daniel Berteaud
4b73b1f8df Spec file update 2015-09-16 18:15:33 +02:00
Daniel Berteaud
f9db23c8c5 Prevent heal false positive by checking two times 2015-09-16 15:01:28 +02:00
Daniel Berteaud
f572175a65 Spec file update 2015-09-15 10:07:16 +02:00
Daniel Berteaud
2893a84635 Add simple scripts for monitoring DRBD resources
with auto-discovery
2015-09-14 16:41:35 +02:00
Daniel Berteaud
7cbbe74685 Spec file update 2015-09-10 09:16:40 +02:00
Daniel Berteaud
8747854686 Support temp sensors with negative values
Also enhance threshold detector
2015-09-09 18:09:55 +02:00
Daniel Berteaud
7fd870d984 Spec file update 2015-07-27 16:31:25 +02:00
Daniel Berteaud
a6f8585c05 Several enhancements in sensors ini generator
Including:
Skip sensor if value isn't found
Support power sensor returning value as %
Support power sensor of nut UPS
Better sensor ID sanitization
2015-07-27 16:06:32 +02:00
Daniel Berteaud
1dbc287840 Use separate default threshold for UPS
+ minor coding style updates and optimizations
2015-07-24 16:58:23 +02:00
Daniel Berteaud
3183e26efc Spec file update 2015-07-22 17:32:33 +02:00
Daniel Berteaud
4975211274 Minor fix in sensor generator 2015-07-22 16:57:02 +02:00
Daniel Berteaud
20b5b3aaa6 Minor simplification in sensors generator 2015-07-21 15:05:10 +02:00
Daniel Berteaud
39038b97c7 Actually create UPS sensors 2015-07-21 15:03:36 +02:00
Daniel Berteaud
c5bf918c51 Fix stupid errors in generate_sensors_ini 2015-07-21 14:58:37 +02:00
Daniel Berteaud
9a0d53a805 Wrong usage of map+chomp 2015-07-21 14:53:08 +02:00
Daniel Berteaud
a9cc531768 Missing return 1 in UPS.pm 2015-07-21 13:08:51 +02:00
Daniel Berteaud
fc9002b06e Chomp upsc output 2015-07-20 19:12:26 +02:00
Daniel Berteaud
c47da02021 Switch to which to find binaries in util_generate_sensors_ini 2015-07-20 16:06:55 +02:00
Daniel Berteaud
33701cc275 Add sensors for nut UPS 2015-07-20 15:47:29 +02:00
Daniel Berteaud
f426903cc8 Move UPS detection in a module 2015-07-20 12:18:28 +02:00
Daniel Berteaud
f5a7ebdfed Fix uninitialized $remouvable var 2015-07-20 11:49:35 +02:00
Daniel Berteaud
6f52ff1fab New Zabbix::Agent::Addons::Disks module 2015-07-20 10:44:46 +02:00
Daniel Berteaud
6e0872bc3e Spec file update 2015-07-10 12:51:01 +02:00
Daniel Berteaud
7b5acdf11e Fix GlusterFS brick count on 3.7.x
There's a new RDMA column in gluster vol status
2015-07-10 12:34:26 +02:00
Daniel Berteaud
8ba8179fa0 Spec file update 2015-07-10 10:30:28 +02:00
Daniel Berteaud
c36585d92d Fix using HD hysteresis param 2015-07-10 10:27:33 +02:00
Daniel Berteaud
b571fa5509 Take the old file as param for the sensor converter script 2015-07-09 17:39:33 +02:00
Daniel Berteaud
b9df71e9ba Use separate margin and hyst param for HD 2015-07-09 16:06:50 +02:00
Daniel Berteaud
367454e025 Default temp margin to 20°C instead of 10 2015-07-09 14:30:17 +02:00
Daniel Berteaud
cf8585f524 Fix pwr thresholds order 2015-07-09 14:28:46 +02:00
Daniel Berteaud
113f72fb3a Fix threshold detection for power sensors 2015-07-09 13:54:07 +02:00
Daniel Berteaud
4094dfc530 Better error handling and support for power sensors in the sensor config generator 2015-07-09 13:20:16 +02:00
Daniel Berteaud
c3ee93d817 Fix using keys on hash 2015-07-08 20:03:18 +02:00
Daniel Berteaud
8e90619012 Fix shebang 2015-07-08 20:00:31 +02:00
Daniel Berteaud
e7c34c6e58 Add a generator script for sensors
it'll try to auto discover sensors available using IPMI and smartctl
2015-07-08 16:45:43 +02:00
Daniel Berteaud
b2cc891890 Convert sensors config to a ini format
Automatically migrate from the old to the new format for existing install
2015-07-08 11:31:37 +02:00
Daniel Berteaud
fd6de22593 Update sample ipmi commands 2015-07-07 15:21:55 +02:00
Daniel Berteaud
bd4f16e59a Spec file update 2015-07-07 15:07:36 +02:00
Daniel Berteaud
da771ec287 Support different sensors types 2015-07-07 14:36:59 +02:00
Daniel Berteaud
0a99008074 Spec file update 2015-06-04 16:48:58 +02:00
Daniel Berteaud
310edb5c50 Code restructuration to detect pending healing processes
But won't work with GlusterFS < 3.6 as it'd trigger many false positives
2015-06-04 15:56:23 +02:00
Daniel Berteaud
30d1471c2f Spec file update 2015-06-04 11:35:56 +02:00
Daniel Berteaud
19d3b616d1 Fix gluster check if info heal-failed is not supported by gluster 2015-06-04 11:18:51 +02:00
Daniel Berteaud
49bb51e27e Spec file update 2015-04-15 18:06:47 +02:00
Daniel Berteaud
79dc37ac3a mdadm: Report rebuilding if status is resync 2015-04-15 18:02:06 +02:00
Daniel Berteaud
124782c8d6 Spec file update 2015-02-10 10:21:09 +01:00
Daniel Berteaud
ba5d5b558e Fix disco_filesystem
and switch to JSON module instead of manually printing (invalid) JSON data
2015-02-10 10:18:38 +01:00
Daniel Berteaud
5bb736fac7 Spec file update 2015-01-14 13:25:29 +01:00
Daniel Berteaud
0860c37f6f Fix check_qmail_sudo script 2015-01-08 10:40:13 +01:00
Daniel Berteaud
ac4b332bee Spec file update 2015-01-05 12:10:10 +01:00
Daniel Berteaud
adadd97f45 Add qmail configuration 2015-01-05 12:05:57 +01:00
Daniel Berteaud
4ee55e4708 Add a simple script to check qmail queue (requires qmqtool) 2015-01-05 12:04:17 +01:00
Daniel Berteaud
1769878d85 Spec file update 2014-11-12 09:33:39 +01:00
Daniel Berteaud
f83c0e60d1 Discover thin pools, and report thin pool allocation 2014-11-07 14:47:30 +01:00
Daniel Berteaud
54e4be8804 Spec file update 2014-09-15 09:42:19 +02:00
Daniel Berteaud
74bf264ddd Adapt squidclient arg to work on squid 3.1 too 2014-09-14 14:44:49 +02:00
Daniel Berteaud
0de4e58976 Spec file update 2014-07-16 11:07:54 +02:00
Daniel Berteaud
5e8df466e0 Better debug info and more robust execution for GlusterFS scripts 2014-07-16 10:36:30 +02:00
Daniel Berteaud
329eb1557a Report a more verbose status for peers and volumes instead of a boolean
So it'll be easier to diagnose
2014-07-11 19:05:06 +02:00
Daniel Berteaud
a42acdda65 Add simple scripts to discover and check GlusterFS status 2014-07-11 18:23:40 +02:00
Daniel Berteaud
9cff9cf2e0 Spec file update 2014-07-10 13:00:19 +02:00
Daniel Berteaud
a5f8da6aa0 Support LLD discovery for MegaRAID controllers 2014-07-10 12:55:14 +02:00
Daniel Berteaud
6153c4aa15 Add missing UserParam for the mdadm discovery script 2014-07-09 16:35:15 +02:00
Daniel Berteaud
fd366d9e15 Spec file update 2014-07-09 16:31:46 +02:00
Daniel Berteaud
d21824d09f Add discovery script for mdadm based RAID devices 2014-07-09 16:30:41 +02:00
Daniel Berteaud
304c723037 Spec file update 2014-05-06 13:01:44 +02:00
Daniel Berteaud
5214d25e14 Add a simple script to check nmbd lookups 2014-05-06 10:15:54 +02:00
Daniel Berteaud
33c78c9eb5 Spec file update 2014-02-19 09:31:54 +01:00
Daniel Berteaud
0bb8a01798 Remove scripts to monitor certificates, they are not generic enough 2014-02-19 09:29:35 +01:00
Daniel Berteaud
904138523b Spec file update 2014-02-18 18:27:20 +01:00
Daniel Berteaud
3a9dd8966f Move phpki conf to the correct location 2014-02-18 17:31:08 +01:00
Daniel Berteaud
0e53a7a486 Spec file update 2014-02-18 17:26:05 +01:00
Daniel Berteaud
7083976d5a Add scripts to monitor PHPki certificates 2014-02-18 13:03:33 +01:00
Daniel Berteaud
ff2021b1a9 spec file update 2013-11-29 09:56:43 +01:00
Daniel Berteaud
4bc1378781 Possibility to disable Zabbix monitoring of some hosts in BackupPC by adding {ZabbixMonitoring} = 0 in their conf 2013-11-29 09:54:49 +01:00
Daniel Berteaud
b18688f582 update spec file 2013-10-28 18:31:22 +01:00
Daniel Berteaud
1e991ce9e2 Do not skip 'removable' devices in disco_block_device, some real disks show a removable flag 2013-10-28 18:30:34 +01:00
Daniel Berteaud
e4489a71fb update spec file 2013-10-01 14:17:22 +02:00
Daniel Berteaud
ddcbbc85dd fix macros names in disco_raid_hp_sudo 2013-10-01 14:16:33 +02:00
Daniel Berteaud
8b36956856 update spec file 2013-10-01 13:05:33 +02:00
Daniel Berteaud
217d4a932a Add simple scripts to monitor HP Smart Arrays (requires hpacucli) 2013-10-01 11:12:35 +02:00
Daniel Berteaud
a74483d3b8 update spec file 2013-04-23 17:15:00 +02:00
Daniel Berteaud
974b41a11a Return No backup warning and max errors threshold in backuppc discovery 2013-04-23 13:02:52 +02:00
Daniel Berteaud
a734699fc9 Initialize an empty array in disco_backuppc_sudo 2013-04-23 12:57:53 +02:00
Daniel Berteaud
708cdf2747 Skip block which won't support smart data 2013-04-22 19:25:24 +02:00
Daniel Berteaud
bb5e69ff30 update spec file 2013-04-22 19:21:26 +02:00
Daniel Berteaud
5229ca1e37 Use full path to smartctl binary so system call works with sudo 2013-04-22 19:20:35 +02:00
Daniel Berteaud
2d2bfe703c Fix permissions on sudoers conf 2013-04-22 18:35:50 +02:00
Daniel Berteaud
848c9ac61c update spec file 2013-04-22 16:54:37 +02:00
Daniel Berteaud
754005c380 Rewrite disco_smart_sudo in perl and switch to read /sys/block so it can detect smart capable drives with older smartctl 2013-04-22 16:53:52 +02:00
Daniel Berteaud
523b45b406 initialize an empty array in disco_sensors 2013-04-19 18:18:16 +02:00
Daniel Berteaud
828a502ea2 initialize an empty array in disco_lvm_sudo 2013-04-19 18:17:07 +02:00
Daniel Berteaud
54ffe7c67c initialize an empty array in disco_block_devices 2013-04-19 18:16:28 +02:00
Daniel Berteaud
40241f2fb8 Return an empty set of ups instead of ZBX_NOTSUPPORTED if no UPS is found or upsc is not installed 2013-04-19 18:15:30 +02:00
Daniel Berteaud
7d7f7e27de update spec file 2013-04-19 11:30:48 +02:00
Daniel Berteaud
fc6d32923d Add discovery script and conf for nut ups 2013-04-18 14:37:28 +02:00
Daniel Berteaud
1015709880 Possibility to base64 encode the regex passed to zabbix agent 2013-04-18 14:03:22 +02:00
Daniel Berteaud
34ef6daf64 Pass a regex to backuppc discovery. Default is .*, so every host is returned. You can use this to selectively ignore hosts 2013-04-18 13:36:07 +02:00
Daniel Berteaud
c9cc8235c4 On some systems (Debian), logical volumes already have /dev/$group in their name
So fix the logical volume name in this case
2013-04-18 10:13:15 +02:00
Daniel Berteaud
6137dbbab2 Disable the manual net.if.discovery key as it conflicts with the builtin one 2013-04-18 09:24:42 +02:00
Daniel Berteaud
006f811f40 update spec file 2013-04-18 08:49:51 +02:00
Daniel Berteaud
f3ca95b83f do not prepend /dev to block device names 2013-04-17 17:07:25 +02:00
Daniel Berteaud
beadda2634 Fix network interface discovery script 2013-04-17 16:52:38 +02:00
Daniel Berteaud
9983c2ea30 Add scripts to discover network interfaces 2013-04-17 15:39:57 +02:00
Daniel Berteaud
2fb07c9291 update spec file 2013-04-17 14:00:00 +02:00
Daniel Berteaud
b1c1224496 Fix a typo in smart.conf 2013-04-17 13:58:56 +02:00
Daniel Berteaud
50e3e357ff update spec file, first stable release 2013-04-17 13:44:06 +02:00
150 changed files with 38462 additions and 367 deletions

3
.tito/packages/.readme Normal file
View File

@ -0,0 +1,3 @@
the .tito/packages directory contains metadata files
named after their packages. Each file has the latest tagged
version and the project's relative directory.

View File

@ -0,0 +1 @@
0.2.172-1 ./

1
.tito/releasers.conf Symbolic link
View File

@ -0,0 +1 @@
../../tito_libs/releasers.conf

6
.tito/tito.props Normal file
View File

@ -0,0 +1,6 @@
[buildconfig]
builder = tito.builder.Builder
tagger = tito.tagger.VersionTagger
changelog_do_not_remove_cherrypick = 0
changelog_format = %s (%ae)
lib_dir = ../tito_libs

View File

@ -1,24 +0,0 @@
# You can configure here the sensors
# Format is <sensors_name>=<command>!<high threshold>!<low threshold>
# An alert is triggered if the temperature is above the high threshold
# The alert is cleared if the temperature is less than low threshold
# Example:
#
#
## Examples with ipmitool
# cpu0 = /usr/bin/ipmitool sdr get 'P1 Therm Margin' | grep 'Sensor Reading' | cut -d':' -f 2 | awk '{print$1}'!-30!-39
# mb = /usr/bin/ipmitool sdr get 'Baseboard Temp' | grep 'Sensor Reading' | cut -d':' -f 2 | awk '{print$1}'!50!45
#
## Examples with smartctl
# sda = /usr/sbin/smartctl -a /dev/sda | grep Temperature_Celsius | awk '{print $10}'!45!40
# sdb = /usr/sbin/smartctl -a /dev/sdb | grep Temperature_Celsius | awk '{print $10}'!45!50
#
## Examples with lm_sensors
# cpu0=/usr/bin/sensors | grep temp1 | cut -d':' -f 2 | awk '{print $1}' | sed -e "s/+//g" -e "s/.C//g"!65!55
#
## Examples with acpi
# cpu0=cat /proc/acpi/thermal_zone/THRM/temperature | awk '{print $2}'!65!55
#
#
# !!! WARNING !!!
# All the commands will be executed with root privileges

45
conf/sensors.ini Normal file
View File

@ -0,0 +1,45 @@
# This file lets you configure which sensors will be monitored by Zabbix
# Sensors defined here will be sent to Zabbix through its low level discovery feature
# You then have to create discovery rules and prototypes to make use of them
#
# This file is in ini format, each sensor has its own block and a set of key/value pair
#
# Example:
#
# [cpu0]
# description=Temperature of the first CPU
# threshold_high=60
# threshold_low=50
# cmd="/usr/bin/sensors | grep temp1 | cut -d':' -f 2 | awk '{print $1}' | sed -e 's/+//g' -e 's/.C//g'"
# type=temp
# unit=°C
#
# [mb]
# description=Motherboard's temperature
# threshold_high=50
# threshold_low=45
# cmd="/usr/bin/ipmitool sdr get 'Baseboard Temp' | grep 'Sensor Reading' | awk '{print $4}'"
# type=temp
# unit=°C
#
# [sda]
# description=hard drive temperature
# threshold_high=50
# threshold_low=45
# cmd="/usr/sbin/smartctl -A /dev/sda | grep Temperature_Celsius | awk '{print $10}'"
# type=temp
# unit=°C
#
# [fan1]
# description=front fan
# threshold_high=12000
# threshold_low=1400
# cmd="/usr/bin/ipmitool sdr get 'Fan1A RPM' | grep 'Sensor Reading' | awk '{print $4}'"
# type=fan
# unit=rpm
#
#
# !!! WARNING !!!
# * All the commands will be executed with root privileges
# * If your cmd contains quotes, you must double quote the whole command
# * If your cmd contains double quotes, you must escape them as \"

View File

@ -1,2 +1,4 @@
Defaults:zabbix !requiretty
zabbix ALL=(root) NOPASSWD: /var/lib/zabbix/bin/*_sudo
Cmnd_Alias ZABBIX_AGENT = /var/lib/zabbix/bin/*_sudo
Defaults!ZABBIX_AGENT !syslog
zabbix ALL=(root) NOPASSWD: ZABBIX_AGENT

View File

@ -0,0 +1,63 @@
package Zabbix::Agent::Addons::Disks;
use strict;
use warnings;
# Return an array of block devices, skip if size == 0
sub list_block_dev {
my @bd = ();
opendir(my $dh, "/sys/block") or die "Couldn't open /sys/block: $!";
my @blocks = grep { $_ !~ m/^\./ } readdir($dh);
closedir($dh);
foreach my $block (@blocks){
my $size = 1;
if ( -e "/sys/block/$block/size"){
open SIZE, "/sys/block/$block/size";
$size = join "", <SIZE>;
close SIZE;
chomp($size);
next if ($size eq '0');
}
push @bd, $block;
}
return @bd;
}
sub list_smart_hdd{
my ($param) = shift || {};
my @shd = ();
if (-x "/usr/sbin/smartctl"){
BLOCK: foreach my $block (list_block_dev()){
# Skip block we already know won't support SMART
next if ($block =~ m/^(ram|loop|md|dm\-)\d+/);
my $smart_enabled = 0;
my @smart_info = qx(/usr/sbin/smartctl -i /dev/$block);
next unless ($? == 0);
foreach my $line (@smart_info){
if ($line =~ m/^SMART support is:\s+Enabled/i){
$smart_enabled = 1;
last;
} elsif ($line =~ m/NVMe/i){
$smart_enabled = 1;
last;
} elsif ($line =~ m/^Transport protocol:\s+iSCSI/i){
# Skip iSCSI block
next BLOCK;
}
}
# Skip block unless S.M.A.R.T is advertized as enabled
next unless ($smart_enabled);
if ($param->{skip_remouvable} && -e "/sys/block/$block/removable"){
open REMOVABLE, "/sys/block/$block/removable";
my $removable = join "", <REMOVABLE>;
close REMOVABLE;
chomp($removable);
next if ($removable eq '1');
}
push @shd, $block;
}
}
return @shd;
}
1;
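To show how this module is meant to be consumed, here is a minimal usage sketch that feeds list_smart_hdd() into a Zabbix low level discovery structure. It is not one of the shipped zabbix_scripts, and the {#SMART_DRIVE} macro name is an assumption for the example.

#!/usr/bin/perl
# Usage sketch only; the {#SMART_DRIVE} macro name is an assumption.
use strict;
use warnings;
use JSON;
use Zabbix::Agent::Addons::Disks;

# skip_remouvable is the module's actual (misspelled) option name
my @drives = Zabbix::Agent::Addons::Disks::list_smart_hdd({ skip_remouvable => 1 });
my @data   = map { { '{#SMART_DRIVE}' => "/dev/$_" } } @drives;
print to_json({ data => \@data }) . "\n";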

View File

@ -0,0 +1,917 @@
package Zabbix::Agent::Addons::LVM;
# Forked from Linux::LVM
# with support for thin pools
use 5.006;
use strict;
use warnings;
require Exporter;
our @ISA = qw(Exporter);
# Items to export into callers namespace by default. Note: do not export
# names by default without a very good reason. Use EXPORT_OK instead.
# Do not simply export all your public functions/methods/constants.
# This allows declaration use Zabbix::Agent::Addons::LVM ':all';
# If you do not need this, moving things directly into @EXPORT or @EXPORT_OK
# will save memory.
our %EXPORT_TAGS = ( 'all' => [ qw( get_volume_group_list
get_volume_group_information
get_logical_volume_information
get_physical_volume_information
get_vg_information
get_pv_info
get_lv_info
) ] );
our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
our @EXPORT = qw( get_volume_group_list
get_volume_group_information
get_logical_volume_information
get_physical_volume_information
get_vg_information
get_pv_info
get_lv_info
);
our $VERSION = '0.18';
our $units;
# Preloaded methods go here.
# Autoload methods go after =cut, and are processed by the autosplit program.
#-----------------------------------------------------------------------#
# Subroutine: units #
#-----------------------------------------------------------------------#
# Description: Set units to be used for pe_size, lv_size, etc. #
# legal values are same as lvm --units: #
# hbskmgtpeHBSKMGTPE #
# (h)uman-readable, (b)ytes, (s)ectors, (k)ilobytes, #
# (m)egabytes, (g)igabytes, (t)erabytes, (p)etabytes, #
# (e)xabytes. Capitalise to use multiples of 1000 (S.I.) #
# instead of 1024. #
# Can also specify custom units e.g. --units 3M #
#-----------------------------------------------------------------------#
# Parameters: An optional units value to set. #
#-----------------------------------------------------------------------#
# Return Values: The currently configured units string. #
#-----------------------------------------------------------------------#
sub units {
shift;
$units = shift() if @_;
return $units;
}
#-----------------------------------------------------------------------#
# Subroutine: get_volume_group_list #
#-----------------------------------------------------------------------#
# Description: This function will return a sorted list of all of the #
# active volume groups on the system. #
#-----------------------------------------------------------------------#
# Parameters: None #
#-----------------------------------------------------------------------#
# Return Values: On success, an array with the volume group names. #
#-----------------------------------------------------------------------#
sub get_volume_group_list() {
my %vg = get_vg_information();
return (sort keys(%vg));
} # End of the get_volume_group_list routine.
#-----------------------------------------------------------------------#
# Subroutine: get_volume_group_information #
#-----------------------------------------------------------------------#
# Description: This function will return a hash containing all of the #
# data about the specified volume group. #
#-----------------------------------------------------------------------#
# Parameters: A string containing a volume group name. #
#-----------------------------------------------------------------------#
# Return Values: On success, a hash with the volume group data. #
#-----------------------------------------------------------------------#
sub get_volume_group_information($) {
my $volume_group = $_[0];
my %vg_info;
my %vg = get_vg_information();
foreach(sort keys %{$vg{$volume_group}}) {
if ( $_ eq "pvols" ) { next; }
elsif( $_ eq "lvols" ) { next; }
else {
$vg_info{$_} = $vg{$volume_group}->{$_};
}
}
return %vg_info;
} # End of the get_volume_group_information routine.
#-----------------------------------------------------------------------#
# Subroutine: get_logical_volume_information #
#-----------------------------------------------------------------------#
# Description: This function will return a hash containing all of the #
# logical volume data for the specified volume group. #
#-----------------------------------------------------------------------#
# Parameters: A string containing a volume group name. #
#-----------------------------------------------------------------------#
# Return Values: On success, a hash with the logical volume data. #
#-----------------------------------------------------------------------#
sub get_logical_volume_information($) {
my $volume_group = $_[0];
my %lv_info;
my $lvname;
my %vg = get_vg_information();
foreach $lvname (sort keys %{$vg{$volume_group}->{lvols}}) {
foreach(sort keys %{$vg{$volume_group}->{lvols}->{$lvname}}) {
$lv_info{$lvname}->{$_} = $vg{$volume_group}->{lvols}->{$lvname}->{$_};
}
}
return %lv_info;
} # End of the get_logical_volume_information routine.
#-----------------------------------------------------------------------#
# Subroutine: get_physical_volume_information #
#-----------------------------------------------------------------------#
# Description: This function will return a hash containing all of the #
# physical volume data for the specified volume group. #
#-----------------------------------------------------------------------#
# Parameters: A string containing a volume group name. #
#-----------------------------------------------------------------------#
# Return Values: On success, a hash with the physical volume data. #
#-----------------------------------------------------------------------#
sub get_physical_volume_information($) {
my $volume_group = $_[0];
my %pv_info;
my $pvname;
my %vg = get_vg_information();
foreach $pvname (sort keys %{$vg{$volume_group}->{pvols}}) {
foreach(sort keys %{$vg{$volume_group}->{pvols}->{$pvname}}) {
$pv_info{$pvname}->{$_} = $vg{$volume_group}->{pvols}->{$pvname}->{$_};
}
}
return %pv_info;
} # End of the get_physical_volume_information routine.
#-----------------------------------------------------------------------#
# Subroutine: get_vg_information #
#-----------------------------------------------------------------------#
# Description: This function will return a hash containing all of the #
# volume group information for the system. #
#-----------------------------------------------------------------------#
# Parameters: None #
#-----------------------------------------------------------------------#
# Return Values: On success, a hash with all of the vg information. #
#-----------------------------------------------------------------------#
sub get_vg_information() {
my %vghash;
my $vgn;
my $lvn;
my $pvn;
my @vginfo;
my $units_arg = '';
$units_arg = " --units $units " if ($units);
if ( -e "/usr/sbin/vgdisplay" ) {
@vginfo = `/usr/sbin/vgdisplay -v $units_arg 2>/dev/null`;
} else {
if( ! -e "/sbin/vgdisplay" ) { die("LVM utilities not installed in /sbin or /usr/sbin"); }
@vginfo = `/sbin/vgdisplay -v $units_arg 2>/dev/null`;
}
VGINF: foreach(@vginfo) {
chomp;
s/^\s+//g;
s/\s+$//g;
next VGINF if m/^$/;
# Parse the volume group name.
if( m/VG Name\s+(\S+)/ ) {
$vgn = $1; $vghash{$vgn}->{vgname} = $1;
next VGINF; }
# Parse the volume group access.
elsif( m/VG Access\s+(\S+)/ ) {
$vghash{$vgn}->{access} = $1;
next VGINF; }
# Parse the volume group status.
elsif( m/VG Status\s+(.+)/ ) {
$vghash{$vgn}->{status} = $1;
next VGINF; }
# Parse the volume group number.
elsif( m/VG #\s+(\S+)/ ) {
$vghash{$vgn}->{vg_number} = $1;
next VGINF; }
# Parse the maximum logical volume size and size unit for the volume group.
elsif( m/MAX LV Size\s+(\S+) (\S+)/ ) {
$vghash{$vgn}->{max_lv_size} = $1;
$vghash{$vgn}->{max_lv_size_unit} = $2;
next VGINF; }
# Parse the maximum number of logical volumes for the volume group.
elsif( m/MAX LV\s+(\S+)/ ) {
$vghash{$vgn}->{max_lv} = $1;
next VGINF; }
# Parse the current number of logical volumes for the volume group.
elsif( m/Cur LV\s+(\S+)/ ) {
$vghash{$vgn}->{cur_lv} = $1;
next VGINF; }
# Parse the number of open logical volumes for the volume group.
elsif( m/Open LV\s+(\S+)/ ) {
$vghash{$vgn}->{open_lv} = $1;
next VGINF; }
# Parse the number of physical volumes accessible to the volume group.
elsif( m/Max PV\s+(\S+)/ ) {
$vghash{$vgn}->{max_pv} = $1;
next VGINF; }
# Parse the current number of physical volumes in the volume group.
elsif( m/Cur PV\s+(\S+)/ ) {
$vghash{$vgn}->{cur_pv} = $1;
next VGINF; }
# Parse the number of active physical volumes in the volume group.
elsif( m/Act PV\s+(\S+)/ ) {
$vghash{$vgn}->{act_pv} = $1;
next VGINF; }
# Parse the size of the volume group.
elsif( m/VG Size\s+(\S+) (\S+)/ ) {
$vghash{$vgn}->{vg_size} = $1;
$vghash{$vgn}->{vg_size_unit} = $2;
next VGINF; }
# Parse the physical extent size and unit for one extent of volume group.
elsif( m/PE Size\s+(\S+) (\S+)/ ) {
$vghash{$vgn}->{pe_size} = $1;
$vghash{$vgn}->{pe_size_unit} = $2;
next VGINF; }
# Parse the total number and number of free physical extents from the physical disk.
elsif( m/Total PE \/ Free PE\s+(\S+) \/ (\S+)/m ) {
$vghash{$vgn}->{pvols}->{$pvn}->{total_pe} = $1;
$vghash{$vgn}->{pvols}->{$pvn}->{free_pe} = $2;
next VGINF; }
# Parse the total number of physical extents from the volume group.
elsif( m/Total PE\s+(\S+)/ ) {
$vghash{$vgn}->{total_pe} = $1;
next VGINF; }
# Parse the number of allocated physical extents from the volume group.
elsif( m/Alloc PE \/ Size\s+(\S+) \/ (\S+)(?:\s+(\S+))?/ ) {
$vghash{$vgn}->{alloc_pe} = $1;
$vghash{$vgn}->{alloc_pe_size} = $2;
$vghash{$vgn}->{alloc_pe_size_unit} = $3 || "B";
next VGINF; }
# Parse the volume group name.
elsif( m/Free PE \/ Size\s+(\S+) \/ (\S+) (\S+)/ ) {
$vghash{$vgn}->{free_pe} = $1;
$vghash{$vgn}->{free_pe_size} = $2;
$vghash{$vgn}->{free_pe_size_unit} = $3;
next VGINF; }
# Parse the volume group uuid.
elsif( m/VG UUID\s+(\S+)/ ) {
$vghash{$vgn}->{uuid} = $1;
next VGINF; }
# Parse the logical volume name.
elsif( m/LV Name\s+(\S+)/ ) {
$lvn = $1;
$vghash{$vgn}->{lvols}->{$lvn}->{name} = $1;
next VGINF; }
# since version 2.02.89 'LV Name' is no longer the full path, 'LV Path' is.
# LV Path may be bogus or missing in some cases, such as thin pools.
if( m/LV Path\s+(\S+)/ ) {
$lvn = $1 unless $lvn;
$vghash{$vgn}->{lvols}->{$lvn}->{name} = $1;
next VGINF; }
# Parse the logical volume UUID.
elsif( m/LV UUID\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{uuid} = $1;
next VGINF; }
# Parse the number of segments in the logical volume.
elsif( m/Segments\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{segments} = $1;
next VGINF; }
# Parse the logical volume size and unit.
elsif( m/LV Size\s+(\S+) (\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{lv_size} = $1;
$vghash{$vgn}->{lvols}->{$lvn}->{lv_size_unit} = $2;
next VGINF; }
# Parse the logical volume write access.
elsif( m/LV Write Access\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{write_access} = $1;
next VGINF; }
# Parse the logical volume status.
elsif( m/LV Status\s+(.+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{status} = $1;
next VGINF; }
# Parse the number of logical extents in the logical volume.
elsif( m/Current LE\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{cur_le} = $1;
next VGINF; }
# Parse the number of allocated logical extents in the logical volume.
elsif( m/Allocated LE\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{alloc_le} = $1;
next VGINF; }
# Parse the allocation type for the logical volume.
elsif( m/Allocation\s+(.+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{allocation} = $1;
next VGINF; }
# Parse the volume number.
elsif( m/LV #\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{lv_number} = $1;
next VGINF; }
# Parse the number of times the logical volume is open.
elsif( m/# open\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{open_lv} = $1;
next VGINF; }
# Parse the block device of the logical volume.
elsif( m/Block device\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{device} = $1;
next VGINF; }
# Parse the value for the read ahead sectors of the logical volume.
elsif( m/Read ahead sectors\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{read_ahead} = $1;
next VGINF; }
elsif( m/Allocated to snapshot\s+(\S+)%/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{'allocated_to_snapshot'} = $1;
next VGINF; }
elsif( m/COW-table size\s+([0-9\.]+)\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{'cow_table_size'} = $1;
$vghash{$vgn}->{lvols}->{$lvn}->{'cow_table_unit'} = $2;
next VGINF; }
# Thin pools have data and metadata allocations
elsif( m/Allocated pool data\s+(\S+)%/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{'allocated_pool_data'} = $1;
next VGINF; }
elsif( m/Allocated metadata\s+(\S+)%/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{'allocated_meta_data'} = $1;
next VGINF; }
elsif( m/Mirrored volumes\s+(.+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{'mirrored_volumes'} = $1;
next VGINF; }
# Parse the physical disk name.
elsif( m/PV Name\s+(\S+)/ ) {
$pvn = $1;
$vghash{$vgn}->{pvols}->{$pvn}->{device} = $1;
next VGINF; }
# Parse the status of the physical disk.
elsif( m/PV Status\s+(.+)/ ) {
$vghash{$vgn}->{pvols}->{$pvn}->{status} = $1;
next VGINF; }
# Parse the status of the physical disk.
elsif( m/PV UUID\s+(.+)/ ) {
$vghash{$vgn}->{pvols}->{$pvn}->{uuid} = $1;
next VGINF; }
}
return %vghash;
} # End of the get_vg_information routine.
#-----------------------------------------------------------------------#
# Subroutine: get_pv_info #
#-----------------------------------------------------------------------#
# Description: This function will return a hash containing all of the #
# information about the specified physical volume. #
#-----------------------------------------------------------------------#
# Parameters: A string containing a physical volume device path. #
#-----------------------------------------------------------------------#
# Return Values: On success, a hash with all of the pv information. #
#-----------------------------------------------------------------------#
sub get_pv_info($) {
my $pvname = $_[0];
my %pvhash;
my @pvinfo;
if( ! -e "$pvname" ) { die("Physical Disk: $pvname does not exist."); }
my $units_arg = '';
$units_arg = " --units $units " if ($units);
if ( -e "/usr/sbin/pvdisplay" ) {
@pvinfo = `/usr/sbin/pvdisplay $units_arg $pvname`;
} else {
if( ! -e "/sbin/pvdisplay" ) { die("LVM utilities not installed in /sbin or /usr/sbin"); }
@pvinfo = `/sbin/pvdisplay $units_arg $pvname`;
}
PVINF: foreach(@pvinfo) {
# Get the name of the physical volume.
if( m/PV Name\s+(\S+)/ ) {
$pvhash{pv_name} = $1;
next PVINF; }
# Get the name of the volume group the physical volume belongs to.
if( m/VG Name\s+(\S+)/ ) {
$pvhash{vg_name} = $1;
next PVINF; }
# Get the size information of the physical volume.
if( m/PV Size\s+(\S+) (\S+)/ ) {
$pvhash{size} = $1;
$pvhash{size_unit} = $2;
next PVINF; }
# Get the physical volume number.
if( m/PV\#\s+(\S+)/ ) {
$pvhash{pv_number} = $1;
next PVINF; }
# Get the status of the physical volume.
if( m/PV Status\s+(.+)/ ) {
$pvhash{status} = $1;
next PVINF; }
# Get the allocation status of the physical volume.
if( m/Allocatable\s+(.+)/ ) {
$pvhash{allocatable} = $1;
next PVINF; }
# Get the number of logical volumes on the physical volume.
if( m/Cur LV\s+(\S+)/ ) {
$pvhash{num_lvols} = $1;
next PVINF; }
# Get the physical extent size and unit of the physical volume.
if( m/PE Size \((\S+)\)\s+(\S+)/ ) {
$pvhash{pe_size} = $2;
$pvhash{pe_size_unit} = $1;
next PVINF; }
# Get the total number of physical extents on the physical volume.
if( m/Total PE\s+(\S+)/ ) {
$pvhash{total_pe} = $1;
next PVINF; }
# Get the number of free extents on the physical volume.
if( m/Free PE\s+(\S+)/ ) {
$pvhash{free_pe} = $1;
next PVINF; }
# Get the number of allocated physical extents on the physical volume.
if( m/Allocated PE\s+(\S+)/ ) {
$pvhash{alloc_pe} = $1;
next PVINF; }
# Get the UUID of the physical volume.
if( m/PV UUID\s+(\S+)/ ) {
$pvhash{uuid} = $1;
next PVINF; }
}
return %pvhash;
} # End of the get_pv_info routine.
#-----------------------------------------------------------------------#
# Subroutine: get_lv_info #
#-----------------------------------------------------------------------#
# Description: This function will return a hash containing all of the #
# information about the specified logical volume. #
#-----------------------------------------------------------------------#
# Parameters: A string containing a logical volume path. #
#-----------------------------------------------------------------------#
# Return Values: On success, a hash with all of the lv information. #
#-----------------------------------------------------------------------#
sub get_lv_info($) {
my $lvname = $_[0];
my %lvhash;
my @lvinfo;
my $units_arg = '';
$units_arg = " --units $units " if ($units);
if ( -e "/usr/sbin/vgdisplay" ) {
@lvinfo = `/usr/sbin/lvdisplay $units_arg $lvname`;
} else {
if( ! -e "/sbin/vgdisplay" ) { die("LVM utilities not installed in /sbin or /usr/sbin"); }
@lvinfo = `/sbin/lvdisplay $units_arg $lvname`;
}
LVINF: foreach(@lvinfo) {
# Get the logical volume name.
if( m/LV Name\s+(\S+)/ ) {
$lvhash{lv_name} = $1;
next LVINF; }
# since version 2.02.89 'LV Name' is no longer the full path, 'LV Path' is.
# LV Path may be bogus or missing in some cases, such as thin pools.
if( m/LV Path\s+(\S+)/ ) {
$lvhash{lv_name} = $1;
next LVINF; }
# Get the volume group name.
if( m/VG Name\s+(\S+)/ ) {
$lvhash{vg_name} = $1;
next LVINF; }
# Get the volume group name.
if( m/LV UUID\s+(\S+)/ ) {
$lvhash{uuid} = $1;
next LVINF; }
# Get the logical volume write status.
if( m/LV Write Access\s+(.+)/ ) {
$lvhash{access} = $1;
next LVINF; }
# Get the logical volume status.
if( m/LV Status\s+(.+)/ ) {
$lvhash{status} = $1;
next LVINF; }
# Get the logical volume number.
if( m/LV \#\s+(\S+)/ ) {
$lvhash{lv_number} = $1;
next LVINF; }
# Get the number of opens for the logical volume.
if( m/\# open\s+(\S+)/ ) {
$lvhash{lv_open} = $1;
next LVINF; }
# Get the logical volume size and size unit.
if( m/LV Size\s+(\S+) (\S+)/ ) {
$lvhash{size} = $1;
$lvhash{size_unit} = $2;
next LVINF; }
# Get the number of extents assigned to the logical volume.
if( m/Current LE\s+(\S+)/ ) {
$lvhash{current_le} = $1;
next LVINF; }
# Get the number of extents allocated to the logical volume.
if( m/Allocated LE\s+(\S+)/ ) {
$lvhash{alloc_le} = $1;
next LVINF; }
# Get the extent allocation type of the logical volume.
if( m/Allocation\s+(.+)/ ) {
$lvhash{allocation} = $1;
next LVINF; }
# Get the number of read ahead sectors for the logical volume.
if( m/Read ahead sectors\s+(\S+)/ ) {
$lvhash{read_ahead} = $1;
next LVINF; }
# Get the block device of the logical volume.
if( m/Block device\s+(\S+)/ ) {
$lvhash{block_device} = $1;
next LVINF; }
if( m/Allocated to snapshot\s+(\S+)%/ ) {
$lvhash{allocated_to_snapshot} = $1;
next LVINF; }
elsif( m/COW-table size\s+([0-9\.]+)\s+(\S+)/ ) {
$lvhash{'cow_table_size'} = $1;
$lvhash{'cow_table_unit'} = $2;
next LVINF; }
# Thin pools have data and metadata allocation
if( m/Allocated pool data\s+(\S+)%/ ) {
$lvhash{allocated_pool_data} = $1;
next LVINF; }
if( m/Allocated metadata\s+(\S+)%/ ) {
$lvhash{allocated_meta_data} = $1;
next LVINF; }
}
return %lvhash;
} # End of the get_lv_info routine.
1;
__END__
# Below is stub documentation for your module. You'd better edit it!
=head1 NAME
Zabbix::Agent::Addons::LVM - Perl extension for accessing Logical Volume Manager(LVM)
data structures on Linux.
=head1 SYNOPSIS
use Zabbix::Agent::Addons::LVM;
Zabbix::Agent::Addons::LVM->units('G');
=head1 ABSTRACT
The live data used in the examples is included in the DESCRIPTION area
for your convenience and reference.
=head1 DESCRIPTION
units() Get or set the units used to report sizes of LVs, PVs, etc.
legal values: hbskmgtpeHBSKMGTPE
see man lvm documentation of --units
get_volume_group_list() This routine will return an array that
contains the names of the volume groups.
@vgs = get_volume_group_list(); print "@vgs \n";
Would yield the following: vg00
get_volume_group_information($) This routine will return all of
the volume group information about
the specified volume group.
%vg = get_volume_group_information("vg00");
foreach(sort keys %vg) {
print "$_ = $vg{$_}\n";
}
Would yield the following:
access = read/write
act_pv = 2
alloc_pe = 3840
alloc_pe_size = 15
alloc_pe_size_unit = GB
cur_lv = 3
cur_pv = 2
free_pe = 864
free_pe_size = 3.38
free_pe_size_unit = GB
max_lv = 256
max_lv_size = 255.99
max_lv_size_unit = GB
max_pv = 256
open_lv = 0
pe_size = 4
pe_size_unit = MB
status = available/resizable
total_pe = 4704
uuid = BBq8si-NyRR-9ZNW-3J5e-DoRO-RBHK-ckrszi
vg_number = 0
vg_size = 18.38
vg_size_unit = GB
vgname = vg00
get_logical_volume_information($) This routine will return all of the
logical volume information associated
with the specified volume group.
%lv = get_logical_volume_information("vg00");
foreach $lvname (sort keys %lv) {
foreach(sort keys %{$lv{$lvname}}) {
print "$_ = $lv{$lvname}->{$_}\n";
}
print "\n";
}
Would yield the following results:
alloc_le = 1024
allocation = next free
cur_le = 1024
device = 58:0
lv_number = 1
lv_size = 4
lv_size_unit = GB
name = /dev/vg00/lvol1
open_lv = 0
read_ahead = 1024
status = available
write_access = read/write
alloc_le = 1280
allocation = next free
cur_le = 1280
device = 58:1
lv_number = 2
lv_size = 5
lv_size_unit = GB
name = /dev/vg00/lvol2
open_lv = 0
read_ahead = 1024
status = available
write_access = read/write
alloc_le = 1536
allocation = next free
cur_le = 1536
device = 58:2
lv_number = 3
lv_size = 6
lv_size_unit = GB
name = /dev/vg00/lvol3
open_lv = 0
read_ahead = 1024
status = available
write_access = read/write
get_physical_volume_information($) This routine will return all of the
information about the physical volumes assigned
to the specified volume group.
%pv = get_physical_volume_information("vg00");
foreach $pvname (sort keys %pv) {
foreach(sort keys %{$pv{$pvname}}) {
print "$_ = $pv{$pvname}->{$_}\n";
}
print "\n";
}
Would yield the following results:
device = /dev/hda3
free_pe = 0
pv_number = 1
status = available / allocatable
total_pe = 2160
device = /dev/hda4
free_pe = 864
pv_number = 2
status = available / allocatable
total_pe = 2544
get_lv_info($) This routine will return all of the information about the
specified logical volume. The information will be returned
in a hash.
get_lv_info
%lv = get_lv_info("/dev/vg00/lvol1");
foreach (sort keys %lv) {
print "$_ = $lv{$_} \n";
}
Would yield the following results:
access = read/write
alloc_le = 1024
allocation = next free
block_device = 58:0
current_le = 1024
lv_name = /dev/vg00/lvol1
lv_number = 1
lv_open = 0
read_ahead = 1024
size = 4
size_unit = GB
status = available
vg_name = vg00
get_pv_info($) This routine will return all of the information about the
specified physical volume. The information will be returned
in a hash.
%pv = get_pv_info("/dev/hda3");
foreach (sort keys %pv) {
print "$_ = $pv{$_} \n";
}
Would yield the following results:
alloc_pe = 2160
allocatable = yes (but full)
free_pe = 0
num_lvols = 2
pe_size = 4096
pe_size_unit = KByte
pv_name = /dev/hda3
pv_number = 1
sectors = 17703630
size = 8.44
size_info = NOT usable 4.19 MB [LVM: 136 KB]
size_unit = GB
status = available
total_pe = 2160
uuid = 2c5ADu-oEdt-ovCe-rqp0-MWpF-I5u1-8XigH4
vg_name = vg00
Command Output Used In The Above Examples: /sbin/vgdisplay -v
--- Volume group ---
VG Name vg00
VG Access read/write
VG Status available/resizable
VG # 0
MAX LV 256
Cur LV 3
Open LV 0
MAX LV Size 255.99 GB
Max PV 256
Cur PV 2
Act PV 2
VG Size 18.38 GB
PE Size 4 MB
Total PE 4704
Alloc PE / Size 3840 / 15 GB
Free PE / Size 864 / 3.38 GB
VG UUID BBq8si-NyRR-9ZNW-3J5e-DoRO-RBHK-ckrszi
--- Logical volume ---
LV Name /dev/vg00/lvol1
VG Name vg00
LV Write Access read/write
LV Status available
LV # 1
# open 0
LV Size 4 GB
Current LE 1024
Allocated LE 1024
Allocation next free
Read ahead sectors 1024
Block device 58:0
--- Logical volume ---
LV Name /dev/vg00/lvol2
VG Name vg00
LV Write Access read/write
LV Status available
LV # 2
# open 0
LV Size 5 GB
Current LE 1280
Allocated LE 1280
Allocation next free
Read ahead sectors 1024
Block device 58:1
--- Logical volume ---
LV Name /dev/vg00/lvol3
VG Name vg00
LV Write Access read/write
LV Status available
LV # 3
# open 0
LV Size 6 GB
Current LE 1536
Allocated LE 1536
Allocation next free
Read ahead sectors 1024
Block device 58:2
--- Physical volumes ---
PV Name (#) /dev/hda3 (1)
PV Status available / allocatable
Total PE / Free PE 2160 / 0
PV Name (#) /dev/hda4 (2)
PV Status available / allocatable
Total PE / Free PE 2544 / 864
=head1 SEE ALSO
L<vgdisplay>(1M)
L<lvdisplay>(1M)
L<pvdisplay>(1M)
=head1 AUTHOR
Chad Kerner, E<lt>chadkerner@yahoo.comE<gt>
=head1 COPYRIGHT AND LICENSE
Copyright 2003 by Chad Kerner
This library is free software; you can redistribute it and/or modify
it under the same terms as Perl itself.
Modified by Daniel Berteaud <daniel@firewall-services.com> to add
support for LVM thin pools
=cut
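The point of this fork is the thin pool support: get_vg_information() and get_lv_info() now expose the allocated_pool_data and allocated_meta_data keys shown above. A minimal sketch of reading them, assuming a placeholder thin pool path:

use strict;
use warnings;
use Zabbix::Agent::Addons::LVM;

# "/dev/vg00/thinpool" is a placeholder; point it at a real thin pool
my %lv = get_lv_info("/dev/vg00/thinpool");
print "pool data used: $lv{allocated_pool_data}%\n"
    if defined $lv{allocated_pool_data};
print "pool metadata used: $lv{allocated_meta_data}%\n"
    if defined $lv{allocated_meta_data};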

View File

@ -0,0 +1,21 @@
package Zabbix::Agent::Addons::UPS;
use warnings;
use strict;
use File::Which;
# List configured UPS (only nut is supported)
sub list_ups {
my @ups = ();
my $upsc = which('upsc');
if ($upsc && -x $upsc){
my @out = qx($upsc -l 2>/dev/null);
if ($? == 0){
@ups = @out;
chomp @ups;
}
}
return @ups;
};
1;
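As with the Disks module, a discovery script would typically wrap list_ups() into LLD JSON. A minimal sketch, assuming a hypothetical {#UPSNAME} macro name:

#!/usr/bin/perl
# Usage sketch only; the {#UPSNAME} macro name is an assumption.
use strict;
use warnings;
use JSON;
use Zabbix::Agent::Addons::UPS;

# An empty data array is returned when upsc is missing or no UPS is configured
my @data = map { { '{#UPSNAME}' => $_ } } Zabbix::Agent::Addons::UPS::list_ups();
print to_json({ data => \@data }) . "\n";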

View File

@ -0,0 +1,27 @@
module zabbix-agent-addons 1.0;
require {
type kernel_t;
type devlog_t;
type zabbix_var_lib_t;
type sudo_exec_t;
type proc_mdstat_t;
type zabbix_agent_t;
class process setrlimit;
class capability { audit_write dac_override sys_resource };
class file { execute execute_no_trans getattr ioctl open read };
class netlink_audit_socket { create nlmsg_relay };
class sock_file write;
class unix_dgram_socket { connect create sendto };
}
#============= zabbix_agent_t ==============
allow zabbix_agent_t devlog_t:sock_file write;
allow zabbix_agent_t kernel_t:unix_dgram_socket sendto;
allow zabbix_agent_t proc_mdstat_t:file { getattr ioctl open read };
allow zabbix_agent_t self:capability { audit_write dac_override sys_resource };
allow zabbix_agent_t self:netlink_audit_socket { create nlmsg_relay };
allow zabbix_agent_t self:process setrlimit;
allow zabbix_agent_t self:unix_dgram_socket { connect create };
allow zabbix_agent_t sudo_exec_t:file { execute execute_no_trans };
allow zabbix_agent_t zabbix_var_lib_t:file { execute execute_no_trans ioctl open read };

View File

@ -1,7 +1,11 @@
%if 0%{?rhel} && 0%{?rhel} < 7
%global _without_selinux 1
%endif
Summary: Scripts for Zabbix monitoring
Name: zabbix-agent-addons
Version: 0.0.1
Release: 1.beta3
Version: 0.2.172
Release: 1%{?dist}
Source0: %{name}-%{version}.tar.gz
BuildArch: noarch
@ -13,8 +17,22 @@ Requires: zabbix-agent
Requires: perl(Getopt::Long)
Requires: perl(Getopt::Std)
Requires: perl(JSON)
Requires: perl(Linux::LVM)
Requires: perl(POSIX)
Requires: perl(MIME::Base64)
Requires: perl(File::Which)
Requires: perl(Config::Simple)
Requires: perl(Statistics::Descriptive)
%if 0%{?rhel} > 6
# used by samba4 scripts, which won't run on anything < 7
Requires: perl(File::ReadBackwards)
%endif
Requires: fping
BuildRequires: perl
%if ! 0%{?_without_selinux}
Requires: policycoreutils
BuildRequires: selinux-policy-devel
BuildRequires: checkpolicy
%endif
AutoReqProv: no
@ -28,6 +46,11 @@ LVM, RAID status, S.M.A.R.T. drives, BackupPC etc...
%setup -q
%build
%if ! 0%{?_without_selinux}
pushd selinux
make -f %{_datadir}/selinux/devel/Makefile
popd
%endif
%install
@ -38,32 +61,784 @@ LVM, RAID status, S.M.A.R.T. drives, BackupPC etc...
%{__install} -m 0755 zabbix_scripts/* $RPM_BUILD_ROOT%{_localstatedir}/lib/zabbix/bin
# Install Zabbix conf
%{__install} -d $RPM_BUILD_ROOT%{_sysconfdir}/zabbix/zabbix_agentd.conf.d/
%{__install} -m 0755 zabbix_conf/* $RPM_BUILD_ROOT%{_sysconfdir}/zabbix/zabbix_agentd.conf.d/
%{__install} -m 0644 zabbix_conf/* $RPM_BUILD_ROOT%{_sysconfdir}/zabbix/zabbix_agentd.conf.d/
# Install perl modules
%{__install} -d -m 0755 $RPM_BUILD_ROOT%{perl_vendorlib}
cp -r lib/* $RPM_BUILD_ROOT%{perl_vendorlib}/
# Install sensors conf
%{__install} -m 0755 conf/sensors.conf $RPM_BUILD_ROOT%{_sysconfdir}/zabbix/
%{__install} -m 0755 conf/sensors.ini $RPM_BUILD_ROOT%{_sysconfdir}/zabbix/
# Install sudo conf
%{__install} -d 750 $RPM_BUILD_ROOT%{_sysconfdir}/sudoers.d
%{__install} -m 600 conf/sudo.conf $RPM_BUILD_ROOT%{_sysconfdir}/sudoers.d/zabbix_agent
# Install SELinux policy
%if ! 0%{?_without_selinux}
%{__install} -d 750 $RPM_BUILD_ROOT%{_datadir}/selinux/packages/%{name}
%{__install} -m644 selinux/%{name}.pp $RPM_BUILD_ROOT%{_datadir}/selinux/packages/%{name}/%{name}.pp
%endif
%clean
%{__rm} -rf $RPM_BUILD_ROOT
%pre
%preun
%post
if [ $1 -eq 2 ] ; then
if [ -e "/etc/zabbix/sensors.conf" ]; then
/var/lib/zabbix/bin/util_convert_sensors_ini /etc/zabbix/sensors.conf
fi
fi
%files
%defattr(-,root,root,-)
%doc README CHANGELOG.git
%doc README
%doc zabbix_templates/*
%dir %attr(0750,zabbix,zabbix) %{_localstatedir}/lib/zabbix/bin
%{_localstatedir}/lib/zabbix/bin/*
%config(noreplace) %attr(0640,root,zabbix) %{_sysconfdir}/zabbix/sensors.conf
%{perl_vendorlib}
%config(noreplace) %attr(0640,root,zabbix) %{_sysconfdir}/zabbix/sensors.ini
%config(noreplace) %attr(0640,root,zabbix) %{_sysconfdir}/zabbix/zabbix_agentd.conf.d/*
%attr(0600,root,root) %{_sysconfdir}/sudoers.d/*
%attr(0440,root,root) %{_sysconfdir}/sudoers.d/*
%if ! 0%{?_without_selinux}
%{_datadir}/selinux/packages/%{name}/%{name}.pp
%endif
%changelog
* Mon Apr 15 2013 Daniel B. <daniel@firewall-services.com> - 0.0.1-1
* Thu Dec 21 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.172-1
- Add Zabbix template for storageDevices (dbd@ehtrace.com)
- Read raw value for SSL_Life_Left (dbd@ehtrace.com)
- Read SSD_Life_Left if available (dbd@ehtrace.com)
- /dev/bus/0 might not exist but can be queried (dbd@ehtrace.com)
- Report more info from some NVMe (dbd@ehtrace.com)
- Adjust default values for stor dev (dbd@ehtrace.com)
- Fix UserParam (dbd@ehtrace.com)
- Add new script for smart monitoring (dbd@ehtrace.com)
* Tue Sep 19 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.171-1
- Ignore samba NT_STATUS_PROTOCOL_UNREACHABLE errors (dbd@ehtrace.com)
* Thu Jun 29 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.170-1
- Fix + discover NVMe (dbd@ehtrace.com)
* Thu Jun 29 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.169-1
- Better sensor output parsing (dbd@ehtrace.com)
* Thu Jun 29 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.168-1
- Drop ipmitool stderr and simplify output parsing (dbd@ehtrace.com)
* Thu Jun 29 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.167-1
- Fix fan detection on some BMC boards (dbd@ehtrace.com)
- Update ZFS template (dbd@ehtrace.com)
* Sat Mar 26 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.166-1
- Fix counting samba computers auth tries (dbd@ehtrace.com)
* Mon Mar 21 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.165-1
- last_seen might not be defined in check_unifi (dbd@ehtrace.com)
* Mon Mar 21 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.164-1
- Use JSON bool in unifi scripts (dbd@ehtrace.com)
* Mon Jan 24 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.163-1
- Fix check_zimbra_sudo (dbd@ehtrace.com)
* Fri Jan 21 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.162-1
- Add alloc_ct for LVM VG when missing (dbd@ehtrace.com)
* Fri Jan 21 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.161-1
- Fix Zimbra discovery and check scripts (dbd@ehtrace.com)
* Thu Jan 20 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.160-1
- Add a {#DOCKER_CONTAINER_STATUS} LLD macro (dbd@ehtrace.com)
* Thu Jan 20 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.159-1
- Update Docker template (dbd@ehtrace.com)
- Don't query state in docker discovery (dbd@ehtrace.com)
* Thu Jan 13 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.158-1
- Small fixes for Docker check script and template (dbd@ehtrace.com)
* Thu Jan 13 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.157-1
- Enhancements in the Docker template (dbd@ehtrace.com)
* Wed Jan 12 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.156-1
- Add Docker scripts (dbd@ehtrace.com)
* Tue Jan 11 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.155-1
- Release bump
* Thu Dec 16 2021 Daniel Berteaud <dani@lapiole.org> 0.2.154-1
- Fix zpool iostat as /proc/spl/kstat/zfs/pool/io doesn't exist anymore
(dani@lapiole.org)
- Add nodata triggers for Elasticsearch (daniel@firewall-services.com)
- Include Zabbix template for Elasticsearch (daniel@firewall-services.com)
* Tue Oct 19 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.153-1
- Tweak elasticsearch monitoring scripts (daniel@firewall-services.com)
* Mon Oct 18 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.152-1
- Small fixes in elasticsearch scripts (daniel@firewall-services.com)
* Mon Oct 18 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.151-1
- Add Elasticsearch monitoring scripts (daniel@firewall-services.com)
- Updates and fixes in Zabbix templates (daniel@firewall-services.com)
* Fri Jul 16 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.150-1
- Do not count Unconfigured(good) drives as an error (daniel@firewall-
services.com)
- Remove duplicated templates (daniel@firewall-services.com)
- Typo in template filename (daniel@firewall-services.com)
- Update and provide more templates (daniel@firewall-services.com)
- Update and add more Zabbix templates (daniel@firewall-services.com)
- Remove health and capacity sanoid checks from discovery (daniel@firewall-
services.com)
* Thu May 27 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.149-1
- Support Debian lib path for BackupPC (daniel@firewall-services.com)
* Wed Feb 17 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.148-1
- Fix zfs pool monitoring when a pool has errors (daniel@firewall-services.com)
- Alert only if not samba monitoring for 25min (instead of 15)
(daniel@firewall-services.com)
* Thu Jan 14 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.147-1
- [check_samba_dc_sudo] Fix typo with GPO listing (daniel@firewall-
services.com)
* Thu Jan 14 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.146-1
- [check_samba_sudo] Update default audit log file path, and drop errors from
samba-tool (daniel@firewall-services.com)
* Thu Jan 14 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.145-1
- Add perl(File::ReadBackwards) dependency (daniel@firewall-services.com)
* Thu Jan 14 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.144-1
- Optimize samba audit_auth log parsing by reading from the tail of the file
(daniel@firewall-services.com)
* Wed Jan 13 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.143-1
- Update BackupPC template (daniel@firewall-services.com)
* Wed Jan 13 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.142-1
- Modernize lvm monitoring scripts (daniel@firewall-services.com)
- Don't catch stderr for vgdisplay commands (daniel@firewall-services.com)
* Tue Jan 12 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.141-1
- Small fixes in check_samba_dc (skip unparsable logs, and handle message with
NT_STATUS_NO_SUCH_USER (daniel@firewall-services.com)
* Mon Jan 11 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.140-1
- Add general stats to BackupPC monitoring script (daniel@firewall-
services.com)
* Mon Jan 11 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.139-1
- Add OU discovery to samba monitoring (daniel@firewall-services.com)
* Mon Jan 11 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.138-1
- Add missing Samba application name for aggregated items (daniel@firewall-
services.com)
- Minor fixes for samba script and template (daniel@firewall-services.com)
* Sat Jan 09 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.137-1
- Add scripts and template to monitor Samba 4 DC (daniel@firewall-services.com)
* Fri Jan 08 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.136-1
- Add guest counter for PVE cluster and node (daniel@firewall-services.com)
* Thu Dec 17 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.135-1
- Update Template_App_MySQL (daniel@firewall-services.com)
- Update Template_App_ZFS (daniel@firewall-services.com)
* Tue Dec 01 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.134-1
- Possibility to check certificate for Unifi API (daniel@firewall-services.com)
* Sat Nov 07 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.133-1
- Add perl in BuildReq for el8 (daniel@firewall-services.com)
* Mon Oct 26 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.132-1
- Run upsc commands with 2>/dev/null (daniel@firewall-services.com)
- IPMI sensors can have / and - in their name (daniel@firewall-services.com)
* Thu Oct 22 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.131-1
- Don't return garbage in mpath discovery if command failed (daniel@firewall-
services.com)
* Tue Oct 20 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.130-1
- Add App_Multipath template (daniel@firewall-services.com)
- Add Linux_Server template (daniel@firewall-services.com)
* Tue Oct 20 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.129-1
- Add scripts to discover and check multipath devices (daniel@firewall-
services.com)
* Tue Sep 29 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.128-1
- Use MAC of device if no name is defined in Unifi device discovery
(daniel@firewall-services.com)
* Wed Sep 23 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.127-1
- Update scripts to work with ssacli (in addition to hpacucli) (daniel@firewall-
services.com)
* Fri Sep 04 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.126-1
- Add some compatibility for older MySQL servers (daniel@firewall-services.com)
* Tue Sep 01 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.125-1
- Allow empty --defaults opt for check_mysql_sudo (daniel@firewall-
services.com)
* Mon Aug 31 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.124-1
- Update Template_App_MySQL (daniel@firewall-services.com)
* Mon Aug 31 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.123-1
- check_mysql needs sudo permissions (daniel@firewall-services.com)
* Mon Aug 31 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.122-1
- Add MySQL monitoring script and template (daniel@firewall-services.com)
- Add Template_Vhost (daniel@firewall-services.com)
- Add templates for Windows (minimal and server) (daniel@firewall-services.com)
- Add /usr/local/BackupPC/lib as lib dir for BackupPC scripts (daniel@firewall-
services.com)
* Wed May 20 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.121-1
- Do not rely on distrib version to check if --output-format is needed for
check_pve_sudo (daniel@firewall-services.com)
* Fri Apr 03 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.120-1
- Fix mdadm when we have spares (daniel@firewall-services.com)
* Tue Mar 03 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.119-1
- Better detection of smart capable drives (daniel@firewall-services.com)
* Mon Mar 02 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.118-1
- Update Template_App_PVE_Cluster (daniel@firewall-services.com)
* Mon Mar 02 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.117-1
- Add basic SNMP templates (daniel@firewall-services.com)
- Add Template_App_Unifi (daniel@firewall-services.com)
- Add Template_OS_PfSense2 (daniel@firewall-services.com)
- Add Template_Ping (daniel@firewall-services.com)
- Fix cache when the same resource is queried with different options
(daniel@firewall-services.com)
- Remove debug statement in util_populate_pve_cache (daniel@firewall-
services.com)
* Mon Mar 02 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.116-1
- Default to accept cached value up to 5 min old for check_pve_sudo
(daniel@firewall-services.com)
* Mon Mar 02 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.115-1
- Add a script to populate check_pve_sudo cache (daniel@firewall-services.com)
- Enhance check_pve_sudo with a local cache support to speed up monitoring
(daniel@firewall-services.com)
* Tue Feb 25 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.114-1
- Automatic commit of package [zabbix-agent-addons] release [0.2.112-1].
(daniel@firewall-services.com)
- drop stderr for upsc commands (daniel@firewall-services.com)
* Tue Feb 25 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.113-1
- Skip Core X temp sensors (daniel@firewall-services.com)
* Wed Feb 19 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.112-1
- drop stderr for upsc commands (daniel@firewall-services.com)
* Mon Feb 17 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.111-1
- Update ZFS and BackupPC templates (daniel@firewall-services.com)
* Mon Feb 10 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.110-1
- Fix a typo in ZabbixSizeTooSmallFactor conf (daniel@firewall-services.com)
* Wed Feb 05 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.109-1
- Don't skip local node in PVE nodes discovery (daniel@firewall-services.com)
* Wed Jan 22 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.108-1
- Only skip RAID volumes checks when in HBA mode, not physical disks checks
(daniel@firewall-services.com)
- Declare variable in the correct scope for hba mode detection (daniel@firewall-
services.com)
- Handle megaraid controllers in HBA/JBOD mode (skip RAID checks)
(daniel@firewall-services.com)
- Use head -1 to be sure to get a single value for sensors (daniel@firewall-
services.com)
* Thu Jan 16 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.107-1
- Add Zabbix template for Squid (daniel@firewall-services.com)
* Thu Jan 16 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.106-1
- Remove uri from UserParam args for squid (daniel@firewall-services.com)
* Tue Dec 17 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.105-1
- Fix reading sizeNew from last backup (except when link hasn't run yet)
(daniel@firewall-services.com)
* Sun Dec 15 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.104-1
- Disable vfs.dev.discovery in default conf (daniel@firewall-services.com)
* Sun Dec 15 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.103-1
- Set min backup size to 0 in template (daniel@firewall-services.com)
* Sun Dec 15 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.102-1
- Fix key name for enabled value (daniel@firewall-services.com)
* Sun Dec 15 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.101-1
- Init complete JSON objects with default values in check_backuppc_sudo
(daniel@firewall-services.com)
- Remove unused variables (daniel@firewall-services.com)
* Sun Dec 15 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.100-1
- Only subtract $new_size_of_last_full once (daniel@firewall-services.com)
* Fri Dec 13 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.99-1
- Fix when a host has a single backup with 0 new file size (daniel@firewall-
services.com)
* Fri Dec 13 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.98-1
- Fix backups total size computation when there's only one full
(daniel@firewall-services.com)
* Fri Dec 13 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.97-1
- Include Zabbix template to monitor BackupPC (daniel@firewall-services.com)
* Fri Dec 13 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.96-1
- Enhanced stats for BackupPC's entity (daniel@firewall-services.com)
* Wed Dec 11 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.95-1
- Wait for BackupPC_link to run before we take new sizes in our stat
(daniel@firewall-services.com)
* Wed Dec 11 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.94-1
- Fix BackupPC script when BackuPPC_link is waiting for the nightly cleanup to
finish (daniel@firewall-services.com)
* Fri Nov 29 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.93-1
- Don't use autoloader in our forked Linux::LVM (daniel@firewall-services.com)
- Don't requires Linux::LVM anymore (daniel@firewall-services.com)
- Replace Linux::LVM occurrences with Zabbix::Agent::Addons::LVM
(daniel@firewall-services.com)
- Bundle a fork of Linux::LVM with support for LVM thin pools (daniel@firewall-
services.com)
* Wed Nov 27 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.92-1
- Better compat with 4.4 vfs.dev.discovery (and use lsblk to get the list of
dev if available) (daniel@firewall-services.com)
* Tue Nov 26 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.91-1
- Add DEVNAME macro for vfs.dev.discovery to ease transition to 4.4
(daniel@firewall-services.com)
- Minor update in ZFS template (daniel@firewall-services.com)
* Sun Oct 20 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.90-1
- Fix some unifi stats for uap/usw in recent unifi versions (daniel@firewall-
services.com)
* Mon Oct 14 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.89-1
- Add Zabbix template for GlusterFS (daniel@firewall-services.com)
- Add Zabbix template for DRBD (daniel@firewall-services.com)
- Add Zabbix template for Proxmox Mail Gateway (daniel@firewall-services.com)
- Add template to monitor a PVE cluster (daniel@firewall-services.com)
- ZFS ARC low hit ratio for data and global are calculated for 1h
(daniel@firewall-services.com)
* Fri Oct 11 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.88-1
- Add Zabbix template for ZFS (daniel@firewall-services.com)
* Fri Oct 11 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.87-1
- Enhance ZFS monitoring scripts to retrieve ARC stats (daniel@firewall-
services.com)
- Send an empty data array when Zimbra is not installed (daniel@firewall-
services.com)
* Tue Oct 01 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.86-1
- Fix pve script when no net or disk stats are available (daniel@firewall-
services.com)
* Sat Sep 21 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.85-1
- Check $sanoidmon is defined before checking its value (daniel@firewall-
services.com)
* Sat Sep 21 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.84-1
- Fix var name in disco_zfs (daniel@firewall-services.com)
* Sat Sep 21 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.83-1
- Better sanoïd monitoring integration (daniel@firewall-services.com)
* Fri Sep 20 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.82-1
- Remove trailing x for compressratio with ZoL < 0.8 (daniel@firewall-
services.com)
* Fri Sep 20 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.81-1
- Revert to suffix conversion for ZFS error count (daniel@firewall-
services.com)
* Fri Sep 20 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.80-1
- Rewrite ZFS monitoring from scratch (daniel@firewall-services.com)
- Set info in the data element for Zimbra discovery (daniel@firewall-
services.com)
* Fri Sep 13 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.79-1
- Add simple Zabbix service status scripts (daniel@firewall-services.com)
* Tue Sep 03 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.77-1
- Skip self PVE node (daniel@firewall-services.com)
* Tue Jul 30 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.76-1
- Add support for some NVMe temp sensors, found on OVH's Advanced servers for
example (daniel@firewall-services.com)
- Fix when running on Debian buster, which fails with RC 25 when using
File::Spec devnull (daniel@firewall-services.com)
* Tue May 21 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.75-1
- Add basic scripts to monitor VDO volumes (daniel@firewall-services.com)
* Tue Apr 16 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.74-1
- Don't fail if Statistics::Descriptive doesn't support quantile
(daniel@firewall-services.com)
* Mon Apr 15 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.73-1
- More work on BackupPC's monitoring scripts (daniel@firewall-services.com)
* Thu Apr 04 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.72-1
- Fix reporting MaxXferError (daniel@firewall-services.com)
* Thu Apr 04 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.71-1
- Fix a typo in check_backuppc_sudo (daniel@firewall-services.com)
* Thu Apr 04 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.70-1
- Fix counting entity size (daniel@firewall-services.com)
* Thu Apr 04 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.69-1
- Don't count vm as an entity in BackupPC's entities discovery
(daniel@firewall-services.com)
* Thu Apr 04 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.68-1
- Update BackupPC's discovery and monitoring scripts (daniel@firewall-
services.com)
* Wed Apr 03 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.67-1
- Add last_errors in backuppc JSON info (daniel@firewall-services.com)
- Update conf for BackupPC (daniel@firewall-services.com)
* Wed Apr 03 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.66-1
- Remove crazy and useless regex to exclude hosts from BackupPC
(daniel@firewall-services.com)
* Wed Apr 03 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.65-1
- Enhance backuppc reporting script, including reporting the new file size and
sending all the info at once in JSON format (daniel@firewall-services.com)
- Some coding style updates (daniel@firewall-services.com)
- More compact BPCSTATUS (1/0 instead of enabled/disabled) (daniel@firewall-
services.com)
* Wed Feb 20 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.64-1
- Also report the number in the deferred queue (daniel@firewall-services.com)
* Wed Feb 20 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.63-1
- Report number of email in the active and hold queues (daniel@firewall-
services.com)
* Sat Jan 19 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.62-1
- Add scripts to ping other hosts (daniel@firewall-services.com)
* Mon Dec 10 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.61-1
- Save cookies to a file so we don't have to log in at every invocation (GLPI
#34449) (daniel@firewall-services.com)
* Sun Dec 09 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.60-1
- Print ZBX_NOTSUPPORTED in case of API error, preventing tons of error
messages in Zabbix Server's logs (daniel@firewall-services.com)
* Sun Dec 09 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.59-1
- Fix ZBX_NOTSUPPORTED string in several scripts (daniel@firewall-services.com)
* Thu Nov 15 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.57-0.beta1
- Add enhanced squid monitoring support (daniel@firewall-services.com)
* Fri Nov 09 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.56-1
- Add simple script for nginx (similar to httpd) (daniel@firewall-services.com)
* Fri Oct 26 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.55-1
- Fix PVE storage monitoring GLPI #33910 (daniel@firewall-services.com)
* Wed Oct 24 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.54-1
- Rework PMG monitoring scripts (daniel@firewall-services.com)
* Thu Oct 18 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.52-0.beta1
- Add very basic script for PMG monitoring (daniel@firewall-services.com)
* Tue Sep 18 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.51-1
- check_unifi: also output satisfaction for stations (daniel@firewall-
services.com)
* Mon Sep 17 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.50-1
- Fix comparison with uninitialized value in check_unifi (daniel@firewall-
services.com)
* Sat Sep 15 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.49-1
- Report number of unarchived alarms in check_unifi --unifi (daniel@firewall-
services.com)
* Sat Sep 15 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.48-1
- More fixes for AP monitoring in check_unifi (daniel@firewall-services.com)
* Sat Sep 15 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.47-1
- Several fixes in check_unifi (daniel@firewall-services.com)
* Fri Sep 14 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.46-1
- Enhance Unifi discovery and monitoring, adding support for station monitoring
(daniel@firewall-services.com)
* Thu Sep 13 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.45-0.beta2
- Fix check_unifi when value is defined but false (daniel@firewall-
services.com)
* Thu Sep 13 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.44-0.beta1
- Add scripts to monitor Unifi sites (daniel@firewall-services.com)
* Tue Aug 21 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.43-1
- Fix PVE scripts to work with the new pvesh version (daniel@firewall-services.com)
* Mon Jul 23 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.42-1
- Initialize an empty json object (daniel@firewall-services.com)
* Mon Jul 09 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.41-1
- Don't log sudo usage for Zabbix (daniel@firewall-services.com)
* Wed Jul 04 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.40-1
- Fix ZFS pool stats retrieval (daniel@firewall-services.com)
* Wed Jun 13 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.39-1
- Fix computing pool CPU usage in check_pve (daniel@firewall-services.com)
* Thu Jun 07 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.38-1
- Add global net and disk stats for the cluster in check_pve_sudo
(daniel@firewall-services.com)
* Tue Jun 05 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.37-1
- Fix check_pve_sudo for single node monitoring (daniel@firewall-services.com)
* Tue Jun 05 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.36-1
- Remove redundant condition (daniel@firewall-services.com)
- Fix {#PVE_STOR_STATUS} macro (daniel@firewall-services.com)
- Only gather info about online nodes (daniel@firewall-services.com)
- Add some global cluster stats for PVE (daniel@firewall-services.com)
* Sun Jun 03 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.35-1
- Enhance PVE scripts and conf (daniel@firewall-services.com)
- Add basic scripts for PVE monitoring (daniel@firewall-services.com)
* Wed May 30 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.34-1
- Add stats for ZFS zpools (daniel@firewall-services.com)
* Tue May 29 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.33-1
- Ensure we always return a value for scan action status errors in check_zfs
(daniel@firewall-services.com)
* Tue May 29 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.32-1
- Handle situations where there are more than 1000 errors on an item in ZFS
pools (daniel@firewall-services.com)
* Tue May 29 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.31-1
- Various enhancements in check_zfs (daniel@firewall-services.com)
- Fix macro name for zfs zpool discovery (daniel@firewall-services.com)
* Mon May 28 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.30-1
- Rename vfs.zfs.discovery to vfs.zfs.zpool.discovery, so later we'll be able
to add other discovery rules for, say, datasets (daniel@firewall-services.com)
* Mon May 28 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.29-1
- Add scripts to discover and check ZFS zpools (daniel@firewall-services.com)
* Tue Mar 06 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.28-1
- Use "all" key to get all httpd stats in JSON format (daniel@firewall-
services.com)
* Tue Mar 06 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.27-1
- Respond with all stats as a JSON structure if no --what given
(daniel@firewall-services.com)
* Tue Mar 06 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.26-1
- Support space in httpd status key, so total_accesses and total_kbytes are
available again (daniel@firewall-services.com)
* Tue Feb 06 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.25-1
- Fix mdadm RAID discovery condition (daniel@firewall-services.com)
* Tue Jan 09 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.24-1
- Don't WARN when device is being checked, only when it's rebuilding
(daniel@firewall-services.com)
- Don't detect mdadm RAID in containers (daniel@firewall-services.com)
* Thu Nov 30 2017 Daniel Berteaud <daniel@firewall-services.com> 0.2.23-1
- Check line format in check_httpd, instead of splitting errors in case
server-status redirects to somewhere else (daniel@firewall-services.com)
* Mon Nov 20 2017 Daniel Berteaud <daniel@firewall-services.com> 0.2.22-1
- Add script to monitor spamassassin's bayes database stats (daniel@firewall-
services.com)
- Symlink releasers.conf to the global one (daniel@firewall-services.com)
* Tue Nov 14 2017 Daniel Berteaud <daniel@firewall-services.com> 0.2.21-1
- Remove the now non-existing CHANGELOG.git file (daniel@firewall-services.com)
* Tue Nov 14 2017 Daniel Berteaud <daniel@firewall-services.com> 0.2.20-1
- new package built with tito
* Thu Oct 12 2017 Daniel Berteaud <daniel@firewall-services.com> - 0.2.19-1
- Correctly handle Partially Degraded state
* Thu Aug 24 2017 Daniel Berteaud <daniel@firewall-services.com> - 0.2.18-1
- Only include SELinux policy module on el7
* Wed Aug 23 2017 Daniel Berteaud <daniel@firewall-services.com> - 0.2.17-1
- Add a SELinux policy module
* Wed Jun 14 2017 Daniel Berteaud <daniel@firewall-services.com> - 0.2.16-1
- Add kernel.openedfile UserParameter
* Thu Nov 24 2016 Daniel Berteaud <daniel@firewall-services.com> - 0.2.15-1
- Fix discovery scripts to always return a valid JSON value, even if empty
(sensors, lvm and nut_ups)
* Wed Nov 9 2016 Daniel Berteaud <daniel@firewall-services.com> - 0.2.14-1
- Add scripts to monitor apache httpd
* Sun Oct 30 2016 Daniel Berteaud <daniel@firewall-services.com> - 0.2.13-1
- Fix handling Airflow_Temperature_Cel label
* Fri Oct 28 2016 Daniel Berteaud <daniel@firewall-services.com> - 0.2.12-1
- Support Airflow_Temperature_Cel as temp label for smartctl based sensors
* Thu Sep 1 2016 Daniel Berteaud <daniel@firewall-services.com> - 0.2.11-1
- Add support for lm_sensors based sensors
* Thu Aug 25 2016 Daniel Berteaud <daniel@firewall-services.com> - 0.2.10-1
- Add monitoring item for squid's FD
* Wed Apr 6 2016 Daniel Berteaud <daniel@firewall-services.com> - 0.2.9-1
- Detect HDD temp sensors on sat+megaraid controllers
* Mon Mar 21 2016 Daniel B. <daniel@firewall-services.com> - 0.2.8-1
- Prevent running several gluster check commands at the same time
* Wed Sep 16 2015 Daniel B. <daniel@firewall-services.com> - 0.2.7-1
- Prevent GlusterFS heal false positive due to concurrent locking
* Mon Sep 14 2015 Daniel B. <daniel@firewall-services.com> - 0.2.6-1
- Add script to discover and monitor DRBD resources
* Wed Sep 9 2015 Daniel B. <daniel@firewall-services.com> - 0.2.5-1
- Support negative values for temp sensors
* Mon Jul 27 2015 Daniel B. <daniel@firewall-services.com> - 0.2.4-1
- Several enhancements in sensors ini generator
* Fri Jul 24 2015 Daniel B. <daniel@firewall-services.com> - 0.2.3-1
- Separate UPS default threshold
- Minor coding style updates
* Mon Jul 20 2015 Daniel B. <daniel@firewall-services.com> - 0.2.2-1
- Start working on perl libs to reduce code duplication
- Detect nut UPS temp sensors
* Fri Jul 10 2015 Daniel B. <daniel@firewall-services.com> - 0.2.1-1
- Fix GlusterFS brick count on 3.7.x
* Fri Jul 10 2015 Daniel B. <daniel@firewall-services.com> - 0.2.0-1
- Migrate sensors config to an ini format
- Add a generator script which detects available sensors
* Tue Jul 7 2015 Daniel B. <daniel@firewall-services.com> - 0.1.27-1
- Support different sensors types
* Thu Jun 4 2015 Daniel B. <daniel@firewall-services.com> - 0.1.26-1
- Alert if a self heal is in progress on a glusterfs vol
* Thu Jun 4 2015 Daniel B. <daniel@firewall-services.com> - 0.1.25-1
- Fix gluster checks if info heal-failed is not supported
* Wed Apr 15 2015 Daniel B. <daniel@firewall-services.com> - 0.1.24-1
- Report a warning if a RAID array is resyncing
* Tue Feb 10 2015 Daniel B. <daniel@firewall-services.com> - 0.1.23-1
- Fix disco_filesystem to output valid JSON
* Thu Jan 8 2015 Daniel B. <daniel@firewall-services.com> - 0.1.22-1
- Fix check_qmail_sudo
* Mon Jan 5 2015 Daniel B. <daniel@firewall-services.com> - 0.1.21-1
- Add scripts to check qmail (requires qmqtool)
* Fri Nov 7 2014 Daniel B. <daniel@firewall-services.com> - 0.1.20-1
- discover LVM thin pools
- report LVM thin pools allocation
* Sun Sep 14 2014 Daniel B. <daniel@firewall-services.com> - 0.1.19-1
- Adapt squidclient commands to work with squid 3.1
* Wed Jul 16 2014 Daniel B. <daniel@firewall-services.com> - 0.1.18-1
- Add simple discovery and status check for GlusterFS
* Thu Jul 10 2014 Daniel B. <daniel@firewall-services.com> - 0.1.17-1
- Add discovery for MegaRAID controllers
* Wed Jul 9 2014 Daniel B. <daniel@firewall-services.com> - 0.1.16-1
- Add discovery script for mdadm based RAID devices
* Tue May 6 2014 Daniel B. <daniel@firewall-services.com> - 0.1.15-1
- Add a simple script to check nmb lookups
* Wed Feb 19 2014 Daniel B. <daniel@firewall-services.com> - 0.1.14-1
- remove scripts to discover and monitor certificates, they are too specific
and are now in smeserver-zabbix-agent
* Tue Feb 18 2014 Daniel B. <daniel@firewall-services.com> - 0.1.13-1
- Move phpki conf to the correct location
* Tue Feb 18 2014 Daniel B. <daniel@firewall-services.com> - 0.1.12-1
- Add scripts to discover and monitor certificates (designed to work with PHPki)
* Fri Nov 29 2013 Daniel B. <daniel@firewall-services.com> - 0.1.11-1
- Possibility to disable hosts monitoring in BackupPC by adding
$Conf{ZabbixMonitoring} = 0 in the conf file
* Mon Oct 28 2013 Daniel B. <daniel@firewall-services.com> - 0.1.10-1
- Do not skip removable devices in disco_block_device
* Tue Oct 1 2013 Daniel B. <daniel@firewall-services.com> - 0.1.9-1
- Fix macros names in disco_raid_hp_sudo script
* Tue Oct 1 2013 Daniel B. <daniel@firewall-services.com> - 0.1.8-1
- Add simple scripts to monitor HP Smart Arrays
* Tue Apr 23 2013 Daniel B. <daniel@firewall-services.com> - 0.1.7-1
- Initialize an empty array in disco_backuppc_sudo
- Return more useful macros in disco_backuppc_sudo
- Skip some blocks (loop, ram, dm) in disco_smart_sudo
* Mon Apr 22 2013 Daniel B. <daniel@firewall-services.com> - 0.1.6-1
- Fix permissions on sudoers fragment
- Use full path to smartctl binary
* Mon Apr 22 2013 Daniel B. <daniel@firewall-services.com> - 0.1.5-1
- Rewrite disco_smart_sudo in perl
* Thu Apr 18 2013 Daniel B. <daniel@firewall-services.com> - 0.1.4-1
- Possibility to pass a (base64 encoded) regex for backuppc hosts discovery
- Add nut ups scripts
- Fix lvm discovery on some systems
* Thu Apr 18 2013 Daniel B. <daniel@firewall-services.com> - 0.1.3-1
- Comment the manual net.if.discovery key
* Thu Apr 18 2013 Daniel B. <daniel@firewall-services.com> - 0.1.2-1
- Add network interface discovery scripts
- do not prepend /dev to block devices (not supported on older Zabbix agent)
* Wed Apr 17 2013 Daniel B. <daniel@firewall-services.com> - 0.1.1-1
- Fix a typo in smart.conf
* Wed Apr 17 2013 Daniel B. <daniel@firewall-services.com> - 0.1.0-1
- Initial release

View File

@ -1,13 +1,24 @@
# Discovery of configured host
# Key: backuppc.host.discovery
# Macro: {#BPCSTATUS}
# Filter regex: enabled => true
# Filter regex: enabled|1 => true
# Other available macros:
# {#BPCPERIOD}: Max age (in day) the oldest backup should be
# {#BPCHOST}: name of the backup host
UserParameter=backuppc.host.discovery,/usr/bin/sudo /var/lib/zabbix/bin/disco_backuppc_sudo
UserParameter=backuppc.host.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_backuppc_sudo --hosts
UserParameter=backuppc.entity.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_backuppc_sudo --entities
# Item prototypes
# key: backuppc.host.info[{#BPCHOST},item]
# Valid items are: errors, max_errors, size, duration, age, notify
UserParameter=backuppc.host.info[*],/usr/bin/sudo /var/lib/zabbix/bin/check_backuppc_sudo $1 $2
# key: backuppc.host[{#BPCHOST}]
# or
# key: backuppc.entity[{#BPC_ENTITY}]
# Returns a JSON object, use dependent item to split it
UserParameter=backuppc.host[*],/usr/bin/sudo /var/lib/zabbix/bin/check_backuppc_sudo --host=$1
UserParameter=backuppc.entity[*],/usr/bin/sudo /var/lib/zabbix/bin/check_backuppc_sudo --entity=$1
# key: backuppc.host[{#BPCHOST}]
# or
# key: backuppc.general
# Same as the entity check but will process every host
# Returns a JSON object, use dependent item to split it
UserParameter=backuppc.general,/usr/bin/sudo /var/lib/zabbix/bin/check_backuppc_sudo --general
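As an illustration (host and entity names are hypothetical), server-side items built on these UserParameters would use keys such as:
backuppc.host[web01]
backuppc.entity[customer1]
backuppc.general
Each one returns a JSON object, so individual values (errors, age, total_size, etc.) are extracted with dependent items.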

View File

@ -1,2 +1,3 @@
# Discover block devices
UserParameter=vfs.dev.discovery,/var/lib/zabbix/bin/disco_block_devices
# For Zabbix Agent < 4.4 you can uncomment this
#UserParameter=vfs.dev.discovery,/var/lib/zabbix/bin/disco_block_devices

9
zabbix_conf/docker.conf Normal file
View File

@ -0,0 +1,9 @@
# Discover Docker items
# $1 can be containers, networks, volumes
UserParameter=container.docker.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_docker_sudo --what $1
# Type: Agent or Agent (active)
# container.docker.check[type,id]
# Where type is what to monitor (global, container, network, volume)
# id is the id of the item to monitor. Can be a name or an ID. For the global check, there's no ID
UserParameter=container.docker.check[*],/usr/bin/sudo /var/lib/zabbix/bin/check_docker_sudo --$1 $2
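For example (the container name is hypothetical), matching item keys could be:
container.docker.discovery[containers]
container.docker.check[global]
container.docker.check[container,myapp]
The first argument of the check key selects the switch passed to the script (--global, --container, ...), the second one is the ID.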

6
zabbix_conf/drbd.conf Normal file
View File

@ -0,0 +1,6 @@
# Discover DRBD resources
UserParameter=drbd.resource.discovery[*],/var/lib/zabbix/bin/disco_drbd
# DRBD status
UserParameter=drbd.resource.status[*],/var/lib/zabbix/bin/check_drbd --resource=$1 --what=$2

View File

@ -0,0 +1,2 @@
UserParameter=elasticsearch.discovery[*],/var/lib/zabbix/bin/disco_elasticsearch --url=$1 --user=$2 --pass=$3 --$4
UserParameter=elasticsearch.check[*],/var/lib/zabbix/bin/check_elasticsearch --url=$1 --user=$2 --pass=$3 --$4 $5

View File

@ -0,0 +1 @@
UserParameter=kernel.openedfiles,cat /proc/sys/fs/file-nr | awk '{print $1}'

8
zabbix_conf/gluster.conf Normal file
View File

@ -0,0 +1,8 @@
# Discover GlusterFS volumes or peers, based on the argument
UserParameter=gluster.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_gluster_sudo --what=$1
# Check GlusterFS volume or peer status
UserParameter=gluster.volume.status[*],/usr/bin/sudo /var/lib/zabbix/bin/check_gluster_sudo --what=volume --volume=$1 --bricks=$2
UserParameter=gluster.peer.status[*],/usr/bin/sudo /var/lib/zabbix/bin/check_gluster_sudo --what=peer --peer=$1
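For instance (volume name, brick count and peer are hypothetical):
gluster.volume.status[data,2]
gluster.peer.status[node2.example.com]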

6
zabbix_conf/httpd.conf Normal file
View File

@ -0,0 +1,6 @@
# Discover if an httpd instance is running and has mod_status available on http://127.0.0.1/server-status
# Just return {#HTTPD_STATUS_AVAILABLE} => 'yes' if found
UserParameter=httpd.discovery,/var/lib/zabbix/bin/disco_httpd
# Stats to get
UserParameter=httpd[*],/var/lib/zabbix/bin/check_httpd --uri $1 --what $2

6
zabbix_conf/icmp.conf Normal file
View File

@ -0,0 +1,6 @@
# net.icmp takes two args. The host to ping (either an IP or a host name), and one of
# * all: returns info in JSON format
# * latency: returns latency in seconds. Floating number
# * respond: returns 0 if no response was received, 1 otherwise
# * loss: returns % of packet loss. Floating number
UserParameter=net.icmp[*],/usr/bin/sudo /var/lib/zabbix/bin/check_icmp_sudo $1 --info=$2
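For example (addresses are hypothetical):
net.icmp[192.0.2.1,all]
net.icmp[gw.example.com,latency]
The first argument is the host to ping, the second selects which of the values described above to return.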

View File

@ -5,4 +5,5 @@ UserParameter=vfs.lvm.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_lvm_s
# Type: Agent or Agent (active)
# Key: vfs.lvm.lv[volume,key] where volume is the path to a logical volume and
# key can be size, allocation (for snapshots only) or status
UserParameter=vfs.lvm.lv[*],/usr/bin/sudo /var/lib/zabbix/bin/check_lvm_sudo $1 $2
UserParameter=vfs.lvm.lv[*],/usr/bin/sudo /var/lib/zabbix/bin/check_lvm_sudo --lv='$1' --what='$2'
UserParameter=vfs.lvm.vg[*],/usr/bin/sudo /var/lib/zabbix/bin/check_lvm_sudo --vg='$1' --what='$2'

6
zabbix_conf/mpath.conf Normal file
View File

@ -0,0 +1,6 @@
# Discover multipath devices
# $1 not used for now
UserParameter=vfs.mpath.discovery[*],sudo /var/lib/zabbix/bin/disco_mpath_sudo --$1
# Check multipath device
UserParameter=vfs.mpath.info[*],sudo /var/lib/zabbix/bin/check_mpath_sudo --mpath=$1

7
zabbix_conf/mysql.conf Normal file
View File

@ -0,0 +1,7 @@
# Type: Agent or Agent (active)
# You can also get all the stats in a json object by passing 'all',
# or retrieve the value of a specific key
# Run the script without --what to get a list of available keys
UserParameter=db.mysql[*],sudo /var/lib/zabbix/bin/check_mysql_sudo --host '$1' --port '$2' --user '$3' --password '$4' --defaults '$5' --what '$6'
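A sample item key, with placeholder credentials, could look like:
db.mysql[localhost,3306,monitor,secret,,Threads_connected]
or, to get everything at once as a JSON object:
db.mysql[localhost,3306,monitor,secret,,all]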

View File

@ -0,0 +1,2 @@
# Can replace the builtin net.if.discovery on agent < 2.0.0
#UserParameter=net.if.discovery,/var/lib/zabbix/bin/disco_net_interface

5
zabbix_conf/nginx.conf Normal file
View File

@ -0,0 +1,5 @@
# Discover if an nginx instance is running and has status handler running on http://localhost/nginx-status
UserParameter=nginx.discovery,/var/lib/zabbix/bin/disco_nginx
# Stats to get
UserParameter=nginx.status[*],/var/lib/zabbix/bin/check_nginx --uri $1 --what $2

View File

@ -7,7 +7,7 @@
# Units: %
# Multiplier: Do not use
# Store Value: As is
UserParameter=ups.load[*],upsc $1 ups.load
UserParameter=ups.load[*],upsc $1 ups.load 2>/dev/null
# Description: Nut UPS Battery Charge
# Type: Agent or Agent (active)
@ -16,17 +16,23 @@ UserParameter=ups.load[*],upsc $1 ups.load
# Units: %
# Multiplier: Do not use
# Store Value: As is
UserParameter=ups.battery.charge[*],upsc $1 battery.charge
UserParameter=ups.battery.charge[*],upsc $1 battery.charge 2>/dev/null
# Description: Nut UPS Status
# Type: Agent or Agent (active)
# Key: ups.status[UPS]
# Type of information: Character
# Show Value: As is (you can also define a dictionary OL=>On Line etc...)
UserParameter=ups.status[*],upsc $1 ups.status
UserParameter=ups.status[*],upsc $1 ups.status 2>/dev/null
# Description: Nut UPS Model
# Type: Agent or Agent (active)
# Key: ups.model[UPS]
# Type of information: Text
UserParameter=ups.model[*],upsc $1 ups.model
UserParameter=ups.model[*],upsc $1 ups.model 2>/dev/null
# UPS discovery
UserParameter=hardware.ups.discovery[*],/var/lib/zabbix/bin/disco_nut_ups $1
# This is a new, more generic nut ups UserParameter
UserParameter=hardware.ups[*],upsc $1 $2 2>/dev/null
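For example, with a hypothetical UPS named myups declared in NUT:
hardware.ups[myups,battery.charge]
hardware.ups[myups,ups.status]
Any variable known to upsc can be queried this way, which makes this form more generic than the dedicated ups.* keys above.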

9
zabbix_conf/pmg.conf Normal file
View File

@ -0,0 +1,9 @@
# Discover PMG items
# $1 can be domains
UserParameter=pmg.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_pmg_sudo --what $1
# Type: Agent or Agent (active)
# pmg.check.all[type,id]
# Where type is what to monitor (domain)
# id is the id of the item to monitor. Eg, the domain name
UserParameter=pmg.check.all[*],/usr/bin/sudo /var/lib/zabbix/bin/check_pmg_sudo --timespan=$1 --spamthres=$2 --$3 $4
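For illustration (the domain is hypothetical), with a 3600 second timespan and a spam threshold of 5:
pmg.discovery[domains]
pmg.check.all[3600,5,domain,example.com]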

10
zabbix_conf/pve.conf Normal file
View File

@ -0,0 +1,10 @@
# Discover PVE items
# $1 can be nodes, guests, storage or pools
UserParameter=pve.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_pve_sudo --what $1
# Type: Agent or Agent (active)
# pve.check.all[type,id]
# Where type is what to monitor (pool, storage, node, cluster, guest)
# id is the id of the item to monitor. For a guest, it's its numerical ID; for a storage,
# a node or a pool, its name. For the cluster, there's no ID
UserParameter=pve.check.all[*],/usr/bin/sudo /var/lib/zabbix/bin/check_pve_sudo --cache=300 --$1 $2
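For example (guest ID and storage name are hypothetical):
pve.discovery[nodes]
pve.check.all[cluster]
pve.check.all[guest,101]
pve.check.all[storage,local-lvm]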

2
zabbix_conf/qmail.conf Normal file
View File

@ -0,0 +1,2 @@
# Qmail checks
UserParameter=qmail[*],/usr/bin/sudo /var/lib/zabbix/bin/check_qmail_sudo $1

17
zabbix_conf/raid_hp.conf Normal file
View File

@ -0,0 +1,17 @@
# Description: HP Smart Array status
# Type: Agent or Agent (active)
# Key: raid.hp.status
# Type of Information: Character
# Show Value: As is
# The value reported is like:
# OK
# If an error is found, the output will be:
# CRITICAL: <line with the first error>
# You can add a simple trigger on this check like:
# { hostname:raid.hp.status.str( OK ) }=0
UserParameter=raid.hp.status[*],/usr/bin/sudo /var/lib/zabbix/bin/check_raid_hp_sudo --slot=$1
# This is a discovery rule to find which slots are used
UserParameter=raid.hp.discovery,/usr/bin/sudo /var/lib/zabbix/bin/disco_raid_hp_sudo

View File

@ -9,4 +9,7 @@
# You can add a simple trigger on this check like:
# { hostname:raid.sw.status.str( OK ) }=0
UserParameter=raid.sw.status,/var/lib/zabbix/bin/check_raid_mdadm
UserParameter=raid.sw.status[*],/var/lib/zabbix/bin/check_raid_mdadm --device=$1
# This is a discovery rule to find configured RAID devices
UserParameter=raid.sw.discovery,/var/lib/zabbix/bin/disco_raid_mdadm

View File

@ -9,4 +9,7 @@
# You can add a simple trigger on this check like:
# { hostname:raid.mega.status.str( OK ) }=0
UserParameter=raid.mega.status,/usr/bin/sudo /var/lib/zabbix/bin/check_raid_megaraid_sudo
UserParameter=raid.mega.status[*],/usr/bin/sudo /var/lib/zabbix/bin/check_raid_megaraid_sudo -s $1 -o $2 -m $3
# Discover if there's a controller to check
UserParameter=raid.mega.discovery,/usr/bin/sudo /var/lib/zabbix/bin/disco_raid_megaraid_sudo

13
zabbix_conf/sa_learn.conf Normal file
View File

@ -0,0 +1,13 @@
# Description: SA Learn statistics
# Type: Agent or Agent (active)
# Key: mail.bayes
# Type of information: Text
# Units: N/A
# Custom multiplier: Do not use
# Store Value: As is
# This is a master item which must then be split with preprocessing (Zabbix >= 3.4.0)
UserParameter=mail.bayes.all,/usr/bin/sudo /var/lib/zabbix/bin/check_sa_learn_sudo
# Or you can use individual items. Valid args are ham, spam and token
UserParameter=mail.bayes[*],/usr/bin/sudo /var/lib/zabbix/bin/check_sa_learn_sudo --what=$1
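A typical setup uses mail.bayes.all as a text master item with dependent items to extract the counters, or direct items such as:
mail.bayes[spam]
mail.bayes[ham]
mail.bayes[token]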

2
zabbix_conf/samba.conf Normal file
View File

@ -0,0 +1,2 @@
# Check if nmbd is available and working
UserParameter=samba.nmbd.status,/var/lib/zabbix/bin/check_nmblookup --server=127.0.0.1 --host=linuxsrv

View File

@ -0,0 +1,7 @@
UserParameter=samba_dc.discovery[*],sudo /var/lib/zabbix/bin/disco_samba_dc_sudo --what='$1'
# Create a text item with key samba_dc.info[300] and a check interval of 300
# Then use dependent item to get individual counters
UserParameter=samba_dc.info[*],sudo /var/lib/zabbix/bin/check_samba_dc_sudo --since='$1'
# Create a text item with key samba_dc.ou[{#SAMBA_OU}], then use dependent items with JSONPath to get individual info
UserParameter=samba_dc.ou[*],sudo /var/lib/zabbix/bin/check_samba_dc_sudo --ou='$1'

View File

@ -1,6 +1,6 @@
# Sensors discovery
# See /etc/zabbix/sensors.conf
UserParameter=hardware.sensor.discovery,/var/lib/zabbix/bin/disco_sensors
UserParameter=hardware.sensor.discovery[*],/var/lib/zabbix/bin/disco_sensors --type=$1
# Sensors
UserParameter=hardware.sensor[*],/usr/bin/sudo /var/lib/zabbix/bin/check_sensors_sudo $1 $2

View File

@ -1,7 +1,11 @@
# Discover S.M.A.R.T. capable hard drives
# The only macro returned is {#SMARTDRIVE} and value is like /dev/sda
UserParameter=hardward.disk.smart.discovery,/usr/bin/sudo /var/lib/zabbix/bin/disco_smart_sudo
UserParameter=hardware.disk.smart.discovery,/usr/bin/sudo /var/lib/zabbix/bin/disco_smart_sudo
# Takes two args: the drives to check, and the value to get
# eg: hardware.disk.smart[/dev/sda,Reallocated_Sector_Ct]
UserParameter=hardward.disk.smart[*],/usr/bin/sudo /var/lib/zabbix/bin/check_smart_sudo $1 $2
UserParameter=hardware.disk.smart[*],/usr/bin/sudo /var/lib/zabbix/bin/check_smart_sudo $1 $2
# New smart disk discovery/monitoring
UserParameter=stor.dev.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_stor_dev_sudo
UserParameter=stor.dev.info[*],/usr/bin/sudo /var/lib/zabbix/bin/check_stor_dev_sudo --dev "$1" --type "$2"

View File

@ -1,5 +1,16 @@
# Squid
# Discover if a squid instance is running and has status handler running on http://127.0.0.1:3128/squid-internal-mgr/info
UserParameter=squid.discovery[*],/var/lib/zabbix/bin/disco_squid
# Stats to get
UserParameter=squid.check[*],/var/lib/zabbix/bin/check_squid --what $1
################################################
## LEGACY SQUID ITEMS
################################################
# Description: Squid Request Hit Ratio
# Type: Agent or Agent (active)
# Key: squid.request_hit_ratio
@ -8,7 +19,7 @@
# Custom multiplier: Do not use
# Store Value: As is
UserParameter=squid.request_hit_ratio,squidclient mgr:info|grep 'Request Hit Ratios:'|cut -d':' -f3|cut -d',' -f1|tr -d ' %'
UserParameter=squid.request_hit_ratio,squidclient mgr:info|grep -P '(Hits as % of all requests)|(Request Hit Ratios):'|cut -d':' -f3|cut -d',' -f1|tr -d ' %'
# Description: Squid Byte Hit Ratio
# Type: Agent or Agent (active)
@ -18,7 +29,7 @@ UserParameter=squid.request_hit_ratio,squidclient mgr:info|grep 'Request Hit Rat
# Custom multiplier: Do not use
# Store Value: As is
UserParameter=squid.byte_hit_ratio,squidclient mgr:info|grep 'Byte Hit Ratios:'|cut -d':' -f3|cut -d',' -f1|tr -d ' %'
UserParameter=squid.byte_hit_ratio,squidclient mgr:info|grep -P '(Hits as % of bytes sent)|(Byte Hit Ratios):'|cut -d':' -f3|cut -d',' -f1|tr -d ' %'
# Description: Squid Average HTTP request per minute
# Type: Agent or Agent (active)
@ -50,3 +61,33 @@ UserParameter=squid.cache_size_disk,squidclient mgr:info|grep 'Storage Swap size
UserParameter=squid.cache_size_mem,squidclient mgr:info|grep 'Storage Mem size:' | awk '{print $4}'
# Description: Squid FD limit
# Type: Agent or Agent (active)
# Key: squid.max_fd
# Type of information: Numeric (integer 64bits)
# Units: N/A
# Custom multiplier: Do not use
# Store Value: As is
UserParameter=squid.max_fd,squidclient mgr:info | grep 'Maximum number of file descriptors' | cut -d':' -f2 | tr -d ' \t'
# Description: Squid reserved FD
# Type: Agent or Agent (active)
# Key: squid.reserved_fd
# Type of information: Numeric (integer 64bits)
# Units: N/A
# Custom multiplier: Do not use
# Store Value: As is
UserParameter=squid.reserved_fd,squidclient mgr:info | grep 'Reserved number of file descriptors' | cut -d':' -f2 | tr -d ' \t'
# Description: Squid available FD
# Type: Agent or Agent (active)
# Key: squid.available_fd
# Type of information: Numeric (integer 64bits)
# Units: N/A
# Custom multiplier: Do not use
# Store Value: As is
UserParameter=squid.available_fd,squidclient mgr:info | grep 'Available number of file descriptors' | cut -d':' -f2 | tr -d ' \t'

2
zabbix_conf/unifi.conf Normal file
View File

@ -0,0 +1,2 @@
UserParameter=unifi.discovery[*],/var/lib/zabbix/bin/disco_unifi --url=$1 --user=$2 --pass=$3 --site=$4 --what=$5 --type=$6
UserParameter=unifi.check.all[*],/var/lib/zabbix/bin/check_unifi --url=$1 --user=$2 --pass=$3 --site=$4 --$5 $6

12
zabbix_conf/vdo.conf Normal file
View File

@ -0,0 +1,12 @@
# Discover VDO volumes
# $1 not used for now
UserParameter=vfs.vdo.discovery[*],/var/lib/zabbix/bin/disco_vdo_sudo --what=$1
# Type: Agent or Agent (active)
# Key: vfs.vdo.vol[volume,item] where volume is the name of the volume to monitor
# item can be one of the valid keys (run manually without --value arg to see available keys)
UserParameter=vfs.vdo.vol[*],sudo /var/lib/zabbix/bin/check_vdo_sudo --volume=$1 --value=$2
# Type: Agent or Agent (active)
# You can also get all the info about a vdo volume at once, in JSON
UserParameter=vfs.vdo.vol.all[*],sudo /var/lib/zabbix/bin/check_vdo_sudo --volume=$1
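For example, for a hypothetical VDO volume named vdo0:
vfs.vdo.vol.all[vdo0]
returns all available values as JSON, while vfs.vdo.vol[vdo0,<key>] returns a single one (run check_vdo_sudo manually without --value to list the valid keys).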

19
zabbix_conf/zfs.conf Normal file
View File

@ -0,0 +1,19 @@
# Discover ZFS zpools
# $1 not used for now
UserParameter=vfs.zfs.discovery[*],/var/lib/zabbix/bin/disco_zfs --$1
# Type: Agent or Agent (active)
# You can also get all the info about a zpool at once, in JSON
UserParameter=vfs.zfs.zpool.info[*],/var/lib/zabbix/bin/check_zfs --zpool=$1
# Type: Agent or Agent (active)
# FS, Zvol or Snap info in JSON
UserParameter=vfs.zfs.dataset.info[*],/var/lib/zabbix/bin/check_zfs --dataset=$1
# Type: Agent or Agent (active)
# Sanoïd snapshot monitoring
UserParameter=vfs.zfs.sanoid.check[*],/var/lib/zabbix/bin/check_zfs --sanoid=$1
# Type: Agent or Agent (active)
# ARC stats
UserParameter=vfs.zfs.stats.all[*],/var/lib/zabbix/bin/check_zfs --stats=$1
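For example (pool and dataset names are hypothetical):
vfs.zfs.zpool.info[rpool]
vfs.zfs.dataset.info[rpool/ROOT]
Both return a JSON object meant to be split with dependent items.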

11
zabbix_conf/zimbra.conf Normal file
View File

@ -0,0 +1,11 @@
# Discovery of configured host
# Key: zimbra.discovery[services] or zimbra.discovery[servers]
# Macro: {#ZM_SERVICE}
# Other available macros:
UserParameter=zimbra.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_zimbra_sudo --$1
# Item prototypes
# key: zimbra.status[service]
# Returns a JSON object, use dependent item to split it
UserParameter=zimbra.status[*],/usr/bin/sudo /var/lib/zabbix/bin/check_zimbra_sudo --status=$1

View File

@ -1,10 +1,27 @@
#!/usr/bin/perl
use lib "/usr/share/BackupPC/lib";
use lib "/usr/share/backuppc/lib";
use lib "/usr/local/BackupPC/lib";
use BackupPC::Lib;
use BackupPC::CGI::Lib;
use POSIX;
use JSON;
use Getopt::Long;
use Statistics::Descriptive;
use Data::Dumper;
my $general = 0;
my $host = undef;
my $entity = undef;
my $pretty = 0;
GetOptions(
"general" => \$general,
"host=s" => \$host,
"entity=s" => \$entity,
"pretty" => \$pretty
);
# We need to switch to backuppc UID/GID
my $uid = getuid();
@ -13,73 +30,157 @@ my (undef,undef,$bkpuid,$bkpgid) = getpwnam('backuppc');
setuid($bkpuid) if ($uid ne $bkpuid);
setgid($bkpgid) if ($gid ne $bkpgid);
my $host = $ARGV[0];
my $what = $ARGV[1];
my $bpc = BackupPC::Lib->new();
my @backups = $bpc->BackupInfoRead($host);
my $mainConf = $bpc->ConfigDataRead();
my $hostConf = $bpc->ConfigDataRead($host);
my $conf = { %$mainConf, %$hostConf };
my $fullCnt = $incrCnt = 0;
my $fullAge = $incrAge = $lastAge = -1;
my $lastXferErrors = 0;
my $maxErrors = 0;
my $json = {};
for ( my $i = 0 ; $i < @backups ; $i++ ) {
if ( $backups[$i]{type} eq "full" ) {
$fullCnt++;
if ( $fullAge < 0 || $backups[$i]{startTime} > $fullAge ) {
$fullAge = $backups[$i]{startTime};
$fullSize = $backups[$i]{size};
$fullDur = $backups[$i]{endTime} - $backups[$i]{startTime};
}
}
else {
$incrCnt++;
if ( $incrAge < 0 || $backups[$i]{startTime} > $incrAge ) {
$incrAge = $backups[$i]{startTime};
}
}
}
if ( $fullAge > $incrAge && $fullAge >= 0 ) {
$lastAge = $fullAge;
}
else {
$lastAge = $incrAge;
}
if ( $lastAge < 0 ) {
$lastAge = "";
}
else {
$lastAge = sprintf("%.1f", (time - $lastAge) / (24 * 3600));
}
$lastXferErrors = $backups[@backups-1]{xferErrs} if ( @backups );
$maxErrors = $conf->{MaxXferError} if (defined $conf->{MaxXferError});
if ( $host ) {
my $hostConf = $bpc->ConfigDataRead($host);
my $conf = { %$mainConf, %$hostConf };
$json = {
bkp => 0,
full_size => 0,
total_size => 0,
history_size => 0,
errors => 0,
new_size => 0,
new_size_avg => 0,
new_size_median => 0,
new_size_q1 => 0,
new_size_q3 => 0,
duration => 0,
comp_ratio => 0,
enabled => 0,
max_errors => 0,
age => 0,
type => 'none'
};
if ($what eq 'errors'){
print $lastXferErrors;
}
elsif ($what eq 'max_errors'){
print $maxErrors;
}
elsif ($what eq 'age'){
print $lastAge;
}
elsif ($what eq 'size'){
print $fullSize;
}
elsif ($what eq 'duration'){
print $fullDur;
}
elsif ($what eq 'notify'){
print $conf->{EMailNotifyOldBackupDays};
}
else{
my $new_size_of_last_full = 0;
my @bpc_info = $bpc->BackupInfoRead($host);
my $sizes = new Statistics::Descriptive::Full;
if ( scalar( @bpc_info ) ){
foreach my $backup ( @bpc_info ) {
# Skip partial or active backups
next if ( $backup->{type} !~ m/^full|incr$/ );
if ( $backup->{type} eq "full" ) {
$json->{full_size} = $backup->{size};
$new_size_of_last_full = $backup->{sizeNew} if $backup->{num} > 0;
}
# Push all the sizes in our data set to compute avg sizes
# Exclude backup N°0 as it'll always have much more new data than normal backups
# Also exclude if size is not defined. This can happen in BackupPC v3 when
# the BackupPC_link process is waiting for the nightly to finish
$sizes->add_data($backup->{sizeNew}) if ( $backup->{num} > 0 && $backup->{sizeNew} );
$json->{bkp}++;
}
# Ignore the last backup if it's not full or incr (which means it's either partial or active)
my $i = ( $bpc_info[-1]->{type} =~ m/^full|incr$/ ) ? -1 : -2;
$json->{errors} = $bpc_info[$i]->{xferErrs};
$json->{duration} = $bpc_info[$i]->{endTime} - $bpc_info[$i]->{startTime};
$json->{type} = $bpc_info[$i]->{type};
$json->{new_size_avg} = int $sizes->mean;
$json->{new_size_median} = int $sizes->median;
# Some old versions of Statistics::Descriptive (eg, on el5) do not support quantile
$json->{new_size_q1} = eval { int $sizes->quantile(1) } || 0;
$json->{new_size_q3} = eval { int $sizes->quantile(3) } || 0;
$json->{enabled} = ( $conf->{BackupsDisable} > 0 ) ? 0 : 1;
$json->{total_size} = $sizes->sum + $json->{full_size} - $new_size_of_last_full;
$json->{history_size} = $json->{total_size} - $json->{full_size};
$json->{age} = time - $bpc_info[$i]->{startTime};
# For sizeNew, we need to wait for BackupPC_link to run, which can be delayed
# if a nightly process is running. In this case, use the stats from the previous backup
# Except when we have a single backup, in which case we read the stats of this only backup
$i = ( $bpc_info[-1]->{sizeNew} || scalar @bpc_info == 1 ) ? -1 : -2;
$json->{new_size} = $bpc_info[$i]->{sizeNew};
$json->{comp_ratio} = ( $bpc_info[$i]->{sizeNew} > 0 ) ?
sprintf( "%.2f", 100 - ( $bpc_info[$i]->{sizeNewComp} * 100 / $bpc_info[$i]->{sizeNew} ) )
:
0;
$json->{max_errors} = $conf->{MaxXferError} || 0;
}
} elsif ( $entity or $general) {
$json = {
perf => 0,
size => 0,
full_size => 0,
total_size => 0,
history_size => 0,
hosts => 0,
bkp => 0,
ratio => 0
};
my $entity_total_new = 0;
my $entity_total_comp = 0;
foreach my $host ( keys %{ $bpc->HostInfoRead } ) {
next unless ($host =~ m/^(vm_)?\Q$entity\E_.*/ or $general);
my $full_size;
$json->{hosts}++;
my $hostConf = $bpc->ConfigDataRead($host);
my $conf = { %$mainConf, %$hostConf };
my $freq = ( $conf->{FullPeriod} > $conf->{IncrPeriod} ) ? $conf->{IncrPeriod} : $conf->{FullPeriod};
my $host_duration = 0;
my $host_bkp_num = 0;
my $host_new_size = 0;
my $host_full_size = 0;
my $host_new_size_of_last_full = 0;
foreach my $backup ( $bpc->BackupInfoRead( $host ) ) {
next if ( $backup->{type} !~ m/^full|incr$/ );
# Save the total size of the last full backup
if ( $backup->{type} eq 'full' ) {
$host_full_size = $backup->{size};
$host_new_size_of_last_full = $backup->{sizeNew} if $backup->{num} > 0;
}
$host_new_size += $backup->{sizeNew} if ( $backup->{num} > 0 && $backup->{sizeNew} );
$entity_total_new += $backup->{sizeNew};
$entity_total_comp += $backup->{sizeNewComp};
$host_duration += $backup->{endTime} - $backup->{startTime};
$host_bkp_num++;
$json->{bkp}++;
}
# Compute the average cost as the number of hours per day spent
# to backup this host
$json->{perf} += ( $host_bkp_num > 0 ) ? $host_duration / ( 3600 * $host_bkp_num * $freq ) : 0;
# $json->{size} represents the total size used by this host.
# But we want to substract the new size of the last full, as for this one we
# do not count sizeNew but size
my $host_total_size = $host_new_size + $host_full_size - $host_new_size_of_last_full;
# This one is kept just for compatibility. New Zabbix template will use total_size
$json->{size} += $host_total_size;
$json->{total_size} += $host_total_size;
$json->{full_size} += $host_full_size;
$json->{history_size} += $host_total_size - $host_full_size;
}
$json->{ratio} = ( $entity_total_new > 0 ) ? 100 - ( $entity_total_comp * 100 / $entity_total_new ) : 0;
# Round some values
foreach my $key ( qw(ratio perf) ) {
$json->{$key} = sprintf( "%.2f", $json->{$key} );
}
} else {
print<<"EOF";
Usage: $0 <host> [errors|age|size|duration]
Usage: $0 --host=<host> or --entity=<entity>
EOF
}
print to_json( $json, { pretty => $pretty } );
exit(0);

View File

@ -0,0 +1,90 @@
#!/usr/bin/perl
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
use Date::Parse;
my $docker = which('docker');
my $json = {};
my $pretty = 0;
my ($global, $container, $network, $volume) = undef;
GetOptions(
'global' => \$global,
'container=s' => \$container,
'network=s' => \$network,
'volume=s' => \$volume,
'pretty' => \$pretty
);
# Sanitize args
if (defined $container and not $container =~ m/^[a-zA-Z0-9\-_]+/){
die "Invalid container ID $container\n";
} elsif (defined $network and not $network =~ m/^[a-zA-Z0-9\-_]+/){
die "Invalid network ID\n";
} elsif (defined $volume and not $volume =~ m/^[a-zA-Z0-9\-_]+/){
die "Invalid volume name\n";
}
# Default formatting
my $format = '{{ json . }}';
my $cmd;
if ($global){
$json->{info} = from_json(qx($docker info --format '$format'));
} elsif (defined $container) {
$json->{inspect} = from_json(qx($docker container inspect $container --format '$format'));
$json->{stats} = from_json(qx($docker container stats $container --format '$format' --no-stream));
# Remove percent sign so Zabbix can get raw value
foreach my $stat (qw(MemPerc CPUPerc)){
$json->{stats}->{$stat} =~ s/%$//;
}
# Extract mem usage vs mem limit, net in vs net out and blk read vs blk write
($json->{stats}->{MemCurrent}, $json->{stats}->{MemLimit}) = split(/\s*\/\s*/, $json->{stats}->{MemUsage});
($json->{stats}->{NetIOIn}, $json->{stats}->{NetIOOut}) = split(/\s*\/\s*/, $json->{stats}->{NetIO});
($json->{stats}->{BlockIORead}, $json->{stats}->{BlockIOWrite}) = split(/\s*\/\s*/, $json->{stats}->{BlockIO});
# Convert into Bytes
foreach my $stat (qw(MemCurrent MemLimit NetIOIn NetIOOut BlockIORead BlockIOWrite)){
$json->{stats}->{$stat} = convert_unit($json->{stats}->{$stat});
}
# Compute a useful Uptime from the StartedAt value
if ($json->{inspect}->{State}->{Running}){
$json->{stats}->{Uptime} = int(time() - str2time($json->{inspect}->{State}->{StartedAt}));
} else {
$json->{stats}->{Uptime} = 0;
}
} elsif (defined $network){
$json->{inspect} = from_json(qx($docker network inspect $network --format '$format'));
} elsif (defined $volume){
$json->{inspect} = from_json(qx($docker volume inspect $volume --format '$format'));
}
print to_json($json, { pretty => $pretty }) . "\n";
sub convert_unit {
my $val = shift;
my $suffix_multiplier = {
ki => 1024,
Ki => 1024,
Mi => 1024 * 1024,
Gi => 1024 * 1024 * 1024,
Ti => 1024 * 1024 * 1024 * 1024,
Pi => 1024 * 1024 * 1024 * 1024 * 1024,
k => 1000,
K => 1000,
M => 1000 * 1000,
G => 1000 * 1000 * 1000,
T => 1000 * 1000 * 1000 * 1000,
P => 1000 * 1000 * 1000 * 1000 * 1000
};
if ($val =~ m/^(\d+(\.\d+)?)(ki|Ki|Mi|Gi|Ti|Pi|k|K|M|G|T|P)?B/){
$val = int($1 * $suffix_multiplier->{$3}) if (defined $3 and defined $suffix_multiplier->{$3});
# Remove the Bytes suffix if remaining
$val =~ s/B$//;
}
return $val;
}

46
zabbix_scripts/check_drbd Normal file
View File

@ -0,0 +1,46 @@
#!/usr/bin/perl -w
use strict;
use File::Which;
use Getopt::Long;
my $what = 'cstate';
my $resource = undef;
my @supported = qw(cstate dstate role);
GetOptions(
"what=s" => \$what,
"resource=s" => \$resource
);
my $drbdadm = which('drbdadm');
unless($drbdadm){
die 'ZBX_NOTSUPPORTED';
}
sub usage(){
my $supp = join('|', @supported);
print <<"EOF";
usage: $0 --what=[$supp] --resource=<drbd resource name>
EOF
}
unless ((grep { $_ eq $what } @supported) && $resource){
usage();
exit 1;
}
open RES, '-|', $drbdadm, $what, $resource || die "Can't open pipe: $!";
my $out = join "", <RES>;
close RES || die "An error occured: $!\n";
chomp($out);
# We only want the state of the local node
if ($out =~ m{(.*)/.*}){
$out = $1;
}
print $out;
exit 0;

View File

@ -0,0 +1,94 @@
#!/usr/bin/perl
use warnings;
use strict;
use JSON;
use Getopt::Long;
use LWP::UserAgent;
use HTTP::Request::Common;
use URI;
use Data::Dumper;
my $user = undef;
my $pass = undef;
my $url = 'http://localhost:9200';
my $certcheck = 1;
my $cluster = 0;
my $node = undef;
my $index = undef;
my $pretty = 0;
my $json = {};
GetOptions (
'user:s' => \$user,
'password:s' => \$pass,
'url=s' => \$url,
'cert-check!' => \$certcheck,
'cluster' => \$cluster,
'node=s' => \$node,
'index=s' => \$index,
'pretty' => \$pretty
);
# If no option is given, default to fetch the cluster status
if (not defined $cluster and not defined $node and not defined $index){
$cluster = 1;
}
my $uri = URI->new($url);
if (not defined $uri){
die "COuldn't parse $url as a valid url\n";
}
# If connecting over http or if the host is localhost
# there's no need to check certificate
if ($uri->scheme eq 'http' or $uri->host =~ m/^localhost|127\.0\.0/){
$certcheck = 0;
}
my $resp;
my $sslopts = {};
if (not $certcheck){
$sslopts = {
verify_hostname => 0,
SSL_verify_mode => 0
}
}
my $ua = LWP::UserAgent->new(
ssl_opts => $sslopts
);
$ua->env_proxy;
if ($cluster){
$json = make_request('/_cluster/stats');
} elsif (defined $node){
my $resp = make_request('/_nodes/' . $node)->{'nodes'};
# We can specify node by ID, name or IP
if (defined $resp->{$node}){
$json = $resp->{$node};
} else {
my $node_id = (keys %{$resp})[0];
$json = $resp->{$node_id};
}
} elsif (defined $index){
$json = make_request('/_cluster/health/' . $index . '?level=indices')->{'indices'}->{$index};
}
print to_json($json, { pretty => $pretty });
sub make_request {
my $path = shift;
my $req_url = $url . $path;
my $req = GET $req_url;
if (defined $user and $user ne '' and defined $pass and $pass ne ''){
$req->authorization_basic($user, $pass);
}
my $resp = $ua->request($req);
die "Request to $req_url failed : " . $resp->message . "\n" if $resp->is_error;
return from_json($resp->decoded_content);
}

View File

@ -0,0 +1,158 @@
#!/usr/bin/perl -w
use strict;
use File::Which;
use Getopt::Long;
my $what = 'volume';
my $volume = undef;
my $peer = undef;
my $bricks = undef;
my $gluster = which('gluster');
my $lock = '/var/lock/gluster-zabbix.lock';
unless($gluster){
# Gluster is not installed, exit with an error
die "gluster command not found";
}
# Get an exclusive lock
open(LOCK, ">$lock") || die "Can't open $lock";
flock(LOCK, 2);
GetOptions(
"what=s" => \$what,
"volume=s" => \$volume,
"bricks=i" => \$bricks,
"peer=s" => \$peer
);
sub usage(){
print <<"EOF";
usage: $0 --what=[peer|volume]
If --what=volume you need to pass --volume=<volname>. The optional --bricks arg can be used to pass the number of expected bricks
If --what=peer you need to pass --peer=<host>
EOF
}
sub gluster($){
my $cmd = shift;
my $code = 256;
my @result = ();
# Loop to run gluster cmd as it can fail if two run at the same time
for (my $i = 0; ($code != 0 && $i < 3); $i++){
open (RES, "$cmd 2>/dev/null |")
|| die "error: Could not execute $cmd";
@result = <RES>;
close RES;
$code = $?;
sleep(1) unless ($code == 0);
}
return @result;
}
if (($what eq 'volume' && !$volume) ||
($what eq 'peer' && !$peer) ||
($what ne 'volume' && $what ne 'peer')){
usage();
}
if ($what eq 'volume'){
my $bricksfound = 0;
my @volinfo = gluster("$gluster vol status $volume");
unless (scalar @volinfo){
die "Error occurred while trying to get volume status for $volume";
}
foreach my $line (@volinfo){
# Check that all bricks are online
if ($line =~ m/^Brick\ ([\w\.]+:\/[\w\.\/]+)\s+\d+(\s+\d+)?\s+([A-Z])/){
$bricksfound++;
if ($3 ne 'Y') {
print "CRITICAL: brick status (reported $3 on $1)";
exit 1;
}
}
# Check the Self-Heal daemons are up and running
elsif ($line =~ m/^Self-heal\ Daemon\ on\ ([\w\.]+)\s+N\/A\\s+([A-Z])/ && $2 ne 'Y'){
print "CRITICAL: self-heal daemon (reported $2 on $1)";
exit 1;
}
}
# Check the number of bricks is the one we expect
if ($bricks && $bricks != $bricksfound){
print "CRITICAL: bricks count mismatch (found $bricksfound while expecting $bricks)";
exit 1;
}
@volinfo = gluster("$gluster vol heal $volume info");
unless (scalar @volinfo){
die "Error occurred while trying to get volume heal info for $volume";
}
foreach my $line (@volinfo){
if ($line =~ m/^Number\ of\ entries:\s+(\d+)$/ && $1 gt 0){
# Lets check a second time to limit false positives
sleep 1;
@volinfo = gluster("$gluster vol heal $volume info");
unless (scalar @volinfo){
die "Error occurred while trying to get volume heal info for $volume";
}
foreach my $line (@volinfo){
if ($line =~ m/^Number\ of\ entries:\s+(\d+)$/ && $1 gt 0){
print "CRITICAL: self-heal in progress ($1)";
exit 1;
}
}
}
}
@volinfo = gluster("$gluster vol heal $volume info heal-failed");
# the heal-failed command isn't supported on all version of GlusterFS
if (scalar @volinfo){
foreach my $line (@volinfo){
# Now, check we don't have any file which the Self-Heal daemon couldn't sync
if ($line =~ m/^Number\ of\ entries:\s+(\d+)$/ && $1 gt 0){
print "CRITICAL: self-heal error ($1)";
exit 1;
}
}
}
@volinfo = gluster("$gluster vol heal $volume info split-brain");
unless (scalar @volinfo){
die "Error occurred while trying to get split-brain info for $volume";
}
foreach my $line (@volinfo){
# Now, check we don't have any file in a split-brain situation
if ($line =~ m/^Number\ of\ entries:\s+(\d+)$/ && $1 gt 0){
print "CRITICAL: split-bran ($1)";
exit 1;
}
}
@volinfo = gluster("$gluster vol info $volume");
unless (scalar @volinfo){
die "Error occurred while trying to get volume info for $volume";
}
foreach my $line (@volinfo){
# Check the volume is started
if ($line =~ m/^Status:\s+(\w+)$/ && $1 ne 'Started'){
print 'CRITICAL: The volume is not started';
exit 1;
}
}
print 'OK';
}
elsif ($what eq 'peer'){
my @peers = gluster("$gluster pool list");
my $status = 'unknown';
foreach my $line (@peers){
if (($line =~ m/^$peer\s+/) ||
($line =~ m/^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}\s+$peer\s+/)){
(undef,undef,$status) = split(/\s+/, $line);
}
}
print $status;
}
close(LOCK);
exit(0);

View File

@ -0,0 +1,55 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use LWP::Simple;
use Getopt::Long;
use JSON;
my $uri = 'http://127.0.0.1/server-status';
my $what = 'all';
my $help = 0;
GetOptions(
"uri=s" => \$uri,
"what=s" => \$what,
"help" => \$help
);
my %res = ();
my $status = get($uri . '?auto');
unless ($status){
print 'ZBX_NOTSUPPORTED';
exit 1;
}
foreach my $line (split(/\n/, $status)){
next unless ($line =~ m/^(\w+(\s\w+)?):\s([\.\d]+)/);
my ($key, $val) = ($1,$3);
$key =~ s/\s/_/g;
$key = lc $key;
# Remove leading and trailing spaces
$val =~ s/^\s+|\s+$//g;
# Add 0 before the . when needed
$val =~ s/^(\.\d+)$/0$1/;
$res{$key} = $val;
}
if ($help){
print "Valid keys are:\n\n";
print "$_\n" for keys %res;
exit 0;
}
if ($what eq 'all'){
print to_json(\%res);
}
elsif (defined $res{$what}){
print $res{$what};
}
else{
print 'ZBX_NOTSUPPORTED';
}
exit 0;

View File

@ -0,0 +1,45 @@
#!/usr/bin/perl -w
use warnings;
use strict;
use File::Which;
use Getopt::Long;
use JSON;
my $fping = which('fping');
unless ($fping){
die "ZBX_NOTSUPPOTED\n";
}
my $info = 'all';
my $pretty = 0;
my @valid_info = qw(all respond latency loss);
my $host = $ARGV[0];
GetOptions(
'info=s' => \$info,
'pretty' => \$pretty
);
unless (grep { $info eq $_ } @valid_info){
die "Usage: $0 [--info=<respond|latency|loss>] host\n";
}
my $ping = qx($fping -c 5 -p 10 -q $host 2>&1);
# Output looks like 10.29.254.2 : xmt/rcv/%loss = 5/5/0%, min/avg/max = 1.42/1.65/1.90
if ($ping =~ m|^$host : xmt/rcv/%loss = 5/(\d)/(\d+(?:\.\d+)?)%(?:, min/avg/max = (?:\d+(?:\.\d+)?)/(\d+(\.\d+))/(?:\d+(?:\.\d+)?))?$|){
my $stat = {
respond => ($1 > 0) ? 1 : 0,
loss => $2 + 0,
latency => (defined $3) ? $3 / 1000 : 0
};
if ($info ne 'all'){
print $stat->{$info} . "\n";
} else {
print to_json($stat, { pretty => $pretty }) . "\n";
}
} else {
die "ZBX_NOTSUPPOTED\n";
}
exit 0;

View File

@ -1,39 +1,76 @@
#!/usr/bin/perl -w
use Linux::LVM;
use Zabbix::Agent::Addons::LVM;
use Getopt::Long;
use JSON;
Linux::LVM->units(B);
Zabbix::Agent::Addons::LVM->units(B);
if (@ARGV < 2){
usage();
exit(1);
my $vg = undef;
my $lv = undef;
my $what = undef;
my $pretty = 0;
GetOptions(
'vg=s' => \$vg,
'lv=s' => \$lv,
'what:s' => \$what,
"pretty" => \$pretty
);
if (not defined $lv and not defined $vg){
$lv ||= $ARGV[0];
$what ||= $ARGV[1];
}
my $vol = $ARGV[0];
my $what = $ARGV[1];
if (not defined $lv and not defined $vg){
usage();
exit 1;
}
sub usage {
print<<"EOF";
Usage: $0 <logical volume> [size|allocation|status]
Usage: $0 <logical volume> [size|allocation|allocation_pool_data|allocation_metadata|status]
$0 --lv=<logical volume>
$0 --lv=<logical volume> --what=<size|allocation|allocation_pool_data|allocation_metadata|status|etc.>
$0 --vg=<volume group>
$0 --vg=<volume group> --what=<alloc_pe_size|vg_size|etc.>
EOF
}
my %info = get_lv_info($vol);
my $json;
if (defined $vg){
%{$json} = get_volume_group_information($vg);
# Depending on LVM version, alloc_ct might not be present
if (not defined $json->{alloc_ct}){
$json->{alloc_ct} = sprintf("%.1f", 100 * $json->{alloc_pe_size} / $json->{vg_size});
}
} elsif (defined $lv) {
%{$json} = get_lv_info($lv);
} else{
usage();
}
if ($what eq 'size'){
print $info{size};
# Normalize float values
foreach (qw(allocated_to_snapshot allocated_pool_data allocated_meta_data)){
$json->{$_} =~ s/,/./g if (defined $json->{$_});
}
elsif ($what eq 'allocation'){
my $ret = (defined $info{allocated_to_snapshot}) ? $info{allocated_to_snapshot} : "ZBX_NOTSUPPORTED";
$ret =~ s/,/\./;
print $ret;
# Compat with older versions
my $old_keys = {
allocation => 'allocated_to_snapshot',
allocation_pool_data => 'allocated_pool_data',
allocation_metadata => 'allocated_meta_data'
};
if (defined $what && defined $old_keys->{$what}){
$what = $old_keys->{$what};
}
elsif ($what eq 'status'){
print $info{status};
}
else{
usage();
if (defined $what and $what ne ''){
print ((defined $json->{$what}) ? $json->{$what} : 'ZBX_NOTSUPPORTED');
} else {
print to_json($json, { pretty => $pretty });
}
exit(0);

View File

@ -0,0 +1,125 @@
#!/usr/bin/perl
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
my $json = {};
my $mpath = undef;
my $help = 0;
my $pretty = 0;
GetOptions(
"mpath=s" => \$mpath,
"help" => \$help,
"pretty" => \$pretty
);
my $multipathd = which('multipathd');
if (not defined $multipathd){
print 'ZBX_NOTSUPPORTED';
exit 1;
}
if ($help or not defined $mpath){
print <<_EOF;
Usage : $0 --mpath=<name of the mpath device> [--pretty]
* --mpath : the name of the device to check
* --pretty : output pretty JSON, easier to read for humans
* --help : display this help
_EOF
exit 2;
}
$json = {
mpath => $mpath,
size => 0,
dm_st => 'unknown',
features => '',
failures => 0,
path_failures => 0,
paths_num_total => 0,
paths_num_ko => 0,
paths_num_active => 0,
paths_num_inactive => 0,
paths_details => [],
paths_with_issue => [],
errors => []
};
my @res = qx($multipathd show maps raw format "%n|%N|%S|%f|%t|%x|%0");
if ($? != 0){
push @{$json->{errors}}, "Failed to run multipathd show maps raw format";
}
foreach (@res){
chomp;
next if $_ !~ /^$mpath\|/;
(undef, $json->{paths_num_total}, $json->{size}, $json->{features},
$json->{dm_st}, $json->{failures}, $json->{path_failures}) = split(/\s*\|\s*/, $_);
# Cast to int
foreach (qw(failures path_failures paths_num_total)){
$json->{$_} = 0 + $json->{$_};
}
# Convert size to bytes
my $unit = chop $json->{size};
if ($unit eq 'K'){
$json->{size} *= 1024;
} elsif ($unit eq 'M'){
$json->{size} *= 1024 * 1024;
} elsif ($unit eq 'G'){
$json->{size} *= 1024 * 1024 * 1024;
} elsif ($unit eq 'T'){
$json->{size} *= 1024 * 1024 * 1024 * 1024;
} elsif ($unit eq 'P'){
$json->{size} *= 1024 * 1024 * 1024 * 1024 * 1024;
}
# No need to process the other mpath here
last;
}
# Now check status of every path
@res = qx($multipathd show paths format "%m|%d|%t|%o|%T|%0|%z");
if ($? != 0){
push @{$json->{errors}}, "Failed to run multipathd show paths format";
}
# Skip header line
shift @res;
foreach (@res){
chomp;
next if $_ !~ /^$mpath\|/;
my (undef, $dev, $dm_st, $dev_st, $chk_st, $failures, $serial) = split(/\s*\|\s*/, $_);
push @{$json->{paths_details}}, {
dev => $dev,
dm_st => $dm_st,
dev_st => $dev_st,
chk_st => $chk_st,
failures => $failures + 0,
serial => $serial
};
if ($dm_st eq 'active'){
$json->{paths_num_active} += 1;
if ($dev_st ne 'running'){
$json->{paths_num_ko} += 1;
push @{$json->{paths_with_issue}}, $dev;
push @{$json->{errors}}, "dev $dev is not running";
} elsif ($chk_st ne 'ready' or $failures > 0){
$json->{paths_num_ko} += 1;
push @{$json->{paths_with_issue}}, $dev;
push @{$json->{errors}}, "dev $dev is not active";
} else {
$json->{paths_num_ok} += 1;
}
} else {
$json->{paths_num_inactive} += 1;
}
}
# We want easy usage from Zabbix, so turn those ones into strings
$json->{paths_with_issue} = join(',', @{$json->{paths_with_issue}});
$json->{errors} = join(',', @{$json->{errors}});
print to_json($json, { pretty => $pretty });
exit 0;

View File

@ -0,0 +1,109 @@
#!/usr/bin/perl
use strict;
use warnings;
use JSON;
use Data::Dumper;
use Getopt::Long;
my $what = 'all';
my $defaults = undef;
my $host = undef;
my $port = undef;
my $user = undef;
my $password = undef;
my $help = 0;
my $pretty = 0;
my $exit = 0;
my $json = {
zbx_error => "none"
};
GetOptions(
"what=s" => \$what,
"help" => \$help,
"defaults=s" => \$defaults,
"host=s" => \$host,
"port=s" => \$port,
"user=s" => \$user,
"password=s" => \$password,
"pretty" => \$pretty
);
# Basic input checks
if (defined $defaults and $defaults ne '' and not -e $defaults){
$json->{zbx_error} = "File $defaults doesn't exist";
$exit = 1;
} elsif (defined $host and $host ne '' and $host !~ m/^[\w\-\.]+$/){
$json->{zbx_error} = "Bad value for --host";
$exit = 1;
} elsif (defined $port and $port ne '' and ($port !~ m/^\d+$/ or $port < 1 or $port > 65535)){
$json->{zbx_error} = "Bad value for --port";
$exit = 1;
} elsif (defined $user and $user ne '' and $user !~ m/^[\w\-\.]+$/){
$json->{zbx_error} = "Bad value for --user";
$exit = 1;
} elsif (defined $password and $password ne '') {
# Escape single quotes, as the password will be wrapped in single quotes below
$password =~ s/'/\\'/g;
}
if ($help){
print <<_EOF;
Usage: $0 [--what=key] [--help] [--pretty]
* --what : if a key is given (eg --what=Bytes_received) will print only this value.
Else, all the stats are printed in a json format.
Run once without --what to get a list of available keys
* --help : print this help and exit
* --defaults : set the file from which mysql will read defaults
* --host : set the hostname to connect to
* --user : set the user to connect as
* --password : set the password to use
* --pretty : prints JSON in a pretty, human readable format. Has no use when --what is also given
_EOF
exit 0;
}
if ($exit eq 0){
my $opt = "";
$opt .= " --defaults-file=$defaults" if (defined $defaults and $defaults ne '');
$opt .= " --host=$host" if (defined $host and $host ne '');
$opt .= " --user=$user" if (defined $user and $user ne '');
$opt .= " --password='$password'" if (defined $password and $password ne '');
my @status = qx(mysql $opt --batch --execute 'show global status;' 2>&1);
if ($? != 0){
$exit = $?;
$json->{zbx_error} = join '', @status;
} else {
foreach (@status){
chomp;
my ($key, $val) = split(/\t/, $_);
$json->{$key} = $val;
}
# Some older MySQL versions do not have all the variables we might want
if (not defined $json->{Acl_users}){
$json->{Acl_users} = qx(mysql $opt --batch --skip-column-names --execute 'select count(user) from user;' mysql);
chomp $json->{Acl_users};
}
if (not defined $json->{Max_statement_time_exceeded} and defined $json->{Max_execution_time_exceeded}){
$json->{Max_statement_time_exceeded} = $json->{Max_execution_time_exceeded}
}
}
}
if ($what ne 'all' and defined $json->{$what}){
print $json->{$what} . "\n";
} else {
print to_json($json, { pretty => $pretty });
}
exit $exit;
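As with the other checks, the agent-side hook is not shown here; a minimal UserParameter sketch, assuming hypothetical paths and a defaults file holding the credentials (all placeholders):

# Sketch only: path, key name and defaults file are assumptions
UserParameter=mysql.status[*],/etc/zabbix/scripts/check_mysql --defaults=/etc/zabbix/.my.cnf --what=$1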

56
zabbix_scripts/check_nginx Executable file
View File

@ -0,0 +1,56 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use LWP::Simple;
use Getopt::Long;
use JSON;
my $uri = 'http://127.0.0.1/nginx-status';
my $what = 'all';
my $help = 0;
my $pretty = 0;
GetOptions(
"uri=s" => \$uri,
"what=s" => \$what,
"help" => \$help,
"pretty" => \$pretty
);
my $res = {};
my $status = get($uri);
unless ($status){
print 'ZBX_NOTSUPPORTED';
exit 1;
}
foreach my $line (split(/\n/, $status)){
if ($line =~ m/^Active connections: (\d+)/){
$res->{active_connections} = $1;
} elsif ($line =~ m/\s*(\d+)\s+\d+\s+(\d+)/){
$res->{total_connections} = $1;
$res->{total_requests} = $2;
} elsif ($line =~ m/Waiting: (\d+)/){
$res->{keep_alive} = $1;
}
}
if ($help){
print "Valid keys are:\n\n";
print "$_\n" for keys %{$res};
exit 0;
}
if ($what eq 'all'){
print to_json($res, { pretty => $pretty });
}
elsif (defined $res->{$what}){
print $res->{$what};
}
else{
print 'ZBX_NOTSUPPORTED';
}
exit 0;

View File

@ -0,0 +1,29 @@
#!/usr/bin/perl -w
use strict;
use Getopt::Long;
my %opts;
$opts{server} = '127.0.0.1';
GetOptions(
"server=s" => \$opts{server},
"host=s" => \$opts{host}
);
if (!$opts{host}){
print "You have to specify a hostname to lookup with the --host argument\n";
exit(1);
}
my $res = qx(/usr/bin/nmblookup -U $opts{server} $opts{host});
my @lines = split /\n/, $res;
# Look at the last line
my $resp = $lines[$#lines];
my $status = 0;
if ($resp =~ m/^\d+\.\d+\.\d+\.\d+ $opts{host}/){
$status = 1;
}
print $status . "\n";

View File

@ -0,0 +1,104 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use Data::Dumper;
use PMG::DBTools;
my $json = {
count_in => 0,
count_out => 0,
bytes_in => 0,
bytes_out => 0,
spam_in => 0,
spam_out => 0,
virus_in => 0,
virus_out => 0,
ptime_in => 0,
ptime_out => 0,
queue_hold => 0,
queue_active => 0,
queue_deferred => 0
};
my $pretty = 0;
my ($domain,$what) = undef;
my $timespan = 900;
my $spamthres = 5;
my $resp = undef;
GetOptions(
'domain=s' => \$domain,
'what=s' => \$what,
'timespan=i' => \$timespan,
'spamthres=i' => \$spamthres,
'pretty' => \$pretty
);
my $dbh = PMG::DBTools::open_ruledb;
my $since = time - $timespan;
my $query = "SELECT cstatistic.direction AS direction, cstatistic.bytes AS bytes, cstatistic.spamlevel AS spamlevel, " .
"cstatistic.virusinfo AS virus, cstatistic.ptime AS ptime, cstatistic.sender AS sender, creceivers.receiver " .
"AS receiver FROM cstatistic LEFT JOIN creceivers ON cstatistic.rid = creceivers.cstatistic_rid" .
" WHERE time > ?";
my $sth = $dbh->prepare($query);
$sth->execute($since);
while (my $res = $sth->fetchrow_hashref){
if (not $res->{direction}){
next if (defined $domain and $res->{sender} !~ m/.*\@$domain$/);
$json->{bytes_out} += $res->{bytes};
$json->{count_out} += 1;
$json->{ptime_out} += $res->{ptime};
$json->{spam_out} += 1 if ($res->{spamlevel} >= $spamthres);
$json->{virus_out} += 1 if (defined $res->{virus});
} else {
next if (defined $domain and $res->{receiver} !~ /.*\@$domain$/);
$json->{bytes_in} += $res->{bytes};
$json->{count_in} += 1;
$json->{ptime_in} += $res->{ptime};
$json->{spam_in} += 1 if ($res->{spamlevel} >= $spamthres);
$json->{virus_in} += 1 if (defined $res->{virus});
}
}
# Init to 0 if missing
$json->{$_} //= 0 foreach (qw/bytes_out count_out ptime_out spam_out virus_out
bytes_in count_in ptime_in spam_in virus_in/);
# Compute averages
$json->{ptime_in} = $json->{ptime_in} / $json->{count_in} / 1000 if ($json->{count_in} > 0);
$json->{ptime_out} = $json->{ptime_out} / $json->{count_out} / 1000 if ($json->{count_out} > 0);
# Now, only for general stats, count early rejects, and queue stats
if (not defined $domain){
$query = "SELECT SUM(rblcount) AS rbl, SUM(pregreetcount) AS pregreet FROM localstat WHERE mtime > ?";
$sth = $dbh->prepare($query);
$sth->execute($since);
my $res = $sth->fetchrow_hashref;
$json->{$_} = $res->{$_} foreach (qw/rbl pregreet/);
# Here we count email in the queue (active, deferred and hold queues)
foreach my $res (qx(postqueue -j)){
$res = from_json($res);
foreach (qw/hold active deferred/){
$json->{'queue_' . $_} += 1 if ($res->{queue_name} eq $_);
}
}
}
$json->{$_} //= 0 foreach (qw/rbl pregreet/);
if (defined $what and not defined $json->{$what}){
print 'ZBX_NOTSUPPORTED';
exit 0;
} elsif (defined $what){
$resp = $json->{$what}
} else {
$resp = $json;
}
$resp = (ref $resp eq 'HASH' or ref $resp eq 'ARRAY') ? to_json($resp, { pretty => $pretty }) : $resp;
print $resp . "\n";

View File

@ -0,0 +1,182 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
use File::Path qw(make_path);
use File::Basename;
use Data::Dumper;
my $pvesh = which('pvesh');
my $json = {};
my $pretty = 0;
my ($cluster,$guest,$node,$storage,$pool) = undef;
# Max age of cached values
my $cache = 60;
my $cache_dir = '/tmp/zbx_pve_cache/';
GetOptions(
'cluster' => \$cluster,
'guest=i' => \$guest,
'node=s' => \$node,
'storage=s' => \$storage,
'pool=s' => \$pool,
'pretty' => \$pretty,
'cache=i' => \$cache,
'cache-dir=s' => \$cache_dir
);
# Bail out if pvesh is not available
if (not defined $pvesh){
print 'ZBX_NOTSUPPORTED';
exit 1;
}
# Old versions do not support, nor need, --output-format=json
my $pvesh_opt = (system("$pvesh ls / --output-format=json > /dev/null 2>&1") == 0) ? '--output-format=json' : '';
if ($cluster){
my $cluster = get_api_data('/cluster/status');
# Set default values so monitoring works for single node, without cluster setup
$json->{status} = {
all_online => 1,
quorate => 1,
nodes => 1,
name => 'default',
version => 1
};
# Set default global stats
$json->{network} = {
in => 0,
out => 0
};
$json->{disk} = {
read => 0,
write => 0
};
$json->{guests} = {
qemu => 0,
qemu_running => 0,
lxc => 0,
lxc_running => 0
};
my @nodes = ();
foreach my $item (@{$cluster}){
if ($item->{type} eq 'cluster'){
$json->{status}->{$_} = $item->{$_} foreach (qw(quorate nodes name version));
} elsif ($item->{type} eq 'node' and $item->{online}){
push @nodes, $item->{name};
} elsif ($item->{type} eq 'node'){
$json->{status}->{all_online} = 0;
}
}
foreach my $node (@nodes){
my $n = get_api_data("/nodes/$node/status");
# Here we gather (and sum) some info about individual nodes to get the total number of
# CPU, the amount of memory etc...
$json->{memory}->{$_} += $n->{memory}->{$_} foreach (qw(free total used));
$json->{ksm}->{$_} += $n->{ksm}->{$_} foreach (qw(shared));
$json->{cpuinfo}->{$_} += $n->{cpuinfo}->{$_} foreach (qw(cpus sockets));
$json->{loadavg}[$_] += $n->{loadavg}[$_] foreach (0..2);
}
# We want average load avg of the cluster, not the sum of individual loads
$json->{loadavg}[$_] = sprintf "%.2f", $json->{loadavg}[$_] / $json->{status}->{nodes} foreach (0..2);
my $guests = get_api_data('/cluster/resources', '--type=vm');
foreach my $guest (@{$guests}){
$json->{network}->{in} += $guest->{netin} || 0;
$json->{network}->{out} += $guest->{netout} || 0;
$json->{disk}->{read} += $guest->{diskread} || 0;
$json->{disk}->{write} += $guest->{diskwrite} || 0;
my $type = $guest->{type};
$json->{guests}->{$type}++;
$json->{guests}->{$type . '_running'}++ if ($guest->{status} eq 'running');
}
} elsif ($node){
$json->{guests} = {
qemu => 0,
qemu_running => 0,
lxc => 0,
lxc_running => 0
};
foreach my $item (qw(status version subscription)){
$json->{$item} = get_api_data("/nodes/$node/$item");
}
my $guests = get_api_data('/cluster/resources', '--type=vm');
foreach my $guest (@{$guests}){
next if ($guest->{node} ne $node);
my $type = $guest->{type};
$json->{guests}->{$type}++;
$json->{guests}->{$type . '_running'}++ if ($guest->{status} eq 'running');
}
} elsif ($guest){
my $guests = get_api_data('/cluster/resources', '--type=vm');
foreach my $g (@{$guests}){
if ($g->{vmid} eq $guest){
$json = $g;
last;
}
}
} elsif ($pool){
my $pool = get_api_data("/pools/$pool");
$json->{comment} = $pool->{comment};
foreach my $type (qw(qemu lxc)){
$json->{$_}->{$type} = 0 foreach (qw(guests templates));
}
foreach my $item (@{$pool->{members}}){
if ($item->{type} =~ m/^(qemu|lxc)$/ and !$item->{template}){
$json->{guests}->{$_} += $item->{$_} foreach (qw(maxcpu diskread diskwrite maxdisk mem maxmem netin netout));
$json->{guests}->{used_cpu} += $item->{cpu} * $item->{maxcpu};
$json->{guests}->{$item->{type}}++;
}
if ($item->{type} =~ m/^(qemu|lxc)$/ and $item->{template}){
$json->{templates}->{$_} += $item->{$_} foreach (qw(maxdisk));
$json->{templates}->{$item->{type}}++;
}
}
$json->{guests}->{$_} //= 0 foreach (qw(cpu maxcpu diskread diskwrite maxdisk mem maxmem netin netout));
$json->{templates}->{$_} //= 0 foreach (qw(maxdisk));
$json->{guests}->{cpu} = ($json->{guests}->{maxcpu} == 0) ? 0 : $json->{guests}->{used_cpu} / $json->{guests}->{maxcpu};
} elsif ($storage){
my $stores = get_api_data('/cluster/resources', '--type=storage');
foreach my $s (@{$stores}){
if ($s->{storage} eq $storage){
$json->{maxdisk} = $s->{maxdisk};
$json->{disk} = $s->{disk};
last;
}
}
} else{
print 'ZBX_NOTSUPPORTED';
exit 0;
}
print to_json($json, { pretty => $pretty }) . "\n";
# Helper which will either get data from
# the cache if it's fresh enough, or query the API
# and save the result in the cache for later
sub get_api_data {
my ($path, $query_opt) = @_;
$query_opt ||= '';
my $opt_filename = $query_opt;
$opt_filename =~ s/[\-=]/_/g;
my $res;
# Is the cache existing and fresh enough ?
if (-f $cache_dir . $path . $opt_filename and int((-M $cache_dir . $path . $opt_filename)*60*60*24) < $cache){
{
local $/; # Enable slurp
open my $fh, "<", $cache_dir . $path . $opt_filename;
$res = <$fh>;
close $fh;
}
} else {
$res = qx($pvesh get $path $query_opt $pvesh_opt 2>/dev/null);
# Save the result in the cache for later retrieval
eval{
my $dir = (fileparse($path))[1];
make_path($cache_dir . $dir, { chmod => 0700 });
};
open my $fh, ">", $cache_dir . $path . $opt_filename;
print $fh $res;
close $fh;
}
return from_json($res);
}

View File

@ -0,0 +1,16 @@
#!/bin/bash
case $1 in
queue_local)
/usr/bin/qmqtool -s | perl -ne 'm/^Messages with local recipients: (\d+)/ && print $1'
;;
queue_remote)
/usr/bin/qmqtool -s | perl -ne 'm/^Messages with remote recipients: (\d+)/ && print $1'
;;
queue_total)
/usr/bin/qmqtool -s | perl -ne 'm/^Total messages in queue: (\d+)/ && print $1'
;;
*)
echo 'ZBX_NOTSUPPORTED'
;;
esac

View File

@ -0,0 +1,82 @@
#!/usr/bin/perl -w
use strict;
use File::Which;
use Getopt::Long;
my $slot = '';
my $cli = which('hpacucli') || which('ssacli');
my @validchecks = qw/controller array logicaldrive physicaldrive/;
my $check = join ',', @validchecks;
GetOptions ('slot=s' => \$slot,
'check=s' => \$check,
'help' => sub { &usage() }
);
sub usage(){
print <<"EOF";
$0 --slot=<slot number> --check=<what to check>
* slot must be a number. You can find which slots have controllers with the command:
$cli controller all show status
* check is a comma separated list of items to check. The default (without --check) is to check everything
Valid values are:
EOF
print "$_\n" foreach (@validchecks);
exit(0);
}
if ($slot !~ /^\d+$/){
usage();
}
unless (-x $cli){
die "Cannot run $cli\n";
}
my @checks = split /\s?,\s?/, $check;
foreach my $check (@checks){
usage() unless (grep { $_ eq $check} @validchecks);
}
foreach my $param (@checks){
# Global controller checks
if ($param eq 'controller'){
open CLI, "$cli controller slot=$slot show status|" ||
die "An error occured while running $cli: $!";
foreach my $line (<CLI>){
if ( $line =~ /Status\:\s*([\w\s]+)$/ ) {
my $res = $1;
chomp($res);
if ($res ne 'OK'){
print "CRITICAL: $line\n";
exit(0);
}
}
}
close CLI;
}
else{
open CLI, "$cli controller slot=$slot $param all show status|" ||
die "An error occured while running $cli: $!";
foreach my $line (<CLI>){
if ( $line =~ /^\s*$param.*:\s*(\w+[\w\s]*)$/i ) {
my $res = $1;
chomp($res);
if ($res ne 'OK'){
print "CRITICAL: $line\n";
exit(0);
}
}
}
close CLI;
}
}
print 'OK';
exit(0);

View File

@ -27,20 +27,19 @@ use Getopt::Long;
#
# unused devices: <none>
my $file = "/proc/mdstat";
my $file = "/proc/mdstat";
my $device = "all";
# Get command line options.
GetOptions ('file=s' => \$file,
'device=s' => \$device,
'help' => sub { &usage() } );
GetOptions (
'file=s' => \$file,
'device=s' => \$device,
'help' => sub { &usage() }
);
## Strip leading "/dev/" from --device in case it has been given
$device =~ s/^\/dev\///;
## Return codes for Nagios
my %ERRORS=('OK'=>0,'WARNING'=>1,'CRITICAL'=>2,'UNKNOWN'=>3,'DEPENDENT'=>4);
## This is a global return value - set to the worst result we get overall
my $retval = 0;
@ -50,78 +49,86 @@ my $result = 'OK';
open FILE, "< $file" or die "Can't open $file : $!";
while (<FILE>) {
next if ! /^(md\d+)+\s*:/;
next if $device ne "all" and $device ne $1;
my $dev = $1;
push @raids, $dev;
next if ! /^(md\d+)+\s*:/;
next if $device ne "all" and $device ne $1;
my $dev = $1;
push @raids, $dev;
my @array = split(/ /);
$devs_total{$dev} = 0;
my $devs_up = 0;
my $missing = 0;
for $_ (@array) {
$level{$dev} = $1 if /^(raid\d+)$/;
next if ! /(\w+)\[\d+\](\(.\))*/;
$devs_total{$dev}++;
if ($2 eq "(F)") {
$failed_devs{$dev} .= "$1,";
}
elsif ($2 eq "(S)") {
$spare_devs{$dev} .= "$1,";
}
else {
$active_devs{$dev} .= "$1,";
$devs_up++;
}
}
if (! defined($active_devs{$dev})) { $active_devs{$dev} = "none"; }
else { $active_devs{$dev} =~ s/,$//; }
if (! defined($spare_devs{$dev})) { $spare_devs{$dev} = "none"; }
else { $spare_devs{$dev} =~ s/,$//; }
if (! defined($failed_devs{$dev})) { $failed_devs{$dev} = "none"; }
else { $failed_devs{$dev} =~ s/,$//; }
$_ = <FILE>;
/(\d+)\ blocks\ (.*)(\[.*\])\s?$/;
$size{$dev} = int($1/1024);
#print "$3\n";
$missing = 1 if ($3 =~ m/_/);
if ($size{$dev} > 1024){
$size{$dev} = int($size{$dev}/1024)."GB";
}
else{
$size{$dev} .= "MB";
}
$_ = <FILE>;
if (($devs_total{$dev} > $devs_up) || ($failed_devs{$dev} ne "none") || (($missing) && (!/recovery/))) {
$status{$dev} = "Degraded";
$result = "CRITICAL";
$retval = $ERRORS{"CRITICAL"};
}
else {
$status{$dev} = "Optimal";
}
if (/recovery/){
$status{$dev} = "Rebuilding";
if ($result eq "OK"){
$result = "WARNING";
$retval = $ERRORS{"WARNING"};
}
}
my @array = split(/ /);
$devs_total{$dev} = 0;
my $devs_up = 0;
my $missing = 0;
for $_ (@array) {
$level{$dev} = $1 if /^(raid\d+)$/;
next if ! /(\w+)\[\d+\](\(.\))*/;
$devs_total{$dev}++;
if ($2 eq "(F)") {
$failed_devs{$dev} .= "$1,";
}
elsif ($2 eq "(S)") {
$spare_devs{$dev} .= "$1,";
$devs_up++;
}
else {
$active_devs{$dev} .= "$1,";
$devs_up++;
}
}
if (! defined($active_devs{$dev})){
$active_devs{$dev} = "none";
}
else {
$active_devs{$dev} =~ s/,$//;
}
if (! defined($spare_devs{$dev})){
$spare_devs{$dev} = "none";
}
else {
$spare_devs{$dev} =~ s/,$//;
}
if (! defined($failed_devs{$dev})){
$failed_devs{$dev} = "none";
}
else {
$failed_devs{$dev} =~ s/,$//;
}
$_ = <FILE>;
/(\d+)\ blocks\ (.*)(\[.*\])\s?$/;
$size{$dev} = int($1/1024);
$missing = 1 if ($3 =~ m/_/);
if ($size{$dev} > 1024){
$size{$dev} = int($size{$dev}/1024)."GB";
}
else{
$size{$dev} .= "MB";
}
$_ = <FILE>;
if (($devs_total{$dev} > $devs_up) || ($failed_devs{$dev} ne "none") || (($missing) && (!/recovery/))) {
$status{$dev} = "Degraded";
$result = "CRITICAL";
}
else {
$status{$dev} = "Optimal";
}
if (/(recovery|resync)\s*=\s*\d{1,2}(\.\d)?%/){
$status{$dev} = "Rebuilding";
if ($result eq "OK"){
$result = "WARNING";
}
}
}
print "$result: ";
foreach my $raid (@raids){
print "$raid:$level{$raid}:$devs_total{$raid} drives:$size{$raid}:$status{$raid} ";
print "$raid:$level{$raid}:$devs_total{$raid} drives:$size{$raid}:$status{$raid} ";
}
print "\n";
close FILE;
exit $retval;
exit 0;
# =====
sub usage()
{
sub usage(){
printf("
Check status of Linux SW RAID

View File

@ -124,48 +124,66 @@ ADAPTER: for ( my $adp = 0; $adp < $adapters; $adp++ ) {
}
}
close LDGETNUM;
LDISK: for ( my $ld = 0; $ld < $ldnum; $ld++ ) {
# Get info on this particular logical drive
open (LDINFO, "$megacli -LdInfo -L$ld -a$adp -NoLog |")
|| die "error: Could not execute $megacli -LdInfo -L$ld -a$adp -NoLog";
my ($size, $unit, $raidlevel, $ldpdcount, $spandepth, $state);
while (<LDINFO>) {
if ( m/^Size\s*:\s*(\d+(\.\d+)?)\s*(MB|GB|TB)/ ) {
$size = $1;
$unit = $3;
# Adjust MB to GB if that's what we got
if ( $unit eq 'MB' ) {
$size = sprintf( "%.0f", ($size / 1024) );
$unit= 'GB';
}
} elsif ( m/^State\s*:\s*(\w+)/ ) {
$state = $1;
if ( $state ne 'Optimal' ) {
$status = 'CRITICAL';
}
} elsif ( m/^Number Of Drives( per span)?\s*:\s*(\d+)/ ) {
$ldpdcount = $2;
} elsif ( m/^Span Depth\s*:\s*(\d+)/ ) {
$spandepth = $1;
$ldpdcount = $ldpdcount * $spandepth;
} elsif ( m/^RAID Level\s*:\s*Primary-(\d)/ ) {
$raidlevel = $1;
}
open (CFGDSPLY, "$megacli -CfgDsply -a$adp -NoLog |")
|| die "error: Could not execute $megacli -CfgDsply -a$adp -NoLog";
my $hba = 0;
my $failgrouplist = 0;
while (<CFGDSPLY>) {
if ( m/Failed to get Disk Group list/ ) {
$failgrouplist = 1;
}
if ( m/Product Name:.*(JBOD|HBA)/ ) {
$hba = 1;
}
}
close CFGDSPLY;
# When controller is in HBA/JBOD mode, skip RAID volume checks
unless ($hba && $failgrouplist) {
LDISK: for ( my $ld = 0; $ld < $ldnum; $ld++ ) {
# Get info on this particular logical drive
open (LDINFO, "$megacli -LdInfo -L$ld -a$adp -NoLog |")
|| die "error: Could not execute $megacli -LdInfo -L$ld -a$adp -NoLog";
my ($size, $unit, $raidlevel, $ldpdcount, $spandepth, $state);
while (<LDINFO>) {
if ( m/^Size\s*:\s*(\d+(\.\d+)?)\s*(MB|GB|TB)/ ) {
$size = $1;
$unit = $3;
# Adjust MB to GB if that's what we got
if ( $unit eq 'MB' ) {
$size = sprintf( "%.0f", ($size / 1024) );
$unit= 'GB';
}
} elsif ( m/^State\s*:\s*(\w+(\s\w+)?)/ ) {
$state = $1;
if ( $state ne 'Optimal' ) {
$status = 'CRITICAL';
}
} elsif ( m/^Number Of Drives( per span)?\s*:\s*(\d+)/ ) {
$ldpdcount = $2;
} elsif ( m/^Span Depth\s*:\s*(\d+)/ ) {
$spandepth = $1;
$ldpdcount = $ldpdcount * $spandepth;
} elsif ( m/^RAID Level\s*:\s*Primary-(\d)/ ) {
$raidlevel = $1;
}
}
close LDINFO;
$result .= "$adp:$ld:RAID-$raidlevel:$ldpdcount drives:$size$unit:$state ";
} #LDISK
close LDINFO;
$result .= "$adp:$ld:RAID-$raidlevel:$ldpdcount drives:$size$unit:$state ";
} #LDISK
close LDINFO;
}
# Get info on physical disks for this adapter
open (PDLIST, "$megacli -PdList -a$adp -NoLog |")
|| die "error: Could not execute $megacli -PdList -a$adp -NoLog";
my ($slotnumber,$fwstate);
my ($slotnumber,$fwstate,$fwinfo);
PDISKS: while (<PDLIST>) {
if ( m/Slot Number:\s*(\d+)/ ) {
$slotnumber = $1;
@ -180,12 +198,15 @@ ADAPTER: for ( my $adp = 0; $adp < $adapters; $adp++ ) {
}
} elsif ( m/Predictive Failure Count:\s*(\d+)/ ) {
$prederrors += $1;
} elsif ( m/Firmware state:\s*(\w+)/ ) {
} elsif ( m/Firmware state:\s*(\w+)(.*)/ ) {
$fwstate = $1;
$fwinfo = $2;
if ( $fwstate =~ m/Hotspare/ ) {
$hotsparecount++;
} elsif ( $fwstate =~ m/^Online/ ) {
# Do nothing
} elsif ( $fwstate =~ m/^Unconfigured/ && defined $fwinfo && $fwinfo =~ m/^\(good\)/) {
# Do nothing
} elsif ( $slotnumber != 255 ) {
$pdbad++;
$status = 'CRITICAL';

View File

@ -0,0 +1,46 @@
#!/usr/bin/perl
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Spec;
open STDERR, '>', File::Spec->devnull() or die "could not open STDERR: $!\n";
my $what = 'all';
GetOptions(
"what=s" => \$what
);
my @salearn = qx(sa-learn --dump magic);
my $data = {
spam => 0,
ham => 0,
token => 0
};
foreach my $line (@salearn){
if ($line =~ m/(\d+)\s*0\s*non-token\sdata:\snspam$/){
$data->{spam} = $1;
}
elsif ($line =~ m/(\d+)\s*0\s*non-token\sdata:\snham$/){
$data->{ham} = $1;
}
elsif ($line =~ m/(\d+)\s*0\s*non-token\sdata:\sntokens$/){
$data->{token} = $1;
}
}
if ($what eq 'spam'){
print $data->{spam} . "\n";
}
elsif ($what eq 'ham'){
print $data->{ham} . "\n";
}
elsif ($what eq 'token'){
print $data->{token} . "\n";
}
else{
print to_json($data);
}
exit(0);

View File

@ -0,0 +1,188 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
use Date::Parse;
use File::ReadBackwards;
use Data::Dumper;
my $samba_tool = which('samba-tool');
my $pdbedit = which('pdbedit');
# Number of seconds in the past to count authentications
my $since = 300;
my $pretty = 0;
my $general = 1;
my $ou = undef;
# This log is expected to be in JSON format. For example, in smb.conf :
# log level = 1 auth_audit:3 auth_json_audit:4@/var/log/samba/audit_auth.log
my $audit_auth_log = '/var/log/samba/json/auth.log';
if (not defined $samba_tool or not defined $pdbedit){
print 'ZBX_NOTSUPPORTED';
exit 1;
}
GetOptions(
'pretty' => \$pretty,
'since=i' => \$since,
'audit-auth-log=s' => \$audit_auth_log,
'general' => \$general,
'ou=s' => \$ou
);
if ($since !~ m/^\d+$/){
die "Invalid value for since\n";
}
my $json = {};
if (defined $ou){
$json = {
objects => 0
};
if ($ou !~ m/^(?<RDN>(?<Key>(?:\\[0-9A-Fa-f]{2}|\\\[^=\,\\]|[^=\,\\]+)+)\=(?<Value>(?:\\[0-9A-Fa-f]{2}|\\\[^=\,\\]|[^=\,\\]+)+))(?:\s*\,\s*(?<RDN>(?<Key>(?:\\[0-9A-Fa-f]{2}|\\\[^=\,\\]|[^=\,\\]+)+)\=(?<Value>(?:\\[0-9A-Fa-f]{2}|\\\[^=\,\\]|[^=\,\\]+)+)))*$/){
die "Invalid OU\n";
}
foreach (qx($samba_tool ou listobjects '$ou' 2>/dev/null)){
die "Error while counting objects of OU $ou\n" if ($? != 0);
chomp;
$json->{objects}++;
}
} elsif ($general){
$json = {
accounts => {
users => 0,
inactive_users => 0,
active_users => 0,
groups => 0,
computers => 0
},
replication => 'UNKNOWN',
processes => {
cldap_server => 0,
kccsrv => 0,
dreplsrv => 0,
ldap_server => 0,
kdc_server => 0,
dnsupdate => 0,
'notify-daemon' => 0,
rpc_server => 0,
winbind_server => 0,
nbt_server => 0,
dnssrv => 0,
samba => 0,
},
gpo => 0,
ou => 0,
activity => {
authentications => {
users => {
success => 0,
failure => 0
},
computers => {
success => 0,
failure => 0
}
},
authorizations => {
users => 0,
computers => 0
},
since => $since
}
};
# Get the number of users. pdbedit is preferred here because we can
# differentiate active and inactive users, which samba-tool can't do
# While at it, also get the computers
foreach (qx($pdbedit -L -v)){
next unless (m/^Account Flags:\s+\[(.*)\]/);
my $flags = $1;
if ($flags =~ m/U/){
$json->{accounts}->{users}++;
if ($flags =~ m/D/){
$json->{accounts}->{inactive_users}++;
} else {
$json->{accounts}->{active_users}++;
}
} elsif ($flags =~ m/W/){
$json->{accounts}->{computers}++;
}
}
# Now count groups
foreach (qx($samba_tool group list 2>/dev/null)){
$json->{accounts}->{groups}++;
}
# Get replication status
# We want just a quick summary, so only output the first line
# manual checks will be needed to get the details, but if this field doesn't contain [ALL GOOD],
# then something is probably wrong
$json->{replication} = (split(/\n/, qx($samba_tool drs showrepl --summary 2>/dev/null)))[0];
# Get the list of workers
foreach (qx($samba_tool processes 2>/dev/null)){
if (/^([^\(\s]+).+\d+$/){
$json->{processes}->{$1}++;
}
}
# Get the number of GPO
foreach (qx($samba_tool gpo listall 2>/dev/null)){
next unless (/^GPO/);
$json->{gpo}++;
}
# Get the number of OU
foreach (qx($samba_tool ou list 2>/dev/null)){
$json->{ou}++;
}
if (-e $audit_auth_log){
my $backward = File::ReadBackwards->new( $audit_auth_log ) or die "Couldn't open $audit_auth_log : $!\n";
while (defined (my $line = $backward->readline)){
my $event;
eval {
$event = from_json($line);
};
# Skip the log entry if we can't parse JSON
next if (not defined $event);
my $type = $event->{type};
# We're only interested in Authentication and Authorization messages
next if ($type ne 'Authentication' and $type ne 'Authorization');
# Parse the date in the timestamp field
my $timestamp = str2time($event->{timestamp});
# Skip if date couldn't be parsed
next if (not defined $timestamp);
# As we're reading in reverse order, once we reach an event older than now - since we can stop, as all the others will be even older
last if (time() - $timestamp > $since);
my $subject;
if ($type eq 'Authentication'){
if ($event->{Authentication}->{status} eq 'NT_STATUS_PROTOCOL_UNREACHABLE'){
# Ignore NT_STATUS_PROTOCOL_UNREACHABLE as they are harmless
next;
}
# Accounts ending with $ are for computers
$subject = (($event->{$type}->{mappedAccount} || $event->{$type}->{clientAccount} || '')=~ m/\$(\@.+)?$/) ? 'computers' : 'users';
if ($event->{Authentication}->{status} eq 'NT_STATUS_OK'){
$json->{activity}->{authentications}->{$subject}->{success}++;
} else {
$json->{activity}->{authentications}->{$subject}->{failure}++;
}
} else {
$subject = ($event->{$type}->{account} =~ m/\$$/) ? 'computers' : 'users';
$json->{activity}->{authorizations}->{$subject}++;
}
}
}
}
print to_json($json, { pretty => $pretty });

View File

@ -1,44 +1,32 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use Config::Simple;
my $what = $ARGV[0];
my $thres = $ARGV[1];
unless (defined $what){
usage();
exit(1);
usage();
exit(1);
}
open SENSORS, ('</etc/zabbix/sensors.conf') ||
die "Couldn't open /etc/zabbix/sensors.conf: $!\n";
my $cfg = new Config::Simple;
$cfg->read('/etc/zabbix/sensors.ini');
my $ret = 'ZBX_NOTSUPPORTED';
foreach (<SENSORS>){
next unless (/^$what(\s+)?=(\s+)?(.*)!(\-?\d+)!(\-?\d+)$/);
my $cmd = $3;
my $high = $4;
my $low = $5;
if (!defined $thres){
$ret = `$cmd`;
}
elsif ($thres eq 'high'){
$ret = $high
}
elsif ($thres eq 'low'){
$ret = $low;
}
else {
usage();
exit(1);
}
my $sensor = $cfg->get_block($what);
if ($sensor && $sensor->{cmd}){
$ret = qx($sensor->{cmd});
}
print $ret;
exit(0);
sub usage {
print <<"EOF";
Usage: $0 sensor_name [high|low]
Usage: $0 sensor_name
EOF
}

View File

@ -0,0 +1,87 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use LWP::Simple;
use Getopt::Long;
use JSON;
my $uri = 'http://127.0.0.1:3128/squid-internal-mgr/info';
my $what = 'all';
my $help = 0;
my $pretty = 0;
GetOptions(
"uri=s" => \$uri,
"what=s" => \$what,
"help" => \$help,
"pretty" => \$pretty
);
my $res = {};
my $status = get($uri);
unless ($status){
print 'ZBX_NOTSUPPORTED';
exit 1;
}
foreach my $line (split(/\n/, $status)){
if ($line =~ m/^Squid Object Cache: Version (\d+(\.\d+)*)/){
$res->{version} = $1;
} elsif ($line =~ m/^\s+Number of clients accessing cache:\s+(\d+)/){
$res->{clients_num} = $1 * 1;
} elsif ($line =~ m/^\s+Number of HTTP requests received:\s+(\d+)/){
$res->{requests} = $1 * 1;
} elsif ($line =~ m/^\s+Hits as % of all requests:\s+5min:\s+(\d+(\.\d+)?)%,/){
$res->{hits_req_percent} = $1 * 1;
} elsif ($line =~ m/^\s+Hits as % of bytes sent:\s+5min:\s+(\d+(\.\d+)?)%,/){
$res->{hits_bytes_percent} = $1 * 1;
} elsif ($line =~ m/^\s+Memory hits as % of hit requests:\s+5min:\s+(\d+(\.\d+)?)%,/){
$res->{mem_hits_req_percent} = $1 * 1;
} elsif ($line =~ m/^\s+Disk hits as % of hit requests:\s+5min:\s+(\d+(\.\d+)?)%,/){
$res->{disk_hits_req_percent} = $1 * 1;
} elsif ($line =~ m/^\s+Storage Swap size:\s+(\d+)\sKB/){
$res->{stor_swap_size} = $1 * 1024;
} elsif ($line =~ m/^\s+Storage Swap capacity:\s+(\d+(\.\d+)?)% used, (\d+(\.\d+)?)% free/){
($res->{stor_swap_used_percent}, $res->{stor_swap_free_percent}) = ($1 * 1, $3 * 1);
} elsif ($line =~ m/^\s+Storage Mem size:\s+(\d+)\sKB/){
$res->{stor_mem_size} = $1 * 1024;
} elsif ($line =~ m/^\s+Storage Mem capacity:\s+(\d+(\.\d+)?)% used, (\d+(\.\d+)?)% free/){
($res->{stor_mem_used_percent}, $res->{stor_mem_free_percent}) = ($1 * 1, $3 * 1);
} elsif ($line =~ m/^\s+Mean Object Size:\s+(\d+(\.\d+)?)\sKB/){
$res->{mean_object_size} = int($1 * 1024);
} elsif ($line =~ m/^\s+CPU Time:\s+(\d+(\.\d+)?)\sseconds/){
$res->{cpu_time} = $1 * 1;
} elsif ($line =~ m/^\s+CPU Usage:\s+(\d+(\.\d+)?)%/){
$res->{cpu_usage} = $1 * 1;
} elsif ($line =~ m/^\s+CPU Usage, 5 minute avg:\s+(\d+(\.\d+)?)%/){
$res->{cpu_usage_avg_5min} = $1 * 1;
} elsif ($line =~ m/^\s+Maximum number of file descriptors:\s+(\d+)/){
$res->{fd_max} = $1 * 1;
} elsif ($line =~ m/^\s+Number of file desc currently in use:\s+(\d+)/){
$res->{fd_used} = $1 * 1;
}
}
if (defined $res->{fd_max} and defined $res->{fd_used}){
$res->{fd_used_percent} = int($res->{fd_used} * 100/ $res->{fd_max});
}
if ($help){
print "Valid keys are:\n\n";
print "$_\n" for keys %{$res};
exit 0;
}
if ($what eq 'all'){
print to_json($res, { pretty => $pretty });
}
elsif (defined $res->{$what}){
print $res->{$what};
}
else{
print 'ZBX_NOTSUPPORTED';
}
exit 0;

View File

@ -0,0 +1,120 @@
#!/usr/bin/perl
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
my $dev = undef;
my $type = 'auto';
my $what = 'json';
my $pretty = 0;
GetOptions(
'device=s' => \$dev,
'type=s' => \$type,
'what=s' => \$what,
'pretty' => \$pretty
);
if (not defined $dev or $dev !~ m|^/dev/\w+(/\w+)*$|){
print "Invalid --device\n";
exit 1;
} elsif ($what !~ m/^\w+$/){
print "Invalid --what\n";
exit 1;
} elsif ($type !~ m/^\w+\+*\w+(,\w+)*$/){
print "Invalid --type\n";
exit 1;
}
my $json = {
temperature_celsius => 25,
power_on_hours => 0,
power_cycle_count => 0,
reallocated_sector_count => 0,
current_pending_sector => 0,
offline_uncorrectable => 0,
percent_lifetime_remain => 100,
firmware_version => 0
};
my $smartctl = which('smartctl');
sub print_out {
if ($what eq 'json'){
print to_json($json, { pretty => $pretty });
exit 0;
} elsif (defined $json->{$what}){
print $json->{$what} . "\n";
exit 0;
} else {
print "ZBX_NOTSUPPORTED\n";
exit 1;
}
}
sub get_smart_attr {
my $smart = shift;
my $attr = shift;
if (defined $smart->{ata_smart_attributes}->{table}){
foreach (@{$smart->{ata_smart_attributes}->{table}}){
if ($_->{name} eq $attr){
return $_;
}
}
}
return undef;
}
if (not defined $smartctl){
$what = 'error';
print_out();
}
my $data = from_json(qx($smartctl -a $dev -d $type --json=c));
if (defined $data->{temperature}->{current}){
$json->{temperature_celsius} = $data->{temperature}->{current};
}
if (defined $data->{power_on_time}->{hours}){
$json->{power_on_hours} = $data->{power_on_time}->{hours};
}
if (defined $data->{power_cycle_count}){
$json->{power_cycle_count} = $data->{power_cycle_count};
}
if (defined $data->{firmware_version}){
$json->{firmware_version} = $data->{firmware_version};
}
my ($pending, $realloc, $offline, $remain);
if ($pending = get_smart_attr($data, 'Current_Pending_Sector')){
$json->{current_pending_sector} = $pending->{raw}->{value};
}
if ($realloc = get_smart_attr($data, 'Reallocated_Sector_Ct') || get_smart_attr($data, 'Reallocated_Event_Count')){
$json->{reallocated_sector_count} = $realloc->{raw}->{value};
} elsif (defined $data->{nvme_smart_health_information_log}->{media_errors}){
# NVMe can report media errors, so report them as reallocated sectors
$json->{reallocated_sector_count} = $data->{nvme_smart_health_information_log}->{media_errors};
}
if ($offline = get_smart_attr($data, 'Offline_Uncorrectable')){
$json->{offline_uncorrectable} = $offline->{raw}->{value};
}
if ($remain = get_smart_attr($data, 'Percent_Lifetime_Remain')){
$json->{percent_lifetime_remain} = $remain->{value};
} elsif ($remain = get_smart_attr($data, 'SSD_Life_Left')){
$json->{percent_lifetime_remain} = $remain->{raw}->{value};
} elsif ($remain = get_smart_attr($data, 'Wear_Leveling_Count')){
$json->{percent_lifetime_remain} = $remain->{value};
} elsif (defined $data->{nvme_smart_health_information_log}->{percentage_used}){
# NVMe sometimes reports the estimated life used instead of the remaining
$json->{percent_lifetime_remain} = 100 - $data->{nvme_smart_health_information_log}->{percentage_used};
}
print_out();
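A minimal agent-side sketch for this SMART check, assuming a hypothetical path, item key and a sudo rule for the zabbix user (smartctl normally needs root); all of these are placeholders, not defined by this listing. The full JSON it returns could then be split with dependent items on the server side.

# Sketch only: path, key name and sudo setup are assumptions
UserParameter=smart.attributes[*],sudo /etc/zabbix/scripts/check_smart --device=$1 --type=$2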

262
zabbix_scripts/check_unifi Executable file
View File

@ -0,0 +1,262 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use LWP::UserAgent;
use HTTP::Cookies;
use URI;
use Data::Dumper;
umask 077;
my $user = 'zabbix';
my $pass = 'secret';
my $site = 'default';
my $url = 'https://localhost:8443';
my $certcheck = 1;
my $unifi;
my $dev;
my $station;
my $net;
my $wlan;
my $pretty = 0;
my $json = {};
my $site_id;
GetOptions (
'user=s' => \$user,
'password|p=s' => \$pass,
'site=s' => \$site,
'url=s' => \$url,
'cert-check!' => \$certcheck,
'unifi' => \$unifi,
'dev=s' => \$dev,
'station=s' => \$station,
'net=s' => \$net,
'wlan=s' => \$wlan,
'pretty' => \$pretty
);
# If connecting to localhost, no need to check certificate
my $uri = URI->new($url);
if ($uri->host =~ m/^localhost|127\.0\.0/){
$certcheck = 0;
}
my @radio_proto = qw/a b g na ng ac/;
my $resp;
my $username = $ENV{LOGNAME} || $ENV{USER} || getpwuid($<);
my $cj = HTTP::Cookies->new(
file => "/tmp/.unifi_$username.txt",
autosave => 1,
ignore_discard => 1
);
my $sslopts = {};
if (not $certcheck){
$sslopts = { verify_hostname => 0, SSL_verify_mode => 0 }
}
my $ua = LWP::UserAgent->new(
ssl_opts => $sslopts,
cookie_jar => $cj
);
# Check if we need to login
$resp = $ua->get($url . '/api/self/sites');
if ($resp->is_error){
# Log into the API
$resp = $ua->post(
$url . '/api/login',
Content => to_json({ username => $user, password => $pass }),
Content_Type => 'application/json;charset=UTF-8'
);
die "Login failed: " . $resp->message . "\n" if $resp->is_error;
$resp = $ua->get($url . '/api/self/sites');
die $resp->message . "\n" if $resp->is_error;
}
# Now, we need to get the site ID
foreach (@{from_json($resp->decoded_content)->{data}}){
if ($_->{name} eq $site || $_->{desc} eq $site){
$site_id = $_->{_id};
# If site is referenced by description, translate it to name
$site = $_->{name} if ($_->{name} ne $site);
last;
}
}
die "Site $site not found\n" unless ($site_id);
# Global info about the instance of Unifi
if ($unifi){
$resp = $ua->get($url . '/api/s/' . $site . '/stat/health');
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
if ($entry->{subsystem} eq 'wlan'){
$json->{wireless_clients} = $entry->{num_user};
$json->{wireless_guests} = $entry->{num_guest};
} elsif ($entry->{subsystem} eq 'lan'){
$json->{wired_clients} = $entry->{num_user};
$json->{wired_guests} = $entry->{num_guest};
}
foreach (qw/adopted pending disabled/){
$json->{'dev_' . $_} += $entry->{'num_' . $_} if (defined $entry->{'num_' . $_});
}
foreach (qw/num_ap num_sw num_gw/){
$json->{$_} += $entry->{$_} if ($entry->{$_});
}
}
$json->{$_} ||= 0 foreach (qw/wireless_clients wireless_guests
wired_clients wired_guests dev_adopted
dev_pending dev_disabled num_ap num_sw
num_gw/);
$resp = $ua->get($url . '/api/s/' . $site . '/stat/sysinfo');
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
$json->{$_} = from_json($resp->decoded_content)->{data}->[0]->{$_}
foreach (qw/version build update_available/);
# Get unarchived alarms
$resp = $ua->post($url . '/api/s/' . $site . '/stat/alarm',
Content => to_json({ archived => 'false' }),
Content_Type => 'application/json;charset=UTF-8'
);
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
$json->{alarm} = scalar @{from_json($resp->decoded_content)->{data}};
} elsif ($dev) {
# Dev is identified by MAC
$resp = $ua->get($url . '/api/s/' . $site . '/stat/device/' . $dev);
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
my $obj = from_json($resp->decoded_content)->{data}->[0];
foreach (qw/sys_stats locating serial name num_sta user-num_sta
guest-num_sta inform_url version model state type
cfgversion adopted avg_client_signal/){
$json->{$_} = $obj->{$_} if (defined $obj->{$_});
}
# Convert last seen into a relative time
$json->{last_seen} = (defined $obj->{last_seen}) ? time - $obj->{last_seen} : time;
# Add some more info in sys_stats
$json->{sys_stats}->{$_} = $obj->{'system-stats'}->{$_} foreach (qw/cpu mem uptime/);
# If this is an ap
if ($obj->{type} eq 'uap'){
foreach (qw/guest-rx_packets guest-tx_packets guest-rx_bytes
guest-tx_bytes user-rx_packets user-tx_packets
user-rx_bytes user-tx_bytes rx_packets tx_packets
rx_bytes tx_bytes rx_errors tx_errors
rx_dropped tx_dropped/){
$json->{net_stats}->{$_} = $obj->{stat}->{ap}->{$_} if (defined $obj->{stat}->{ap}->{$_});
}
# Count the number of SSID served
$json->{num_wlan} = scalar @{$obj->{radio_table}};
$resp = $ua->get($url . '/api/s/' . $site . '/stat/sta');
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
foreach my $proto (@radio_proto){
$json->{$_ . $proto} = 0 foreach (qw/num_sta_ avg_rx_rate_ avg_tx_rate_/);
}
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
next if (not $entry->{ap_mac} or $entry->{ap_mac} ne $dev or $entry->{is_wired} == JSON::true);
foreach (@radio_proto){
if ($entry->{radio_proto} eq $_){
$json->{'num_sta_' . $_}++;
$json->{'avg_rx_rate_' . $_} += $entry->{rx_rate};
$json->{'avg_tx_rate_' . $_} += $entry->{tx_rate};
}
}
$json->{$_} += $entry->{$_} foreach (qw/rx_bytes tx_bytes rx_packets tx_packets/);
$json->{'avg_' . $_} += $entry->{$_} foreach (qw/satisfaction tx_power signal noise/);
}
# Now let's compute average values
$json->{'avg_' . $_} = ($json->{num_sta} == 0) ? undef : $json->{'avg_' . $_} / $json->{num_sta}
foreach (qw/satisfaction tx_power signal noise/);
foreach my $proto (@radio_proto){
$json->{'avg_' . $_ . '_rate_' . $proto} = ($json->{'num_sta_' . $proto} == 0) ?
undef : $json->{'avg_' . $_ . '_rate_' . $proto} / $json->{'num_sta_' . $proto}
foreach (qw/tx rx/);
}
} elsif ($obj->{type} eq 'usw'){
foreach (qw/rx_packets tx_packets
rx_bytes tx_bytes rx_errors tx_errors
rx_dropped tx_dropped/){
$json->{net_stats}->{$_} = $obj->{stat}->{sw}->{$_} if (defined $obj->{stat}->{sw}->{$_});
}
}
} elsif ($station) {
# Client is identified by MAC
$resp = $ua->get($url . '/api/s/' . $site . '/stat/sta/' . $station);
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
my $obj = from_json($resp->decoded_content)->{data}->[0];
my @client_base = qw/rx_packets tx_packets rx_bytes tx_bytes hostname last_seen ip authorized oui is_guest/;
foreach (@client_base){
$json->{$_} = $obj->{$_} || 0;
}
# Convert last_seen to relative
$json->{last_seen} = (defined $obj->{last_seen}) ? time - $obj->{last_seen} : time;
# For wireless stations, we gather some more info
if ($obj->{is_wired} == JSON::false){
my @client_wireless = qw/rx_rate tx_rate essid ap_mac tx_power radio_proto signal noise satisfaction/;
foreach (@client_wireless){
$json->{$_} = $obj->{$_} || 0;
}
# We have the MAC of the AP, let's try to find the name of this AP
$resp = $ua->get($url . '/api/s/' . $site . '/stat/device/' . $json->{ap_mac});
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
$json->{ap} = from_json($resp->decoded_content)->{data}->[0]->{name};
}
} elsif ($wlan) {
# Wlan is identified by ID
$resp = $ua->get($url . '/api/s/' . $site . '/rest/wlanconf/' . $wlan);
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
my $obj = from_json($resp->decoded_content)->{data}->[0];
foreach (qw/name security mac_filter_policy vlan/){
$json->{$_} = $obj->{$_};
}
# For boolean, we need to convert
foreach (qw/enabled is_guest mac_filter_enabled vlan_enabled/){
$json->{$_} = (defined $obj->{$_} and $obj->{$_} == JSON::PP::true) ? 1 : 0;
}
# Now, we need to count stations for each SSID
$resp = $ua->get($url . '/api/s/' . $site . '/stat/sta');
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
# Set default values to 0
$json->{num_sta} = 0;
$json->{'num_sta_' . $_} = 0 foreach (@radio_proto);
$json->{$_} = 0 foreach (qw/rx_bytes tx_bytes rx_packets tx_packets/);
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
next if (not $entry->{essid} or $entry->{essid} ne $json->{name} or $entry->{is_wired} == JSON::PP::true);
$json->{num_sta}++;
foreach (@radio_proto){
if ($entry->{radio_proto} eq $_){
$json->{'num_sta_' . $_}++;
$json->{'avg_rx_rate_' . $_} += $entry->{rx_rate};
$json->{'avg_tx_rate_' . $_} += $entry->{tx_rate};
}
}
$json->{$_} += $entry->{$_} foreach (qw/rx_bytes tx_bytes rx_packets tx_packets/);
$json->{'avg_' . $_} += $entry->{$_} foreach (qw/satisfaction tx_power signal noise/);
}
# Now let's compute average values
$json->{'avg_' . $_} = ($json->{num_sta} == 0) ? undef : $json->{'avg_' . $_} / $json->{num_sta}
foreach (qw/satisfaction tx_power signal noise/);
foreach my $proto (@radio_proto){
$json->{'avg_' . $_ . '_rate_' . $proto} = ($json->{'num_sta_' . $proto} == 0) ?
undef : $json->{'avg_' . $_ . '_rate_' . $proto} / $json->{'num_sta_' . $proto}
foreach (qw/tx rx/);
}
}
print to_json($json, { pretty => $pretty });

View File

@ -0,0 +1,45 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
my $vdostats = which('vdostats');
my $json = {};
my $pretty = 0;
my $volume = undef;
my $val = undef;
GetOptions(
'volume=s' => \$volume,
'value=s' => \$val,
'pretty' => \$pretty
);
if (not defined $vdostats){
print 'ZBX_NOTSUPPORTED';
exit 0;
}
if ($volume) {
if ($volume !~ m/^\w+$/){
die "Invalide volume name\n";
}
foreach my $line (qx($vdostats --all $volume)){
if ($line =~ m/^\s+([^:]+)\s+:\s+([\d\w]+)/){
my ($key,$val) = ($1,$2);
# Cleanup key
$key =~ s/\s+$//;
$key =~ s/\s+/_/g;
$key =~ s/\(|\)//g;
$key =~ s/%/pct/g;
$json->{lc $key} = $val;
}
}
} else {
print 'ZBX_NOTSUPPORTED';
exit 0;
}
if (defined $val) {
print $json->{$val} // 'ZBX_NOTSUPPORTED';
} else {
print to_json($json, { pretty => $pretty });
}
print "\n";

182
zabbix_scripts/check_zfs Normal file
View File

@ -0,0 +1,182 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use File::Which;
use Getopt::Long;
my $json = {};
my $pool = undef;
my $dataset = undef;
my $sanoidmon = undef;
my $stats = undef;
my $pretty = 0;
GetOptions(
"zpool|pool=s" => \$pool,
"dataset=s" => \$dataset,
"sanoid=s" => \$sanoidmon,
"stats=s" => \$stats,
"pretty" => \$pretty
);
my $zpool = which('zpool');
my $zfs = which('zfs');
my $sanoid = which('sanoid');
if (not $zpool or not $zfs){
print 'ZBX_NOTSUPPORTED';
exit 0;
}
if (defined $sanoidmon and not $sanoid){
die 'ZBX_NOTSUPPORTED';
}
if (defined $sanoidmon and not grep { $_ eq $sanoidmon } qw(snapshot capacity health)){
die 'ZBX_NOTSUPPORTED';
}
if (not $pool and not $dataset and not $sanoidmon and not $stats){
print <<_EOF;
Usage:
$0 [--zpool=<name>|--dataset=<fs zvol or snap>|--sanoid=<snapshot|capacity|health>]
_EOF
exit 1;
}
# Value map. For Zabbix, we want 0 instead of none
# We also prefer on/off represented as 1/0 as it's more efficient
my $map = {
18446744073709551615 => 0, # See https://github.com/zfsonlinux/zfs/issues/9306
none => 0,
on => 1,
off => 0
};
if ($pool){
foreach (qx($zpool get all $pool -p -H)){
chomp;
my @parse = split /\t+/, $_;
$json->{$parse[1]} = (defined $map->{$parse[2]}) ? $map->{$parse[2]} : $parse[2];
}
# Only gather error counters and IO stats once, not for every property line
$json->{errors} = get_zpool_errors($pool);
$json->{stats} = get_zpool_stats($pool);
} elsif ($dataset){
# Convert %40 back to @ (we send them as %40 in the discovery because @ is not allowed in item keys)
$dataset =~ s/%40/\@/g;
foreach (qx($zfs get all $dataset -p -H)){
chomp;
my @parse = split /\t+/, $_;
$json->{$parse[1]} = (defined $map->{$parse[2]}) ? $map->{$parse[2]} : $parse[2];
if ($parse[1] =~ m/compressratio$/){
# Remove trailing x for compressratio and refcompressratio as before 0.8.0 it can be like 1.23x
$json->{$parse[1]} =~ s/x$//;
}
}
} elsif ($sanoidmon){
print qx($sanoid --monitor-$sanoidmon);
exit $?;
} elsif ($stats){
if (not -e '/proc/spl/kstat/zfs/' . $stats){
print 'ZBX_NOTSUPPORTED';
exit 0;
}
open STATS, '</proc/spl/kstat/zfs/' . $stats;
while (<STATS>){
next unless (m/^(\w+)\s+4\s+(\d+)$/);
$json->{$1} = $2;
}
}
print to_json($json, { pretty => $pretty }) . "\n";
exit 0;
sub get_zpool_errors {
my $pool = shift;
my $errors = {
read_errors => 0,
write_errors => 0,
cksum_errors => 0
};
my $i = 0;
my $index = {};
foreach my $line (qx($zpool status $pool 2>/dev/null)){
# Output looks like
# pool: rpool
# state: ONLINE
# status: One or more devices has experienced an unrecoverable error. An
# attempt was made to correct the error. Applications are unaffected.
# action: Determine if the device needs to be replaced, and clear the errors
# using 'zpool clear' or replace the device with 'zpool replace'.
# see: http://zfsonlinux.org/msg/ZFS-8000-9P
# scan: scrub repaired 0B in 0h5m with 0 errors on Tue May 29 10:04:31 2018
# config:
#
# NAME STATE READ WRITE CKSUM
# rpool ONLINE 0 0 0
# mirror-0 ONLINE 0 0 0
# sda2 ONLINE 0 0 0
# sdb2 ONLINE 0 0 474
#
# errors: No known data errors
# We want to save status, action, scan and errors
if ($line =~ m/^\s*(scan|action|status|errors):\s+(\w+.*)/){
$errors->{$1} = $2;
$index->{$i} = $1;
} elsif ($line !~ /:/ and defined $index->{$i-1}){
# Here, we reconstitute multiline values (like status and action)
chomp($line);
$line =~ s/\s+/ /g;
$errors->{$index->{$i-1}} .= $line;
} elsif ($line =~ m/\s+[a-zA-Z0-9_\-]+\s+[A-Z]+\s+(?<read>\d+(\.\d+)?)(?<read_suffix>[KMT])?\s+(?<write>\d+(\.\d+)?)(?<write_suffix>[KMT])?\s+(?<cksum>\d+(\.\d+)?)(?<cksum_suffix>[KMT])?/){
# And here, we count the number of read, write and checksum errors
# Note that on ZoL 0.8.0 we could use zpool status -p to get rid of the suffixes
# But -p is not supported on 0.7 and earlier, so, we just convert them manually
$errors->{read_errors} += convert_suffix($+{'read'},$+{'read_suffix'});
$errors->{write_errors} += convert_suffix($+{'write'},$+{'write_suffix'});
$errors->{cksum_errors} += convert_suffix($+{'cksum'},$+{'cksum_suffix'});
}
$i++;
}
# Ensure every item returns something
$errors->{$_} ||= '' foreach (qw(scan action status errors));
return $errors;
}
# Error counter can be suffixed. Apply this suffix to get raw error numbers
sub convert_suffix {
my $val = shift;
my $suf = shift;
if (!$suf){
return $val;
} elsif ($suf eq 'K'){
$val *= 1000;
} elsif ($suf eq 'M') {
$val *= 1000000;
} elsif ($suf eq 'T') {
$val *= 1000000000;
}
return $val;
}
sub get_zpool_stats {
my $pool = shift;
my $stats = {};
open UPTIME, "</proc/uptime";
$_ = <UPTIME>;
chomp;
my ($uptime , undef) = split;
$uptime = int $uptime;
close UPTIME;
foreach my $line (qx($zpool iostat $pool -pH)){
if ($line =~ m/^$pool\s+\d+\s+\d+\s+(?<reads>\d+)\s+(?<writes>\d+)\s+(?<nread>\d+)\s+(?<nwritten>\d+)/){
# zpool iostat shows average IO since boot, so just multiply it
# by the uptime in seconds to get cumulated IO since boot
# Zabbix server will then be able to calculate the delta between two values
$stats->{$_} = $+{$_} * $uptime foreach (keys %+);
last;
}
}
return $stats;
}

View File

@ -0,0 +1,59 @@
#!/usr/bin/perl -w
use JSON;
use POSIX;
use Getopt::Long;
use Net::Domain qw(hostfqdn);
use Data::Dumper;
my $pretty = 0;
my $status = 'all';
GetOptions(
"pretty" => \$pretty,
"status=s" => \$status
);
if (defined $status and $status !~ m/^\w+$/){
die "Invalid status name\n";
}
my $zmprov = '/opt/zimbra/bin/zmprov';
my $zmcontrol = '/opt/zimbra/bin/zmcontrol';
my $hostname = hostfqdn();
# We need to switch to zimbra
my $uid = getuid();
my $gid = getgid();
my (undef,undef,$zimuid,$zimgid) = getpwnam('zimbra');
# If there's no zimbra user or no zmprov, return an unsupported item
if (not defined $zimuid or not defined $zimgid or not -e $zmprov){
print 'ZBX_NOTSUPPORTED';
exit 0;
}
# Switch to the zimbra user (set the gid first, while we still have the privileges to do so)
setgid($zimgid) if ($gid ne $zimgid);
setuid($zimuid) if ($uid ne $zimuid);
my $output = {};
if (defined $status){
foreach my $line (qx($zmcontrol status)){
if ($line =~ m/^\s+(\w+)(\swebapp)?\s+(Running|Stopped)/){
$output->{$1} = ($3 eq 'Running') ? 1 : 0;
}
}
if ($status eq 'all'){
print to_json($output, { pretty => $pretty });
} elsif (defined $output->{$status}){
print $output->{$status}
} else {
print 'ZBX_NOTSUPPORTED';
}
}

View File

@ -1,10 +1,23 @@
#!/usr/bin/perl
use lib "/usr/share/BackupPC/lib";
use lib "/usr/share/backuppc/lib";
use lib "/usr/local/BackupPC/lib";
use BackupPC::Lib;
use BackupPC::CGI::Lib;
use POSIX;
use JSON;
use Getopt::Long;
my $hosts = 1;
my $entities = 0;
my $pretty = 0;
GetOptions(
"hosts" => \$hosts,
"entities" => \$entities,
"pretty" => \$pretty
);
# We need to switch to backuppc UID/GID
my $uid = getuid();
@ -13,22 +26,41 @@ my (undef,undef,$bkpuid,$bkpgid) = getpwnam('backuppc');
setuid($bkpuid) if ($uid ne $bkpuid);
setgid($bkpgid) if ($gid ne $bkpgid);
my $bpc = BackupPC::Lib->new();
my $hosts = $bpc->HostInfoRead();
my $bpc = BackupPC::Lib->new();
my $hosts = $bpc->HostInfoRead();
my $mainConf = $bpc->ConfigDataRead();
my $json;
foreach my $host (keys %$hosts){
my $hostConf = $bpc->ConfigDataRead($host);
my $conf = { %$mainConf, %$hostConf };
my $period = ($conf->{FullPeriod} >= $conf->{IncrPeriod}) ? $conf->{IncrPeriod} : $conf->{FullPeriod};
my $status = ($conf->{BackupsDisable} eq 1) ? 'disabled':'enabled';
push @{$json->{data}},
{
"{#BPCHOST}" => $host,
"{#BPCPERIOD}" => $period,
"{#BPCSTATUS}" => $status,
};
@{$json->{data}} = ();
if ($entities) {
my %entities = ();
foreach my $host ( keys %$hosts ){
if ( $host =~ m/^(?:vm_)?([^_]+)_.*/ and $1 ne 'vm' ) {
$entities{$1}= 1;
}
}
push @{$json->{data}}, { '{#BPC_ENTITY}' => $_ } foreach ( keys %entities );
} elsif ($hosts){
foreach my $host ( keys %$hosts ){
my $hostConf = $bpc->ConfigDataRead($host);
my $conf = { %$mainConf, %$hostConf };
my $warning = $conf->{EMailNotifyOldBackupDays};
my $errors = ( defined $conf->{MaxXferError} ) ? $conf->{MaxXferError} : '0';
my $monitoring = $conf->{ZabbixMonitoring} || 1;
my $sizeTooBigFactor = $conf->{ZabbixSizeTooBigFactor} || 6;
my $sizeTooSmallFactor = $conf->{ZabbixSizeTooSmallFactor} || 3;
my $status = ( $conf->{BackupsDisable} gt 0 or $monitoring eq '0' ) ? '0' : '1';
push @{$json->{data}},
{
"{#BPCHOST}" => $host,
"{#BPCNOBACKUPWARNING}" => $warning,
"{#BPCMAXERROR}" => $errors,
"{#BPCSTATUS}" => $status,
"{#BPC_TOO_BIG_FACTOR}" => $sizeTooBigFactor,
"{#BPC_TOO_SMALL_FACTOR}" => $sizeTooSmallFactor,
};
}
}
print to_json($json);
print to_json( $json, { pretty => $pretty } );
exit(0);

View File

@ -1,30 +1,39 @@
#!/usr/bin/perl -w
use warnings;
use strict;
use Zabbix::Agent::Addons::Disks;
use JSON;
use File::Which;
use Getopt::Long;
my $pretty = 0;
GetOptions(
'pretty' => \$pretty
);
my $lsblk = which('lsblk');
opendir(my $dh, "/sys/block") or die "Couldn't open /sys/block: $!";
my @blocks = grep { $_ !~ m/^\./ } readdir($dh);
closedir($dh);
my $json;
foreach my $block (@blocks){
my $removable = 0;
my $size = 1;
if ( -e "/sys/block/$block/removable"){
open REMOVABLE, "/sys/block/$block/removable";
$removable = join "", <REMOVABLE>;
close REMOVABLE;
chomp($removable);
next if ($removable eq '1');
}
if ( -e "/sys/block/$block/size"){
open SIZE, "/sys/block/$block/size";
$size = join "", <SIZE>;
close SIZE;
chomp($size);
next if ($size eq '0');
}
my $dev = '/dev/' . $block;
push @{$json->{data}}, { "{#BLOCKDEVICE}" => $dev, "{#BLOCKSIZE}" => $size };
@{$json->{data}} = ();
if (defined $lsblk){
foreach my $line (qx($lsblk -o KNAME,TYPE,SIZE -r -n -b)){
my ($block,$type,$size) = split(/\s+/, $line);
push @{$json->{data}}, {
"{#BLOCKDEVICE}" => $block, # Compat with previous zabbix-agent-addons
"{#DEVNAME}" => $block, # New macro name for the native vfs.dev.discovery key in 4.4
"{#DEVTYPE}" => $type,
};
}
} else {
# Fallback if lsblk is not available
foreach my $block (Zabbix::Agent::Addons::Disks::list_block_dev()){
push @{$json->{data}}, {
"{#BLOCKDEVICE}" => $block,
"{#DEVNAME}" => $block,
"{#DEVTYPE}" => 'disk'
};
}
}
print to_json($json);
print to_json($json, { pretty => $pretty });
exit(0);

View File

@ -0,0 +1,68 @@
#!/usr/bin/perl
use warnings;
use strict;
use JSON;
use Getopt::Long;
use File::Which;
use Data::Dumper;
my $what = 'containers';
my $pretty = 0;
GetOptions(
'what=s' => \$what,
'pretty' => \$pretty
);
my $json = [];
my $docker = which('docker');
# If the docker cli is not available, terminate now
if (not defined $docker){
print $json . "\n";
exit(0);
}
my $format;
my $cmd;
if ($what =~ m/^containers?/){
$format = '{' .
'"{#DOCKER_CONTAINER_ID}":"{{ .ID }}",' .
'"{#DOCKER_CONTAINER_IMAGE}": "{{ .Image }}",' .
'"{#DOCKER_CONTAINER_NAME}":"{{ .Names }}",' .
'"{#DOCKER_CONTAINER_STATUS}":"{{ .Status }}"' .
'}';
$cmd = "$docker container list --all --format '$format'";
} elsif ($what =~ m/^networks?/){
$format = '{' .
'"{#DOCKER_NET_ID}":"{{ .ID }}",' .
'"{#DOCKER_NET_NAME}":"{{ .Name }}",' .
'"{#DOCKER_NET_DRIVER}":"{{ .Driver }}",' .
'"{#DOCKER_NET_SCOPE}":"{{ .Scope }}"' .
'}';
$cmd = "$docker network list --format '$format'";
} elsif ($what =~ m/^volumes?/){
$format = '{' .
'"{#DOCKER_VOL_NAME}":"{{ .Name }}",' .
'"{#DOCKER_VOL_DRIVER}":"{{ .Driver }}",' .
'"{#DOCKER_VOL_SCOPE}":"{{ .Scope }}"' .
'}';
$cmd = "$docker volume list --format '$format'";
} else {
print <<_EOF;
Usage: $0 --what=<item to discover> [--pretty]
with available items being:
* containers : list containers, including stopped ones
* networks : list networks
* volumes : list volumes
_EOF
exit 1;
}
foreach my $line (qx($cmd)){
chomp $line;
push @{$json}, from_json($line);
}
print to_json($json, { pretty => $pretty }) . "\n";
exit(0);
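For completeness, a minimal sketch of a discovery UserParameter for this script, with a hypothetical path and key name (both placeholders):

# Sketch only: path and key name are assumptions
UserParameter=docker.discovery[*],/etc/zabbix/scripts/discover_docker --what=$1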

26
zabbix_scripts/disco_drbd Normal file
View File

@ -0,0 +1,26 @@
#!/usr/bin/perl -w
use warnings;
use strict;
use File::Which;
use JSON;
my $json;
@{$json->{data}} = ();
my $drbdoverview = which('drbd-overview');
if ($drbdoverview){
open RES, "$drbdoverview |" || die "Couldn't execute $drbdoverview";
foreach my $l (<RES>){
if ($l =~ m{(\d+):(\w+)/\d+}){
push @{$json->{data}}, {
"{#DRBD_RES_ID}" => $1,
"{#DRBD_RES_NAME}" => $2
};
}
}
close RES;
}
print to_json($json);
exit(0);

View File

@ -0,0 +1,95 @@
#!/usr/bin/perl
use warnings;
use strict;
use JSON;
use Getopt::Long;
use LWP::UserAgent;
use HTTP::Request::Common;
use URI;
use Data::Dumper;
my $user = undef;
my $pass = undef;
my $url = 'http://localhost:9200';
my $certcheck = 1;
my $nodes = 0;
my $indices = 0;
my $pretty = 0;
my $json = [];
GetOptions (
'user:s' => \$user,
'password:s' => \$pass,
'url=s' => \$url,
'cert-check!' => \$certcheck,
'nodes' => \$nodes,
'indices' => \$indices,
'pretty' => \$pretty
);
if ($nodes and $indices){
die "--nodes and --indices are mutually exclusive\n";
}
my $uri = URI->new($url);
if (not defined $uri){
die "$url is not a valid URL\n";
}
# If connecting over http or if the host is localhost
# there's no need to check certificate
if ($uri->scheme eq 'http' or $uri->host =~ m/^localhost|127\.0\.0/){
$certcheck = 0;
}
my $sslopts = {};
if (not $certcheck){
$sslopts = {
verify_hostname => 0,
SSL_verify_mode => 0
}
}
my $ua = LWP::UserAgent->new(
ssl_opts => $sslopts
);
$ua->env_proxy;
if ($nodes){
foreach (@{make_request('/_cat/nodes?format=json&full_id&h=ip,role,master,name,id,version')}){
push @{$json}, {
'{#ES_NODE_NAME}' => $_->{name},
'{#ES_NODE_ROLE}' => $_->{role},
'{#ES_NODE_ID}' => $_->{id},
'{#ES_NODE_VERSION}' => $_->{version},
'{#ES_NODE_MASTER}' => $_->{master}
};
}
} elsif ($indices){
foreach (@{make_request('/_cat/indices?format=json')}){
push @{$json}, {
'{#ES_INDEX_NAME}' => $_->{index},
'{#ES_INDEX_STATUS}' => $_->{status},
'{#ES_INDEX_UUID}' => $_->{uuid}
};
}
}
print to_json($json, { pretty => $pretty });
sub make_request {
my $path = shift;
my $req_url = $url . $path;
my $req = GET $req_url;
if (defined $user and $user ne '' and defined $pass and $pass ne ''){
$req->authorization_basic($user, $pass);
}
my $resp = $ua->request($req);
die "Request to $req_url failed : " . $resp->message . "\n" if $resp->is_error;
return from_json($resp->decoded_content);
}

View File

@ -1,9 +1,8 @@
#!/usr/bin/perl
$first = 1;
print "{\n";
print "\t\"data\":[\n\n";
use JSON;
my $json;
@{$json->{data}} = ();
my $cmd;
my $re;
@ -36,19 +35,15 @@ for (`$cmd`){
chomp($t);
$critical = $t if ($t =~ m/^\d+$/);
}
$fsname =~ s!/!\\/!g;
print "\t,\n" if not $first;
$first = 0;
print "\t{\n";
print "\t\t\"{#FSNAME}\":\"$fsname\",\n";
print "\t\t\"{#FSTYPE}\":\"$fstype\"\n";
print "\t\t\"{#FSDEVICE}\":\"$block\"\n";
print "\t\t\"{#FSWARNTHRES}\":\"$warning\"\n";
print "\t\t\"{#FSCRITTHRES}\":\"$critical\"\n";
print "\t}\n";
push @{$json->{data}}, {
"{#FSNAME}" => $fsname,
"{#FSTYPE}" => $fstype,
"{#FSDEVICE}" => $block,
"{#FSWARNTHRES}" => $warning,
"{#FSCRITTHRES}" => $critical
};
}
print "\n\t]\n";
print "}\n";
print to_json($json);
exit(0);


@ -0,0 +1,121 @@
#!/usr/bin/perl -w
use strict;
use File::Which;
use Getopt::Long;
use JSON;
my $json;
@{$json->{data}} = ();
my $gluster = which('gluster');
my $lock = '/var/lock/gluster-zabbix.lock';
unless($gluster){
# Gluster is not installed, just return an empty JSON object
print to_json($json);
exit(0);
}
# Get an exclusive lock
open(LOCK, ">$lock") || die "Can't open $lock";
flock(LOCK, 2);
my $what = 'volumes';
GetOptions(
"what=s" => \$what,
);
sub usage (){
print <<"EOF";
Usage: $0 --what=[volumes|peers]
EOF
}
sub gluster($){
my $cmd = shift;
my $code = 256;
my @result = ();
# Loop to run gluster cmd as it can fail if two run at the same time
for (my $i = 0; ($code != 0 && $i < 10); $i++){
open (RES, "$cmd |")
|| die "error: Could not execute $cmd";
@result = <RES>;
close RES;
$code = $?;
sleep(1) unless ($code == 0);
}
die "error: Could not execute $cmd" unless ($code == 0);
return @result;
}
if ($what eq 'volumes'){
foreach my $line (gluster("$gluster vol info all")){
if ($line =~ m/^Volume\ Name:\ (\w+)$/){
my $vol = $1;
my ($type,$bricks,$uuid,$status,$transport) = ('unknown') x 5;
open (VOLUMEINFO, "$gluster vol info $vol |")
|| die "error: Could not execute gluster vol info $vol";
foreach my $info (<VOLUMEINFO>){
if ($info =~ m/^Type:\ (.*)$/){
$type = $1;
}
elsif ($info =~ m/^Volume\ ID:\ ([0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12})$/){
$uuid = $1;
}
elsif ($info =~ m/^Status:\ (\w+)$/){
$status = $1;
}
elsif ($info =~ m/^Transport-type:\ (\w+)$/){
$transport = $1;
}
elsif ($info =~ m/^Number\ of\ Bricks:\ \d+\ x\ \d+\ =\ (\d+)$/){
$bricks = $1;
}
}
push @{$json->{data}}, {
"{#GLUSTER_VOL_NAME}" => $vol,
"{#GLUSTER_VOL_TYPE}" => $type,
"{#GLUSTER_VOL_UUID}" => $uuid,
"{#GLUSTER_VOL_STATUS}" => $status,
"{#GLUSTER_VOL_TRANSPORT}" => $transport,
"{#GLUSTER_VOL_BRICKS}" => $bricks
};
}
}
}
elsif ($what eq 'peers'){
my $peerno = 0;
my ($host,$uuid,$status) = ('unknown') x 3;
foreach my $line (gluster("$gluster peer status")){
if ($line =~ m/^Number of Peers:\ (\d+)$/){
$peerno = $1;
}
elsif ($line =~ m/^Hostname:\ ([\w\.]+)$/){
$host = $1;
}
elsif ($line =~ m/Uuid:\ ([0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12})$/){
$uuid = $1;
}
elsif ($line =~ m/State:\ [\w\s]+\((\w+)\)$/){
$status = $1;
push @{$json->{data}}, {
"{#GLUSTER_PEER_HOST}" => $host,
"{#GLUSTER_PEER_UUID}" => $uuid,
"{#GLUSTER_PEER_STATUS}" => $status
};
}
}
}
else{
usage();
exit(1);
}
close(LOCK);
print to_json($json);
exit(0);
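As an assumed sample (actual gluster output can differ slightly by version), the peers branch parses blocks like:
Number of Peers: 1
Hostname: gluster2.example.com
Uuid: 9a3b2c1d-0e4f-4a5b-8c6d-7e8f9a0b1c2d
State: Peer in Cluster (Connected)
which yields one LLD entry with {#GLUSTER_PEER_HOST}, {#GLUSTER_PEER_UUID} and {#GLUSTER_PEER_STATUS} set to gluster2.example.com, the UUID above and Connected respectively.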


@ -0,0 +1,18 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use LWP::Simple;
use JSON;
my $json;
@{$json->{data}} = ();
my $status = get('http://127.0.0.1/server-status?auto');
if ($status){
push @{$json->{data}}, {"{#HTTPD_STATUS_URI}" => 'http://127.0.0.1/server-status'};
}
print to_json($json);
exit(0);


@ -1,6 +1,6 @@
#!/usr/bin/perl -w
use Linux::LVM;
use Zabbix::Agent::Addons::LVM;
use JSON;
my $what = $ARGV[0];
@ -8,37 +8,54 @@ my $what = $ARGV[0];
open STDERR, '>/dev/null';
my $json;
@{$json->{data}} = ();
if ($what eq "volumes"){
foreach my $group (get_volume_group_list()){
my %lvs = get_logical_volume_information($group);
foreach my $lv (keys %lvs){
push @{$json->{data}}, { "{#LVMVOL}" => "/dev/$group/$lv" };
my @vg = eval { get_volume_group_list() };
unless ($@){
if ($what eq "volumes"){
foreach my $group (@vg){
my %lvs = get_logical_volume_information($group);
foreach my $lv (keys %lvs){
$lv = ($lv =~ m!^/dev/$group!) ? $lv : "/dev/$group/$lv";
push @{$json->{data}}, { "{#LVMVOL}" => "$lv" };
}
}
}
elsif ($what eq "snapshots"){
foreach my $group (@vg){
my %lvs = get_logical_volume_information($group);
foreach my $lv (keys %lvs){
if (defined $lvs{$lv}->{allocated_to_snapshot}){
$lv = ($lv =~ m!^/dev/$group!) ? $lv : "/dev/$group/$lv";
push @{$json->{data}}, { "{#LVMSNAP}" => "$lv" };
}
}
}
}
elsif ($what eq "snapshots"){
foreach my $group (get_volume_group_list()){
my %lvs = get_logical_volume_information($group);
foreach my $lv (keys %lvs){
if (defined $lvs{$lv}->{allocated_to_snapshot}){
push @{$json->{data}}, { "{#LVMSNAP}" => "/dev/$group/$lv" };
}
}
elsif ($what eq "thin_pools"){
foreach my $group (@vg){
my %lvs = get_logical_volume_information($group);
foreach my $lv (keys %lvs){
if (defined $lvs{$lv}->{allocated_pool_data}){
$lv = ($lv =~ m!^/dev/$group!) ? $lv : "/dev/$group/$lv";
push @{$json->{data}}, { "{#LVMTHINP}" => "$lv" };
}
}
}
}
elsif ($what eq "groups"){
foreach my $group (get_volume_group_list()){
push @{$json->{data}}, { "{#LVMGRP}" => $group };
}
}
else{
}
elsif ($what eq "groups"){
foreach my $group (@vg){
push @{$json->{data}}, { "{#LVMGRP}" => $group }; }
}
else{
print <<"EOF";
Usage: $0 [volumes|snapshots|groups]
Usage: $0 [volumes|snapshots|thin_pools|groups]
EOF
exit 1;
}
}
print to_json($json);
exit(0);


@ -0,0 +1,37 @@
#!/usr/bin/perl
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
my $mpath = 1;
my $pretty = 0;
my $json = [];
GetOptions(
"mpath" => \$mpath,
"pretty" => \$pretty
);
my $multipath = which('multipath');
if (not defined $multipath){
print to_json($json, { pretty => $pretty });
exit 0;
}
my @dev = qx($multipath -l -v1);
# If command failed (eg no /etc/multipath.conf), then return an empty result
if ($? != 0){
print to_json($json, { pretty => $pretty });
exit 1;
}
foreach (@dev){
chomp;
push @{$json}, { '{#MPATH_DEV}' => $_ };
}
print to_json($json, { pretty => $pretty });
exit 0;


@ -0,0 +1,15 @@
#!/usr/bin/perl -w
use JSON;
opendir(my $dh, "/sys/class/net") or die "Couldn't open /sys/class/net: $!";
my @nics = grep { $_ !~ m/^\./ } readdir($dh);
closedir($dh);
my $json;
foreach my $nic (@nics){
next unless ($nic =~ m/^(\w+[\.:]?(\d+)?)$/);
$nic = $1;
push @{$json->{data}}, { "{#IFNAME}" => $nic};
}
print to_json($json) if (defined $json->{data});
exit(0);

zabbix_scripts/disco_nginx Executable file

@ -0,0 +1,18 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use LWP::Simple;
use JSON;
my $json;
@{$json->{data}} = ();
my $status = get('http://127.0.0.1/nginx-status');
if ($status){
push @{$json->{data}}, {"{#NGINX_STATUS_URI}" => 'http://127.0.0.1/nginx-status'};
}
print to_json($json);
exit(0);


@ -0,0 +1,13 @@
#!/usr/bin/perl -w
use JSON;
use Zabbix::Agent::Addons::UPS;
my $json;
@{$json->{data}} = ();
foreach my $ups (Zabbix::Agent::Addons::UPS::list_ups()){
push @{$json->{data}}, {"{#UPSNAME}" => $ups};
}
print to_json($json);
exit(0);


@ -0,0 +1,35 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
my $what = 'nodes';
my $pretty = 0;
GetOptions(
'what=s' => \$what,
'pretty' => \$pretty
);
my $pmgsh = which('pmgsh');
my $json = {};
@{$json->{data}} = ();
unless($pmgsh){
print to_json($json) . "\n";
exit 0;
}
if ($what eq 'domains'){
my $domains = from_json(qx($pmgsh get /config/domains 2>/dev/null));
foreach my $item (@{$domains}){
push @{$json->{data}}, {
'{#PMG_RELAY_DOMAIN}' => $item->{domain},
};
}
}
print to_json($json, { pretty => $pretty }) . "\n";


@ -0,0 +1,73 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
use Sys::Hostname;
my $what = 'nodes';
my $pretty = 0;
GetOptions(
'what=s' => \$what,
'pretty' => \$pretty
);
my $pvesh = which('pvesh');
my $json = {};
@{$json->{data}} = ();
unless($pvesh){
print to_json($json) . "\n";
exit 0;
}
# Are we using the new pvesh, for which we have to specify the output format?
my $pvesh_opt = (system("$pvesh get /version --output-format=json >/dev/null 2>&1") == 0) ? '--output-format=json' : '';
if ($what eq 'nodes'){
my $cluster_status = from_json(qx($pvesh get /cluster/status $pvesh_opt 2>/dev/null));
foreach my $item (@{$cluster_status}){
next if ($item->{type} ne 'node');
push @{$json->{data}}, {
'{#PVE_NODE_NAME}' => $item->{name},
'{#PVE_NODE_IP}' => $item->{ip},
'{#PVE_NODE_ID}' => $item->{nodeid},
'{#PVE_NODE_LOCAL}' => $item->{local}
};
}
} elsif ($what eq 'guests'){
my $guests = from_json(qx($pvesh get /cluster/resources --type=vm $pvesh_opt 2>/dev/null));
foreach my $guest (@{$guests}){
push @{$json->{data}}, {
'{#PVE_GUEST_ID}' => $guest->{vmid},
'{#PVE_GUEST_NODE}' => $guest->{node},
'{#PVE_GUEST_TYPE}' => $guest->{type},
'{#PVE_GUEST_NAME}' => $guest->{name},
'{#PVE_GUEST_TEMPLATE}' => $guest->{template}
};
}
} elsif ($what eq 'storage'){
my $stores = from_json(qx($pvesh get /storage $pvesh_opt 2>/dev/null));
foreach my $store (@{$stores}){
push @{$json->{data}}, {
'{#PVE_STOR_ID}' => $store->{storage},
'{#PVE_STOR_TYPE}' => $store->{type},
'{#PVE_STOR_STATUS}' => (($store->{disable}) ? 0 : 1),
'{#PVE_STOR_SHARED}' => ($store->{shared} || 0),
'{#PVE_STOR_CONTENT}' => $store->{content}
};
}
} elsif ($what eq 'pools'){
my $pools = from_json(qx($pvesh get /pools $pvesh_opt 2>/dev/null));
foreach my $pool (@{$pools}){
push @{$json->{data}}, {
'{#PVE_POOL_ID}' => $pool->{poolid},
'{#PVE_POOL_DESC}' => $pool->{comment}
};
}
}
print to_json($json, { pretty => $pretty }) . "\n";
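A minimal usage sketch, reusing the install path referenced later in this diff by the cache-priming helper (the guest values shown are illustrative):
/var/lib/zabbix/bin/disco_pve_sudo --what=guests --pretty
# returns something like {"data":[{"{#PVE_GUEST_ID}":100,"{#PVE_GUEST_NODE}":"pve1","{#PVE_GUEST_TYPE}":"qemu","{#PVE_GUEST_NAME}":"vm-test","{#PVE_GUEST_TEMPLATE}":0}]}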


@ -0,0 +1,31 @@
#!/usr/bin/perl -w
use strict;
use File::Which;
use JSON;
my $json;
@{$json->{data}} = ();
my $cli = which('hpacucli') || which('ssacli');
# hpacucli or ssacli utility is needed
if (not defined $cli){
print to_json($json);
exit(0);
}
open( CLI, "$cli controller all show status|" )
or die "An error occured while running $cli: $!";
foreach my $line (<CLI>){
if ( $line =~ m/Another instance of hpacucli is running! Stop it first\./i ){
die "Another instance of hpacucli is running\n";
}
elsif ( $line =~ m/(.*) in Slot (\d+)/i ) {
push @{$json->{data}}, {"{#MODEL}" => $1, "{#SLOT}" => $2};
}
}
close CLI;
print to_json($json);
exit(0);


@ -0,0 +1,18 @@
#!/usr/bin/perl -w
use strict;
use JSON;
my $json;
@{$json->{data}} = ();
if (!-x '/usr/bin/systemd-detect-virt' || system('/usr/bin/systemd-detect-virt', '-qc') != 0){
open FILE, "< /proc/mdstat" or die "Can't open /proc/mdadm : $!";
foreach my $line (<FILE>) {
next unless ($line =~ m/^(md\d+)+\s*:/);
my ($md,undef,$status,$level) = split(/\ /, $line);
push @{$json->{data}}, {"{#DEVICE}" => $md, "{#STATUS}" => $status, "{#LEVEL}" => $level};
}
}
print to_json($json);
exit(0);
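For reference, an assumed /proc/mdstat line such as
md0 : active raid1 sdb1[1] sda1[0]
is split on spaces above, producing {#DEVICE}=md0, {#STATUS}=active and {#LEVEL}=raid1.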


@ -0,0 +1,40 @@
#!/usr/bin/perl -w
use strict;
use JSON;
my $json;
@{$json->{data}} = ();
my $megacli = undef;
if (-x '/opt/MegaRAID/MegaCli/MegaCli64'){
$megacli = '/opt/MegaRAID/MegaCli/MegaCli64';
}
elsif (-x '/opt/MegaRAID/MegaCli/MegaCli'){
$megacli = '/opt/MegaRAID/MegaCli/MegaCli';
}
unless($megacli){
print to_json($json);
exit(0);
}
my $adapters = 0;
open (ADPCOUNT, "$megacli -adpCount -NoLog |")
|| die "error: Could not execute MegaCli -adpCount";
while (<ADPCOUNT>) {
if ( m/Controller Count:\s*(\d+)/ ) {
$adapters = $1;
last;
}
}
close ADPCOUNT;
if ($adapters > 0){
push @{$json->{data}}, {"{#CONTROLLERNO}" => $adapters};
}
print to_json($json);
exit(0);


@ -0,0 +1,34 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
my $what = 'ou';
my $pretty = 0;
my $json = [];
my $samba_tool = which('samba-tool');
if (not defined $samba_tool){
print to_json($json);
exit 0;
}
GetOptions(
'what=s' => \$what,
'pretty' => \$pretty
);
if ($what eq 'ou'){
foreach (qx($samba_tool ou list)){
chomp;
push @{$json}, {
'{#SAMBA_OU}' => $_
}
}
}
print to_json($json, { pretty => $pretty });


@ -1,21 +1,41 @@
#!/usr/bin/perl -w
use Config::Simple;
use Getopt::Long;
use JSON;
my $type = 'temp';
GetOptions(
"type:s" => \$type
);
# empty means temp
$type = ($type eq '') ? 'temp' : $type;
my $json;
@{$json->{data}} = ();
open SENSORS, ('</etc/zabbix/sensors.conf') ||
die "Couldn't open /etc/zabbix/sensors.conf: $!\n";
foreach (<SENSORS>){
next unless (/^(\w+)(\s+)?=(\s+)?(.*)!(\-?\d+)!(\-?\d+)$/);
my ($sensor,$threshigh,$threslow) = ($1,$5,$6);
push @{$json->{data}}, {
"{#SENSORNAME}" => $sensor,
"{#SENSORTHRESHIGH}" => $threshigh,
"{#SENSORTHRESLOW}" => $threslow
};
my $cfg = new Config::Simple;
$cfg->read('/etc/zabbix/sensors.ini');
$cfg->syntax('ini');
my %sensors = ();
foreach my $k (keys %{$cfg->vars}){
$k =~ s/\..*$//;
$sensors{$k} = 1 unless $sensors{$k};
}
close SENSORS;
print to_json($json) if (defined $json->{data});
foreach my $k (keys %sensors){
my $sensor = $cfg->get_block($k);
next if ($type ne 'all' && $type ne $sensor->{type});
push @{$json->{data}}, {
"{#SENSORNAME}" => $k,
"{#SENSORDESC}" => $sensor->{description},
"{#SENSORTHRESHIGH}" => $sensor->{threshold_high},
"{#SENSORTHRESLOW}" => $sensor->{threshold_low},
"{#SENSORTYPE}" => $sensor->{type},
"{#SENSORUNIT}" => $sensor->{unit}
};
}
print to_json($json);
exit(0);


@ -1,13 +1,15 @@
#!/bin/sh
#!/usr/bin/perl -w
echo -e "{\n\t\"data\":[\n\n"
for DISK in $(smartctl --scan-open | cut -d' ' -f1); do
smartctl -A $DISK >/dev/null 2>&1
if [ $? -eq 0 ]; then
DISK=$(echo $DISK | sed -e 's|/|\\/|g')
echo -e "\t{\n"
echo -e "\t\t\"{#SMARTDRIVE}\":\"$DISK\""
echo -e "\t},"
fi
done
echo -e "\n\t]\n}\n"
use warnings;
use strict;
use Zabbix::Agent::Addons::Disks;
use JSON;
my $json;
@{$json->{data}} = ();
foreach my $block (Zabbix::Agent::Addons::Disks::list_smart_hdd({ skip_remouvable => 1 })){
push @{$json->{data}}, { "{#SMARTDRIVE}" => "/dev/$block" };
}
print to_json($json);
exit(0);


@ -0,0 +1,18 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use LWP::Simple;
use JSON;
my $json;
@{$json->{data}} = ();
my $status = get('http://127.0.0.1:3128/squid-internal-mgr/info');
if ($status){
push @{$json->{data}}, {"{#SQUID_STATUS_URI}" => 'http://127.0.0.1:3128/squid-internal-mgr/info'};
}
print to_json($json);
exit(0);


@ -0,0 +1,61 @@
#!/usr/bin/perl
use strict;
use JSON;
use Getopt::Long;
use Data::Dumper;
use File::Which;
my $pretty = 0;
GetOptions(
'pretty' => \$pretty
);
my $smartctl = which('smartctl');
my $json = [];
sub print_out {
print to_json($json, { pretty => $pretty });
}
if (not defined $smartctl){
print_out();
exit 0;
}
my $smart_scan = from_json(qx($smartctl --scan-open --json=c));
if (not defined $smart_scan){
print_out();
exit 0;
}
foreach my $device (@{$smart_scan->{devices}}){
my ($model, $sn, $has_smart) = ('', '', 0);
my $smart_info = from_json(qx($smartctl -i $device->{name} -d $device->{type} --json=c));
if (defined $smart_info){
$model = $smart_info->{model_name};
$sn = $smart_info->{serial_number};
$has_smart = (
$smart_info->{in_smartctl_database} or (
defined $smart_info->{smart_support} and
$smart_info->{smart_support}->{available} and
$smart_info->{smart_support}->{enabled}
)
) ? 1 : 0;
}
push @{$json}, {
'{#STOR_DEV_NAME}' => $device->{name},
'{#STOR_DEV_DESC}' => $device->{info_name},
'{#STOR_DEV_TYPE}' => $device->{type},
'{#STOR_DEV_PROTO}' => $device->{protocol},
'{#STOR_DEV_MODEL}' => $model,
'{#STOR_DEV_SN}' => $sn,
'{#STOR_DEV_SMART}' => int $has_smart
};
}
print_out();
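A run with --pretty emits one object per device found by smartctl --scan-open; the values below are purely illustrative:
{
"{#STOR_DEV_NAME}" : "/dev/sda",
"{#STOR_DEV_DESC}" : "/dev/sda [SAT]",
"{#STOR_DEV_TYPE}" : "sat",
"{#STOR_DEV_PROTO}" : "ATA",
"{#STOR_DEV_MODEL}" : "Samsung SSD 870 EVO 500GB",
"{#STOR_DEV_SN}" : "S0XXXXXXXXXXXX",
"{#STOR_DEV_SMART}" : 1
}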

zabbix_scripts/disco_unifi Executable file

@ -0,0 +1,141 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use LWP::UserAgent;
use HTTP::Cookies;
use URI;
use Data::Dumper;
umask 077;
my $user = 'zabbix';
my $pass = 'secret';
my $site = 'default';
my $url = 'https://localhost:8443';
my $certcheck = 1;
my $what = 'devices';
my $type = 'all';
my $pretty = 0;
my $json = {};
@{$json->{data}} = ();
GetOptions (
'user=s' => \$user,
'password|p=s' => \$pass,
'site=s' => \$site,
'url=s' => \$url,
'cert-check!' => \$certcheck,
'what=s' => \$what,
'type:s' => \$type,
'pretty' => \$pretty
);
# An empty type is the same as all
$type = 'all' if ($type eq '');
# If connecting to localhost, no need to check certificate
my $uri = URI->new($url);
if ($uri->host =~ m/^(localhost|127\.0\.0)/){
$certcheck = 0;
}
my $site_id;
my $resp;
my $username = $ENV{LOGNAME} || $ENV{USER} || getpwuid($<);
my $cj = HTTP::Cookies->new(
file => "/tmp/.unifi_$username.txt",
autosave => 1,
ignore_discard => 1
);
my $sslopts = {};
if (not $certcheck){
$sslopts = { verify_hostname => 0, SSL_verify_mode => 0 }
}
my $ua = LWP::UserAgent->new(
ssl_opts => $sslopts,
cookie_jar => $cj
);
# Check if we need to login
$resp = $ua->get($url . '/api/self/sites');
if ($resp->is_error){
# Login on the API
$resp = $ua->post(
$url . '/api/login',
Content => to_json({ username => $user, password => $pass }),
Content_Type => 'application/json;charset=UTF-8'
);
die "Login failed: " . $resp->message . "\n" if $resp->is_error;
$resp = $ua->get($url . '/api/self/sites');
die $resp->message . "\n" if $resp->is_error;
}
foreach (@{from_json($resp->decoded_content)->{data}}){
if ($_->{name} eq $site || $_->{desc} eq $site){
$site_id = $_->{_id};
# If site is referenced by description, translate it to name
$site = $_->{name} if ($_->{name} ne $site);
last;
}
}
die "Site $site not found\n" unless ($site_id);
if ($what eq 'devices'){
$resp = $ua->get($url . '/api/s/' . $site . '/stat/device');
die $resp->message . "\n" if $resp->is_error;
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
next if ($type ne 'all' && $entry->{type} ne $type);
push @{$json->{data}}, {
'{#UNIFI_DEV_ID}' => $entry->{device_id},
'{#UNIFI_DEV_ADOPTED}' => $entry->{adopted},
'{#UNIFI_DEV_MODEL}' => $entry->{model},
'{#UNIFI_DEV_NAME}' => $entry->{name} || $entry->{mac},
'{#UNIFI_DEV_MAC}' => $entry->{mac},
'{#UNIFI_DEV_TYPE}' => $entry->{type}
};
}
} elsif ($what eq 'stations'){
$resp = $ua->get($url . '/api/s/' . $site . '/stat/sta');
die $resp->message . "\n" if $resp->is_error;
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
# Ignore other sites
next if ($entry->{site_id} ne $site_id);
next if ($type eq 'wireless' and $entry->{is_wired} == JSON::true);
next if ($type eq 'wired' and $entry->{is_wired} == JSON::false);
push @{$json->{data}}, {
'{#UNIFI_STA_ID}' => $entry->{_id},
'{#UNIFI_STA_NAME}' => (defined $entry->{hostname}) ? $entry->{hostname} : $entry->{mac},
'{#UNIFI_STA_MAC}' => $entry->{mac}
};
}
} elsif ($what eq 'networks'){
$resp = $ua->get($url . '/api/s/' . $site . '/rest/networkconf');
die $resp->message . "\n" if $resp->is_error;
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
# Ignore other sites
next if ($entry->{site_id} ne $site_id);
next if ($type ne 'all' and $entry->{purpose} ne $type);
push @{$json->{data}}, {
'{#UNIFI_NET_ID}' => $entry->{_id},
'{#UNIFI_NET_NAME}' => $entry->{name}
};
}
} elsif ($what eq 'wlan') {
$resp = $ua->get($url . '/api/s/' . $site . '/rest/wlanconf');
die $resp->message . "\n" if $resp->is_error;
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
push @{$json->{data}}, {
'{#UNIFI_WLAN_ID}' => $entry->{_id},
'{#UNIFI_WLAN_NAME}' => $entry->{name}
};
}
}
print to_json($json, { pretty => $pretty });
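A hypothetical invocation (controller URL and credentials are placeholders) listing the wireless stations of the default site:
./disco_unifi --url=https://unifi.example.com:8443 --user=zabbix --password=secret --site=default --what=stations --type=wireless --pretty
# add --no-cert-check when the controller uses a self-signed certificate and is not reached via localhost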


@ -0,0 +1,31 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use File::Which;
use Getopt::Long;
my $json;
@{$json->{data}} = ();
my $what = 'volumes';
my $pretty = 0;
GetOptions(
'what=s' => \$what,
'pretty' => \$pretty
);
my $vdostats = which('vdostats');
if (defined $vdostats) {
foreach my $line (qx($vdostats)) {
if ($line =~ m|^/dev/mapper/([^\s]+)|) {
push @{$json->{data}}, {
'{#VDO_VOL}' => $1
};
}
}
}
print to_json($json, { pretty => $pretty }) . "\n";

zabbix_scripts/disco_zfs Normal file

@ -0,0 +1,77 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use File::Which;
use Getopt::Long;
my $json;
@{$json->{data}} = ();
my $zpool = which('zpool');
my $zfs = which('zfs');
my $sanoid = which('sanoid');
if (not $zpool or not $zfs){
print 'ZBX_NOTSUPPORTED';
exit(0);
}
my $pools = 1;
my $fs = 0;
my $zvol = 0;
my $snap = 0;
my $sanoidmon = 0;
my $arcstats = 0;
my $pretty = 0;
GetOptions(
"pools" => \$pools,
"fs|filesystems" => \$fs,
"zvols|volumes" => \$zvol,
"snapshots" => \$snap,
"sanoid" => \$sanoidmon,
"arcstats" => \$arcstats,
"pretty" => \$pretty
);
if ($fs or $zvol or $snap or $sanoidmon or $arcstats){
$pools = 0;
}
if ($pools + $fs + $zvol + $snap + $sanoidmon + $arcstats != 1){
die "One and only one type of discovery should be provided\n";
}
if ($sanoidmon and not $sanoid){
print to_json($json);
exit 0;
}
if ($pools){
foreach (qx($zpool list -H -o name)){
chomp;
push @{$json->{data}}, { '{#ZPOOL}' => $_ };
}
} elsif ($fs){
foreach (qx($zfs list -H -o name -t filesystem)){
chomp;
push @{$json->{data}}, { '{#ZFS_FS}' => $_ };
}
} elsif ($zvol){
foreach (qx($zfs list -H -o name -t volume)){
chomp;
push @{$json->{data}}, { '{#ZFS_ZVOL}' => $_ };
}
} elsif ($snap){
foreach (qx($zfs list -H -o name -t snap)){
chomp;
# Replace @ with %40 as @ is not allowed in item key names
# It will be converted back to @ by the check_zfs script
$_ =~ s/\@/%40/g;
push @{$json->{data}}, { '{#ZFS_SNAP}' => $_ };
}
} elsif ($sanoidmon){
push @{$json->{data}}, { '{#ZFS_SANOID}' => $_ } foreach (qw(snapshot));
} elsif ($arcstats){
push @{$json->{data}}, { '{#ZFS_STATS}' => 'arcstats' };
}
print to_json($json, { pretty => $pretty });
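Usage sketch (the dataset and snapshot names are illustrative):
./disco_zfs --snapshots --pretty
# a snapshot named tank/data@daily-2023-12-21 is reported as {"{#ZFS_SNAP}":"tank/data%40daily-2023-12-21"}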


@ -0,0 +1,63 @@
#!/usr/bin/perl -w
use JSON;
use POSIX;
use Getopt::Long;
use Net::Domain qw(hostfqdn);
use Data::Dumper;
my $json = [];
my $pretty = 0;
my $services = 1;
my $servers = 0;
GetOptions(
"pretty" => \$pretty,
"services" => \$services,
"servers" => \$servers
);
if ($servers) {
$services = 0;
}
my $uid = getuid();
my $gid = getgid();
my (undef,undef,$zimuid,$zimgid) = getpwnam('zimbra');
my $zmprov = '/opt/zimbra/bin/zmprov';
my $hostname = hostfqdn();
# If there's no zimbra user or no zmprov, just return an empty list
if (not defined $zimuid or not defined $zimgid or not -e $zmprov){
print to_json($json);
exit;
}
# Switch to Zimbra user
setgid($zimgid) if ($gid ne $zimgid);
setuid($zimuid) if ($uid ne $zimuid);
if ($services){
# zmconfigd is always enabled and should be running
push @{$json}, {
'{#ZM_SERVICE}' => 'zmconfigd'
};
foreach my $service (qx($zmprov getServer $hostname zimbraServiceEnabled)){
if ($service =~ m/^zimbraServiceEnabled:\s+(\w+)/){
push @{$json}, {
'{#ZM_SERVICE}' => $1
};
}
}
} elsif ($servers){
foreach my $server (qx($zmprov getAllServers)){
chomp $server;
push @{$json}, {
'{#ZM_SERVER}' => $server
};
}
}
print to_json($json, { pretty => $pretty });


@ -0,0 +1,47 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use Config::Simple '-strict';
use JSON;
my $old = shift;
$old ||= '/etc/zabbix/sensors.conf';
my $new = '/etc/zabbix/sensors.ini';
my $sensors = {};
my $units = {
temp => '°C',
fan => 'rpm',
power => 'W'
};
open(OLDSENSORS, "<$old")
or die "Couldn't open $old: $!\n";
foreach (<OLDSENSORS>){
next unless (/^(\w+)(\s+)?=(\s+)?(.*)!(\-?\d+)!(\-?\d+)(!(\w+))?$/);
my ($sensor,$cmd,$threshigh,$threslow,$type) = ($1,$4,$5,$6,$8);
$type ||= 'temp';
$sensors->{$sensor} = {
description => $sensor,
cmd => $cmd,
threshold_high => $threshigh,
threshold_low => $threslow,
type => $type,
unit => $units->{$type}
};
}
my $cfg = new Config::Simple(syntax => 'ini');
foreach my $k (keys %$sensors){
$cfg->set_block($k, $sensors->{$k});
}
$cfg->write($new);
rename $old, $old . '.bak';
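As a worked example (the sensor name and command path are made up), an old /etc/zabbix/sensors.conf line of the form name = command!high!low[!type], such as:
cputemp = /var/lib/zabbix/bin/get_cpu_temp!80!70!temp
is converted into the following /etc/zabbix/sensors.ini block (modulo the exact quoting applied by Config::Simple):
[cputemp]
description=cputemp
cmd=/var/lib/zabbix/bin/get_cpu_temp
threshold_high=80
threshold_low=70
type=temp
unit=°C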


@ -0,0 +1,403 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use Config::Simple '-strict';
use Getopt::Long;
use File::Which;
use File::Basename;
use Zabbix::Agent::Addons::Disks;
use Zabbix::Agent::Addons::UPS;
# Output file
my $output = undef;
# When a threshold can be automatically detected,
# you may want to be notified before it's reached, so you can
# set a margin which will be subtracted from the real threshold
my $temp_margin = '20';
my $temp_hd_margin = '10';
my $pwr_margin = '200';
my $pwr_rel_margin = '20';
# This value will be subtracted from the higher threshold to define the low one
# so you can have hysteresis to prevent flip-flop
my $temp_hyst = '10';
my $temp_hd_hyst = '5';
my $temp_ups_hyst = '5';
my $pwr_hyst = '200';
my $pwr_rel_hyst = '10';
# Default threshold if not detected
my $def_temp_thres_high = '50';
my $def_hd_temp_thres_high = '50';
my $def_ups_temp_thres_high = '40';
my $def_fan_thres_high = '1000';
my $def_fan_thres_low = '700';
my $def_pwr_thres_high = '1000';
my $def_pwr_rel_thres_high = '80';
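# Worked example with the defaults above (the reported value is an assumption, not
# taken from a specific board): if ipmitool reports "Upper critical : 95.000" for a
# temperature sensor and no "Upper non-critical" line, the generated thresholds are
# threshold_high = 95 - temp_margin (20) = 75 and
# threshold_low = threshold_high - temp_hyst (10) = 65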
GetOptions(
"output=s" => \$output,
"temp-margin=i" => \$temp_margin,
"temp-hd-margin=i" => \$temp_hd_margin,
"pwr-margin=i" => \$pwr_margin,
"pwr-rel-margin=i" => \$pwr_rel_margin,
"temp-hyst=i" => \$temp_hyst,
"temp-hd-hyst=i" => \$temp_hd_hyst,
"temp-ups-hyst=i" => \$temp_ups_hyst,
"pwr-hyst=i" => \$pwr_hyst,
"pwr-rel-hys=i" => \$pwr_rel_hyst
);
sub usage(){
print<<"_EOF";
Usage: $0 --output=/etc/zabbix/sensors.ini
_EOF
}
unless ($output){
usage();
exit 1;
}
# Path
my $ipmitool = which('ipmitool');
my $smartctl = which('smartctl');
my $lmsensor = which('sensors');
my $upsc = which('upsc');
my $cfg = new Config::Simple(syntax => 'ini');
my $sensors = {};
# Try to detect IPMI sensors
if ($ipmitool && -x $ipmitool){
# First check for temperature sensors
my @lines = qx($ipmitool sdr type Temperature 2>/dev/null);
if ($? == 0){
SENSOR: foreach my $l (@lines){
chomp $l;
# Looks like
# Inlet Temp | 04h | ok | 7.1 | 25 degrees C
if ($l !~ m/^(\w+[\s\w\/\-]+?\w+)\s*\|.*\|\s*([\w\.\s]+)\s*\|.*\|\s*([\-\w\.\s]+)$/){
next SENSOR;
}
my $name = $1;
my $sensor = {};
my @details = qx($ipmitool sdr get '$name' 2>/dev/null);
if ($? != 0){
print "Couldn't get detail for sensor $name\n";
next SENSOR;
}
my $val = undef;
foreach my $d (@details){
chomp $d;
if ($d =~ m/^\s*Sensor\sReading\s*:\s*(\-?\w+)/){
$val = $1;
print "Sensor $name has value: $val\n";
if ($val !~ m/^\-?\d+$/){
print "Skipping sensor $name, couldn't parse its value: $val\n";
next SENSOR;
}
}
elsif ($d =~ m/^\s*Upper\scritical\s*:\s*(\-?\d+(\.\d+))/){
$sensor->{threshold_high} = $1-$temp_margin;
}
elsif ($d =~ m/^\s*Upper\snon\-critical\s*:\s*(\-?\d+(\.\d+))/){
$sensor->{threshold_low} = $1-$temp_margin;
}
}
# Another loop to check for Normal max if Upper critical wasn't found
if (!$sensor->{threshold_high}){
foreach my $d (@details){
chomp $d;
if ($d =~ m/^\s*Normal\sMaximum\s*:\s*(\-?\d+(\.\d+))/){
$sensor->{threshold_high} = $1-$temp_margin;
}
}
}
next SENSOR unless $val;
$sensor->{threshold_low} ||= ($sensor->{threshold_high}) ? $sensor->{threshold_high}-$temp_hyst : $def_temp_thres_high-$temp_hyst;
$sensor->{threshold_high} ||= $def_temp_thres_high;
$sensor->{threshold_high} =~ s/\.0+$//;
$sensor->{threshold_low} =~ s/\.0+$//;
$sensor->{description} = $name;
$sensor->{type} = 'temp';
$sensor->{unit} = '°C';
$sensor->{cmd} = "$ipmitool sdr get '$name' 2>/dev/null | perl -ne 'if (/Sensor Reading\\s*:\\s*([^\\s]+)/) { print \"\$1\\n\"; last }'";
my $id = sensor_name($name);
$sensors->{$id} = $sensor;
print "Found a temperature sensor using IPMI: $name\n";
}
}
# Now check for Fan, nearly the same as Temp, but
# * We try to detect the unit
# * threshold handling is not the same
@lines = qx($ipmitool sdr type Fan 2>/dev/null);
if ($? == 0){
SENSOR: foreach my $l (@lines){
chomp $l;
if ($l !~ m/^(\w+[\s\w]+?\w+)\s*\|.*\|\s*([\w\.\s]+)\s*\|.*\|\s*([\-\w\.\s]+)$/){
next SENSOR;
}
my $name = $1;
my $value = $3;
my $sensor = {};
my @details = qx($ipmitool sdr get '$name' 2>/dev/null);
if ($? != 0){
print "Couldn't get detail for sensor $name\n";
next SENSOR;
}
my $val = undef;
foreach my $d (@details){
chomp $d;
if ($d =~ m/^\s*Sensor\sReading\s*:\s*(\d+(\.\d+)?)/){
$val = $1;
if ($val !~ m/^\d+(\.\d+)?$/){
print "Skipping sensor $name, couldn't parse its value: $val\n";
next SENSOR;
}
}
elsif ($d =~ m/^\s*Lower\scritical\s*:\s*(\d+(\.\d+))/){
$sensor->{threshold_low} = $1-$temp_margin;
}
elsif ($d =~ m/^\s*Lower\snon\-critical\s*:\s*(\d+(\.\d+))/){
$sensor->{threshold_high} = $1-$temp_margin;
}
}
next SENSOR unless $val;
$sensor->{threshold_high} ||= $def_fan_thres_high;
$sensor->{threshold_low} ||= $def_fan_thres_low;
$sensor->{threshold_high} =~ s/\.0+$//;
$sensor->{threshold_low} =~ s/\.0+$//;
$sensor->{description} = $name;
$sensor->{type} = 'fan';
$sensor->{unit} = ($value =~ m/percent|%/ || $val < 100) ? '%' : 'rpm';
$sensor->{cmd} = "$ipmitool sdr get '$name' 2>/dev/null | perl -ne 'if (/Sensor Reading\\s*:\\s*([^\\s]+)/) { print \"\$1\\n\"; last }'";
my $id = sensor_name($name);
$sensors->{$id} = $sensor;
print "Found a fan sensor using IPMI: $name\n";
}
}
# Now look for power information
@lines = qx($ipmitool sdr type 'Current' 2>/dev/null);
if ($? == 0){
SENSOR: foreach my $l (@lines){
chomp $l;
if ($l !~ m/^(\w+[\s\w]+?\w+(\s%)?)\s*\|.*\|\s*([\w\.\s]+)\s*\|.*\|\s*([\-\w\.\s]+)$/){
print "Skiping line $l\n";
next SENSOR;
}
my $name = $1;
my $value = $4;
my $sensor = {};
if ($name =~ m/(Power)|(Pwr)|(Consumption)|(PS\d+\sCurr\sOut)/i || $value =~ m/W(att)?/i){
my @details = qx($ipmitool sdr get '$name' 2>/dev/null);
if ($? != 0){
print "Couldn't get detail for sensor $name\n";
next SENSOR;
}
my $val = undef;
my $unit = ($name =~ m/%/) ? '%' : 'Watt';
foreach my $d (@details){
chomp $d;
if ($d =~ m/^\s*Sensor\sReading\s*:\s*(\w+)/){
$val = $1;
if ($val !~ m/^\d+$/){
print "Skipping sensor $name, couldn't parse its value: $val\n";
next SENSOR;
}
}
elsif ($d =~ m/^\s*Upper\scritical\s*:\s*(\d+(\.\d+)?)/){
$sensor->{threshold_high} = ($unit eq '%') ? $1-$pwr_rel_margin : $1-$pwr_margin;
}
elsif ($d =~ m/^\s*Upper\snon\-critical\s*:\s*(\d+(\.\d+)?)/){
$sensor->{threshold_low} = ($unit eq '%') ? $1-$pwr_rel_margin : $1-$pwr_margin;
}
}
next SENSOR unless $val;
$sensor->{threshold_high} ||= ($unit eq '%') ? $def_pwr_rel_thres_high : $def_pwr_thres_high;
$sensor->{threshold_low} ||= ($unit eq '%') ? $def_pwr_rel_thres_high-$pwr_rel_hyst : $def_pwr_thres_high-$pwr_hyst;
$sensor->{threshold_high} =~ s/\.0+$//;
$sensor->{threshold_low} =~ s/\.0+$//;
$sensor->{description} = $name;
$sensor->{type} = 'power';
$sensor->{unit} = ($name =~ m/%/) ? '%' : 'Watt';
$sensor->{cmd} = "$ipmitool sdr get '$name' 2>/dev/null | perl -ne 'if (/Sensor Reading\\s*:\\s*([^\\s]+)/) { print \"\$1\\n\"; last }'";
my $id = sensor_name($name);
$sensors->{$id} = $sensor;
print "Found a power sensor using IPMI: $name\n";
}
}
}
}
# Try to detect lm_sensors, using the sensors command
if ($lmsensor && -x $lmsensor){
my @lines = qx($lmsensor);
if ($? == 0){
SENSOR: foreach my $l (@lines){
chomp $l;
# Looks like
# temp1: +27.8°C (crit = +119.0°C)
# or
# Core 0: +36.0°C (high = +80.0°C, crit = +100.0°C)
if ($l !~ m/^(\w+[\s\w]+?):\s*\+?(\d+)(\.\d+)?°C\s*(.*)$/){
next SENSOR;
}
my $name = $1;
my $val = $2;
my $thr = $4;
my $sensor = {};
if ($val !~ m/^\-?\d+$/){
print "Skipping sensor $name, couldn't parse its value: $val\n";
next SENSOR;
}
if ($name =~ m/^Core\s+\d+/){
print "Skipping individual core sensor $name\n";
next SENSOR;
}
if ($thr =~ m/high\s+=\s+\+(\d+(\.\d+)?)/){
$sensor->{threshold_high} = $1;
}
elsif ($thr =~ m/^crit\s+=\s+\+(\d+(\.\d+)?)/){
$sensor->{threshold_high} = $1 - $temp_margin;
}
next SENSOR unless $val;
$sensor->{threshold_low} ||= ($sensor->{threshold_high}) ? $sensor->{threshold_high}-$temp_hyst : $def_temp_thres_high-$temp_hyst;
$sensor->{threshold_high} ||= $def_temp_thres_high;
$sensor->{threshold_high} =~ s/\.0+$//;
$sensor->{threshold_low} =~ s/\.0+$//;
$sensor->{description} = $name;
$sensor->{type} = 'temp';
$sensor->{unit} = '°C';
$sensor->{cmd} = "$lmsensor | perl -ne 'if (/^$name:\\s*\\+(\\d+)/) { print \"\$1\\n\"; last }'";
my $id = sensor_name($name);
$sensors->{$id} = $sensor;
print "Found a temperature sensor using lm_sensors: $name\n";
}
}
}
# Now, try to detect smart capable HDD
if ($smartctl && -x $smartctl){
foreach my $block (Zabbix::Agent::Addons::Disks::list_smart_hdd({ skip_remouvable => 1 })){
my @lines = qx($smartctl -A /dev/$block);
next if ($? != 0);
foreach my $l (@lines){
if ($l =~ /(Temperature_Celsius|Airflow_Temperature_Cel)/){
$sensors->{$block} = {
description => "$block temperature",
threshold_low => $def_hd_temp_thres_high-$temp_hd_hyst,
threshold_high => $def_hd_temp_thres_high,
type => 'temp',
unit => '°C',
cmd => "$smartctl -A /dev/$block | perl -ne 'if (/Temperature_Celsius(\\s+[^\\s]+){7}\\s+(\\d+(\\.\\d+)?)/) { print \"\$2\\n\"; last }'"
};
print "Found a temperature sensor using smartctl: $block\n";
last;
}
# Format found on some NVMe SSD
elsif ($l =~ /Temperature:\s+(\d+(\.\d+)?)\sCelsius/){
$sensors->{$block} = {
description => "$block temperature",
threshold_low => $def_hd_temp_thres_high-$temp_hd_hyst,
threshold_high => $def_hd_temp_thres_high,
type => 'temp',
unit => '°C',
cmd => "$smartctl -A /dev/$block | perl -ne 'if (/Temperature:\\s+(\\d+(\\.\\d+)?)/) { print \"\$1\\n\"; last }'"
};
print "Found a temperature sensor using smartctl: $block\n";
last;
}
}
}
# Some LSI based hardware RAID controller can report HDD temp
if (-e '/dev/megaraid_sas_ioctl_node'){
# Only check for the firsts 26 drives
foreach my $i (0..25){
my @res = qx($smartctl -d megaraid,$i -A /dev/sda);
next if ($? != 0);
foreach my $l (@res){
if ($l =~ m/Drive\sTrip\sTemperature:\s+(\d+)\s/){
$sensors->{'sda-' . $i} = {
description => "Temperature for disk No $i on sda",
type => 'temp',
threshold_high => $1-$temp_hd_margin,
threshold_low => $1-$temp_hd_margin-$temp_hd_hyst,
unit => '°C',
cmd => "$smartctl -A -d megaraid,$i /dev/sda | grep 'Current Drive Temperature' | awk '{print \$4}'"
};
print "Found a temperature sensor using smartctl (megaraid): sda-$i\n";
last;
}
elsif ($l =~ /(Temperature_Celsius|Airflow_Temperature_Cel)/){
$sensors->{'sda-' . $i} = {
description => "Temperature for disk No $i on sda",
threshold_low => $def_hd_temp_thres_high-$temp_hd_hyst,
threshold_high => $def_hd_temp_thres_high,
type => 'temp',
unit => '°C',
cmd => "$smartctl -A -d megaraid,$i /dev/sda | perl -ne 'if (/(Temperature_Celsius|Airflow_Temperature_Cel)(\\s+[^\\s]+){7}\\s+(\\d+)/) { print \"\$3\\n\"; last }'"
};
print "Found a temperature sensor using smartctl (megaraid): sda-$i\n";
last;
}
}
}
}
}
# Now check UPS
if ($upsc && -x $upsc){
foreach my $ups (Zabbix::Agent::Addons::UPS::list_ups()){
my @lines = qx($upsc $ups);
next if ($? != 0);
foreach my $l (@lines){
if ($l =~ m/^ups\.temperature:\s+(\d+(\.\d+)?)/){
$sensors->{'ups_' . lc $ups . '_temp'} = {
description => "ups temperature for $ups",
type => 'temp',
threshold_high => $def_ups_temp_thres_high,
threshold_low => $def_ups_temp_thres_high-$temp_ups_hyst,
unit => '°C',
cmd => "$upsc $ups ups.temperature 2>/dev/null"
};
print "Found a temperature sensor for ups $ups\n";
last;
}
elsif ($l =~ m/^ups\.load:\s+(\d+(\.\d+)?)/){
$sensors->{'ups_' . lc $ups . '_load'} = {
description => "ups load for $ups",
type => 'power',
threshold_high => $def_pwr_rel_thres_high,
threshold_low => $def_pwr_rel_thres_high-$pwr_rel_hyst,
unit => '%',
cmd => "$upsc $ups ups.load 2>/dev/null"
};
}
}
}
}
# TODO: add support for lm sensors, but its output is harder to parse
foreach my $s (sort keys %$sensors){
$cfg->set_block($s, $sensors->{$s});
}
$cfg->write($output);
# Take a sensor description and return a suitable string as sensor ID
sub sensor_name{
my $desc = shift;
my $id = lc $desc;
$id =~ s/[^\w]/_/g;
$id =~ s/%/percent/g;
$id =~ s/_rpm$//;
return $id;
}


@ -0,0 +1,27 @@
#!/usr/bin/perl -w
use JSON;
# Ensure data are fresh in the cache so Zabbix can pick them up quickly
# You can run this script from a cron job (every 5 min for example) so the cache stays fresh
# and the Zabbix agent can run check_pve_sudo quickly
qx(/var/lib/zabbix/bin/check_pve_sudo --cluster --cache=120);
my $nodes = from_json(qx(/var/lib/zabbix/bin/disco_pve_sudo --what=nodes));
foreach my $node (@{$nodes->{data}}){
qx(/var/lib/zabbix/bin/check_pve_sudo --node=$node->{'{#PVE_NODE_NAME}'} --cache=120);
}
my $pools = from_json(qx(/var/lib/zabbix/bin/disco_pve_sudo --what=pools));
foreach my $pool (@{$pools->{data}}){
qx(/var/lib/zabbix/bin/check_pve_sudo --pool=$pool->{'{#PVE_POOL_ID}'} --cache=120);
}
my $storages = from_json(qx(/var/lib/zabbix/bin/disco_pve_sudo --what=storage));
foreach my $stor (@{$storages->{data}}){
qx(/var/lib/zabbix/bin/check_pve_sudo --storage=$stor->{'{#PVE_STOR_ID}'} --cache=120);
}
my $guests = from_json(qx(/var/lib/zabbix/bin/disco_pve_sudo --what=guests));
foreach my $guest (@{$guests->{data}}){
qx(/var/lib/zabbix/bin/check_pve_sudo --guest=$guest->{'{#PVE_GUEST_ID}'} --cache=120);
}
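A minimal cron sketch matching the comment above; the cron file name, the install path of this helper and the user it runs as are assumptions, mirroring the /var/lib/zabbix/bin layout used by the other scripts:
# /etc/cron.d/zabbix-pve-cache
*/5 * * * * zabbix /var/lib/zabbix/bin/refresh_pve_cache >/dev/null 2>&1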

Some files were not shown because too many files have changed in this diff.