Compare commits


No commits in common. "master" and "0.1.22_el6" have entirely different histories.

139 changed files with 470 additions and 37920 deletions

View File

@@ -1,3 +0,0 @@
The .tito/packages directory contains metadata files
named after their packages. Each file holds the latest tagged
version and the project's relative directory.

View File

@@ -1 +0,0 @@
0.2.172-1 ./

View File

@@ -1 +0,0 @@
../../tito_libs/releasers.conf

View File

@@ -1,6 +0,0 @@
[buildconfig]
builder = tito.builder.Builder
tagger = tito.tagger.VersionTagger
changelog_do_not_remove_cherrypick = 0
changelog_format = %s (%ae)
lib_dir = ../tito_libs
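
With this configuration, tagging and building follow the usual tito workflow. A sketch (commands shown for illustration):

    tito tag          # bump the version and regenerate the changelog from git
    tito build --rpm  # build an RPM from the latest tag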

conf/sensors.conf Normal file
View File

@@ -0,0 +1,24 @@
# You can configure the sensors to monitor here
# Format is <sensor_name>=<command>!<high threshold>!<low threshold>
# An alert is triggered if the temperature rises above the high threshold
# The alert is cleared once the temperature drops below the low threshold
# Example:
#
#
## Examples with ipmitool
# cpu0 = /usr/bin/ipmitool sdr get 'P1 Therm Margin' | grep 'Sensor Reading' | cut -d':' -f 2 | awk '{print$1}'!-30!-39
# mb = /usr/bin/ipmitool sdr get 'Baseboard Temp' | grep 'Sensor Reading' | cut -d':' -f 2 | awk '{print$1}'!50!45
#
## Examples with smartctl
# sda = /usr/sbin/smartctl -a /dev/sda | grep Temperature_Celsius | awk '{print $10}'!45!40
# sdb = /usr/sbin/smartctl -a /dev/sdb | grep Temperature_Celsius | awk '{print $10}'!50!45
#
## Examples with lm_sensors
# cpu0=/usr/bin/sensors | grep temp1 | cut -d':' -f 2 | awk '{print $1}' | sed -e "s/+//g" -e "s/.C//g"!65!55
#
## Examples with acpi
# cpu0=cat /proc/acpi/thermal_zone/THRM/temperature | awk '{print $2}'!65!55
#
#
# !!! WARNING !!!
# All the commands will be executed with root privileges
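
For reference, a minimal Perl sketch (not part of the shipped scripts) showing how one line of this legacy format can be split into its fields; variable names are illustrative:

    use strict;
    use warnings;

    my $line = "cpu0 = /usr/bin/sensors | grep temp1 | awk '{print \$1}'!65!55";
    my ($name, $def) = split(/\s*=\s*/, $line, 2);
    # Everything up to the last two '!' separators is the command
    my ($cmd, $high, $low) = $def =~ m/^(.+)!([^!]+)!([^!]+)$/;
    print "sensor=$name high=$high low=$low\ncmd=$cmd\n";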

View File

@@ -1,45 +0,0 @@
# This file lets you configure which sensors will be monitored by Zabbix
# Sensors defined here will be sent to Zabbix through its low-level discovery feature
# You then have to create discovery rules and prototypes to make use of them
#
# This file is in ini format: each sensor has its own block and a set of key/value pairs
#
# Example:
#
# [cpu0]
# description=Temperature of the first CPU
# threshold_high=60
# threshold_low=50
# cmd="/usr/bin/sensors | grep temp1 | cut -d':' -f 2 | awk '{print $1}' | sed -e 's/+//g' -e 's/.C//g'"
# type=temp
# unit=°C
#
# [mb]
# description=Motherboard's temperature
# threshold_high=50
# threshold_low=45
# cmd="/usr/bin/ipmitool sdr get 'Baseboard Temp' | grep 'Sensor Reading' | awk '{print $4}'"
# type=temp
# unit=°C
#
# [sda]
# description=hard drive temperature
# threshold_high=50
# threshold_low=45
# cmd="/usr/sbin/smartctl -A /dev/sda | grep Temperature_Celsius | awk '{print $10}'"
# type=temp
# unit=°C
#
# [fan1]
# description=front fan
# threshold_high=12000
# threshold_low=1400
# cmd="/usr/bin/ipmitool sdr get 'Fan1A RPM' | grep 'Sensor Reading' | awk '{print $4}'"
# type=fan
# unit=rpm
#
#
# !!! WARNING !!!
# * All the commands will be executed with root privileges
# * If your cmd contains quotes, you must double quote the whole command
# * If your cmd contains double quotes, you must escape them as \"
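
Since the package declares perl(Config::Simple) and perl(JSON) as dependencies, a sensors.ini like the one described above can be read with something along these lines; this is a sketch, and the LLD macro names are illustrative rather than the ones used by the shipped scripts:

    use strict;
    use warnings;
    use Config::Simple;
    use JSON;

    my $cfg = Config::Simple->new('/etc/zabbix/sensors.ini')
        or die Config::Simple->error();
    my %flat = $cfg->vars();    # keys are of the form "cpu0.description"
    my %sensors;
    foreach my $key (keys %flat) {
        my ($block, $param) = split(/\./, $key, 2);
        $sensors{$block}->{$param} = $flat{$key};
    }
    # Emit the sensor list as Zabbix low-level discovery JSON
    my @data = map { { '{#SENSOR}' => $_, '{#SENSOR_TYPE}' => $sensors{$_}->{type} } }
        sort keys %sensors;
    print to_json({ data => \@data }) . "\n";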

View File

@@ -1,4 +1,2 @@
Defaults:zabbix !requiretty
Cmnd_Alias ZABBIX_AGENT = /var/lib/zabbix/bin/*_sudo
Defaults!ZABBIX_AGENT !syslog
zabbix ALL=(root) NOPASSWD: ZABBIX_AGENT
zabbix ALL=(root) NOPASSWD: /var/lib/zabbix/bin/*_sudo
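
These sudo rules exist so the agent, which runs as the unprivileged zabbix user, can call the monitoring scripts shipped in /var/lib/zabbix/bin from UserParameter entries installed in /etc/zabbix/zabbix_agentd.conf.d/ (see the spec file below). A hypothetical entry of that shape, with an illustrative key name:

    UserParameter=pve.cluster[*],sudo /var/lib/zabbix/bin/check_pve_sudo $1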

View File

@@ -1,63 +0,0 @@
package Zabbix::Agent::Addons::Disks;

use strict;
use warnings;

# Return an array of block devices, skipping any whose size == 0
sub list_block_dev {
    my @bd = ();
    opendir(my $dh, "/sys/block") or die "Couldn't open /sys/block: $!";
    my @blocks = grep { $_ !~ m/^\./ } readdir($dh);
    closedir($dh);
    foreach my $block (@blocks){
        my $size = 1;
        if ( -e "/sys/block/$block/size"){
            open SIZE, "/sys/block/$block/size";
            $size = join "", <SIZE>;
            close SIZE;
            chomp($size);
            next if ($size eq '0');
        }
        push @bd, $block;
    }
    return @bd;
}

# Return an array of S.M.A.R.T. capable drives
sub list_smart_hdd {
    my ($param) = shift || {};
    my @shd = ();
    if (-x "/usr/sbin/smartctl"){
        BLOCK: foreach my $block (list_block_dev()){
            # Skip blocks we already know won't support SMART
            next if ($block =~ m/^(ram|loop|md|dm\-)\d+/);
            my $smart_enabled = 0;
            my @smart_info = qx(/usr/sbin/smartctl -i /dev/$block);
            next unless ($? == 0);
            foreach my $line (@smart_info){
                if ($line =~ m/^SMART support is:\s+Enabled/i){
                    $smart_enabled = 1;
                    last;
                } elsif ($line =~ m/NVMe/i){
                    $smart_enabled = 1;
                    last;
                } elsif ($line =~ m/^Transport protocol:\s+iSCSI/i){
                    # Skip iSCSI blocks
                    next BLOCK;
                }
            }
            # Skip the block unless S.M.A.R.T. is advertised as enabled
            next unless ($smart_enabled);
            if ($param->{skip_remouvable} && -e "/sys/block/$block/removable"){
                open REMOVABLE, "/sys/block/$block/removable";
                my $removable = join "", <REMOVABLE>;
                close REMOVABLE;
                chomp($removable);
                next if ($removable eq '1');
            }
            push @shd, $block;
        }
    }
    return @shd;
}

1;
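
A short usage sketch for the module above: list S.M.A.R.T. capable drives while skipping removable ones (note that skip_remouvable, spelling included, is the actual parameter name).

    use Zabbix::Agent::Addons::Disks;

    my @drives = Zabbix::Agent::Addons::Disks::list_smart_hdd({ skip_remouvable => 1 });
    print "$_\n" foreach @drives;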

View File

@@ -1,917 +0,0 @@
package Zabbix::Agent::Addons::LVM;
# Forked from Linux::LVM
# with support for thin pools
use 5.006;
use strict;
use warnings;
require Exporter;
our @ISA = qw(Exporter);
# Items to export into callers namespace by default. Note: do not export
# names by default without a very good reason. Use EXPORT_OK instead.
# Do not simply export all your public functions/methods/constants.
# This allows declaration use Zabbix::Agent::Addons::LVM ':all';
# If you do not need this, moving things directly into @EXPORT or @EXPORT_OK
# will save memory.
our %EXPORT_TAGS = ( 'all' => [ qw( get_volume_group_list
get_volume_group_information
get_logical_volume_information
get_physical_volume_information
get_vg_information
get_pv_info
get_lv_info
) ] );
our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
our @EXPORT = qw( get_volume_group_list
get_volume_group_information
get_logical_volume_information
get_physical_volume_information
get_vg_information
get_pv_info
get_lv_info
);
our $VERSION = '0.18';
our $units;
# Preloaded methods go here.
# Autoload methods go after =cut, and are processed by the autosplit program.
#-----------------------------------------------------------------------#
# Subroutine: units #
#-----------------------------------------------------------------------#
# Description: Set units to be used for pe_size, lv_size, etc. #
# legal values are same as lvm --units: #
# hbskmgtpeHBSKMGTPE #
# (h)uman-readable, (b)ytes, (s)ectors, (k)ilobytes, #
# (m)egabytes, (g)igabytes, (t)erabytes, (p)etabytes, #
# (e)xabytes. Capitalise to use multiples of 1000 (S.I.) #
# instead of 1024. #
# Can also specify custom units e.g. --units 3M #
#-----------------------------------------------------------------------#
# Parameters: An optional units value #
#-----------------------------------------------------------------------#
# Return Values: The units value currently in effect. #
#-----------------------------------------------------------------------#
sub units {
shift;
$units = shift() if @_;
return $units;
}
#-----------------------------------------------------------------------#
# Subroutine: get_volume_group_list #
#-----------------------------------------------------------------------#
# Description: This function will return a sorted list of all of the #
# active volume groups on the system. #
#-----------------------------------------------------------------------#
# Parameters: None #
#-----------------------------------------------------------------------#
# Return Values: On success, an array with the volume group names. #
#-----------------------------------------------------------------------#
sub get_volume_group_list() {
my %vg = get_vg_information();
return (sort keys(%vg));
} # End of the get_volume_group_list routine.
#-----------------------------------------------------------------------#
# Subroutine: get_volume_group_information #
#-----------------------------------------------------------------------#
# Description: This function will return a hash containing all of the #
# data about the specified volume group. #
#-----------------------------------------------------------------------#
# Parameters: A string containing a volume group name. #
#-----------------------------------------------------------------------#
# Return Values: On success, a hash with the volume group data. #
#-----------------------------------------------------------------------#
sub get_volume_group_information($) {
my $volume_group = $_[0];
my %vg_info;
my %vg = get_vg_information();
foreach(sort keys %{$vg{$volume_group}}) {
if ( $_ eq "pvols" ) { next; }
elsif( $_ eq "lvols" ) { next; }
else {
$vg_info{$_} = $vg{$volume_group}->{$_};
}
}
return %vg_info;
} # End of the get_volume_group_information routine.
#-----------------------------------------------------------------------#
# Subroutine: get_logical_volume_information #
#-----------------------------------------------------------------------#
# Description: This function will return a hash containing all of the #
# logical volume data for the specified volume group. #
#-----------------------------------------------------------------------#
# Parameters: A string containing a volume group name. #
#-----------------------------------------------------------------------#
# Return Values: On success, a hash with the logical volume data. #
#-----------------------------------------------------------------------#
sub get_logical_volume_information($) {
my $volume_group = $_[0];
my %lv_info;
my $lvname;
my %vg = get_vg_information();
foreach $lvname (sort keys %{$vg{$volume_group}->{lvols}}) {
foreach(sort keys %{$vg{$volume_group}->{lvols}->{$lvname}}) {
$lv_info{$lvname}->{$_} = $vg{$volume_group}->{lvols}->{$lvname}->{$_};
}
}
return %lv_info;
} # End of the get_logical_volume_information routine.
#-----------------------------------------------------------------------#
# Subroutine: get_physical_volume_information #
#-----------------------------------------------------------------------#
# Description: This function will return a hash containing all of the #
# physical volume data for the specified volume group. #
#-----------------------------------------------------------------------#
# Parameters: A string containing a volume group name. #
#-----------------------------------------------------------------------#
# Return Values: On success, a hash with the physical volume data. #
#-----------------------------------------------------------------------#
sub get_physical_volume_information($) {
my $volume_group = $_[0];
my %pv_info;
my $pvname;
my %vg = get_vg_information();
foreach $pvname (sort keys %{$vg{$volume_group}->{pvols}}) {
foreach(sort keys %{$vg{$volume_group}->{pvols}->{$pvname}}) {
$pv_info{$pvname}->{$_} = $vg{$volume_group}->{pvols}->{$pvname}->{$_};
}
}
return %pv_info;
} # End of the get_physical_volume_information routine.
#-----------------------------------------------------------------------#
# Subroutine: get_vg_information #
#-----------------------------------------------------------------------#
# Description: This function will return a hash containing all of the #
# volume group information for the system. #
#-----------------------------------------------------------------------#
# Parameters: None #
#-----------------------------------------------------------------------#
# Return Values: On success, a hash with all of the vg information. #
#-----------------------------------------------------------------------#
sub get_vg_information() {
my %vghash;
my $vgn;
my $lvn;
my $pvn;
my @vginfo;
my $units_arg = '';
$units_arg = " --units $units " if ($units);
if ( -e "/usr/sbin/vgdisplay" ) {
@vginfo = `/usr/sbin/vgdisplay -v $units_arg 2>/dev/null`;
} else {
if( ! -e "/sbin/vgdisplay" ) { die("LVM utilities not installed in /sbin or /usr/sbin"); }
@vginfo = `/sbin/vgdisplay -v $units_arg 2>/dev/null`;
}
VGINF: foreach(@vginfo) {
chomp;
s/^\s+//g;
s/\s+$//g;
next VGINF if m/^$/;
# Parse the volume group name.
if( m/VG Name\s+(\S+)/ ) {
$vgn = $1; $vghash{$vgn}->{vgname} = $1;
next VGINF; }
# Parse the volume group access.
elsif( m/VG Access\s+(\S+)/ ) {
$vghash{$vgn}->{access} = $1;
next VGINF; }
# Parse the volume group status.
elsif( m/VG Status\s+(.+)/ ) {
$vghash{$vgn}->{status} = $1;
next VGINF; }
# Parse the volume group number.
elsif( m/VG #\s+(\S+)/ ) {
$vghash{$vgn}->{vg_number} = $1;
next VGINF; }
# Parse the maximum logical volume size and size unit for the volume group.
elsif( m/MAX LV Size\s+(\S+) (\S+)/ ) {
$vghash{$vgn}->{max_lv_size} = $1;
$vghash{$vgn}->{max_lv_size_unit} = $2;
next VGINF; }
# Parse the maximum number of logical volumes for the volume group.
elsif( m/MAX LV\s+(\S+)/ ) {
$vghash{$vgn}->{max_lv} = $1;
next VGINF; }
# Parse the current number of logical volumes for the volume group.
elsif( m/Cur LV\s+(\S+)/ ) {
$vghash{$vgn}->{cur_lv} = $1;
next VGINF; }
# Parse the number of open logical volumes for the volume group.
elsif( m/Open LV\s+(\S+)/ ) {
$vghash{$vgn}->{open_lv} = $1;
next VGINF; }
# Parse the number of physical volumes accessible to the volume group.
elsif( m/Max PV\s+(\S+)/ ) {
$vghash{$vgn}->{max_pv} = $1;
next VGINF; }
# Parse the current number of physical volumes in the volume group.
elsif( m/Cur PV\s+(\S+)/ ) {
$vghash{$vgn}->{cur_pv} = $1;
next VGINF; }
# Parse the number of active physical volumes in the volume group.
elsif( m/Act PV\s+(\S+)/ ) {
$vghash{$vgn}->{act_pv} = $1;
next VGINF; }
# Parse the size of the volume group.
elsif( m/VG Size\s+(\S+) (\S+)/ ) {
$vghash{$vgn}->{vg_size} = $1;
$vghash{$vgn}->{vg_size_unit} = $2;
next VGINF; }
# Parse the physical extent size and unit for one extent of volume group.
elsif( m/PE Size\s+(\S+) (\S+)/ ) {
$vghash{$vgn}->{pe_size} = $1;
$vghash{$vgn}->{pe_size_unit} = $2;
next VGINF; }
# Parse the total number and number of free physical extents from the physical disk.
elsif( m/Total PE \/ Free PE\s+(\S+) \/ (\S+)/m ) {
$vghash{$vgn}->{pvols}->{$pvn}->{total_pe} = $1;
$vghash{$vgn}->{pvols}->{$pvn}->{free_pe} = $2;
next VGINF; }
# Parse the total number of physical extents from the volume group.
elsif( m/Total PE\s+(\S+)/ ) {
$vghash{$vgn}->{total_pe} = $1;
next VGINF; }
# Parse the number of allocated physical extents from the volume group.
elsif( m/Alloc PE \/ Size\s+(\S+) \/ (\S+)(?:\s+(\S+))?/ ) {
$vghash{$vgn}->{alloc_pe} = $1;
$vghash{$vgn}->{alloc_pe_size} = $2;
$vghash{$vgn}->{alloc_pe_size_unit} = $3 || "B";
next VGINF; }
# Parse the number and size of free physical extents in the volume group.
elsif( m/Free PE \/ Size\s+(\S+) \/ (\S+) (\S+)/ ) {
$vghash{$vgn}->{free_pe} = $1;
$vghash{$vgn}->{free_pe_size} = $2;
$vghash{$vgn}->{free_pe_size_unit} = $3;
next VGINF; }
# Parse the volume group uuid.
elsif( m/VG UUID\s+(\S+)/ ) {
$vghash{$vgn}->{uuid} = $1;
next VGINF; }
# Parse the logical volume name.
elsif( m/LV Name\s+(\S+)/ ) {
$lvn = $1;
$vghash{$vgn}->{lvols}->{$lvn}->{name} = $1;
next VGINF; }
# since version 2.02.89 'LV Name' is no longer the full path, 'LV Path' is.
# LV Path may be bogus or missing in some cases, such as thin pools.
if( m/LV Path\s+(\S+)/ ) {
$lvn = $1 unless $lvn;
$vghash{$vgn}->{lvols}->{$lvn}->{name} = $1;
next VGINF; }
# Parse the logical volume UUID.
elsif( m/LV UUID\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{uuid} = $1;
next VGINF; }
# Parse the number of segments in the logical volume.
elsif( m/Segments\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{segments} = $1;
next VGINF; }
# Parse the logical volume size and unit.
elsif( m/LV Size\s+(\S+) (\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{lv_size} = $1;
$vghash{$vgn}->{lvols}->{$lvn}->{lv_size_unit} = $2;
next VGINF; }
# Parse the logical volume write access.
elsif( m/LV Write Access\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{write_access} = $1;
next VGINF; }
# Parse the logical volume status.
elsif( m/LV Status\s+(.+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{status} = $1;
next VGINF; }
# Parse the number of logical extents in the logical volume.
elsif( m/Current LE\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{cur_le} = $1;
next VGINF; }
# Parse the number of allocated logical extents in the logical volume.
elsif( m/Allocated LE\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{alloc_le} = $1;
next VGINF; }
# Parse the allocation type for the logical volume.
elsif( m/Allocation\s+(.+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{allocation} = $1;
next VGINF; }
# Parse the volume number.
elsif( m/LV #\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{lv_number} = $1;
next VGINF; }
# Parse the number of times the logical volume is open.
elsif( m/# open\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{open_lv} = $1;
next VGINF; }
# Parse the block device of the logical volume.
elsif( m/Block device\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{device} = $1;
next VGINF; }
# Parse the value for the read ahead sectors of the logical volume.
elsif( m/Read ahead sectors\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{read_ahead} = $1;
next VGINF; }
elsif( m/Allocated to snapshot\s+(\S+)%/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{'allocated_to_snapshot'} = $1;
next VGINF; }
elsif( m/COW-table size\s+([0-9\.]+)\s+(\S+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{'cow_table_size'} = $1;
$vghash{$vgn}->{lvols}->{$lvn}->{'cow_table_unit'} = $2;
next VGINF; }
# Thin pools have data and metadata allocations
elsif( m/Allocated pool data\s+(\S+)%/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{'allocated_pool_data'} = $1;
next VGINF; }
elsif( m/Allocated metadata\s+(\S+)%/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{'allocated_meta_data'} = $1;
next VGINF; }
elsif( m/Mirrored volumes\s+(.+)/ ) {
$vghash{$vgn}->{lvols}->{$lvn}->{'mirrored_volumes'} = $1;
next VGINF; }
# Parse the physical disk name.
elsif( m/PV Name\s+(\S+)/ ) {
$pvn = $1;
$vghash{$vgn}->{pvols}->{$pvn}->{device} = $1;
next VGINF; }
# Parse the status of the physical disk.
elsif( m/PV Status\s+(.+)/ ) {
$vghash{$vgn}->{pvols}->{$pvn}->{status} = $1;
next VGINF; }
# Parse the status of the physical disk.
elsif( m/PV UUID\s+(.+)/ ) {
$vghash{$vgn}->{pvols}->{$pvn}->{uuid} = $1;
next VGINF; }
}
return %vghash;
} # End of the get_vg_information routine.
#-----------------------------------------------------------------------#
# Subroutine: get_pv_info #
#-----------------------------------------------------------------------#
# Description: This function will return a hash containing all of the #
# information about the specified physical volume. #
#-----------------------------------------------------------------------#
# Parameters: None #
#-----------------------------------------------------------------------#
# Return Values: On success, a hash with all of the pv information. #
#-----------------------------------------------------------------------#
sub get_pv_info($) {
my $pvname = $_[0];
my %pvhash;
my @pvinfo;
if( ! -e "$pvname" ) { die("Physical Disk: $pvname does not exist."); }
my $units_arg = '';
$units_arg = " --units $units " if ($units);
if ( -e "/usr/sbin/pvdisplay" ) {
@pvinfo = `/usr/sbin/pvdisplay $units_arg $pvname`;
} else {
if( ! -e "/sbin/pvdisplay" ) { die("LVM utilities not installed in /sbin or /usr/sbin"); }
@pvinfo = `/sbin/pvdisplay $units_arg $pvname`;
}
PVINF: foreach(@pvinfo) {
# Get the name of the physical volume.
if( m/PV Name\s+(\S+)/ ) {
$pvhash{pv_name} = $1;
next PVINF; }
# Get the name of the volume group the physical volume belongs to.
if( m/VG Name\s+(\S+)/ ) {
$pvhash{vg_name} = $1;
next PVINF; }
# Get the size information of the physical volume.
if( m/PV Size\s+(\S+) (\S+)/ ) {
$pvhash{size} = $1;
$pvhash{size_unit} = $2;
next PVINF; }
# Get the physical volume number.
if( m/PV\#\s+(\S+)/ ) {
$pvhash{pv_number} = $1;
next PVINF; }
# Get the status of the physical volume.
if( m/PV Status\s+(.+)/ ) {
$pvhash{status} = $1;
next PVINF; }
# Get the allocation status of the physical volume.
if( m/Allocatable\s+(.+)/ ) {
$pvhash{allocatable} = $1;
next PVINF; }
# Get the number of logical volumes on the physical volume.
if( m/Cur LV\s+(\S+)/ ) {
$pvhash{num_lvols} = $1;
next PVINF; }
# Get the physical extent size and unit of the physical volume.
if( m/PE Size \((\S+)\)\s+(\S+)/ ) {
$pvhash{pe_size} = $2;
$pvhash{pe_size_unit} = $1;
next PVINF; }
# Get the total number of physical extents on the physical volume.
if( m/Total PE\s+(\S+)/ ) {
$pvhash{total_pe} = $1;
next PVINF; }
# Get the number of free extents on the physical volume.
if( m/Free PE\s+(\S+)/ ) {
$pvhash{free_pe} = $1;
next PVINF; }
# Get the number of allocated physical extents on the physical volume.
if( m/Allocated PE\s+(\S+)/ ) {
$pvhash{alloc_pe} = $1;
next PVINF; }
# Get the UUID of the physical volume.
if( m/PV UUID\s+(\S+)/ ) {
$pvhash{uuid} = $1;
next PVINF; }
}
return %pvhash;
} # End of the get_pv_info routine.
#-----------------------------------------------------------------------#
# Subroutine: get_lv_info #
#-----------------------------------------------------------------------#
# Description: This function will return a hash containing all of the #
# information about the specified logical volume. #
#-----------------------------------------------------------------------#
# Parameters: None #
#-----------------------------------------------------------------------#
# Return Values: On success, a hash with all of the lv information. #
#-----------------------------------------------------------------------#
sub get_lv_info($) {
my $lvname = $_[0];
my %lvhash;
my @lvinfo;
my $units_arg = '';
$units_arg = " --units $units " if ($units);
if ( -e "/usr/sbin/vgdisplay" ) {
@lvinfo = `/usr/sbin/lvdisplay $units_arg $lvname`;
} else {
if( ! -e "/sbin/vgdisplay" ) { die("LVM utilities not installed in /sbin or /usr/sbin"); }
@lvinfo = `/sbin/lvdisplay $units_arg $lvname`;
}
LVINF: foreach(@lvinfo) {
# Get the logical volume name.
if( m/LV Name\s+(\S+)/ ) {
$lvhash{lv_name} = $1;
next LVINF; }
# since version 2.02.89 'LV Name' is no longer the full path, 'LV Path' is.
# LV Path may be bogus or missing in some cases, such as thin pools.
if( m/LV Path\s+(\S+)/ ) {
$lvhash{lv_name} = $1;
next LVINF; }
# Get the volume group name.
if( m/VG Name\s+(\S+)/ ) {
$lvhash{vg_name} = $1;
next LVINF; }
# Get the logical volume UUID.
if( m/LV UUID\s+(\S+)/ ) {
$lvhash{uuid} = $1;
next LVINF; }
# Get the logical volume write status.
if( m/LV Write Access\s+(.+)/ ) {
$lvhash{access} = $1;
next LVINF; }
# Get the logical volume status.
if( m/LV Status\s+(.+)/ ) {
$lvhash{status} = $1;
next LVINF; }
# Get the logical volume number.
if( m/LV \#\s+(\S+)/ ) {
$lvhash{lv_number} = $1;
next LVINF; }
# Get the number of opens for the logical volume.
if( m/\# open\s+(\S+)/ ) {
$lvhash{lv_open} = $1;
next LVINF; }
# Get the logical volume size and size unit.
if( m/LV Size\s+(\S+) (\S+)/ ) {
$lvhash{size} = $1;
$lvhash{size_unit} = $2;
next LVINF; }
# Get the number of extents assigned to the logical volume.
if( m/Current LE\s+(\S+)/ ) {
$lvhash{current_le} = $1;
next LVINF; }
# Get the number of extents allocated to the logical volume.
if( m/Allocated LE\s+(\S+)/ ) {
$lvhash{alloc_le} = $1;
next LVINF; }
# Get the extent allocation type of the logical volume.
if( m/Allocation\s+(.+)/ ) {
$lvhash{allocation} = $1;
next LVINF; }
# Get the number of read ahead sectors for the logical volume.
if( m/Read ahead sectors\s+(\S+)/ ) {
$lvhash{read_ahead} = $1;
next LVINF; }
# Get the block device of the logical volume.
if( m/Block device\s+(\S+)/ ) {
$lvhash{block_device} = $1;
next LVINF; }
if( m/Allocated to snapshot\s+(\S+)%/ ) {
$lvhash{allocated_to_snapshot} = $1;
next LVINF; }
elsif( m/COW-table size\s+([0-9\.]+)\s+(\S+)/ ) {
$lvhash{'cow_table_size'} = $1;
$lvhash{'cow_table_unit'} = $2;
next LVINF; }
# Thin pools have data and metadata allocation
if( m/Allocated pool data\s+(\S+)%/ ) {
$lvhash{allocated_pool_data} = $1;
next LVINF; }
if( m/Allocated metadata\s+(\S+)%/ ) {
$lvhash{allocated_meta_data} = $1;
next LVINF; }
}
return %lvhash;
} # End of the get_lv_info routine.
1;
__END__
# Below is stub documentation for your module. You'd better edit it!
=head1 NAME
Zabbix::Agent::Addons::LVM - Perl extension for accessing Logical Volume Manager(LVM)
data structures on Linux.
=head1 SYNOPSIS
use Zabbix::Agent::Addons::LVM;
Zabbix::Agent::Addons::LVM->units('G');
=head1 ABSTRACT
The live data used in the examples is included in the DESCRIPTION area
for your convenience and reference.
=head1 DESCRIPTION
units() Get or set the units used to report sizes of LVs, PVs, etc.
legal values: hbskmgtpeHBSKMGTPE
see man lvm documentation of --units
get_volume_group_list() This routine will return an array that
contains the names of the volume groups.
@vgs = get_volume_group_list(); print "@vgs \n";
Would yield the following: vg00
get_volume_group_information($) This routine will return all of
the volume group information about
the specified volume group.
%vg = get_volume_group_information("vg00");
foreach(sort keys %vg) {
print "$_ = $vg{$_}\n";
}
Would yield the following:
access = read/write
act_pv = 2
alloc_pe = 3840
alloc_pe_size = 15
alloc_pe_size_unit = GB
cur_lv = 3
cur_pv = 2
free_pe = 864
free_pe_size = 3.38
free_pe_size_unit = GB
max_lv = 256
max_lv_size = 255.99
max_lv_size_unit = GB
max_pv = 256
open_lv = 0
pe_size = 4
pe_size_unit = MB
status = available/resizable
total_pe = 4704
uuid = BBq8si-NyRR-9ZNW-3J5e-DoRO-RBHK-ckrszi
vg_number = 0
vg_size = 18.38
vg_size_unit = GB
vgname = vg00
get_logical_volume_information($) This routine will return all of the
logical volume information associated
with the specified volume group.
%lv = get_logical_volume_information("vg00");
foreach $lvname (sort keys %lv) {
foreach(sort keys %{$lv{$lvname}}) {
print "$_ = $lv{$lvname}->{$_}\n";
}
print "\n";
}
Would yield the following results:
alloc_le = 1024
allocation = next free
cur_le = 1024
device = 58:0
lv_number = 1
lv_size = 4
lv_size_unit = GB
name = /dev/vg00/lvol1
open_lv = 0
read_ahead = 1024
status = available
write_access = read/write
alloc_le = 1280
allocation = next free
cur_le = 1280
device = 58:1
lv_number = 2
lv_size = 5
lv_size_unit = GB
name = /dev/vg00/lvol2
open_lv = 0
read_ahead = 1024
status = available
write_access = read/write
alloc_le = 1536
allocation = next free
cur_le = 1536
device = 58:2
lv_number = 3
lv_size = 6
lv_size_unit = GB
name = /dev/vg00/lvol3
open_lv = 0
read_ahead = 1024
status = available
write_access = read/write
get_physical_volume_information($) This routine will return all of the
information about the physical volumes assigned
to the specified volume group.
%pv = get_physical_volume_information("vg00");
foreach $pvname (sort keys %pv) {
foreach(sort keys %{$pv{$pvname}}) {
print "$_ = $pv{$pvname}->{$_}\n";
}
print "\n";
}
Would yield the following results:
device = /dev/hda3
free_pe = 0
pv_number = 1
status = available / allocatable
total_pe = 2160
device = /dev/hda4
free_pe = 864
pv_number = 2
status = available / allocatable
total_pe = 2544
get_lv_info($) This routine will return all of the information about the
specified logical volume. The information will be returned
in a hash.
%lv = get_lv_info("/dev/vg00/lvol1");
foreach (sort keys %lv) {
print "$_ = $lv{$_} \n";
}
Would yield the following results:
access = read/write
alloc_le = 1024
allocation = next free
block_device = 58:0
current_le = 1024
lv_name = /dev/vg00/lvol1
lv_number = 1
lv_open = 0
read_ahead = 1024
size = 4
size_unit = GB
status = available
vg_name = vg00
get_pv_info($) This routine will return all of the information about the
specified physical volume. The information will be returned
in a hash.
%pv = get_pv_info("/dev/hda3");
foreach (sort keys %pv) {
print "$_ = $pv{$_} \n";
}
Would yield the following results:
alloc_pe = 2160
allocatable = yes (but full)
free_pe = 0
num_lvols = 2
pe_size = 4096
pe_size_unit = KByte
pv_name = /dev/hda3
pv_number = 1
sectors = 17703630
size = 8.44
size_info = NOT usable 4.19 MB [LVM: 136 KB]
size_unit = GB
status = available
total_pe = 2160
uuid = 2c5ADu-oEdt-ovCe-rqp0-MWpF-I5u1-8XigH4
vg_name = vg00
Command Output Used In The Above Examples: /sbin/vgdisplay -v
--- Volume group ---
VG Name vg00
VG Access read/write
VG Status available/resizable
VG # 0
MAX LV 256
Cur LV 3
Open LV 0
MAX LV Size 255.99 GB
Max PV 256
Cur PV 2
Act PV 2
VG Size 18.38 GB
PE Size 4 MB
Total PE 4704
Alloc PE / Size 3840 / 15 GB
Free PE / Size 864 / 3.38 GB
VG UUID BBq8si-NyRR-9ZNW-3J5e-DoRO-RBHK-ckrszi
--- Logical volume ---
LV Name /dev/vg00/lvol1
VG Name vg00
LV Write Access read/write
LV Status available
LV # 1
# open 0
LV Size 4 GB
Current LE 1024
Allocated LE 1024
Allocation next free
Read ahead sectors 1024
Block device 58:0
--- Logical volume ---
LV Name /dev/vg00/lvol2
VG Name vg00
LV Write Access read/write
LV Status available
LV # 2
# open 0
LV Size 5 GB
Current LE 1280
Allocated LE 1280
Allocation next free
Read ahead sectors 1024
Block device 58:1
--- Logical volume ---
LV Name /dev/vg00/lvol3
VG Name vg00
LV Write Access read/write
LV Status available
LV # 3
# open 0
LV Size 6 GB
Current LE 1536
Allocated LE 1536
Allocation next free
Read ahead sectors 1024
Block device 58:2
--- Physical volumes ---
PV Name (#) /dev/hda3 (1)
PV Status available / allocatable
Total PE / Free PE 2160 / 0
PV Name (#) /dev/hda4 (2)
PV Status available / allocatable
Total PE / Free PE 2544 / 864
=head1 SEE ALSO
L<vgdisplay>(1M)
L<lvdisplay>(1M)
L<pvdisplay>(1M)
=head1 AUTHOR
Chad Kerner, E<lt>chadkerner@yahoo.comE<gt>
=head1 COPYRIGHT AND LICENSE
Copyright 2003 by Chad Kerner
This library is free software; you can redistribute it and/or modify
it under the same terms as Perl itself.
Modified by Daniel Berteaud <daniel@firewall-services.com> to add
support for LVM thin
=cut

View File

@@ -1,21 +0,0 @@
package Zabbix::Agent::Addons::UPS;

use warnings;
use strict;
use File::Which;

# List configured UPS devices (only NUT is supported)
sub list_ups {
    my @ups = ();
    my $upsc = which('upsc');
    if ($upsc && -x $upsc){
        my @out = qx($upsc -l 2>/dev/null);
        if ($? == 0){
            @ups = @out;
            chomp @ups;
        }
    }
    return @ups;
}

1;
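
Usage sketch: print each UPS known to the local NUT daemon.

    use Zabbix::Agent::Addons::UPS;

    print "$_\n" foreach Zabbix::Agent::Addons::UPS::list_ups();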

View File

@@ -1,27 +0,0 @@
module zabbix-agent-addons 1.0;

require {
    type kernel_t;
    type devlog_t;
    type zabbix_var_lib_t;
    type sudo_exec_t;
    type proc_mdstat_t;
    type zabbix_agent_t;
    class process setrlimit;
    class capability { audit_write dac_override sys_resource };
    class file { execute execute_no_trans getattr ioctl open read };
    class netlink_audit_socket { create nlmsg_relay };
    class sock_file write;
    class unix_dgram_socket { connect create sendto };
}

#============= zabbix_agent_t ==============
allow zabbix_agent_t devlog_t:sock_file write;
allow zabbix_agent_t kernel_t:unix_dgram_socket sendto;
allow zabbix_agent_t proc_mdstat_t:file { getattr ioctl open read };
allow zabbix_agent_t self:capability { audit_write dac_override sys_resource };
allow zabbix_agent_t self:netlink_audit_socket { create nlmsg_relay };
allow zabbix_agent_t self:process setrlimit;
allow zabbix_agent_t self:unix_dgram_socket { connect create };
allow zabbix_agent_t sudo_exec_t:file { execute execute_no_trans };
allow zabbix_agent_t zabbix_var_lib_t:file { execute execute_no_trans ioctl open read };
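
The spec file below compiles this policy into zabbix-agent-addons.pp and installs it under /usr/share/selinux/packages/; loading it is then typically a matter of (illustrative, the package scriptlets may handle this):

    semodule -i /usr/share/selinux/packages/zabbix-agent-addons/zabbix-agent-addons.pp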

View File

@@ -1,11 +1,7 @@
%if 0%{?rhel} && 0%{?rhel} < 7
%global _without_selinux 1
%endif
Summary: Scripts for Zabbix monitoring
Name: zabbix-agent-addons
Version: 0.2.172
Release: 1%{?dist}
Version: 0.1.21
Release: 1
Source0: %{name}-%{version}.tar.gz
BuildArch: noarch
@@ -17,22 +13,10 @@ Requires: zabbix-agent
Requires: perl(Getopt::Long)
Requires: perl(Getopt::Std)
Requires: perl(JSON)
Requires: perl(Linux::LVM)
Requires: perl(POSIX)
Requires: perl(MIME::Base64)
Requires: perl(File::Which)
Requires: perl(Config::Simple)
Requires: perl(Statistics::Descriptive)
%if 0%{?rhel} > 6
# used by samba4 scripts, which won't run on anything < 7
Requires: perl(File::ReadBackwards)
%endif
Requires: fping
BuildRequires: perl
%if ! 0%{?_without_selinux}
Requires: policycoreutils
BuildRequires: selinux-policy-devel
BuildRequires: checkpolicy
%endif
AutoReqProv: no
@@ -46,11 +30,6 @@ LVM, RAID status, S.M.A.R.T. drives, BackupPC etc...
%setup -q
%build
%if ! 0%{?_without_selinux}
pushd selinux
make -f %{_datadir}/selinux/devel/Makefile
popd
%endif
%install
@@ -62,711 +41,31 @@ popd
# Install Zabbix conf
%{__install} -d $RPM_BUILD_ROOT%{_sysconfdir}/zabbix/zabbix_agentd.conf.d/
%{__install} -m 0644 zabbix_conf/* $RPM_BUILD_ROOT%{_sysconfdir}/zabbix/zabbix_agentd.conf.d/
# Install perl modules
%{__install} -d -m 0755 $RPM_BUILD_ROOT%{perl_vendorlib}
cp -r lib/* $RPM_BUILD_ROOT%{perl_vendorlib}/
# Install sensors conf
%{__install} -m 0755 conf/sensors.ini $RPM_BUILD_ROOT%{_sysconfdir}/zabbix/
%{__install} -m 0755 conf/sensors.conf $RPM_BUILD_ROOT%{_sysconfdir}/zabbix/
# Install sudo conf
%{__install} -d -m 750 $RPM_BUILD_ROOT%{_sysconfdir}/sudoers.d
%{__install} -m 600 conf/sudo.conf $RPM_BUILD_ROOT%{_sysconfdir}/sudoers.d/zabbix_agent
# Install SELinux policy
%if ! 0%{?_without_selinux}
%{__install} -d -m 750 $RPM_BUILD_ROOT%{_datadir}/selinux/packages/%{name}
%{__install} -m644 selinux/%{name}.pp $RPM_BUILD_ROOT%{_datadir}/selinux/packages/%{name}/%{name}.pp
%endif
%clean
%{__rm} -rf $RPM_BUILD_ROOT
%pre
%preun
%post
if [ $1 -eq 2 ] ; then
if [ -e "/etc/zabbix/sensors.conf" ]; then
/var/lib/zabbix/bin/util_convert_sensors_ini /etc/zabbix/sensors.conf
fi
fi
%files
%defattr(-,root,root,-)
%doc README
%doc zabbix_templates/*
%doc README CHANGELOG.git
%dir %attr(0750,zabbix,zabbix) %{_localstatedir}/lib/zabbix/bin
%{_localstatedir}/lib/zabbix/bin/*
%{perl_vendorlib}
%config(noreplace) %attr(0640,root,zabbix) %{_sysconfdir}/zabbix/sensors.ini
%config(noreplace) %attr(0640,root,zabbix) %{_sysconfdir}/zabbix/sensors.conf
%config(noreplace) %attr(0640,root,zabbix) %{_sysconfdir}/zabbix/zabbix_agentd.conf.d/*
%attr(0440,root,root) %{_sysconfdir}/sudoers.d/*
%if ! 0%{?_without_selinux}
%{_datadir}/selinux/packages/%{name}/%{name}.pp
%endif
%changelog
* Thu Dec 21 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.172-1
- Add Zabbix template for storageDevices (dbd@ehtrace.com)
- Read raw value for SSL_Life_Left (dbd@ehtrace.com)
- Read SSD_Life_Left if available (dbd@ehtrace.com)
- /dev/bus/0 might not exist but can be queried (dbd@ehtrace.com)
- Report more info from some NVMe (dbd@ehtrace.com)
- Adjust default values for stor dev (dbd@ehtrace.com)
- Fix UserParam (dbd@ehtrace.com)
- Add new script for smart monitoring (dbd@ehtrace.com)
* Tue Sep 19 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.171-1
- Ignore samba NT_STATUS_PROTOCOL_UNREACHABLE errors (dbd@ehtrace.com)
* Thu Jun 29 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.170-1
- Fix + discover NMVe (dbd@ehtrace.com)
* Thu Jun 29 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.169-1
- Better sensor output parsing (dbd@ehtrace.com)
* Thu Jun 29 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.168-1
- Drop ipmitool stderr and simplify output parsing (dbd@ehtrace.com)
* Thu Jun 29 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.167-1
- Fix fan detection on some BMC boards (dbd@ehtrace.com)
- Update ZFS template (dbd@ehtrace.com)
* Sat Mar 26 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.166-1
- Fix counting samba computers auth tries (dbd@ehtrace.com)
* Mon Mar 21 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.165-1
- last_seen might not be defined in check_unifi (dbd@ehtrace.com)
* Mon Mar 21 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.164-1
- Use JSON bool in unifi scripts (dbd@ehtrace.com)
* Mon Jan 24 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.163-1
- Fix check_zimbra_sudo (dbd@ehtrace.com)
* Fri Jan 21 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.162-1
- Add alloc_ct for LVM VG when missing (dbd@ehtrace.com)
* Fri Jan 21 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.161-1
- Fix Zimbra discovery and check scripts (dbd@ehtrace.com)
* Thu Jan 20 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.160-1
- Add a {#DOCKER_CONTAINER_STATUS} LLD macro (dbd@ehtrace.com)
* Thu Jan 20 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.159-1
- Update Docker template (dbd@ehtrace.com)
- Don't query state in docker discovery (dbd@ehtrace.com)
* Thu Jan 13 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.158-1
- Small fixes for Docker check script and template (dbd@ehtrace.com)
* Thu Jan 13 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.157-1
- Enhancements in the Docker template (dbd@ehtrace.com)
* Wed Jan 12 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.156-1
- Add Docker scripts (dbd@ehtrace.com)
* Tue Jan 11 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.155-1
- Release bump
* Thu Dec 16 2021 Daniel Berteaud <dani@lapiole.org> 0.2.154-1
- Fix zpool iostat as /proc/spl/kstat/zfs/pool/io doesn't exist anymore
(dani@lapiole.org)
- Add nodata triggers for Elasticsearch (daniel@firewall-services.com)
- Include Zabbix template for Elasticsearch (daniel@firewall-services.com)
* Tue Oct 19 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.153-1
- Tweak elasticsearch monitoring scripts (daniel@firewall-services.com)
* Mon Oct 18 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.152-1
- Small fixes in elasticsearch scripts (daniel@firewall-services.com)
* Mon Oct 18 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.151-1
- Add Elasticsearch monitoring scripts (daniel@firewall-services.com)
- Updates and fixes in Zabbix templates (daniel@firewall-services.com)
* Fri Jul 16 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.150-1
- Do not count Unconfigured(good) drives as an error (daniel@firewall-
services.com)
- Remove duplicated templates (daniel@firewall-services.com)
- Typo in template filename (daniel@firewall-services.com)
- Update and provide more templates (daniel@firewall-services.com)
- Update and add more Zabbix templates (daniel@firewall-services.com)
- Remove health and capacity sanoid checks from discovery (daniel@firewall-
services.com)
* Thu May 27 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.149-1
- Support Debian lib path for BackupPC (daniel@firewall-services.com)
* Wed Feb 17 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.148-1
- Fix zfs pool monitoring when a pool has errors (daniel@firewall-services.com)
- Alert only if not samba monitoring for 25min (instead of 15)
(daniel@firewall-services.com)
* Thu Jan 14 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.147-1
- [check_samba_dc_sudo] Fix typo with GPO listing (daniel@firewall-
services.com)
* Thu Jan 14 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.146-1
- [check_samba_sudo] Update default audit log file path, and drop errors from
samba-tool (daniel@firewall-services.com)
* Thu Jan 14 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.145-1
- Add perl(File::ReadBackwards) dependency (daniel@firewall-services.com)
* Thu Jan 14 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.144-1
- Optimize samba audit_auth log parsing by reading from the tail of the file
(daniel@firewall-services.com)
* Wed Jan 13 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.143-1
- Update BackupPC template (daniel@firewall-services.com)
* Wed Jan 13 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.142-1
- Modernize lvm monitoring scripts (daniel@firewall-services.com)
- Don't catch stderr for vgdisplay commands (daniel@firewall-services.com)
* Tue Jan 12 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.141-1
- Small fixes in check_samba_dc (skip unparsable logs, and handle message with
NT_STATUS_NO_SUCH_USER (daniel@firewall-services.com)
* Mon Jan 11 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.140-1
- Add general stats to BackupPC monitoring script (daniel@firewall-
services.com)
* Mon Jan 11 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.139-1
- Add OU discovery to samba monitoring (daniel@firewall-services.com)
* Mon Jan 11 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.138-1
- Add missing Samba application name for aggregated items (daniel@firewall-
services.com)
- Minor fixes for samba script and template (daniel@firewall-services.com)
* Sat Jan 09 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.137-1
- Add scripts and template to monitor Samba 4 DC (daniel@firewall-services.com)
* Fri Jan 08 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.136-1
- Add guest counter for PVE cluster and node (daniel@firewall-services.com)
* Thu Dec 17 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.135-1
- Update Template_App_MySQL (daniel@firewall-services.com)
- Update Template_App_ZFS (daniel@firewall-services.com)
* Tue Dec 01 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.134-1
- Possibility to check certificate for Unifi API (daniel@firewall-services.com)
* Sat Nov 07 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.133-1
- Add perl in BuildReq for el8 (daniel@firewall-services.com)
* Mon Oct 26 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.132-1
- Run upsc commands with 2>/dev/null (daniel@firewall-services.com)
- IPMI sensors can have / and - in their name (daniel@firewall-services.com)
* Thu Oct 22 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.131-1
- Don't return garbage in mpath discovery if command failed (daniel@firewall-
services.com)
* Tue Oct 20 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.130-1
- Add App_Multipath template (daniel@firewall-services.com)
- Add Linux_Server template (daniel@firewall-services.com)
* Tue Oct 20 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.129-1
- Add scripts to discover and check multipath devices (daniel@firewall-
services.com)
* Tue Sep 29 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.128-1
- Use MAC of device if no name is defined in Unifi device discovery
(daniel@firewall-services.com)
* Wed Sep 23 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.127-1
- Update scripts to work with ssacli (in addition to hpacucli) (daniel@firewall-
services.com)
* Fri Sep 04 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.126-1
- Add some compatibility for older MySQL servers (daniel@firewall-services.com)
* Tue Sep 01 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.125-1
- Allow empty --defaults opt for check_mysql_sudo (daniel@firewall-
services.com)
* Mon Aug 31 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.124-1
- Update Template_App_MySQL (daniel@firewall-services.com)
* Mon Aug 31 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.123-1
- check_mysql needs sudo permissions (daniel@firewall-services.com)
* Mon Aug 31 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.122-1
- Add MySQL monitoring script and template (daniel@firewall-services.com)
- Add Template_Vhost (daniel@firewall-services.com)
- Add templates for Windows (minimal and server) (daniel@firewall-services.com)
- Add /usr/local/BackupPC/lib as lib dir for BackupPC scripts (daniel@firewall-
services.com)
* Wed May 20 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.121-1
- Do not rely on distrib version to check if --output-format is needed for
check_pve_sudo (daniel@firewall-services.com)
* Fri Apr 03 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.120-1
- Fix mdadm when we have spares (daniel@firewall-services.com)
* Tue Mar 03 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.119-1
- Better detection of smart capable drives (daniel@firewall-services.com)
* Mon Mar 02 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.118-1
- Update Template_App_PVE_Cluster (daniel@firewall-services.com)
* Mon Mar 02 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.117-1
- Add basic SNMP templates (daniel@firewall-services.com)
- Add Template_App_Unifi (daniel@firewall-services.com)
- Add Template_OS_PfSense2 (daniel@firewall-services.com)
- Add Template_Ping (daniel@firewall-services.com)
- Fix cache when the same resource is queried with different options
(daniel@firewall-services.com)
- Remove debug statement in util_populate_pve_cache (daniel@firewall-
services.com)
* Mon Mar 02 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.116-1
- Default to accept cached value up to 5 min old for check_pve_sudo
(daniel@firewall-services.com)
* Mon Mar 02 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.115-1
- Add a script to populate check_pve_sudo cache (daniel@firewall-services.com)
- Enhance check_pve_sudo with a local cache support to speed up monitoring
(daniel@firewall-services.com)
* Tue Feb 25 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.114-1
- Automatic commit of package [zabbix-agent-addons] release [0.2.112-1].
(daniel@firewall-services.com)
- drop stderr for upsc commands (daniel@firewall-services.com)
* Tue Feb 25 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.113-1
- Skip Core X temp sensors (daniel@firewall-services.com)
* Wed Feb 19 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.112-1
- drop stderr for upsc commands (daniel@firewall-services.com)
* Mon Feb 17 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.111-1
- Update ZFS and BackupPC templates (daniel@firewall-services.com)
* Mon Feb 10 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.110-1
- Fix a typo in ZabbixSizeTooSmallFactor conf (daniel@firewall-services.com)
* Wed Feb 05 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.109-1
- Don't skip local node in PVE nodes discovery (daniel@firewall-services.com)
* Wed Jan 22 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.108-1
- Only skip RAID volumes checks when in HBA mode, not physical disks checks
(daniel@firewall-services.com)
- Declare variable in the correct scope for hba mode detection (daniel@firewall-
services.com)
- Handle megaraid controllers in HBA/JBOD mode (skip RAID checks)
(daniel@firewall-services.com)
- Use head -1 to be sure to get a single value for sensors (daniel@firewall-
services.com)
* Thu Jan 16 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.107-1
- Add Zabbix template for Squid (daniel@firewall-services.com)
* Thu Jan 16 2020 Daniel Berteaud <daniel@firewall-services.com> 0.2.106-1
- Remove uri from UserParam args for squid (daniel@firewall-services.com)
* Tue Dec 17 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.105-1
- Fix reading sizeNew from last backup (except when link hasn't ran yet)
(daniel@firewall-services.com)
* Sun Dec 15 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.104-1
- Disable vfs.dev.discovery in default conf (daniel@firewall-services.com)
* Sun Dec 15 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.103-1
- Set min backup size to 0 in template (daniel@firewall-services.com)
* Sun Dec 15 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.102-1
- Fix key name for enabled value (daniel@firewall-services.com)
* Sun Dec 15 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.101-1
- Init complete JSON objects with default values in check_backuppc_sudo
(daniel@firewall-services.com)
- Remove unused variables (daniel@firewall-services.com)
* Sun Dec 15 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.100-1
- Only subtract $new_size_of_last_full once (daniel@firewall-services.com)
* Fri Dec 13 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.99-1
- Fix when a host has a single backup with 0 new file size (daniel@firewall-
services.com)
* Fri Dec 13 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.98-1
- Fix backups total size computation when there's only one full
(daniel@firewall-services.com)
* Fri Dec 13 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.97-1
- Include Zabbix template to monitor BackupPC (daniel@firewall-services.com)
* Fri Dec 13 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.96-1
- Enhanced stats for BackupPC's entity (daniel@firewall-services.com)
* Wed Dec 11 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.95-1
- Wait for BackupPC_link to run before we take new sizes in our stat
(daniel@firewall-services.com)
* Wed Dec 11 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.94-1
- Fix BackupPC script when BackuPPC_link is waiting for the nightly cleanup to
finish (daniel@firewall-services.com)
* Fri Nov 29 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.93-1
- Don't use autoloader in our forked Linux::LVM (daniel@firewall-services.com)
- Don't requires Linux::LVM anymore (daniel@firewall-services.com)
- Replace Linux::LVM occurrences with Zabbix::Agent::Addons::LVM
(daniel@firewall-services.com)
- Bundle a fork of Linux::LVM with support for LVM thin pools (daniel@firewall-
services.com)
* Wed Nov 27 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.92-1
- Better compat with 4.4 vfs.dev.discovery (and use lsblk to get the list of
dev if available) (daniel@firewall-services.com)
* Tue Nov 26 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.91-1
- Add DEVNAME macro for vfs.dev.discovery to ease transition to 4.4
(daniel@firewall-services.com)
- Minor update in ZFS template (daniel@firewall-services.com)
* Sun Oct 20 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.90-1
- Fix some unifi stats for uap/usw in recent unifi versions (daniel@firewall-
services.com)
* Mon Oct 14 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.89-1
- Add Zabbix template for GlusterFS (daniel@firewall-services.com)
- Add Zabbix template for DRBD (daniel@firewall-services.com)
- Add Zabbix template for Proxmox Mail Gateway (daniel@firewall-services.com)
- Add template to monitor a PVE cluster (daniel@firewall-services.com)
- ZFS ARC low hit ratio for data and global are calculated for 1h
(daniel@firewall-services.com)
* Fri Oct 11 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.88-1
- Add Zabbix template for ZFS (daniel@firewall-services.com)
* Fri Oct 11 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.87-1
- Enhance ZFS monitoring scripts to retrieve ARC stats (daniel@firewall-
services.com)
- Send an empty data array when Zimbra is not installed (daniel@firewall-
services.com)
* Tue Oct 01 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.86-1
- Fix pve script when no net or disk stats are available (daniel@firewall-
services.com)
* Sat Sep 21 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.85-1
- Check $sanoidmon is defined before checking its value (daniel@firewall-
services.com)
* Sat Sep 21 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.84-1
- Fix var name in disco_zfs (daniel@firewall-services.com)
* Sat Sep 21 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.83-1
- Better sanoïd monitoring integration (daniel@firewall-services.com)
* Fri Sep 20 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.82-1
- Remove trailing x for compressratio with ZoL < 0.8 (daniel@firewall-
services.com)
* Fri Sep 20 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.81-1
- Revert to suffix conversion for ZFS error count (daniel@firewall-
services.com)
* Fri Sep 20 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.80-1
- Rewrite ZFS monitoring from scratch (daniel@firewall-services.com)
- Set info in the data element for Zimbra discovery (daniel@firewall-
services.com)
* Fri Sep 13 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.79-1
- Add simple Zabbix service status scripts (daniel@firewall-services.com)
* Tue Sep 03 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.77-1
- Skip self PVE node (daniel@firewall-services.com)
* Tue Jul 30 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.76-1
- Add support for some NVMe temp sensors Found on OVH's Advanced servers for
example (daniel@firewall-services.com)
- Fix when running on Debian buster Which fails with RC 25 when using
File::Spec devnull (daniel@firewall-services.com)
* Tue May 21 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.75-1
- Add basic scripts to monitor VDO volumes (daniel@firewall-services.com)
* Tue Apr 16 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.74-1
- Don't fail if Statistics::Descriptive doesn't support quantile
(daniel@firewall-services.com)
* Mon Apr 15 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.73-1
- More work on BackupPC's monitoring scripts (daniel@firewall-services.com)
* Thu Apr 04 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.72-1
- Fix reporting MaxXferError (daniel@firewall-services.com)
* Thu Apr 04 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.71-1
- Fix a typo in check_backuppc_sudo (daniel@firewall-services.com)
* Thu Apr 04 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.70-1
- Fix counting entity size (daniel@firewall-services.com)
* Thu Apr 04 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.69-1
- Don't count vm as an entity in BackupPC's entities discovery
(daniel@firewall-services.com)
* Thu Apr 04 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.68-1
- Update BackupPC's discovery and monitoring scripts (daniel@firewall-
services.com)
* Wed Apr 03 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.67-1
- Add last_errors in backuppc JSON info (daniel@firewall-services.com)
- Update conf for BackupPC (daniel@firewall-services.com)
* Wed Apr 03 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.66-1
- Remove crazy and useless regex to exclude hosts from BackupPC
(daniel@firewall-services.com)
* Wed Apr 03 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.65-1
- Enhance backuppc reporting script Including reporting the new file size, and
sending all the info at once in JSON format (daniel@firewall-services.com)
- Some coding style updates (daniel@firewall-services.com)
- More compact BPCSTATUS (1/0 instead of enabled/disabled) (daniel@firewall-
services.com)
* Wed Feb 20 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.64-1
- Also report the number in the deferred queue (daniel@firewall-services.com)
* Wed Feb 20 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.63-1
- Report number of email in the active and hold queues (daniel@firewall-
services.com)
* Sat Jan 19 2019 Daniel Berteaud <daniel@firewall-services.com> 0.2.62-1
- Add scripts to ping other hosts (daniel@firewall-services.com)
* Mon Dec 10 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.61-1
- Save cookies to a file so we don't have to login at every invocation GLPI
#34449 (daniel@firewall-services.com)
* Sun Dec 09 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.60-1
- Print ZBX_NOTSUPPORTED in case of API error Prevent tons of error messages in
Zabbix Server's logs (daniel@firewall-services.com)
* Sun Dec 09 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.59-1
- Fix ZBX_NOTSUPPORTED string in several scripts (daniel@firewall-services.com)
* Thu Nov 15 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.57-0.beta1
- Add enhanced squid monitoring support (daniel@firewall-services.com)
* Fri Nov 09 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.56-1
- Add simple script for nginx (similar httpd) (daniel@firewall-services.com)
* Fri Oct 26 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.55-1
- Fix PVE storage monitoring GLPI #33910 (daniel@firewall-services.com)
* Wed Oct 24 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.54-1
- Rework PMG monitoring scripts (daniel@firewall-services.com)
* Thu Oct 18 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.52-0.beta1
- Add very basic script for PMG monitoring (daniel@firewall-services.com)
* Tue Sep 18 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.51-1
- check_unifi: also output satisfaction for stations (daniel@firewall-
services.com)
* Mon Sep 17 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.50-1
- Fix comparison with uninitialized value in check_unifi (daniel@firewall-
services.com)
* Sat Sep 15 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.49-1
- Report number of unarchived alarms in check_unifi --unifi (daniel@firewall-
services.com)
* Sat Sep 15 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.48-1
- More fixes for AP monitoring in check_unifi (daniel@firewall-services.com)
* Sat Sep 15 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.47-1
- Several fixes in check_unifi (daniel@firewall-services.com)
* Fri Sep 14 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.46-1
- Enhance Unifi discovery and monitoring Adding support for station monitoring
(daniel@firewall-services.com)
* Thu Sep 13 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.45-0.beta2
- Fix check_unifi when value is defined but false (daniel@firewall-
services.com)
* Thu Sep 13 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.44-0.beta1
- Add scripts to monitor Unifi sites (daniel@firewall-services.com)
* Tue Aug 21 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.43-1
- Fix PVE scripts to Work with new pvesh version (daniel@firewall-services.com)
* Mon Jul 23 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.42-1
- Initialize an empty json object (daniel@firewall-services.com)
* Mon Jul 09 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.41-1
- Don't log sudo usage for Zabbix (daniel@firewall-services.com)
* Wed Jul 04 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.40-1
- Fix ZFS pool stats retrieval (daniel@firewall-services.com)
* Wed Jun 13 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.39-1
- Fix computing pool CPU usage in check_pve (daniel@firewall-services.com)
* Thu Jun 07 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.38-1
- Add global net and disk stats for the cluster in check_pve_sudo (daniel@firewall-services.com)
* Tue Jun 05 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.37-1
- Fix check_pve_sudo for single node monitoring (daniel@firewall-services.com)
* Tue Jun 05 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.36-1
- Remove redundant condition (daniel@firewall-services.com)
- Fix {#PVE_STOR_STATUS} macro (daniel@firewall-services.com)
- Only gather info about online nodes (daniel@firewall-services.com)
- Add some global cluster stats for PVE (daniel@firewall-services.com)
* Sun Jun 03 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.35-1
- Enhance PVE scripts and conf (daniel@firewall-services.com)
- Add basic scripts for PVE monitoring (daniel@firewall-services.com)
* Wed May 30 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.34-1
- Add stats for ZFS zpools (daniel@firewall-services.com)
* Tue May 29 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.33-1
- Ensure we always return a value for scan action status errors in check_zfs
(daniel@firewall-services.com)
* Tue May 29 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.32-1
- Handle situations where there's more than 1000 errors on an item in ZFS pools (daniel@firewall-services.com)
* Tue May 29 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.31-1
- Various enhancements in check_zfs (daniel@firewall-services.com)
- Fix macro name for zfs zpool discovery (daniel@firewall-services.com)
* Mon May 28 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.30-1
- Rename vfs.zfs.discovery to vfs.zfs.zpool.discovery, so later we'll be able to add other discovery rules for, say, datasets (daniel@firewall-services.com)
* Mon May 28 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.29-1
- Add scripts to discover and check ZFS zpools (daniel@firewall-services.com)
* Tue Mar 06 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.28-1
- Use "all" key to get all httpd stats in JSON format (daniel@firewall-
services.com)
* Tue Mar 06 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.27-1
- Respond with all stats as a JSON structure if no --what given (daniel@firewall-services.com)
* Tue Mar 06 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.26-1
- Support space in httpd status keys, so total_accesses and total_kbytes are available again (daniel@firewall-services.com)
* Tue Feb 06 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.25-1
- Fix mdadm RAID discovery condition (daniel@firewall-services.com)
* Tue Jan 09 2018 Daniel Berteaud <daniel@firewall-services.com> 0.2.24-1
- Don't WARN when device is being checked, only when it's rebuilding (daniel@firewall-services.com)
- Don't detect mdadm RAID in containers (daniel@firewall-services.com)
* Thu Nov 30 2017 Daniel Berteaud <daniel@firewall-services.com> 0.2.23-1
- Check line format in check_httpd instead of splitting errors, in case server-status redirects to somewhere else (daniel@firewall-services.com)
* Mon Nov 20 2017 Daniel Berteaud <daniel@firewall-services.com> 0.2.22-1
- Add script to monitor spamassassin's bayes database stats (daniel@firewall-services.com)
- Symlink releasers.conf to the global one (daniel@firewall-services.com)
* Tue Nov 14 2017 Daniel Berteaud <daniel@firewall-services.com> 0.2.21-1
- Remove the now non-existing CHANGELOG.git file (daniel@firewall-services.com)
* Tue Nov 14 2017 Daniel Berteaud <daniel@firewall-services.com> 0.2.20-1
- new package built with tito
* Thu Oct 12 2017 Daniel Berteaud <daniel@firewall-services.com> - 0.2.19-1
- Correctly handle Partially Degraded state
* Thu Aug 24 2017 Daniel Berteaud <daniel@firewall-services.com> - 0.2.18-1
- Only include SELinux policy module on el7
* Wed Aug 23 2017 Daniel Berteaud <daniel@firewall-services.com> - 0.2.17-1
- Add a SELinux policy module
* Wed Jun 14 2017 Daniel Berteaud <daniel@firewall-services.com> - 0.2.16-1
- Add kernel.openedfile UserParameter
* Thu Nov 24 2016 Daniel Berteaud <daniel@firewall-services.com> - 0.2.15-1
- Fix discovery scripts to always return a valid JSON value, even if empty
(sensors, lvm and nut_ups)
* Wed Nov 9 2016 Daniel Berteaud <daniel@firewall-services.com> - 0.2.14-1
- Add scripts to monitor apache httpd
* Sun Oct 30 2016 Daniel Berteaud <daniel@firewall-services.com> - 0.2.13-1
- Fix handling Airflow_Temperature_Cel label
* Fri Oct 28 2016 Daniel Berteaud <daniel@firewall-services.com> - 0.2.12-1
- Support Airflow_Temperature_Cel as temp label for smartctl based sensors
* Thu Sep 1 2016 Daniel Berteaud <daniel@firewall-services.com> - 0.2.11-1
- Add support for lm_sensors based sensors
* Thu Aug 25 2016 Daniel Berteaud <daniel@firewall-services.com> - 0.2.10-1
- Add monitoring item for squid's FD
* Wed Apr 6 2016 Daniel Berteaud <daniel@firewall-services.com> - 0.2.9-1
- Detect HDD temp sensors on sat+megaraid controllers
* Mon Mar 21 2016 Daniel B. <daniel@firewall-services.com> - 0.2.8-1
- Prevent running several gluster check commands at the same time
* Wed Sep 16 2015 Daniel B. <daniel@firewall-services.com> - 0.2.7-1
- Prevent GlusterFS heal false positive due to concurrent locking
* Mon Sep 14 2015 Daniel B. <daniel@firewall-services.com> - 0.2.6-1
- Add script to discover and monitor DRBD resources
* Wed Sep 9 2015 Daniel B. <daniel@firewall-services.com> - 0.2.5-1
- Support negative values for temp sensors
* Mon Jul 27 2015 Daniel B. <daniel@firewall-services.com> - 0.2.4-1
- Several enhancements in sensors ini generator
* Fri Jul 24 2015 Daniel B. <daniel@firewall-services.com> - 0.2.3-1
- Separate UPS default threshold
- Minor coding style updates
* Mon Jul 20 2015 Daniel B. <daniel@firewall-services.com> - 0.2.2-1
- Start working on perl libs to reduce code duplication
- Detect nut UPS temp sensors
* Fri Jul 10 2015 Daniel B. <daniel@firewall-services.com> - 0.2.1-1
- Fix GlusterFS brick count on 3.7.x
* Fri Jul 10 2015 Daniel B. <daniel@firewall-services.com> - 0.2.0-1
- Migrate sensors config to an ini format
- Add a generator script which detects available sensors
* Tue Jul 7 2015 Daniel B. <daniel@firewall-services.com> - 0.1.27-1
- Support different sensors types
* Thu Jun 4 2015 Daniel B. <daniel@firewall-services.com> - 0.1.26-1
- Alert if a self heal is in progress on a glusterfs vol
* Thu Jun 4 2015 Daniel B. <daniel@firewall-services.com> - 0.1.25-1
- Fix gluster checks if info heal-failed is not supported
* Wed Apr 15 2015 Daniel B. <daniel@firewall-services.com> - 0.1.24-1
- Report a warning if a RAID array is resyncing
* Tue Feb 10 2015 Daniel B. <daniel@firewall-services.com> - 0.1.23-1
- Fix disco_filesystem to output valid JSON
* Thu Jan 8 2015 Daniel B. <daniel@firewall-services.com> - 0.1.22-1
- Fix check_qmail_sudo
* Mon Jan 5 2015 Daniel B. <daniel@firewall-services.com> - 0.1.21-1
- Add scripts to check qmail (requires qmqtool)

View File

@ -1,24 +1,13 @@
# Discovery of configured host
# Key: backuppc.host.discovery
# Macro: {#BPCSTATUS}
# Filter regex: enabled|1 => true
# Filter regex: enabled => true
# Other available macros:
# {#BPCPERIOD}: Max age (in day) the oldest backup should be
# {#BPCHOST}: name of the backup host
UserParameter=backuppc.host.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_backuppc_sudo --hosts
UserParameter=backuppc.entity.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_backuppc_sudo --entities
UserParameter=backuppc.host.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_backuppc_sudo --base64 --regex=$1
# Item prototypes
# key: backuppc.host[{#BPCHOST}]
# or
# key: backuppc.entity[{#BPC_ENTITY}]
# Returns a JSON object, use dependent item to split it
UserParameter=backuppc.host[*],/usr/bin/sudo /var/lib/zabbix/bin/check_backuppc_sudo --host=$1
UserParameter=backuppc.entity[*],/usr/bin/sudo /var/lib/zabbix/bin/check_backuppc_sudo --entity=$1
# key: backuppc.host[{#BPCHOST}]
# or
# key: backuppc.general
# Same as entity checks but will process every host
# Returns a JSON object, use dependent item to split it
UserParameter=backuppc.general,/usr/bin/sudo /var/lib/zabbix/bin/check_backuppc_sudo --general
# key: backuppc.host.info[{#BPCHOST},item]
# Valid items are: errors, max_errors, size, duration, age, notify
UserParameter=backuppc.host.info[*],/usr/bin/sudo /var/lib/zabbix/bin/check_backuppc_sudo $1 $2
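# Example item keys (hypothetical host name, adjust to your setup):
#   backuppc.host[webserver1]          -> JSON stats for host "webserver1"
#   backuppc.host.info[webserver1,age] -> age of the last backup only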

View File

@ -1,3 +1,2 @@
# Discover block devices
# For Zabbix Agent < 4.4 you can uncomment this
#UserParameter=vfs.dev.discovery,/var/lib/zabbix/bin/disco_block_devices
UserParameter=vfs.dev.discovery,/var/lib/zabbix/bin/disco_block_devices

View File

@ -1,9 +0,0 @@
# Discover Docker items
# $1 can be containers, networks, volumes
UserParameter=container.docker.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_docker_sudo --what $1
# Type: Agent or Agent (active)
# container.docker.check.all[type,id]
# Where type is what to monitor (global, container, network, volume)
# id is the id of the item to monitor. Can be a name or an ID. For the global check, there's no ID
UserParameter=container.docker.check[*],/usr/bin/sudo /var/lib/zabbix/bin/check_docker_sudo --$1 $2
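# Example keys (hypothetical container name):
#   container.docker.check[global]           -> engine-wide stats
#   container.docker.check[container,nginx1] -> stats for container "nginx1"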

View File

@ -1,6 +0,0 @@
# Discover DRBD resources
UserParameter=drbd.resource.discovery[*],/var/lib/zabbix/bin/disco_drbd
# DRBD status
UserParameter=drbd.resource.status[*],/var/lib/zabbix/bin/check_drbd --resource=$1 --what=$2
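# Example key (hypothetical resource name): drbd.resource.status[r0,cstate]
# The second arg is one of cstate, dstate or role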

View File

@ -1,2 +0,0 @@
UserParameter=elasticsearch.discovery[*],/var/lib/zabbix/bin/disco_elasticsearch --url=$1 --user=$2 --pass=$3 --$4
UserParameter=elasticsearch.check[*],/var/lib/zabbix/bin/check_elasticsearch --url=$1 --user=$2 --pass=$3 --$4 $5
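# Example keys (hypothetical URL and credentials):
#   elasticsearch.check[http://localhost:9200,monitor,secret,cluster]
#   elasticsearch.check[http://localhost:9200,monitor,secret,index,logstash]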

View File

@ -1 +0,0 @@
UserParameter=kernel.openedfiles,cat /proc/sys/fs/file-nr | awk '{print $1}'
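# /proc/sys/fs/file-nr holds three fields: allocated file handles, free
# allocated handles and the maximum; only the first one is reported here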

View File

@ -1,6 +0,0 @@
# Discover if an httpd instance is running and has mod_status available on http://127.0.0.1/server-status
# Just return {#HTTPD_STATUS_AVAILABLE} => 'yes' if found
UserParameter=httpd.discovery,/var/lib/zabbix/bin/disco_httpd
# Stats to get
UserParameter=httpd[*],/var/lib/zabbix/bin/check_httpd --uri $1 --what $2
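# Example key: httpd[http://127.0.0.1/server-status,total_accesses]
# Run check_httpd --help to list the available keys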

View File

@ -1,6 +0,0 @@
# net.icmp takes two args. The host to ping (either an IP or a host name), and one of
# * all: returns info in JSON format
# * latency: returns latency in seconds. Floating number
# * respond: returns 0 if no response was received, 1 otherwise
# * loss: returns % of packet loss. Floating number
UserParameter=net.icmp[*],/usr/bin/sudo /var/lib/zabbix/bin/check_icmp_sudo $1 --info=$2
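# Example keys (hypothetical targets):
#   net.icmp[192.0.2.1,latency]
#   net.icmp[gw.example.com,loss]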

View File

@ -5,5 +5,4 @@ UserParameter=vfs.lvm.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_lvm_s
# Type: Agent or Agent (active)
# Key: vfs.lvm.lv[volume,key] where volume is the path to a logical volume and
# key can be size, allocation (for snapshots only) or status
UserParameter=vfs.lvm.lv[*],/usr/bin/sudo /var/lib/zabbix/bin/check_lvm_sudo --lv='$1' --what='$2'
UserParameter=vfs.lvm.vg[*],/usr/bin/sudo /var/lib/zabbix/bin/check_lvm_sudo --vg='$1' --what='$2'
UserParameter=vfs.lvm.lv[*],/usr/bin/sudo /var/lib/zabbix/bin/check_lvm_sudo $1 $2
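# Example keys (hypothetical volume and group names):
#   vfs.lvm.lv[/dev/vg0/root,size]
#   vfs.lvm.vg[vg0,vg_size]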

View File

@ -1,6 +0,0 @@
# Discover multipath devices
# $1 not used for now
UserParameter=vfs.mpath.discovery[*],sudo /var/lib/zabbix/bin/disco_mpath_sudo --$1
# Check multipath device
UserParameter=vfs.mpath.info[*],sudo /var/lib/zabbix/bin/check_mpath_sudo --mpath=$1
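# Example key (hypothetical device name): vfs.mpath.info[mpatha]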

View File

@ -1,7 +0,0 @@
# Type: Agent or Agent (active)
# You can also get all the stats in a json object by passing 'all',
# or retrieve the value of a specific key
# Run the script without --what to get a list of available keys
UserParameter=db.mysql[*],sudo /var/lib/zabbix/bin/check_mysql_sudo --host '$1' --port '$2' --user '$3' --password '$4' --defaults '$5' --what '$6'
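# Example key (hypothetical credentials, empty defaults file):
#   db.mysql[localhost,3306,monitor,secret,,Bytes_received]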

View File

@ -1,5 +0,0 @@
# Discover if an nginx instance is running and has status handler running on http://localhost/nginx-status
UserParameter=nginx.discovery,/var/lib/zabbix/bin/disco_nginx
# Stats to get
UserParameter=nginx.status[*],/var/lib/zabbix/bin/check_nginx --uri $1 --what $2
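# Example key: nginx.status[http://127.0.0.1/nginx-status,active_connections]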

View File

@ -7,7 +7,7 @@
# Units: %
# Multiplier: Do not use
# Store Value: As is
UserParameter=ups.load[*],upsc $1 ups.load 2>/dev/null
UserParameter=ups.load[*],upsc $1 ups.load
# Description: Nut UPS Battery Charge
# Type: Agent or Agent (active)
@ -16,23 +16,23 @@ UserParameter=ups.load[*],upsc $1 ups.load 2>/dev/null
# Units: %
# Multiplier: Do not use
# Store Value: As is
UserParameter=ups.battery.charge[*],upsc $1 battery.charge 2>/dev/null
UserParameter=ups.battery.charge[*],upsc $1 battery.charge
# Description: Nut UPS Status
# Type: Agent or Agent (active)
# Key: ups.status[UPS]
# Type of information: Character
# Show Value: As is (you can also define a dictionary, e.g. OL => On Line)
UserParameter=ups.status[*],upsc $1 ups.status 2>/dev/null
UserParameter=ups.status[*],upsc $1 ups.status
# Description: Nut UPS Model
# Type: Agent or Agent (active)
# Key: ups.model[UPS]
# Type of information: Text
UserParameter=ups.model[*],upsc $1 ups.model 2>/dev/null
UserParameter=ups.model[*],upsc $1 ups.model
# UPS discovery
UserParameter=hardware.ups.discovery[*],/var/lib/zabbix/bin/disco_nut_ups $1
# This is a new, more generic nut ups UserParameter
UserParameter=hardware.ups[*],upsc $1 $2 2>/dev/null
UserParameter=hardware.ups[*],upsc $1 $2
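# Example keys (hypothetical UPS name, as declared in NUT):
#   ups.status[myups]
#   hardware.ups[myups,battery.runtime]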

View File

@ -1,9 +0,0 @@
# Discover PMG items
# $1 can be domains
UserParameter=pmg.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_pmg_sudo --what $1
# Type: Agent or Agent (active)
# pmg.check.all[type,id]
# Where type is what to monitor (domain)
# id is the id of the item to monitor. Eg, the domain name
UserParameter=pmg.check.all[*],/usr/bin/sudo /var/lib/zabbix/bin/check_pmg_sudo --timespan=$1 --spamthres=$2 --$3 $4
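# Example key (hypothetical domain): pmg.check.all[900,5,domain,example.com]
# -> stats for example.com over the last 900 seconds with a spam threshold of 5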

View File

@ -1,10 +0,0 @@
# Discover PVE items
# $1 can be nodes, guests, storage or pools
UserParameter=pve.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_pve_sudo --what $1
# Type: Agent or Agent (active)
# pve.check.all[type,id]
# Where type is what to monitor (pool, storage, node, cluster, guest)
# id is the id of the item to monitor. For a guest, it's his numerical ID, for a storage
# a node or a pool, its name. For the cluster, there's no ID
UserParameter=pve.check.all[*],/usr/bin/sudo /var/lib/zabbix/bin/check_pve_sudo --cache=300 --$1 $2
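# Example keys (hypothetical node name and guest ID):
#   pve.check.all[cluster]   -> global cluster stats
#   pve.check.all[node,pve1] -> stats for node "pve1"
#   pve.check.all[guest,101] -> stats for guest 101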

View File

@ -1,13 +0,0 @@
# Description: SA Learn statistics
# Type: Agent or Agent (active)
# Key: mail.bayes
# Type of information: Text
# Units: N/A
# Custom multiplier: Do not use
# Store Value: As is
# This is a master item which must then be split with preprocessing (Zabbix >= 3.4.0)
UserParameter=mail.bayes.all,/usr/bin/sudo /var/lib/zabbix/bin/check_sa_learn_sudo
# Or you can use individual items. Valid args are ham, spam and token
UserParameter=mail.bayes[*],/usr/bin/sudo /var/lib/zabbix/bin/check_sa_learn_sudo --what=$1
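# Example key: mail.bayes[spam]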

View File

@ -1,7 +0,0 @@
UserParameter=samba_dc.discovery[*],sudo /var/lib/zabbix/bin/disco_samba_dc_sudo --what='$1'
# Create a text item with key samba_dc.info[300] and a check interval of 300
# Then use dependent item to get individual counters
UserParameter=samba_dc.info[*],sudo /var/lib/zabbix/bin/check_samba_dc_sudo --since='$1'
# Create a text item with key samba_dc.ou[{#SAMBA_OU}], then use dependent items with JSONPath to get individual info
UserParameter=samba_dc.ou[*],sudo /var/lib/zabbix/bin/check_samba_dc_sudo --ou='$1'

View File

@ -1,6 +1,6 @@
# Sensors discovery
# See /etc/zabbix/sensors.conf
UserParameter=hardware.sensor.discovery[*],/var/lib/zabbix/bin/disco_sensors --type=$1
UserParameter=hardware.sensor.discovery,/var/lib/zabbix/bin/disco_sensors
# Sensors
UserParameter=hardware.sensor[*],/usr/bin/sudo /var/lib/zabbix/bin/check_sensors_sudo $1 $2
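# Example keys (hypothetical sensor name, as declared in the sensors config):
#   hardware.sensor.discovery[temp]
#   hardware.sensor[cpu0]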

View File

@ -5,7 +5,3 @@ UserParameter=hardware.disk.smart.discovery,/usr/bin/sudo /var/lib/zabbix/bin/di
# Takes two args: the drives to check, and the value to get
# eg: hardware.disk.smart[/dev/sda,Reallocated_Sector_Ct]
UserParameter=hardware.disk.smart[*],/usr/bin/sudo /var/lib/zabbix/bin/check_smart_sudo $1 $2
# New smart disk discovery/monitoring
UserParameter=stor.dev.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_stor_dev_sudo
UserParameter=stor.dev.info[*],/usr/bin/sudo /var/lib/zabbix/bin/check_stor_dev_sudo --dev "$1" --type "$2"

View File

@ -1,16 +1,5 @@
# Squid
# Discover if a squid instance is running and has status handler running on http://127.0.0.1:3128/squid-internal-mgr/info
UserParameter=squid.discovery[*],/var/lib/zabbix/bin/disco_squid
# Stats to get
UserParameter=squid.check[*],/var/lib/zabbix/bin/check_squid --what $1
################################################
## LEGACY SQUID ITEMS
################################################
# Description: Squid Request Hit Ratio
# Type: Agent or Agent (active)
# Key: squid.request_hit_ratio
@ -61,33 +50,3 @@ UserParameter=squid.cache_size_disk,squidclient mgr:info|grep 'Storage Swap size
UserParameter=squid.cache_size_mem,squidclient mgr:info|grep 'Storage Mem size:' | awk '{print $4}'
# Description: Squid FD limit
# Type: Agent or Agent (active)
# Key: squid.fd_max
# Type of information: Numeric (integer 64bits)
# Units: N/A
# Custom multiplier: Do not use
# Store Value: As is
UserParameter=squid.max_fd,squidclient mgr:info | grep 'Maximum number of file descriptors' | cut -d':' -f2 | tr -d ' \t'
# Description: Squid reserved FD
# Type: Agent or Agent (active)
# Key: squid.fd_reserved
# Type of information: Numeric (integer 64bits)
# Units: N/A
# Custom multiplier: Do not use
# Store Value: As is
UserParameter=squid.reserved_fd,squidclient mgr:info | grep 'Reserved number of file descriptors' | cut -d':' -f2 | tr -d ' \t'
# Description: Squid available FD
# Type: Agent or Agent (active)
# Key: squid.fd_available
# Type of information: Numeric (integer 64bits)
# Units: N/A
# Custom multiplier: Do not use
# Store Value: As is
UserParameter=squid.available_fd,squidclient mgr:info | grep 'Available number of file descriptors' | cut -d':' -f2 | tr -d ' \t'

View File

@ -1,2 +0,0 @@
UserParameter=unifi.discovery[*],/var/lib/zabbix/bin/disco_unifi --url=$1 --user=$2 --pass=$3 --site=$4 --what=$5 --type=$6
UserParameter=unifi.check.all[*],/var/lib/zabbix/bin/check_unifi --url=$1 --user=$2 --pass=$3 --site=$4 --$5 $6
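# Example key (hypothetical controller URL, credentials and site name):
#   unifi.check.all[https://unifi.example.com:8443,monitor,secret,default,unifi]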

View File

@ -1,12 +0,0 @@
# Discover VDO volumes
# $1 not used for now
UserParameter=vfs.vdo.discovery[*],/var/lib/zabbix/bin/disco_vdo_sudo --what=$1
# Type: Agent or Agent (active)
# Key: vfs.vdo.vol[volume,item] where volume is the name of the volume to monitor
# item can be one of the valid keys (run manually without --value arg to see available keys)
UserParameter=vfs.vdo.vol[*],sudo /var/lib/zabbix/bin/check_vdo_sudo --volume=$1 --value=$2
# Type: Agent or Agent (active)
# You can also get all the info about a vdo volume at once, in JSON
UserParameter=vfs.vdo.vol.all[*],sudo /var/lib/zabbix/bin/check_vdo_sudo --volume=$1
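# Example key (hypothetical volume name): vfs.vdo.vol.all[vdo0]
# Run check_vdo_sudo --volume=vdo0 manually to list the valid item keys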

View File

@ -1,19 +0,0 @@
# Discover ZFS zpools
# $1 not used for now
UserParameter=vfs.zfs.discovery[*],/var/lib/zabbix/bin/disco_zfs --$1
# Type: Agent or Agent (active)
# You can also get all the info about a zpool at once, in JSON
UserParameter=vfs.zfs.zpool.info[*],/var/lib/zabbix/bin/check_zfs --zpool=$1
# Type: Agent or Agent (active)
# FS, Zvol or Snap info in JSON
UserParameter=vfs.zfs.dataset.info[*],/var/lib/zabbix/bin/check_zfs --dataset=$1
# Type: Agent or Agent (active)
# Sanoid snapshot monitoring
UserParameter=vfs.zfs.sanoid.check[*],/var/lib/zabbix/bin/check_zfs --sanoid=$1
# Type: Agent or Agent (active)
# ARC stats
UserParameter=vfs.zfs.stats.all[*],/var/lib/zabbix/bin/check_zfs --stats=$1
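# Example keys (hypothetical pool and dataset names):
#   vfs.zfs.zpool.info[tank]
#   vfs.zfs.dataset.info[tank/home]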

View File

@ -1,11 +0,0 @@
# Discovery of configured host
# Key: zimbra.discovery[services] or zimbra.discovery[servers]
# Macro: {#ZM_SERVICE}
# Other available macros:
UserParameter=zimbra.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_zimbra_sudo --$1
# Item prototypes
# key: zimbra.check[*]
# or
# Returns a JSON object, use dependent item to split it
UserParameter=zimbra.status[*],/usr/bin/sudo /var/lib/zabbix/bin/check_zimbra_sudo --status=$1
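# Example key: zimbra.discovery[services]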

View File

@ -1,27 +1,10 @@
#!/usr/bin/perl
use lib "/usr/share/BackupPC/lib";
use lib "/usr/share/backuppc/lib";
use lib "/usr/local/BackupPC/lib";
use BackupPC::Lib;
use BackupPC::CGI::Lib;
use POSIX;
use JSON;
use Getopt::Long;
use Statistics::Descriptive;
use Data::Dumper;
my $general = 0;
my $host = undef;
my $entity = undef;
my $pretty = 0;
GetOptions(
"general" => \$general,
"host=s" => \$host,
"entity=s" => \$entity,
"pretty" => \$pretty
);
# We need to switch to backuppc UID/GID
my $uid = getuid();
@ -30,157 +13,73 @@ my (undef,undef,$bkpuid,$bkpgid) = getpwnam('backuppc');
setuid($bkpuid) if ($uid ne $bkpuid);
setgid($bkpgid) if ($gid ne $bkpgid);
my $host = $ARGV[0];
my $what = $ARGV[1];
my $bpc = BackupPC::Lib->new();
my @backups = $bpc->BackupInfoRead($host);
my $mainConf = $bpc->ConfigDataRead();
my $json = {};
my $hostConf = $bpc->ConfigDataRead($host);
my $conf = { %$mainConf, %$hostConf };
my $fullCnt = $incrCnt = 0;
my $fullAge = $incrAge = $lastAge = -1;
my $lastXferErrors = 0;
my $maxErrors = 0;
if ( $host ) {
my $hostConf = $bpc->ConfigDataRead($host);
my $conf = { %$mainConf, %$hostConf };
$json = {
bkp => 0,
full_size => 0,
total_size => 0,
history_size => 0,
errors => 0,
new_size => 0,
new_size_avg => 0,
new_size_median => 0,
new_size_q1 => 0,
new_size_q3 => 0,
duration => 0,
comp_ratio => 0,
enabled => 0,
max_errors => 0,
age => 0,
type => 'none'
};
my $new_size_of_last_full = 0;
my @bpc_info = $bpc->BackupInfoRead($host);
my $sizes = new Statistics::Descriptive::Full;
if ( scalar( @bpc_info ) ){
foreach my $backup ( @bpc_info ) {
# Skip partial or active backups
next if ( $backup->{type} !~ m/^full|incr$/ );
if ( $backup->{type} eq "full" ) {
$json->{full_size} = $backup->{size};
$new_size_of_last_full = $backup->{sizeNew} if $backup->{num} > 0;
}
# Push all the sizes in our data set to compute avg sizes
# Exclude backup N°0 as it'll always have much more new data than normal backups
# Also exclude if size is not defined. This can happen in BackupPC v3 when
# the BackupPC_link process is waiting for the nightly to finish
$sizes->add_data($backup->{sizeNew}) if ( $backup->{num} > 0 && $backup->{sizeNew} );
$json->{bkp}++;
for ( my $i = 0 ; $i < @backups ; $i++ ) {
if ( $backups[$i]{type} eq "full" ) {
$fullCnt++;
if ( $fullAge < 0 || $backups[$i]{startTime} > $fullAge ) {
$fullAge = $backups[$i]{startTime};
$fullSize = $backups[$i]{size};
$fullDur = $backups[$i]{endTime} - $backups[$i]{startTime};
}
}
# Ignore the last backup if it's not full or incr (which means it's either partial or active)
my $i = ( $bpc_info[-1]->{type} =~ m/^full|incr$/ ) ? -1 : -2;
$json->{errors} = $bpc_info[$i]->{xferErrs};
$json->{duration} = $bpc_info[$i]->{endTime} - $bpc_info[$i]->{startTime};
$json->{type} = $bpc_info[$i]->{type};
$json->{new_size_avg} = int $sizes->mean;
$json->{new_size_median} = int $sizes->median;
# Some old versions of Statistics::Descriptive (eg, on el5) do not support quantile
$json->{new_size_q1} = eval { int $sizes->quantile(1) } || 0;
$json->{new_size_q3} = eval { int $sizes->quantile(3) } || 0;
$json->{enabled} = ( $conf->{BackupsDisable} > 0 ) ? 0 : 1;
$json->{total_size} = $sizes->sum + $json->{full_size} - $new_size_of_last_full;
$json->{history_size} = $json->{total_size} - $json->{full_size};
$json->{age} = time - $bpc_info[$i]->{startTime};
# For sizeNew, we need to wait for BackupPC_link to run, which can be delayed
# if a nightly process is running. In this case, use the stats from the previous backup
# Except when we have a single backup, in which case we read stats of this only backup
$i = ( $bpc_info[-1]->{sizeNew} || scalar @bpc_info == 1 ) ? -1 : -2;
$json->{new_size} = $bpc_info[$i]->{sizeNew};
$json->{comp_ratio} = ( $bpc_info[$i]->{sizeNew} > 0 ) ?
sprintf( "%.2f", 100 - ( $bpc_info[$i]->{sizeNewComp} * 100 / $bpc_info[$i]->{sizeNew} ) )
:
0;
$json->{max_errors} = $conf->{MaxXferError} || 0;
}
} elsif ( $entity or $general) {
$json = {
perf => 0,
size => 0,
full_size => 0,
total_size => 0,
history_size => 0,
hosts => 0,
bkp => 0,
ratio => 0
};
my $entity_total_new = 0;
my $entity_total_comp = 0;
foreach my $host ( keys %{ $bpc->HostInfoRead } ) {
next unless ($host =~ m/^(vm_)?\Q$entity\E_.*/ or $general);
my $full_size;
$json->{hosts}++;
my $hostConf = $bpc->ConfigDataRead($host);
my $conf = { %$mainConf, %$hostConf };
my $freq = ( $conf->{FullPeriod} > $conf->{IncrPeriod} ) ? $conf->{IncrPeriod} : $conf->{FullPeriod};
my $host_duration = 0;
my $host_bkp_num = 0;
my $host_new_size = 0;
my $host_full_size = 0;
my $host_new_size_of_last_full = 0;
foreach my $backup ( $bpc->BackupInfoRead( $host ) ) {
next if ( $backup->{type} !~ m/^full|incr$/ );
# Save the total size of the last full backup
if ( $backup->{type} eq 'full' ) {
$host_full_size = $backup->{size};
$host_new_size_of_last_full = $backup->{sizeNew} if $backup->{num} > 0;
}
$host_new_size += $backup->{sizeNew} if ( $backup->{num} > 0 && $backup->{sizeNew} );
$entity_total_new += $backup->{sizeNew};
$entity_total_comp += $backup->{sizeNewComp};
$host_duration += $backup->{endTime} - $backup->{startTime};
$host_bkp_num++;
$json->{bkp}++;
else {
$incrCnt++;
if ( $incrAge < 0 || $backups[$i]{startTime} > $incrAge ) {
$incrAge = $backups[$i]{startTime};
}
}
# Compute the average cost as the number of hours per day spent
# to backup this host
$json->{perf} += ( $host_bkp_num > 0 ) ? $host_duration / ( 3600 * $host_bkp_num * $freq ) : 0;
}
if ( $fullAge > $incrAge && $fullAge >= 0 ) {
$lastAge = $fullAge;
}
else {
$lastAge = $incrAge;
}
if ( $lastAge < 0 ) {
$lastAge = "";
}
else {
$lastAge = sprintf("%.1f", (time - $lastAge) / (24 * 3600));
}
$lastXferErrors = $backups[@backups-1]{xferErrs} if ( @backups );
$maxErrors = $conf->{MaxXferError} if (defined $conf->{MaxXferError});
# $json->{size} represents the total size used by this host.
# But we want to subtract the new size of the last full, as for this one we
# do not count sizeNew but size
my $host_total_size = $host_new_size + $host_full_size - $host_new_size_of_last_full;
# This one is kept just for compatibility. New Zabbix template will use total_size
$json->{size} += $host_total_size;
$json->{total_size} += $host_total_size;
$json->{full_size} += $host_full_size;
$json->{history_size} += $host_total_size - $host_full_size;
}
$json->{ratio} = ( $entity_total_new > 0 ) ? 100 - ( $entity_total_comp * 100 / $entity_total_new ) : 0;
# Round some values
foreach my $key ( qw(ratio perf) ) {
$json->{$key} = sprintf( "%.2f", $json->{$key} );
}
} else {
if ($what eq 'errors'){
print $lastXferErrors;
}
elsif ($what eq 'max_errors'){
print $maxErrors;
}
elsif ($what eq 'age'){
print $lastAge;
}
elsif ($what eq 'size'){
print $fullSize;
}
elsif ($what eq 'duration'){
print $fullDur;
}
elsif ($what eq 'notify'){
print $conf->{EMailNotifyOldBackupDays};
}
else{
print<<"EOF";
Usage: $0 --host=<host> or --entity=<entity>
Usage: $0 <host> [errors|age|size|duration]
EOF
}
print to_json( $json, { pretty => $pretty } );
exit(0);

View File

@ -1,90 +0,0 @@
#!/usr/bin/perl
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
use Date::Parse;
my $docker = which('docker');
my $json = {};
my $pretty = 0;
my ($global, $container, $network, $volume) = undef;
GetOptions(
'global' => \$global,
'container=s' => \$container,
'network=s' => \$network,
'volume=s' => \$volume,
'pretty' => \$pretty
);
# Sanitize args
if (defined $container and not $container =~ m/^[a-zA-Z0-9\-_]+/){
die "Invalid container ID $container\n";
} elsif (defined $network and not $network =~ m/^[a-zA-Z0-9\-_]+/){
die "Invalid network ID\n";
} elsif (defined $volume and not $volume =~ m/^[a-zA-Z0-9\-_]+/){
die "Invalid volume name\n";
}
# Default formatting
my $format = '{{ json . }}';
my $cmd;
if ($global){
$json->{info} = from_json(qx($docker info --format '$format'));
} elsif (defined $container) {
$json->{inspect} = from_json(qx($docker container inspect $container --format '$format'));
$json->{stats} = from_json(qx($docker container stats $container --format '$format' --no-stream));
# Remove percent sign so Zabbix can get raw value
foreach my $stat (qw(MemPerc CPUPerc)){
$json->{stats}->{$stat} =~ s/%$//;
}
# Extract mem usage vs mem limit, net in vs net out and blk read vs blk write
($json->{stats}->{MemCurrent}, $json->{stats}->{MemLimit}) = split(/\s*\/\s*/, $json->{stats}->{MemUsage});
($json->{stats}->{NetIOIn}, $json->{stats}->{NetIOOut}) = split(/\s*\/\s*/, $json->{stats}->{NetIO});
($json->{stats}->{BlockIORead}, $json->{stats}->{BlockIOWrite}) = split(/\s*\/\s*/, $json->{stats}->{BlockIO});
# Convert into Bytes
foreach my $stat (qw(MemCurrent MemLimit NetIOIn NetIOOut BlockIORead BlockIOWrite)){
$json->{stats}->{$stat} = convert_unit($json->{stats}->{$stat});
}
# Compute a useful Uptime from the StartedAt value
if ($json->{inspect}->{State}->{Running}){
$json->{stats}->{Uptime} = int(time() - str2time($json->{inspect}->{State}->{StartedAt}));
} else {
$json->{stats}->{Uptime} = 0;
}
} elsif (defined $network){
$json->{inspect} = from_json(qx($docker network inspect $network --format '$format'));
} elsif (defined $volume){
$json->{inspect} = from_json(qx($docker volume inspect $volume --format '$format'));
}
print to_json($json, { pretty => $pretty }) . "\n";
sub convert_unit {
my $val = shift;
my $suffix_multiplier = {
ki => 1024,
Ki => 1024,
Mi => 1024 * 1024,
Gi => 1024 * 1024 * 1024,
Ti => 1024 * 1024 * 1024 * 1024,
Pi => 1024 * 1024 * 1024 * 1024 * 1024,
k => 1000,
K => 1000,
M => 1000 * 1000,
G => 1000 * 1000 * 1000,
T => 1000 * 1000 * 1000 * 1000,
P => 1000 * 1000 * 1000 * 1000 * 1000
};
if ($val =~ m/^(\d+(\.\d+)?)(ki|Ki|Mi|Gi|Ti|Pi|k|K|M|G|T|P)?B/){
$val = int($1 * $suffix_multiplier->{$3}) if (defined $3 and defined $suffix_multiplier->{$3});
# Remove the Bytes suffix if remaining
$val =~ s/B$//;
}
return $val;
}
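# Usage sketch (hypothetical container name):
#   ./check_docker_sudo --container nginx1 --pretty
# prints the inspect and stats JSON, with sizes such as "512MiB"
# normalized to bytes (536870912) by convert_unit() above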

View File

@ -1,46 +0,0 @@
#!/usr/bin/perl -w
use strict;
use File::Which;
use Getopt::Long;
my $what = 'cstate';
my $resource = undef;
my @supported = qw(cstate dstate role);
GetOptions(
"what=s" => \$what,
"resource=s" => \$resource
);
my $drbdadm = which('drbdadm');
unless($drbdadm){
die 'ZBX_NOTSUPPORTED';
}
sub usage(){
my $supp = join('|', @supported);
print <<"EOF";
usage: $0 --what=[$supp] --resource=<drbd resource name>
EOF
}
unless ((grep { $_ eq $what } @supported) && $resource){
usage();
exit 1;
}
open RES, '-|', $drbdadm, $what, $resource or die "Can't open pipe: $!";
my $out = join "", <RES>;
close RES or die "An error occurred: $!\n";
chomp($out);
# We only want the state of the local node
if ($out =~ m{(.*)/.*}){
$out = $1;
}
print $out;
exit 0;

View File

@ -1,94 +0,0 @@
#!/usr/bin/perl
use warnings;
use strict;
use JSON;
use Getopt::Long;
use LWP::UserAgent;
use HTTP::Request::Common;
use URI;
use Data::Dumper;
my $user = undef;
my $pass = undef;
my $url = 'http://localhost:9200';
my $certcheck = 1;
my $cluster = 0;
my $node = undef;
my $index = undef;
my $pretty = 0;
my $json = {};
GetOptions (
'user:s' => \$user,
'password:s' => \$pass,
'url=s' => \$url,
'cert-check!' => \$certcheck,
'cluster' => \$cluster,
'node=s' => \$node,
'index=s' => \$index,
'pretty' => \$pretty
);
# If no option is given, default to fetch the cluster status
if (not $cluster and not defined $node and not defined $index){
$cluster = 1;
}
my $uri = URI->new($url);
if (not defined $uri){
die "COuldn't parse $url as a valid url\n";
}
# If connecting over http or if the host is localhost
# there's no need to check the certificate
if ($uri->scheme eq 'http' or $uri->host =~ m/^localhost|127\.0\.0/){
$certcheck = 0;
}
my $resp;
my $sslopts = {};
if (not $certcheck){
$sslopts = {
verify_hostname => 0,
SSL_verify_mode => 0
}
}
my $ua = LWP::UserAgent->new(
ssl_opts => $sslopts
);
$ua->env_proxy;
if ($cluster){
$json = make_request('/_cluster/stats');
} elsif (defined $node){
my $resp = make_request('/_nodes/' . $node)->{'nodes'};
# We can specify node by ID, name or IP
if (defined $resp->{$node}){
$json = $resp->{$node};
} else {
my $node_id = (keys %{$resp})[0];
$json = $resp->{$node_id};
}
} elsif (defined $index){
$json = make_request('/_cluster/health/' . $index . '?level=indices')->{'indices'}->{$index};
}
print to_json($json, { pretty => $pretty });
sub make_request {
my $path = shift;
my $req_url = $url . $path;
my $req = GET $req_url;
if (defined $user and $user ne '' and defined $pass and $pass ne ''){
$req->authorization_basic($user, $pass);
}
my $resp = $ua->request($req);
die "Request to $req_url failed : " . $resp->message . "\n" if $resp->is_error;
return from_json($resp->decoded_content);
}

View File

@ -10,16 +10,12 @@ my $peer = undef;
my $bricks = undef;
my $gluster = which('gluster');
my $lock = '/var/lock/gluster-zabbix.lock';
unless($gluster){
# Gluster is not installed, exit with an error
die "gluster command not found";
}
# Get an exclusive lock
open(LOCK, ">$lock") || die "Can't open $lock";
flock(LOCK, 2);
GetOptions(
"what=s" => \$what,
@ -44,14 +40,15 @@ sub gluster($){
my $code = 256;
my @result = ();
# Loop to run gluster cmd as it can fail if two run at the same time
for (my $i = 0; ($code != 0 && $i < 3); $i++){
open (RES, "$cmd 2>/dev/null |")
for (my $i = 0; ($code != 0 && $i < 10); $i++){
open (RES, "$cmd |")
|| die "error: Could not execute $cmd";
@result = <RES>;
close RES;
$code = $?;
sleep(1) unless ($code == 0);
}
die "error: Could not execute $cmd" unless ($code == 0);
return @result;
}
@ -62,85 +59,46 @@ if (($what eq 'volume' && !$volume) ||
}
if ($what eq 'volume'){
my $bricksfound = 0;
my @volinfo = gluster("$gluster vol status $volume");
unless (scalar @volinfo){
die "Error occurred while trying to get volume status for $volume";
}
my $bricksfound = 0;
my $status = 'OK';
foreach my $line (@volinfo){
# Check that all bricks are online
if ($line =~ m/^Brick\ ([\w\.]+:\/[\w\.\/]+)\s+\d+(\s+\d+)?\s+([A-Z])/){
if ($line =~ m/^Brick\ ([\w\.]+:\/[\w\.\/]+)\s+\d+\s+([A-Z])/){
$bricksfound++;
if ($3 ne 'Y') {
print "CRITICAL: brick status (reported $3 on $1)";
exit 1;
}
$status = "CRITICAL: brick status (reported $2 on $1)" if ($2 ne 'Y');
}
# Check the Self-Heal daemons are up and running
elsif ($line =~ m/^Self-heal\ Daemon\ on\ ([\w\.]+)\s+N\/A\s+([A-Z])/ && $2 ne 'Y'){
print "CRITICAL: self-heal daemon (reported $2 on $1)";
exit 1;
elsif ($line =~ m/^Self-heal\ Daemon\ on\ ([\w\.]+)\s+N\/A\s+([A-Z])/){
$status = "CRITICAL: self-heal daemon (reported $2 on $1)" if ($2 ne 'Y');
}
}
# Check the number of bricks is the one we expect
if ($bricks && $bricks != $bricksfound){
print "CRITICAL: bricks count mismatch (found $bricksfound while expecting $bricks)";
exit 1;
}
@volinfo = gluster("$gluster vol heal $volume info");
unless (scalar @volinfo){
die "Error occurred while trying to get volume heal info for $volume";
}
foreach my $line (@volinfo){
if ($line =~ m/^Number\ of\ entries:\s+(\d+)$/ && $1 gt 0){
# Lets check a second time to limit false positives
sleep 1;
@volinfo = gluster("$gluster vol heal $volume info");
unless (scalar @volinfo){
die "Error occurred while trying to get volume heal info for $volume";
}
foreach my $line (@volinfo){
if ($line =~ m/^Number\ of\ entries:\s+(\d+)$/ && $1 gt 0){
print "CRITICAL: self-heal in progress ($1)";
exit 1;
}
}
}
$status = "CRITICAL: bricks count mismatch (found $bricksfound while expecting $bricks)";
}
@volinfo = gluster("$gluster vol heal $volume info heal-failed");
# the heal-failed command isn't supported on all versions of GlusterFS
if (scalar @volinfo){
foreach my $line (@volinfo){
# Now, check we don't have any file which the Self-Heal daemon couldn't sync
if ($line =~ m/^Number\ of\ entries:\s+(\d+)$/ && $1 gt 0){
print "CRITICAL: self-heal error ($1)";
exit 1;
}
foreach my $line (@volinfo){
# Now, check we don't have any file which the Self-Heal daemon couldn't sync
if ($line =~ m/^Number\ of\ entries:\s+(\d+)$/){
$status = "CRITICAL: self-heal error ($1)" if ($1 gt 0);
}
}
@volinfo = gluster("$gluster vol heal $volume info split-brain");
unless (scalar @volinfo){
die "Error occurred while trying to get split-brain info for $volume";
}
foreach my $line (@volinfo){
# Now, check we don't have any file in a split-brain situation
if ($line =~ m/^Number\ of\ entries:\s+(\d+)$/ && $1 gt 0){
print "CRITICAL: split-bran ($1)";
exit 1;
if ($line =~ m/^Number\ of\ entries:\s+(\d+)$/){
$status = "CRITICAL: split-bran ($1)" if ($1 gt 0);
}
}
@volinfo = gluster("$gluster vol info $volume");
unless (scalar @volinfo){
die "Error occurred while trying to get volume info for $volume";
}
foreach my $line (@volinfo){
# Check the volume is started
if ($line =~ m/^Status:\s+(\w+)$/ && $1 ne 'Started'){
print 'CRITICAL: The volume is not started';
exit 1;
if ($line =~ m/^Status:\s+(\w+)$/){
$status = 'CRITICAL: The volume is not started' unless ($1 eq 'Started');
}
}
print 'OK';
print $status;
}
elsif ($what eq 'peer'){
my @peers = gluster("$gluster pool list");
@ -154,5 +112,3 @@ elsif ($what eq 'peer'){
print $status;
}
close(LOCK);
exit(0);

View File

@ -1,55 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use LWP::Simple;
use Getopt::Long;
use JSON;
my $uri = 'http://127.0.0.1/server-status';
my $what = 'all';
my $help = 0;
GetOptions(
"uri=s" => \$uri,
"what=s" => \$what,
"help" => \$help
);
my %res = ();
my $status = get($uri . '?auto');
unless ($status){
print 'ZBX_NOTSUPPORTED';
exit 1;
}
foreach my $line (split(/\n/, $status)){
next unless ($line =~ m/^(\w+(\s\w+)?):\s([\.\d]+)/);
my ($key, $val) = ($1,$3);
$key =~ s/\s/_/g;
$key = lc $key;
# Remove leading and trailing spaces
$val =~ s/^\s+|\s+$//g;
# Add 0 before the . when needed
$val =~ s/^(\.\d+)$/0$1/;
$res{$key} = $val;
}
if ($help){
print "Valid keys are:\n\n";
print "$_\n" for keys %res;
exit 0;
}
if ($what eq 'all'){
print to_json(\%res);
}
elsif (defined $res{$what}){
print $res{$what};
}
else{
print 'ZBX_NOTSUPPORTED';
}
exit 0;

View File

@ -1,45 +0,0 @@
#!/usr/bin/perl -w
use warnings;
use strict;
use File::Which;
use Getopt::Long;
use JSON;
my $fping = which('fping');
unless ($fping){
die "ZBX_NOTSUPPOTED\n";
}
my $info = 'all';
my $pretty = 0;
my @valid_info = qw(all respond latency loss);
my $host = $ARGV[0];
GetOptions(
'info=s' => \$info,
'pretty' => \$pretty
);
unless (grep { $info eq $_ } @valid_info){
die "Usage: $0 [--info=<respond|latency|loss>] host\n";
}
my $ping = qx($fping -c 5 -p 10 -q $host 2>&1);
# Output looks like 10.29.254.2 : xmt/rcv/%loss = 5/5/0%, min/avg/max = 1.42/1.65/1.90
if ($ping =~ m|^$host : xmt/rcv/%loss = 5/(\d)/(\d+(?:\.\d+)?)%(?:, min/avg/max = (?:\d+(?:\.\d+)?)/(\d+(\.\d+))/(?:\d+(?:\.\d+)?))?$|){
my $stat = {
respond => ($1 > 0) ? 1 : 0,
loss => $2 + 0,
latency => (defined $3) ? $3 / 1000 : 0
};
if ($info ne 'all'){
print $stat->{$info} . "\n";
} else {
print to_json($stat, { pretty => $pretty }) . "\n";
}
} else {
die "ZBX_NOTSUPPOTED\n";
}
exit 0;
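# Usage sketch (hypothetical target):
#   ./check_icmp_sudo 192.0.2.1 --info=latency
# prints the average round trip time in seconds, computed from the
# 5 fping probes sent above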

View File

@ -1,76 +1,49 @@
#!/usr/bin/perl -w
use Zabbix::Agent::Addons::LVM;
use Getopt::Long;
use JSON;
use Linux::LVM;
Zabbix::Agent::Addons::LVM->units(B);
Linux::LVM->units(B);
my $vg = undef;
my $lv = undef;
my $what = undef;
my $pretty = 0;
GetOptions(
'vg=s' => \$vg,
'lv=s' => \$lv,
'what:s' => \$what,
"pretty" => \$pretty
);
if (not defined $lv and not defined $vg){
$lv ||= $ARGV[0];
$what ||= $ARGV[1];
if (@ARGV < 2){
usage();
exit(1);
}
if (not defined $lv and not defined $vg){
usage();
exit 1;
}
my $vol = $ARGV[0];
my $what = $ARGV[1];
sub usage {
print<<"EOF";
Usage: $0 <logical volume> [size|allocation|allocation_pool_data|allocation_metadata|status]
$0 --lv=<logical volume>
$0 --lv=<logical volume> --what=<size|allocation|allocation_pool_data|allocation_metadata|status|etc.>
$0 --vg=<volume group>
$0 --vg=<volume group> --what=<alloc_pe_size|vg_size|etc.>
EOF
}
my $json;
if (defined $vg){
%{$json} = get_volume_group_information($vg);
# Depending on LVM version, alloc_ct might not be present
if (not defined $json->{alloc_ct}){
$json->{alloc_ct} = sprintf("%.1f", 100 * $json->{alloc_pe_size} / $json->{vg_size});
}
} elsif (defined $lv) {
%{$json} = get_lv_info($lv);
} else{
usage();
}
my %info = get_lv_info($vol);
# Normalize float values
foreach (qw(allocated_to_snapshot allocated_pool_data allocated_meta_data)){
$json->{$_} =~ s/,/./g if (defined $json->{$_});
if ($what eq 'size'){
print $info{size};
}
# Compat with older versions
my $old_keys = {
allocation => 'allocated_to_snapshot',
allocation_pool_data => 'allocated_pool_data',
allocation_metadata => 'allocated_meta_data'
};
if (defined $what && defined $old_keys->{$what}){
$what = $old_keys->{$what};
elsif ($what eq 'allocation'){
my $ret = (defined $info{allocated_to_snapshot}) ? $info{allocated_to_snapshot} : "ZBX_NOTSUPPORTED";
$ret =~ s/,/\./;
print $ret;
}
if (defined $what and $what ne ''){
print ((defined $json->{$what}) ? $json->{$what} : 'ZBX_NOTSUPPORTED');
} else {
print to_json($json, { pretty => $pretty });
elsif ($what eq 'allocation_pool_data'){
my $ret = (defined $info{allocated_pool_data}) ? $info{allocated_pool_data} : "ZBX_NOTSUPPORTED";
$ret =~ s/,/\./;
print $ret;
}
elsif ($what eq 'allocation_metadata'){
my $ret = (defined $info{allocated_meta_data}) ? $info{allocated_meta_data} : "ZBX_NOTSUPPORTED";
$ret =~ s/,/\./;
print $ret;
}
elsif ($what eq 'status'){
print $info{status};
}
else{
usage();
}
exit(0);

View File

@ -1,125 +0,0 @@
#!/usr/bin/perl
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
my $json = {};
my $mpath = undef;
my $help = 0;
my $pretty = 0;
GetOptions(
"mpath=s" => \$mpath,
"help" => \$help,
"pretty" => \$pretty
);
my $multipathd = which('multipathd');
if (not defined $multipathd){
print 'ZBX_NOTSUPPORTED';
exit 1;
}
if ($help or not defined $mpath){
print <<_EOF;
Usage : $0 --mpath=<name of the mpath device> [--pretty]
* --mpath : the name of the device to check
* --pretty : output pretty JSON, easier to read for humans
* --help : display this help
_EOF
exit 2;
}
$json = {
mpath => $mpath,
size => 0,
dm_st => 'unknown',
features => '',
failures => 0,
path_failures => 0,
paths_num_total => 0,
paths_num_ko => 0,
paths_num_active => 0,
paths_num_inactive => 0,
paths_details => [],
paths_with_issue => [],
errors => []
};
my @res = qx($multipathd show maps raw format "%n|%N|%S|%f|%t|%x|%0");
if ($? != 0){
push @{$json->{errors}}, "Failed to run multipathd show maps raw format";
}
foreach (@res){
chomp;
next if $_ !~ /^$mpath\|/;
(undef, $json->{paths_num_total}, $json->{size}, $json->{features},
$json->{dm_st}, $json->{failures}, $json->{path_failures}) = split(/\s*\|\s*/, $_);
# Cast to int
foreach (qw(failures path_failures paths_num_total)){
$json->{$_} = 0 + $json->{$_};
}
# Convert size to bytes
my $unit = chop $json->{size};
if ($unit eq 'K'){
$json->{size} *= 1024;
} elsif ($unit eq 'M'){
$json->{size} *= 1024 * 1024;
} elsif ($unit eq 'G'){
$json->{size} *= 1024 * 1024 * 1024;
} elsif ($unit eq 'T'){
$json->{size} *= 1024 * 1024 * 1024 * 1024;
} elsif ($unit eq 'P'){
$json->{size} *= 1024 * 1024 * 1024 * 1024 * 1024;
}
# No need to process the other mpath here
last;
}
# Now check status of every path
@res = qx($multipathd show paths format "%m|%d|%t|%o|%T|%0|%z");
if ($? != 0){
push @{$json->{errors}}, "Failed to run multipathd show paths format";
}
# Skip header line
shift @res;
foreach (@res){
chomp;
next if $_ !~ /^$mpath\|/;
my (undef, $dev, $dm_st, $dev_st, $chk_st, $failures, $serial) = split(/\s*\|\s*/, $_);
push @{$json->{paths_details}}, {
dev => $dev,
dm_st => $dm_st,
dev_st => $dev_st,
chk_st => $chk_st,
failures => $failures + 0,
serial => $serial
};
if ($dm_st eq 'active'){
$json->{paths_num_active} += 1;
if ($dev_st ne 'running'){
$json->{paths_num_ko} += 1;
push @{$json->{paths_with_issue}}, $dev;
push @{$json->{errors}}, "dev $dev is not running";
} elsif ($chk_st ne 'ready' or $failures > 0){
$json->{paths_num_ko} += 1;
push @{$json->{paths_with_issue}}, $dev;
push @{$json->{errors}}, "dev $dev is not active";
} else {
$json->{paths_num_ok} += 1;
}
} else {
$json->{paths_num_inactive} += 1;
}
}
# We want easy usage from zabbix, so turn those into strings
$json->{paths_with_issue} = join(',', @{$json->{paths_with_issue}});
$json->{errors} = join(',', @{$json->{errors}});
print to_json($json, { pretty => $pretty });
exit 0;

View File

@ -1,109 +0,0 @@
#!/usr/bin/perl
use strict;
use warnings;
use JSON;
use Data::Dumper;
use Getopt::Long;
my $what = 'all';
my $defaults = undef;
my $host = undef;
my $port = undef;
my $user = undef;
my $password = undef;
my $help = 0;
my $pretty = 0;
my $exit = 0;
my $json = {
zbx_error => "none"
};
GetOptions(
"what=s" => \$what,
"help" => \$help,
"defaults=s" => \$defaults,
"host=s" => \$host,
"port=s" => \$port,
"user=s" => \$user,
"password=s" => \$password,
"pretty" => \$pretty
);
# Basic input checks
if (defined $defaults and $defaults ne '' and not -e $defaults){
$json->{zbx_error} = "File $defaults doesn't exist";
$exit = 1;
} elsif (defined $host and $host ne '' and $host !~ m/^[\w\-\.]+$/){
$json->{zbx_error} = "Bad value for --host";
$exit = 1;
} elsif (defined $port and $port ne '' and ($port !~ m/^\d+$/ or $port < 1 or $port > 65535)){
$json->{zbx_error} = "Bad value for --port";
$exit = 1;
} elsif (defined $user and $user ne '' and $user !~ m/^[\w\-\.]+$/){
$json->{zbx_error} = "Bad value for --user";
$exit = 1;
} elsif (defined $password and $password ne '') {
# Just escape single quotes, as we will protect the password with single quotes
$password =~ s/'/\\'/g;
}
if ($help){
print <<_EOF;
Usage: $0 [--what=key] [--help] [--pretty]
* --what : if a key is given (eg --what=Bytes_received) will print only this value.
Else, all the stats are printed in a json format.
Run once without --what to get a list of available keys
* --help : print this help and exit
* --defaults : set the file from which mysql will read defaults
* --host : set the hostname to connect to
* --user : set the user to connect as
* --password : set the password to use
* --pretty : prints JSON in a pretty, human readable format. Has no use when --what is also given
_EOF
exit 0;
}
if ($exit eq 0){
my $opt = "";
$opt .= " --defaults-file=$defaults" if (defined $defaults and $defaults ne '');
$opt .= " --host=$host" if (defined $host and $host ne '');
$opt .= " --user=$user" if (defined $user and $user ne '');
$opt .= " --password='$password'" if (defined $password and $password ne '');
my @status = qx(mysql $opt --batch --execute 'show global status;' 2>&1);
if ($? != 0){
$exit = $?;
$json->{zbx_error} = join '', @status;
} else {
foreach (@status){
chomp;
my ($key, $val) = split(/\t/, $_);
$json->{$key} = $val;
}
# Some older MySQL do not have all the variables we might want
if (not defined $json->{Acl_users}){
$json->{Acl_users} = qx(mysql $opt --batch --skip-column-names --execute 'select count(user) from user;' mysql);
chomp $json->{Acl_users};
}
if (not defined $json->{Max_statement_time_exceeded} and defined $json->{Max_execution_time_exceeded}){
$json->{Max_statement_time_exceeded} = $json->{Max_execution_time_exceeded}
}
}
}
if ($what ne 'all' and defined $json->{$what}){
print $json->{$what} . "\n";
} else {
print to_json($json, { pretty => $pretty });
}
exit $exit;

View File

@ -1,56 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use LWP::Simple;
use Getopt::Long;
use JSON;
my $uri = 'http://127.0.0.1/nginx-status';
my $what = 'all';
my $help = 0;
my $pretty = 0;
GetOptions(
"uri=s" => \$uri,
"what=s" => \$what,
"help" => \$help,
"pretty" => \$pretty
);
my $res = {};
my $status = get($uri);
unless ($status){
print 'ZBX_NOTSUPPORTED';
exit 1;
}
foreach my $line (split(/\n/, $status)){
if ($line =~ m/^Active connections: (\d+)/){
$res->{active_connections} = $1;
} elsif ($line =~ m/\s*(\d+)\s+\d+\s+(\d+)/){
$res->{total_connections} = $1;
$res->{total_requests} = $2;
} elsif ($line =~ m/Waiting: (\d+)/){
$res->{keep_alive} = $1;
}
}
if ($help){
print "Valid keys are:\n\n";
print "$_\n" for keys %{$res};
exit 0;
}
if ($what eq 'all'){
print to_json($res, { pretty => $pretty });
}
elsif (defined $res->{$what}){
print $res->{$what};
}
else{
print 'ZBX_NOTSUPPORTED';
}
exit 0;

View File

@ -1,104 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use Data::Dumper;
use PMG::DBTools;
my $json = {
count_in => 0,
count_out => 0,
bytes_in => 0,
bytes_out => 0,
spam_in => 0,
spam_out => 0,
virus_in => 0,
virus_out => 0,
ptime_in => 0,
ptime_out => 0,
queue_hold => 0,
queue_active => 0,
queue_deferred => 0
};
my $pretty = 0;
my ($domain,$what) = undef;
my $timespan = 900;
my $spamthres = 5;
my $resp = undef;
GetOptions(
'domain=s' => \$domain,
'what=s' => \$what,
'timespan=i' => \$timespan,
'spamthres=i' => \$spamthres,
'pretty' => \$pretty
);
my $dbh = PMG::DBTools::open_ruledb;
my $since = time - $timespan;
my $query = "SELECT cstatistic.direction AS direction, cstatistic.bytes AS bytes, cstatistic.spamlevel AS spamlevel, " .
"cstatistic.virusinfo AS virus, cstatistic.ptime AS ptime, cstatistic.sender AS sender, creceivers.receiver " .
"AS receiver FROM cstatistic LEFT JOIN creceivers ON cstatistic.rid = creceivers.cstatistic_rid" .
" WHERE time > ?";
my $sth = $dbh->prepare($query);
$sth->execute($since);
while (my $res = $sth->fetchrow_hashref){
if (not $res->{direction}){
next if (defined $domain and $res->{sender} !~ m/.*\@$domain$/);
$json->{bytes_out} += $res->{bytes};
$json->{count_out} += 1;
$json->{ptime_out} += $res->{ptime};
$json->{spam_out} += 1 if ($res->{spamlevel} >= $spamthres);
$json->{virus_out} += 1 if (defined $res->{virus});
} else {
next if (defined $domain and $res->{receiver} !~ /.*\@$domain$/);
$json->{bytes_in} += $res->{bytes};
$json->{count_in} += 1;
$json->{ptime_in} += $res->{ptime};
$json->{spam_in} += 1 if ($res->{spamlevel} >= $spamthres);
$json->{virus_in} += 1 if (defined $res->{virus});
}
}
# Init to 0 if missing
$json->{$_} //= 0 foreach (qw/bytes_out count_out ptime_out spam_out virus_out
bytes_in count_in ptime_in spam_in virus_in/);
# Compute averages
$json->{ptime_in} = $json->{ptime_in} / $json->{count_in} / 1000 if ($json->{count_in} > 0);
$json->{ptime_out} = $json->{ptime_out} / $json->{count_out} / 1000 if ($json->{count_out} > 0);
# Now, only for general stats, count early rejects, and queue stats
if (not defined $domain){
$query = "SELECT SUM(rblcount) AS rbl, SUM(pregreetcount) AS pregreet FROM localstat WHERE mtime > ?";
$sth = $dbh->prepare($query);
$sth->execute($since);
my $res = $sth->fetchrow_hashref;
$json->{$_} = $res->{$_} foreach (qw/rbl pregreet/);
# Here we count email in the queue (active, deferred and hold queues)
foreach my $res (qx(postqueue -j)){
$res = from_json($res);
foreach (qw/hold active deferred/){
$json->{'queue_' . $_} += 1 if ($res->{queue_name} eq $_);
}
}
}
$json->{$_} //= 0 foreach (qw/rbl pregreet/);
if (defined $what and not defined $json->{$what}){
print 'ZBX_NOTSUPPORTED';
exit 0;
} elsif (defined $what){
$resp = $json->{$what}
} else {
$resp = $json;
}
$resp = (ref $resp eq 'HASH' or ref $resp eq 'ARRAY') ? to_json($resp, { pretty => $pretty }) : $resp;
print $resp . "\n";

View File

@ -1,182 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
use File::Path qw(make_path);
use File::Basename;
use Data::Dumper;
my $pvesh = which('pvesh');
my $json = {};
my $pretty = 0;
my ($cluster,$guest,$node,$storage,$pool) = undef;
# Max age of cached values
my $cache = 60;
my $cache_dir = '/tmp/zbx_pve_cache/';
GetOptions(
'cluster' => \$cluster,
'guest=i' => \$guest,
'node=s' => \$node,
'storage=s' => \$storage,
'pool=s' => \$pool,
'pretty' => \$pretty,
'cache=i' => \$cache,
'cache-dir=s' => \$cache_dir
);
# Old versions do not support, nor need, --output-format=json
my $pvesh_opt = (system("$pvesh ls / --output-format=json > /dev/null 2>&1") == 0) ? '--output-format=json' : '';
if ($cluster){
my $cluster = get_api_data('/cluster/status');
# Set default values so monitoring works for single node, without cluster setup
$json->{status} = {
all_online => 1,
quorate => 1,
nodes => 1,
name => 'default',
version => 1
};
# Set default global stats
$json->{network} = {
in => 0,
out => 0
};
$json->{disk} = {
read => 0,
write => 0
};
$json->{guests} = {
qemu => 0,
qemu_running => 0,
lxc => 0,
lxc_running => 0
};
my @nodes = ();
foreach my $item (@{$cluster}){
if ($item->{type} eq 'cluster'){
$json->{status}->{$_} = $item->{$_} foreach (qw(quorate nodes name version));
} elsif ($item->{type} eq 'node' and $item->{online}){
push @nodes, $item->{name};
} elsif ($item->{type} eq 'node'){
$json->{status}->{all_online} = 0;
}
}
foreach my $node (@nodes){
my $n = get_api_data("/nodes/$node/status");
# Here we gather (and sum) some info about individual nodes to get the total number of
# CPU, the amount of memory etc...
$json->{memory}->{$_} += $n->{memory}->{$_} foreach (qw(free total used));
$json->{ksm}->{$_} += $n->{ksm}->{$_} foreach (qw(shared));
$json->{cpuinfo}->{$_} += $n->{cpuinfo}->{$_} foreach (qw(cpus sockets));
$json->{loadavg}[$_] += $n->{loadavg}[$_] foreach (0..2);
}
# We want average load avg of the cluster, not the sum of individual loads
$json->{loadavg}[$_] = sprintf "%.2f", $json->{loadavg}[$_] / $json->{status}->{nodes} foreach (0..2);
my $guests = get_api_data('/cluster/resources', '--type=vm');
foreach my $guest (@{$guests}){
$json->{network}->{in} += $guest->{netin} || 0;
$json->{network}->{out} += $guest->{netout} || 0;
$json->{disk}->{read} += $guest->{diskread} || 0;
$json->{disk}->{write} += $guest->{diskwrite} || 0;
my $type = $guest->{type};
$json->{guests}->{$type}++;
$json->{guests}->{$type . '_running'}++ if ($guest->{status} eq 'running');
}
} elsif ($node){
$json->{guests} = {
qemu => 0,
qemu_running => 0,
lxc => 0,
lxc_running => 0
};
foreach my $item (qw(status version subscription)){
$json->{$item} = get_api_data("/nodes/$node/$item");
}
my $guests = get_api_data('/cluster/resources', '--type=vm');
foreach my $guest (@{$guests}){
next if ($guest->{node} ne $node);
my $type = $guest->{type};
$json->{guests}->{$type}++;
$json->{guests}->{$type . '_running'}++ if ($guest->{status} eq 'running');
}
} elsif ($guest){
my $guests = get_api_data('/cluster/resources', '--type=vm');
foreach my $g (@{$guests}){
if ($g->{vmid} eq $guest){
$json = $g;
last;
}
}
} elsif ($pool){
my $pool = get_api_data("/pools/$pool");
$json->{comment} = $pool->{comment};
foreach my $type (qw(qemu lxc)){
$json->{$_}->{$type} = 0 foreach (qw(guests templates));
}
foreach my $item (@{$pool->{members}}){
if ($item->{type} =~ m/^(qemu|lxc)$/ and !$item->{template}){
$json->{guests}->{$_} += $item->{$_} foreach (qw(maxcpu diskread diskwrite maxdisk mem maxmem netin netout));
$json->{guests}->{used_cpu} += $item->{cpu} * $item->{maxcpu};
$json->{guests}->{$item->{type}}++;
}
if ($item->{type} =~ m/^(qemu|lxc)$/ and $item->{template}){
$json->{templates}->{$_} += $item->{$_} foreach (qw(maxdisk));
$json->{templates}->{$item->{type}}++;
}
}
$json->{guests}->{$_} //= 0 foreach (qw(cpu maxcpu diskread diskwrite maxdisk mem maxmem netin netout));
$json->{templates}->{$_} //= 0 foreach (qw(maxdisk));
$json->{guests}->{cpu} = ($json->{guests}->{maxcpu} == 0) ? 0 : $json->{guests}->{used_cpu} / $json->{guests}->{maxcpu};
} elsif ($storage){
my $stores = get_api_data('/cluster/resources', '--type=storage');
foreach my $s (@{$stores}){
if ($s->{storage} eq $storage){
$json->{maxdisk} = $s->{maxdisk};
$json->{disk} = $s->{disk};
last;
}
}
} else{
print 'ZBX_NOTSUPPORTED';
exit 0;
}
print to_json($json, { pretty => $pretty }) . "\n";
# Helper which will either get data from
# the cache if it's fresh enough, or query the API
# and save the result in the cache for later
sub get_api_data {
my ($path, $query_opt) = @_;
$query_opt ||= '';
my $opt_filename = $query_opt;
$opt_filename =~ s/[\-=]/_/g;
my $res;
# Is the cache existing and fresh enough ?
if (-f $cache_dir . $path . $opt_filename and int((-M $cache_dir . $path . $opt_filename)*60*60*24) < $cache){
{
local $/; # Enable slurp
open my $fh, "<", $cache_dir . $path . $opt_filename;
$res = <$fh>;
close $fh;
}
} else {
$res = qx($pvesh get $path $query_opt $pvesh_opt 2>/dev/null);
# Save the result in the cache for later retrieval
eval{
my $dir = (fileparse($path))[1];
make_path($cache_dir . $dir, { chmod => 0700 });
};
open my $fh, ">", $cache_dir . $path . $opt_filename;
print $fh $res;
close $fh;
}
return from_json($res);
}

View File

@ -1,11 +1,10 @@
#!/usr/bin/perl -w
use strict;
use File::Which;
use Getopt::Long;
my $slot = '';
my $cli = which('hpacucli') || which('ssacli');
my $hpacucli = '/usr/sbin/hpacucli';
my @validchecks = qw/controller array logicaldrive physicaldrive/;
my $check = join ',', @validchecks;
@ -15,67 +14,67 @@ GetOptions ('slot=s' => \$slot,
);
sub usage(){
print <<"EOF";
print <<"EOF";
$0 --slot=<slot number> --check=<what to check>
* slot must be a number. You can find on which slot you have controllers with the command:
$cli controller all show status
$hpacucli controller all show status
* check is a comma-separated list of items to check. By default (without --check), everything is checked
Valid values are:
EOF
print "$_\n" foreach (@validchecks);
exit(0);
print "$_\n" foreach (@validchecks);
exit(0);
}
if ($slot !~ /^\d+$/){
usage();
usage();
}
unless (-x $cli){
die "Cannot run $cli\n";
unless (-x $hpacucli){
die "Cannot run $hpacucli\n";
}
my @checks = split /\s?,\s?/, $check;
foreach my $check (@checks){
usage() unless (grep { $_ eq $check} @validchecks);
usage() unless (grep { $_ eq $check} @validchecks);
}
foreach my $param (@checks){
# Global controller checks
if ($param eq 'controller'){
open CLI, "$cli controller slot=$slot show status|" ||
die "An error occured while running $cli: $!";
foreach my $line (<CLI>){
if ( $line =~ /Status\:\s*([\w\s]+)$/ ) {
my $res = $1;
chomp($res);
if ($res ne 'OK'){
print "CRITICAL: $line\n";
exit(0);
# Global controller checks
if ($param eq 'controller'){
open HPACUCLI, "$hpacucli controller slot=$slot show status|" ||
die "An error occured while running $hpacucli: $!";
foreach my $line (<HPACUCLI>){
if ( $line =~ /Status\:\s*([\w\s]+)$/ ) {
my $res = $1;
chomp($res);
if ($res ne 'OK'){
print "CRITICAL: $line\n";
exit(0);
}
}
}
}
close HPACUCLI;
}
close CLI;
}
else{
open CLI, "$cli controller slot=$slot $param all show status|" ||
die "An error occured while running $cli: $!";
foreach my $line (<CLI>){
if ( $line =~ /^\s*$param.*:\s*(\w+[\w\s]*)$/i ) {
my $res = $1;
chomp($res);
if ($res ne 'OK'){
print "CRITICAL: $line\n";
exit(0);
else{
open HPACUCLI, "$hpacucli controller slot=$slot $param all show status|" ||
die "An error occured while running $hpacucli: $!";
foreach my $line (<HPACUCLI>){
if ( $line =~ /^\s*$param.*:\s*(\w+[\w\s]*)$/i ) {
my $res = $1;
chomp($res);
if ($res ne 'OK'){
print "CRITICAL: $line\n";
exit(0);
}
}
}
}
close HPACUCLI;
}
close CLI;
}
}
print 'OK';
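A self-contained sketch of the status-line matching used above, run against hand-written sample lines (real hpacucli/ssacli output varies by controller model):
#!/usr/bin/perl
use strict;
use warnings;

# Illustrative status lines in the shape the CLI prints; only 'OK' passes.
my @sample = (
    '   Controller Status: OK',
    '   Cache Status: Temporarily Disabled',
);
foreach my $line (@sample) {
    if ($line =~ /Status\:\s*([\w\s]+)$/) {
        my $res = $1;
        chomp($res);
        print(($res ne 'OK') ? "CRITICAL: $line\n" : "OK: $line\n");
    }
}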

View File

@ -27,19 +27,20 @@ use Getopt::Long;
#
# unused devices: <none>
my $file = "/proc/mdstat";
my $file = "/proc/mdstat";
my $device = "all";
# Get command line options.
GetOptions (
'file=s' => \$file,
'device=s' => \$device,
'help' => sub { &usage() }
);
GetOptions ('file=s' => \$file,
'device=s' => \$device,
'help' => sub { &usage() } );
## Strip leading "/dev/" from --device in case it has been given
$device =~ s/^\/dev\///;
## Return codes for Nagios
my %ERRORS=('OK'=>0,'WARNING'=>1,'CRITICAL'=>2,'UNKNOWN'=>3,'DEPENDENT'=>4);
## This is a global return value - set to the worst result we get overall
my $retval = 0;
@ -49,86 +50,78 @@ my $result = 'OK';
open FILE, "< $file" or die "Can't open $file : $!";
while (<FILE>) {
next if ! /^(md\d+)+\s*:/;
next if $device ne "all" and $device ne $1;
my $dev = $1;
push @raids, $dev;
next if ! /^(md\d+)+\s*:/;
next if $device ne "all" and $device ne $1;
my $dev = $1;
push @raids, $dev;
my @array = split(/ /);
$devs_total{$dev} = 0;
my $devs_up = 0;
my $missing = 0;
for $_ (@array) {
$level{$dev} = $1 if /^(raid\d+)$/;
next if ! /(\w+)\[\d+\](\(.\))*/;
$devs_total{$dev}++;
if ($2 eq "(F)") {
$failed_devs{$dev} .= "$1,";
}
elsif ($2 eq "(S)") {
$spare_devs{$dev} .= "$1,";
$devs_up++;
}
else {
$active_devs{$dev} .= "$1,";
$devs_up++;
}
}
if (! defined($active_devs{$dev})){
$active_devs{$dev} = "none";
}
else {
$active_devs{$dev} =~ s/,$//;
}
if (! defined($spare_devs{$dev})){
$spare_devs{$dev} = "none";
}
else {
$spare_devs{$dev} =~ s/,$//;
}
if (! defined($failed_devs{$dev})){
$failed_devs{$dev} = "none";
}
else {
$failed_devs{$dev} =~ s/,$//;
}
my @array = split(/ /);
$devs_total{$dev} = 0;
my $devs_up = 0;
my $missing = 0;
for $_ (@array) {
$level{$dev} = $1 if /^(raid\d+)$/;
next if ! /(\w+)\[\d+\](\(.\))*/;
$devs_total{$dev}++;
if ($2 eq "(F)") {
$failed_devs{$dev} .= "$1,";
}
elsif ($2 eq "(S)") {
$spare_devs{$dev} .= "$1,";
}
else {
$active_devs{$dev} .= "$1,";
$devs_up++;
}
}
if (! defined($active_devs{$dev})) { $active_devs{$dev} = "none"; }
else { $active_devs{$dev} =~ s/,$//; }
if (! defined($spare_devs{$dev})) { $spare_devs{$dev} = "none"; }
else { $spare_devs{$dev} =~ s/,$//; }
if (! defined($failed_devs{$dev})) { $failed_devs{$dev} = "none"; }
else { $failed_devs{$dev} =~ s/,$//; }
$_ = <FILE>;
/(\d+)\ blocks\ (.*)(\[.*\])\s?$/;
$size{$dev} = int($1/1024);
#print "$3\n";
$missing = 1 if ($3 =~ m/_/);
if ($size{$dev} > 1024){
$size{$dev} = int($size{$dev}/1024)."GB";
}
else{
$size{$dev} .= "MB";
}
$_ = <FILE>;
if (($devs_total{$dev} > $devs_up) || ($failed_devs{$dev} ne "none") || (($missing) && (!/recovery/))) {
$status{$dev} = "Degraded";
$result = "CRITICAL";
$retval = $ERRORS{"CRITICAL"};
}
else {
$status{$dev} = "Optimal";
}
if (/recovery/){
$status{$dev} = "Rebuilding";
if ($result eq "OK"){
$result = "WARNING";
$retval = $ERRORS{"WARNING"};
}
}
$_ = <FILE>;
/(\d+)\ blocks\ (.*)(\[.*\])\s?$/;
$size{$dev} = int($1/1024);
$missing = 1 if ($3 =~ m/_/);
if ($size{$dev} > 1024){
$size{$dev} = int($size{$dev}/1024)."GB";
}
else{
$size{$dev} .= "MB";
}
$_ = <FILE>;
if (($devs_total{$dev} > $devs_up) || ($failed_devs{$dev} ne "none") || (($missing) && (!/recovery/))) {
$status{$dev} = "Degraded";
$result = "CRITICAL";
}
else {
$status{$dev} = "Optimal";
}
if (/(recovery|resync)\s*=\s*\d{1,2}(\.\d)?%/){
$status{$dev} = "Rebuilding";
if ($result eq "OK"){
$result = "WARNING";
}
}
}
print "$result: ";
foreach my $raid (@raids){
print "$raid:$level{$raid}:$devs_total{$raid} drives:$size{$raid}:$status{$raid} ";
print "$raid:$level{$raid}:$devs_total{$raid} drives:$size{$raid}:$status{$raid} ";
}
print "\n";
close FILE;
exit 0;
exit $retval;
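A minimal sketch of the member classification performed above, against a hand-written /proc/mdstat device line (the (F)/(S) suffixes mark failed and spare members):
#!/usr/bin/perl
use strict;
use warnings;

my $line = 'md0 : active raid1 sdb1[1] sda1[0](F) sdc1[2](S)';
my ($dev) = $line =~ /^(md\d+)\s*:/;
my (@active, @failed, @spare);
foreach my $tok (split / /, $line) {
    next unless $tok =~ /(\w+)\[\d+\](\(.\))?/;
    my ($member, $flag) = ($1, $2 // '');
    if    ($flag eq '(F)') { push @failed, $member }
    elsif ($flag eq '(S)') { push @spare,  $member }
    else                   { push @active, $member }
}
printf "%s active=%s failed=%s spare=%s\n",
    $dev, join(',', @active), join(',', @failed), join(',', @spare);
# md0 active=sdb1 failed=sda1 spare=sdc1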
# =====
sub usage(){
sub usage()
{
printf("
Check status of Linux SW RAID

View File

@ -124,66 +124,48 @@ ADAPTER: for ( my $adp = 0; $adp < $adapters; $adp++ ) {
}
}
close LDGETNUM;
open (CFGDSPLY, "$megacli -CfgDsply -a$adp -NoLog |")
|| die "error: Could not execute $megacli -CfgDsply -a$adp -NoLog";
my $hba = 0;
my $failgrouplist = 0;
while (<CFGDSPLY>) {
if ( m/Failed to get Disk Group list/ ) {
$failgrouplist = 1;
}
if ( m/Product Name:.*(JBOD|HBA)/ ) {
$hba = 1;
}
}
close CFGDSPLY;
# When controller is in HBA/JBOD mode, skip RAID volume checks
unless ($hba && $failgrouplist) {
LDISK: for ( my $ld = 0; $ld < $ldnum; $ld++ ) {
# Get info on this particular logical drive
open (LDINFO, "$megacli -LdInfo -L$ld -a$adp -NoLog |")
|| die "error: Could not execute $megacli -LdInfo -L$ld -a$adp -NoLog";
my ($size, $unit, $raidlevel, $ldpdcount, $spandepth, $state);
while (<LDINFO>) {
if ( m/^Size\s*:\s*(\d+(\.\d+)?)\s*(MB|GB|TB)/ ) {
$size = $1;
$unit = $3;
# Adjust MB to GB if that's what we got
if ( $unit eq 'MB' ) {
$size = sprintf( "%.0f", ($size / 1024) );
$unit= 'GB';
}
} elsif ( m/^State\s*:\s*(\w+(\s\w+)?)/ ) {
$state = $1;
if ( $state ne 'Optimal' ) {
$status = 'CRITICAL';
}
} elsif ( m/^Number Of Drives( per span)?\s*:\s*(\d+)/ ) {
$ldpdcount = $2;
} elsif ( m/^Span Depth\s*:\s*(\d+)/ ) {
$spandepth = $1;
$ldpdcount = $ldpdcount * $spandepth;
} elsif ( m/^RAID Level\s*:\s*Primary-(\d)/ ) {
$raidlevel = $1;
LDISK: for ( my $ld = 0; $ld < $ldnum; $ld++ ) {
# Get info on this particular logical drive
open (LDINFO, "$megacli -LdInfo -L$ld -a$adp -NoLog |")
|| die "error: Could not execute $megacli -LdInfo -L$ld -a$adp -NoLog";
my ($size, $unit, $raidlevel, $ldpdcount, $spandepth, $state);
while (<LDINFO>) {
if ( m/^Size\s*:\s*(\d+(\.\d+)?)\s*(MB|GB|TB)/ ) {
$size = $1;
$unit = $3;
# Adjust MB to GB if that's what we got
if ( $unit eq 'MB' ) {
$size = sprintf( "%.0f", ($size / 1024) );
$unit= 'GB';
}
} elsif ( m/^State\s*:\s*(\w+)/ ) {
$state = $1;
if ( $state ne 'Optimal' ) {
$status = 'CRITICAL';
}
} elsif ( m/^Number Of Drives( per span)?\s*:\s*(\d+)/ ) {
$ldpdcount = $2;
} elsif ( m/^Span Depth\s*:\s*(\d+)/ ) {
$spandepth = $1;
$ldpdcount = $ldpdcount * $spandepth;
} elsif ( m/^RAID Level\s*:\s*Primary-(\d)/ ) {
$raidlevel = $1;
}
close LDINFO;
$result .= "$adp:$ld:RAID-$raidlevel:$ldpdcount drives:$size$unit:$state ";
} #LDISK
}
close LDINFO;
}
$result .= "$adp:$ld:RAID-$raidlevel:$ldpdcount drives:$size$unit:$state ";
} #LDISK
close LDINFO;
# Get info on physical disks for this adapter
open (PDLIST, "$megacli -PdList -a$adp -NoLog |")
|| die "error: Could not execute $megacli -PdList -a$adp -NoLog";
my ($slotnumber,$fwstate,$fwinfo);
my ($slotnumber,$fwstate);
PDISKS: while (<PDLIST>) {
if ( m/Slot Number:\s*(\d+)/ ) {
$slotnumber = $1;
@ -198,15 +180,12 @@ ADAPTER: for ( my $adp = 0; $adp < $adapters; $adp++ ) {
}
} elsif ( m/Predictive Failure Count:\s*(\d+)/ ) {
$prederrors += $1;
} elsif ( m/Firmware state:\s*(\w+)(.*)/ ) {
} elsif ( m/Firmware state:\s*(\w+)/ ) {
$fwstate = $1;
$fwinfo = $2;
if ( $fwstate =~ m/Hotspare/ ) {
$hotsparecount++;
} elsif ( $fwstate =~ m/^Online/ ) {
# Do nothing
} elsif ( $fwstate =~ m/^Unconfigured/ && defined $fwinfo && $fwinfo =~ m/^\(good\)/) {
# Do nothing
} elsif ( $slotnumber != 255 ) {
$pdbad++;
$status = 'CRITICAL';
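A sketch of the firmware-state triage above, fed hand-written sample lines (not captured from a real controller); note how 'Unconfigured(good)' is deliberately treated as healthy:
#!/usr/bin/perl
use strict;
use warnings;

my @sample = (
    'Firmware state: Online, Spun Up',
    'Firmware state: Hotspare, Spun down',
    'Firmware state: Unconfigured(good), Spun down',
    'Firmware state: Failed',
);
foreach my $line (@sample) {
    next unless $line =~ m/Firmware state:\s*(\w+)(.*)/;
    my ($fwstate, $fwinfo) = ($1, $2);
    if    ($fwstate =~ m/Hotspare/) { print "$fwstate -> hotspare\n" }
    elsif ($fwstate =~ m/^Online/)  { print "$fwstate -> healthy\n" }
    elsif ($fwstate =~ m/^Unconfigured/ and $fwinfo =~ m/^\(good\)/) {
        print "$fwstate(good) -> healthy\n";
    }
    else { print "$fwstate -> bad, raise CRITICAL\n" }
}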

View File

@ -1,46 +0,0 @@
#!/usr/bin/perl
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Spec;
open STDERR, '>', File::Spec->devnull() or die "could not open STDERR: $!\n";
my $what = 'all';
GetOptions(
"what=s" => \$what
);
my @salearn = qx(sa-learn --dump magic);
my $data = {
spam => 0,
ham => 0,
token => 0
};
foreach my $line (@salearn){
if ($line =~ m/(\d+)\s*0\s*non-token\sdata:\snspam$/){
$data->{spam} = $1;
}
elsif ($line =~ m/(\d+)\s*0\s*non-token\sdata:\snham$/){
$data->{ham} = $1;
}
elsif ($line =~ m/(\d+)\s*0\s*non-token\sdata:\sntokens$/){
$data->{token} = $1;
}
}
if ($what eq 'spam'){
print $data->{spam} . "\n";
}
elsif ($what eq 'ham'){
print $data->{ham} . "\n";
}
elsif ($what eq 'token'){
print $data->{token} . "\n";
}
else{
print to_json($data);
}
exit(0);
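A standalone sketch of the parsing above, run against hand-made `sa-learn --dump magic` lines (the counts are invented; the third column carries the value, the trailing text names the counter):
#!/usr/bin/perl
use strict;
use warnings;

my @magic = (
    '0.000          0       1024          0  non-token data: nspam',
    '0.000          0       2048          0  non-token data: nham',
    '0.000          0      51200          0  non-token data: ntokens',
);
my %data = (spam => 0, ham => 0, token => 0);
foreach my $line (@magic) {
    $data{spam}  = $1 if $line =~ m/(\d+)\s*0\s*non-token\sdata:\snspam$/;
    $data{ham}   = $1 if $line =~ m/(\d+)\s*0\s*non-token\sdata:\snham$/;
    $data{token} = $1 if $line =~ m/(\d+)\s*0\s*non-token\sdata:\sntokens$/;
}
print "spam=$data{spam} ham=$data{ham} tokens=$data{token}\n";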

View File

@ -1,188 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
use Date::Parse;
use File::ReadBackwards;
use Data::Dumper;
my $samba_tool = which('samba-tool');
my $pdbedit = which('pdbedit');
# Number of seconds in the past to count authentications
my $since = 300;
my $pretty = 0;
my $general = 1;
my $ou = undef;
# This log is expected to be in JSON format. For example, in smb.conf :
# log level = 1 auth_audit:3 auth_json_audit:4@/var/log/samba/audit_auth.log
my $audit_auth_log = '/var/log/samba/json/auth.log';
if (not defined $samba_tool or not defined $pdbedit){
print 'ZBX_NOTSUPPORTED';
exit 1;
}
GetOptions(
'pretty' => \$pretty,
'since=i' => \$since,
'audit-auth-log=s' => \$audit_auth_log,
'general' => \$general,
'ou=s' => \$ou
);
if ($since !~ m/^\d+$/){
die "Invalid value for since\n";
}
my $json = {};
if (defined $ou){
$json = {
objects => 0
};
if ($ou !~ m/^(?<RDN>(?<Key>(?:\\[0-9A-Fa-f]{2}|\\\[^=\,\\]|[^=\,\\]+)+)\=(?<Value>(?:\\[0-9A-Fa-f]{2}|\\\[^=\,\\]|[^=\,\\]+)+))(?:\s*\,\s*(?<RDN>(?<Key>(?:\\[0-9A-Fa-f]{2}|\\\[^=\,\\]|[^=\,\\]+)+)\=(?<Value>(?:\\[0-9A-Fa-f]{2}|\\\[^=\,\\]|[^=\,\\]+)+)))*$/){
die "Invalid OU\n";
}
foreach (qx($samba_tool ou listobjects '$ou' 2>/dev/null)){
die "Error while counting objects of OU $ou\n" if ($? != 0);
chomp;
$json->{objects}++;
}
} elsif ($general){
$json = {
accounts => {
users => 0,
inactive_users => 0,
active_users => 0,
groups => 0,
computers => 0
},
replication => 'UNKNOWN',
processes => {
cldap_server => 0,
kccsrv => 0,
dreplsrv => 0,
ldap_server => 0,
kdc_server => 0,
dnsupdate => 0,
'notify-daemon' => 0,
rpc_server => 0,
winbind_server => 0,
nbt_server => 0,
dnssrv => 0,
samba => 0,
},
gpo => 0,
ou => 0,
activity => {
authentications => {
users => {
success => 0,
failure => 0
},
computers => {
success => 0,
failure => 0
}
},
authorizations => {
users => 0,
computers => 0
},
since => $since
}
};
# Get the number of users. pdbedit is preferred here because we can
# differentiate active and inactive users, which samba-tool can't do.
# While at it, also get the computers
foreach (qx($pdbedit -L -v)){
next unless (m/^Account Flags:\s+\[(.*)\]/);
my $flags = $1;
if ($flags =~ m/U/){
$json->{accounts}->{users}++;
if ($flags =~ m/D/){
$json->{accounts}->{inactive_users}++;
} else {
$json->{accounts}->{active_users}++;
}
} elsif ($flags =~ m/W/){
$json->{accounts}->{computers}++;
}
}
# Now count groups
foreach (qx($samba_tool group list 2>/dev/null)){
$json->{accounts}->{groups}++;
}
# Get replication status
# We want just a quick summary, so keep only the first line.
# Manual checks will be needed to get the details, but if this field doesn't contain [ALL GOOD],
# then something is probably wrong
$json->{replication} = (split(/\n/, qx($samba_tool drs showrepl --summary 2>/dev/null)))[0];
# Get the list of workers
foreach (qx($samba_tool processes 2>/dev/null)){
if (/^([^\(\s]+).+\d+$/){
$json->{processes}->{$1}++;
}
}
# Get the number of GPO
foreach (qx($samba_tool gpo listall 2>/dev/null)){
next unless (/^GPO/);
$json->{gpo}++;
}
# Get the number of OU
foreach (qx($samba_tool ou list 2>/dev/null)){
$json->{ou}++;
}
if (-e $audit_auth_log){
my $backward = File::ReadBackwards->new( $audit_auth_log ) or die "Couldn't open $audit_auth_log : $!\n";
while (defined (my $line = $backward->readline)){
my $event;
eval {
$event = from_json($line);
};
# Skip the log entry if we can't parse JSON
next if (not defined $event);
my $type = $event->{type};
# We're only interested in Authentication and Authorization messages
next if ($type ne 'Authentication' and $type ne 'Authorization');
# Parse the date in the timestamp field
my $timestamp = str2time($event->{timestamp});
# Skip if date couldn't be parsed
next if (not defined $timestamp);
# As we're reading in reverse order, once we reach an event prior to now - since we can stop, as all the others will be even earlier
last if (time() - $timestamp > $since);
my $subject;
if ($type eq 'Authentication'){
if ($event->{Authentication}->{status} eq 'NT_STATUS_PROTOCOL_UNREACHABLE'){
# Ignore NT_STATUS_PROTOCOL_UNREACHABLE events as they are harmless
next;
}
# Accounts ending with $ are for computers
$subject = (($event->{$type}->{mappedAccount} || $event->{$type}->{clientAccount} || '')=~ m/\$(\@.+)?$/) ? 'computers' : 'users';
if ($event->{Authentication}->{status} eq 'NT_STATUS_OK'){
$json->{activity}->{authentications}->{$subject}->{success}++;
} else {
$json->{activity}->{authentications}->{$subject}->{failure}++;
}
} else {
$subject = ($event->{$type}->{account} =~ m/\$$/) ? 'computers' : 'users';
$json->{activity}->{authorizations}->{$subject}++;
}
}
}
}
print to_json($json, { pretty => $pretty });
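The audit-log loop above reads the file newest-entry-first so it can stop at the cutoff instead of scanning the whole log. A minimal sketch of that pattern, assuming one leading ISO8601 timestamp per line (the path is illustrative):
#!/usr/bin/perl
use strict;
use warnings;
use Date::Parse;
use File::ReadBackwards;

my $log   = '/var/log/example.log';
my $since = 300; # seconds
my $bw = File::ReadBackwards->new($log) or die "Couldn't open $log: $!\n";
my $count = 0;
while (defined(my $line = $bw->readline)) {
    my ($stamp) = $line =~ /^(\S+)/;
    my $t = str2time($stamp // '');
    next unless defined $t;        # unparsable line, skip it
    last if time() - $t > $since;  # everything that follows is even older
    $count++;
}
print "$count events in the last ${since}s\n";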

View File

@ -1,32 +1,44 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use Config::Simple;
my $what = $ARGV[0];
my $thres = $ARGV[1];
unless (defined $what){
usage();
exit(1);
usage();
exit(1);
}
my $cfg = new Config::Simple;
$cfg->read('/etc/zabbix/sensors.ini');
open SENSORS, ('</etc/zabbix/sensors.conf') ||
die "Couldn't open /etc/zabbix/sensors.conf: $!\n";
my $ret = 'ZBX_NOTSUPPORTED';
my $sensor = $cfg->get_block($what);
if ($sensor && $sensor->{cmd}){
$ret = qx($sensor->{cmd});
}
foreach (<SENSORS>){
next unless (/^$what(\s+)?=(\s+)?(.*)!(\-?\d+)!(\-?\d+)$/);
my $cmd = $3;
my $high = $4;
my $low = $5;
if (!defined $thres){
$ret = `$cmd`;
}
elsif ($thres eq 'high'){
$ret = $high
}
elsif ($thres eq 'low'){
$ret = $low;
}
else {
usage();
exit(1);
}
}
print $ret;
exit(0);
sub usage {
print <<"EOF";
Usage: $0 sensor_name
Usage: $0 sensor_name [high|low]
EOF
}
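A standalone sketch of how the `<name>=<command>!<high>!<low>` format is split by the regex above (the entry is illustrative):
#!/usr/bin/perl
use strict;
use warnings;

my $what  = 'cpu0';
my $entry = "cpu0 = /usr/bin/sensors | grep temp1 | awk '{print \$2}'!65!55";
if ($entry =~ /^$what(\s+)?=(\s+)?(.*)!(\-?\d+)!(\-?\d+)$/) {
    my ($cmd, $high, $low) = ($3, $4, $5);
    print "cmd:  $cmd\n";   # the greedy (.*) keeps everything up to the last two !N fields
    print "high: $high\n";
    print "low:  $low\n";
}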

View File

@ -1,87 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use LWP::Simple;
use Getopt::Long;
use JSON;
my $uri = 'http://127.0.0.1:3128/squid-internal-mgr/info';
my $what = 'all';
my $help = 0;
my $pretty = 0;
GetOptions(
"uri=s" => \$uri,
"what=s" => \$what,
"help" => \$help,
"pretty" => \$pretty
);
my $res = {};
my $status = get($uri);
unless ($status){
print 'ZBX_NOTSUPPORTED';
exit 1;
}
foreach my $line (split(/\n/, $status)){
if ($line =~ m/^Squid Object Cache: Version (\d+(\.\d+)*)/){
$res->{version} = $1;
} elsif ($line =~ m/^\s+Number of clients accessing cache:\s+(\d+)/){
$res->{clients_num} = $1 * 1;
} elsif ($line =~ m/^\s+Number of HTTP requests received:\s+(\d+)/){
$res->{requests} = $1 * 1;
} elsif ($line =~ m/^\s+Hits as % of all requests:\s+5min:\s+(\d+(\.\d+)?)%,/){
$res->{hits_req_percent} = $1 * 1;
} elsif ($line =~ m/^\s+Hits as % of bytes sent:\s+5min:\s+(\d+(\.\d+)?)%,/){
$res->{hits_bytes_percent} = $1 * 1;
} elsif ($line =~ m/^\s+Memory hits as % of hit requests:\s+5min:\s+(\d+(\.\d+)?)%,/){
$res->{mem_hits_req_percent} = $1 * 1;
} elsif ($line =~ m/^\s+Disk hits as % of hit requests:\s+5min:\s+(\d+(\.\d+)?)%,/){
$res->{disk_hits_req_percent} = $1 * 1;
} elsif ($line =~ m/^\s+Storage Swap size:\s+(\d+)\sKB/){
$res->{stor_swap_size} = $1 * 1024;
} elsif ($line =~ m/^\s+Storage Swap capacity:\s+(\d+(\.\d+)?)% used, (\d+(\.\d+)?)% free/){
($res->{stor_swap_used_percent}, $res->{stor_swap_free_percent}) = ($1 * 1, $3 * 1);
} elsif ($line =~ m/^\s+Storage Mem size:\s+(\d+)\sKB/){
$res->{stor_mem_size} = $1 * 1024;
} elsif ($line =~ m/^\s+Storage Mem capacity:\s+(\d+(\.\d+)?)% used, (\d+(\.\d+)?)% free/){
($res->{stor_mem_used_percent}, $res->{stor_mem_free_percent}) = ($1 * 1, $3 * 1);
} elsif ($line =~ m/^\s+Mean Object Size:\s+(\d+(\.\d+)?)\sKB/){
$res->{mean_object_size} = int($1 * 1024);
} elsif ($line =~ m/^\s+CPU Time:\s+(\d+(\.\d+)?)\sseconds/){
$res->{cpu_time} = $1 * 1;
} elsif ($line =~ m/^\s+CPU Usage:\s+(\d+(\.\d+)?)%/){
$res->{cpu_usage} = $1 * 1;
} elsif ($line =~ m/^\s+CPU Usage, 5 minute avg:\s+(\d+(\.\d+)?)%/){
$res->{cpu_usage_avg_5min} = $1 * 1;
} elsif ($line =~ m/^\s+Maximum number of file descriptors:\s+(\d+)/){
$res->{fd_max} = $1 * 1;
} elsif ($line =~ m/^\s+Number of file desc currently in use:\s+(\d+)/){
$res->{fd_used} = $1 * 1;
}
}
if (defined $res->{fd_max} and defined $res->{fd_used}){
$res->{fd_used_percent} = int($res->{fd_used} * 100/ $res->{fd_max});
}
if ($help){
print "Valid keys are:\n\n";
print "$_\n" for keys %{$res};
exit 0;
}
if ($what eq 'all'){
print to_json($res, { pretty => $pretty });
}
elsif (defined $res->{$what}){
print $res->{$what};
}
else{
print 'ZBX_NOTSUPPORTED';
}
exit 0;
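A detail worth keeping from the script above is the recurring `* 1`: regex captures are strings, and numifying them first makes to_json emit numbers Zabbix can graph. A tiny sketch:
#!/usr/bin/perl
use strict;
use warnings;
use JSON;

my $capture = "42";              # what a regex capture looks like
print to_json({ as_string => $capture, as_number => $capture * 1 }), "\n";
# e.g. {"as_string":"42","as_number":42} (key order varies)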

View File

@ -1,120 +0,0 @@
#!/usr/bin/perl
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
my $dev = undef;
my $type = 'auto';
my $what = 'json';
my $pretty = 0;
GetOptions(
'device=s' => \$dev,
'type=s' => \$type,
'what=s' => \$what,
'pretty' => \$pretty
);
if (not defined $dev or $dev !~ m|^/dev/\w+(/\w+)*$|){
print "Invalid --device\n";
exit 1;
} elsif ($what !~ m/^\w+$/){
print "Invalid --what\n";
exit 1;
} elsif ($type !~ m/^\w+\+*\w+(,\w+)*$/){
print "Invalid --type\n";
exit 1;
}
my $json = {
temperature_celsius => 25,
power_on_hours => 0,
power_cycle_count => 0,
reallocated_sector_count => 0,
current_pending_sector => 0,
offline_uncorrectable => 0,
percent_lifetime_remain => 100,
firmware_version => 0
};
my $smartctl = which('smartctl');
sub print_out {
if ($what eq 'json'){
print to_json($json, { pretty => $pretty });
exit 0;
} elsif (defined $json->{$what}){
print $json->{$what} . "\n";
exit 0;
} else {
print "ZBX_NOTSUPPORTED\n";
exit 1;
}
}
sub get_smart_attr {
my $smart = shift;
my $attr = shift;
if (defined $smart->{ata_smart_attributes}->{table}){
foreach (@{$smart->{ata_smart_attributes}->{table}}){
if ($_->{name} eq $attr){
return $_;
}
}
}
return undef;
}
if (not defined $smartctl){
$what = 'error';
print_out();
}
my $data = from_json(qx($smartctl -a $dev -d $type --json=c));
if (defined $data->{temperature}->{current}){
$json->{temperature_celsius} = $data->{temperature}->{current};
}
if (defined $data->{power_on_time}->{hours}){
$json->{power_on_hours} = $data->{power_on_time}->{hours};
}
if (defined $data->{power_cycle_count}){
$json->{power_cycle_count} = $data->{power_cycle_count};
}
if (defined $data->{firmware_version}){
$json->{firmware_version} = $data->{firmware_version};
}
my ($pending, $realloc, $offline, $remain);
if ($pending = get_smart_attr($data, 'Current_Pending_Sector')){
$json->{current_pending_sector} = $pending->{raw}->{value};
}
if ($realloc = get_smart_attr($data, 'Reallocated_Sector_Ct') || get_smart_attr($data, 'Reallocated_Event_Count')){
$json->{reallocated_sector_count} = $realloc->{raw}->{value};
} elsif (defined $data->{nvme_smart_health_information_log}->{media_errors}){
# NVMe can report media errors, so report them as reallocated sectors
$json->{reallocated_sector_count} = $data->{nvme_smart_health_information_log}->{media_errors};
}
if ($offline = get_smart_attr($data, 'Offline_Uncorrectable')){
$json->{offline_uncorrectable} = $offline->{raw}->{value};
}
if ($remain = get_smart_attr($data, 'Percent_Lifetime_Remain')){
$json->{percent_lifetime_remain} = $remain->{value};
} elsif ($remain = get_smart_attr($data, 'SSD_Life_Left')){
$json->{percent_lifetime_remain} = $remain->{raw}->{value};
} elsif ($remain = get_smart_attr($data, 'Wear_Leveling_Count')){
$json->{percent_lifetime_remain} = $remain->{value};
} elsif (defined $data->{nvme_smart_health_information_log}->{percentage_used}){
# NVMe sometimes reports the estimated life used instead of the life remaining
$json->{percent_lifetime_remain} = 100 - $data->{nvme_smart_health_information_log}->{percentage_used};
}
print_out();
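A minimal illustration of the get_smart_attr lookup above, against a hand-built structure shaped like smartctl's --json output (the attribute values are invented):
#!/usr/bin/perl
use strict;
use warnings;

my $smart = {
    ata_smart_attributes => {
        table => [
            { name => 'Reallocated_Sector_Ct',  value => 100, raw => { value => 0 } },
            { name => 'Current_Pending_Sector', value => 100, raw => { value => 2 } },
        ],
    },
};
sub get_smart_attr {
    my ($data, $attr) = @_;
    foreach (@{ $data->{ata_smart_attributes}->{table} // [] }) {
        return $_ if $_->{name} eq $attr;
    }
    return undef;
}
my $pending = get_smart_attr($smart, 'Current_Pending_Sector');
print 'pending sectors (raw): ', $pending->{raw}->{value}, "\n";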

View File

@ -1,262 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use LWP::UserAgent;
use HTTP::Cookies;
use URI;
use Data::Dumper;
umask 077;
my $user = 'zabbix';
my $pass = 'secret';
my $site = 'default';
my $url = 'https://localhost:8443';
my $certcheck = 1;
my $unifi;
my $dev;
my $station;
my $net;
my $wlan;
my $pretty = 0;
my $json = {};
my $site_id;
GetOptions (
'user=s' => \$user,
'password|p=s' => \$pass,
'site=s' => \$site,
'url=s' => \$url,
'cert-check!' => \$certcheck,
'unifi' => \$unifi,
'dev=s' => \$dev,
'station=s' => \$station,
'net=s' => \$net,
'wlan=s' => \$wlan,
'pretty' => \$pretty
);
# If connecting to localhost, no need to check certificate
my $uri = URI->new($url);
if ($uri->host =~ m/^localhost|127\.0\.0/){
$certcheck = 0;
}
my @radio_proto = qw/a b g na ng ac/;
my $resp;
my $username = $ENV{LOGNAME} || $ENV{USER} || getpwuid($<);
my $cj = HTTP::Cookies->new(
file => "/tmp/.unifi_$username.txt",
autosave => 1,
ignore_discard => 1
);
my $sslopts = {};
if (not $certcheck){
$sslopts = { verify_hostname => 0, SSL_verify_mode => 0 }
}
my $ua = LWP::UserAgent->new(
ssl_opts => $sslopts,
cookie_jar => $cj
);
# Check if we need to login
$resp = $ua->get($url . '/api/self/sites');
if ($resp->is_error){
# Log into the API
$resp = $ua->post(
$url . '/api/login',
Content => to_json({ username => $user, password => $pass }),
Content_Type => 'application/json;charset=UTF-8'
);
die "Login failed: " . $resp->message . "\n" if $resp->is_error;
$resp = $ua->get($url . '/api/self/sites');
die $resp->message . "\n" if $resp->is_error;
}
# Now, we need to get the site ID
foreach (@{from_json($resp->decoded_content)->{data}}){
if ($_->{name} eq $site || $_->{desc} eq $site){
$site_id = $_->{_id};
# If site is referenced by description, translate it to name
$site = $_->{name} if ($_->{name} ne $site);
last;
}
}
die "Site $site not found\n" unless ($site_id);
# Global info about the instance of Unifi
if ($unifi){
$resp = $ua->get($url . '/api/s/' . $site . '/stat/health');
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
if ($entry->{subsystem} eq 'wlan'){
$json->{wireless_clients} = $entry->{num_user};
$json->{wireless_guests} = $entry->{num_guest};
} elsif ($entry->{subsystem} eq 'lan'){
$json->{wired_clients} = $entry->{num_user};
$json->{wired_guests} = $entry->{num_guest};
}
foreach (qw/adopted pending disabled/){
$json->{'dev_' . $_} += $entry->{'num_' . $_} if (defined $entry->{'num_' . $_});
}
foreach (qw/num_ap num_sw num_gw/){
$json->{$_} += $entry->{$_} if ($entry->{$_});
}
}
$json->{$_} ||= 0 foreach (qw/wireless_clients wireless_guests
wired_clients wired_guests dev_adopted
dev_pending dev_disabled num_ap num_sw
num_gw/);
$resp = $ua->get($url . '/api/s/' . $site . '/stat/sysinfo');
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
$json->{$_} = from_json($resp->decoded_content)->{data}->[0]->{$_}
foreach (qw/version build update_available/);
# Get unarchived alarms
$resp = $ua->post($url . '/api/s/' . $site . '/stat/alarm',
Content => to_json({ archived => 'false' }),
Content_Type => 'application/json;charset=UTF-8'
);
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
$json->{alarm} = scalar @{from_json($resp->decoded_content)->{data}};
} elsif ($dev) {
# Dev is identified by MAC
$resp = $ua->get($url . '/api/s/' . $site . '/stat/device/' . $dev);
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
my $obj = from_json($resp->decoded_content)->{data}->[0];
foreach (qw/sys_stats locating serial name num_sta user-num_sta
guest-num_sta inform_url version model state type
cfgversion adopted avg_client_signal/){
$json->{$_} = $obj->{$_} if (defined $obj->{$_});
}
# Convert last seen into a relative time
$json->{last_seen} = (defined $obj->{last_seen}) ? time - $obj->{last_seen} : time;
# Add some more info in sys_stats
$json->{sys_stats}->{$_} = $obj->{'system-stats'}->{$_} foreach (qw/cpu mem uptime/);
# If this is an ap
if ($obj->{type} eq 'uap'){
foreach (qw/guest-rx_packets guest-tx_packets guest-rx_bytes
guest-tx_bytes user-rx_packets user-tx_packets
user-rx_bytes user-tx_bytes rx_packets tx_packets
rx_bytes tx_bytes rx_errors tx_errors
rx_dropped tx_dropped/){
$json->{net_stats}->{$_} = $obj->{stat}->{ap}->{$_} if (defined $obj->{stat}->{ap}->{$_});
}
# Count the number of SSID served
$json->{num_wlan} = scalar @{$obj->{radio_table}};
$resp = $ua->get($url . '/api/s/' . $site . '/stat/sta');
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
foreach my $proto (@radio_proto){
$json->{$_ . $proto} = 0 foreach (qw/num_sta_ avg_rx_rate_ avg_tx_rate_/);
}
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
next if (not $entry->{ap_mac} or $entry->{ap_mac} ne $dev or $entry->{is_wired} == JSON::true);
foreach (@radio_proto){
if ($entry->{radio_proto} eq $_){
$json->{'num_sta_' . $_}++;
$json->{'avg_rx_rate_' . $_} += $entry->{rx_rate};
$json->{'avg_tx_rate_' . $_} += $entry->{tx_rate};
}
}
$json->{$_} += $entry->{$_} foreach (qw/rx_bytes tx_bytes rx_packets tx_packets/);
$json->{'avg_' . $_} += $entry->{$_} foreach (qw/satisfaction tx_power signal noise/);
}
# Now let's compute average values
$json->{'avg_' . $_} = ($json->{num_sta} == 0) ? undef : $json->{'avg_' . $_} / $json->{num_sta}
foreach (qw/satisfaction tx_power signal noise/);
foreach my $proto (@radio_proto){
$json->{'avg_' . $_ . '_rate_' . $proto} = ($json->{'num_sta_' . $proto} == 0) ?
undef : $json->{'avg_' . $_ . '_rate_' . $proto} / $json->{'num_sta_' . $proto}
foreach (qw/tx rx/);
}
} elsif ($obj->{type} eq 'usw'){
foreach (qw/rx_packets tx_packets
rx_bytes tx_bytes rx_errors tx_errors
rx_dropped tx_dropped/){
$json->{net_stats}->{$_} = $obj->{stat}->{sw}->{$_} if (defined $obj->{stat}->{sw}->{$_});
}
}
} elsif ($station) {
# Client is identified by MAC
$resp = $ua->get($url . '/api/s/' . $site . '/stat/sta/' . $station);
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
my $obj = from_json($resp->decoded_content)->{data}->[0];
my @client_base = qw/rx_packets tx_packets rx_bytes tx_bytes hostname last_seen ip authorized oui is_guest/;
foreach (@client_base){
$json->{$_} = $obj->{$_} || 0;
}
# Convert last_seen to relative
$json->{last_seen} = (defined $obj->{last_seen}) ? time - $obj->{last_seen} : time;
# For wireless stations, we gather some more info
if ($obj->{is_wired} == JSON::false){
my @client_wireless = qw/rx_rate tx_rate essid ap_mac tx_power radio_proto signal noise satisfaction/;
foreach (@client_wireless){
$json->{$_} = $obj->{$_} || 0;
}
# We have the MAC of the AP, lets try to find the name of this AP
$resp = $ua->get($url . '/api/s/' . $site . '/stat/device/' . $json->{ap_mac});
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
$json->{ap} = from_json($resp->decoded_content)->{data}->[0]->{name};
}
} elsif ($wlan) {
# Wlan is identified by ID
$resp = $ua->get($url . '/api/s/' . $site . '/rest/wlanconf/' . $wlan);
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
my $obj = from_json($resp->decoded_content)->{data}->[0];
foreach (qw/name security mac_filter_policy vlan/){
$json->{$_} = $obj->{$_};
}
# For boolean, we need to convert
foreach (qw/enabled is_guest mac_filter_enabled vlan_enabled/){
$json->{$_} = (defined $obj->{$_} and $obj->{$_} == JSON::PP::true) ? 1 : 0;
}
# Now, we need to count stations for each SSID
$resp = $ua->get($url . '/api/s/' . $site . '/stat/sta');
die "ZBX_NOTSUPPORTED\n" if $resp->is_error;
# Set default values to 0
$json->{num_sta} = 0;
$json->{'num_sta_' . $_} = 0 foreach (@radio_proto);
$json->{$_} = 0 foreach (qw/rx_bytes tx_bytes rx_packets tx_packets/);
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
next if (not $entry->{essid} or $entry->{essid} ne $json->{name} or $entry->{is_wired} == JSON::PP::true);
$json->{num_sta}++;
foreach (@radio_proto){
if ($entry->{radio_proto} eq $_){
$json->{'num_sta_' . $_}++;
$json->{'avg_rx_rate_' . $_} += $entry->{rx_rate};
$json->{'avg_tx_rate_' . $_} += $entry->{tx_rate};
}
}
$json->{$_} += $entry->{$_} foreach (qw/rx_bytes tx_bytes rx_packets tx_packets/);
$json->{'avg_' . $_} += $entry->{$_} foreach (qw/satisfaction tx_power signal noise/);
}
# Now let's compute average values
$json->{'avg_' . $_} = ($json->{num_sta} == 0) ? undef : $json->{'avg_' . $_} / $json->{num_sta}
foreach (qw/satisfaction tx_power signal noise/);
foreach my $proto (@radio_proto){
$json->{'avg_' . $_ . '_rate_' . $proto} = ($json->{'num_sta_' . $proto} == 0) ?
undef : $json->{'avg_' . $_ . '_rate_' . $proto} / $json->{'num_sta_' . $proto}
foreach (qw/tx rx/);
}
}
print to_json($json, { pretty => $pretty });
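The cookie jar is what lets repeated agent invocations reuse a controller session instead of logging in every time. A minimal sketch of that part alone (URL and cookie path are illustrative):
#!/usr/bin/perl
use strict;
use warnings;
use LWP::UserAgent;
use HTTP::Cookies;

my $cj = HTTP::Cookies->new(
    file           => '/tmp/.example_cookies.txt',
    autosave       => 1,   # persist cookies for the next invocation
    ignore_discard => 1,   # keep session cookies too
);
my $ua = LWP::UserAgent->new(cookie_jar => $cj);
my $resp = $ua->get('https://localhost:8443/api/self/sites');
print $resp->is_error ? "session expired, log in again\n" : "session still valid\n";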

View File

@ -1,45 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
my $vdostats = which('vdostats');
my $json = {};
my $pretty = 0;
my $volume = undef;
my $val = undef;
GetOptions(
'volume=s' => \$volume,
'value=s' => \$val,
'pretty' => \$pretty
);
if ($volume) {
if ($volume !~ m/^\w+$/){
die "Invalide volume name\n";
}
foreach my $line (qx($vdostats --all $volume)){
if ($line =~ m/^\s+([^:]+)\s+:\s+([\d\w]+)/){
my ($key,$val) = ($1,$2);
# Cleanup key
$key =~ s/\s+$//;
$key =~ s/\s+/_/g;
$key =~ s/\(|\)//g;
$key =~ s/%/pct/g;
$json->{lc $key} = $val;
}
}
} else {
print 'ZBX_NOTSUPPORTED';
exit 0;
}
if (defined $val) {
print $json->{$val} // 'ZBX_NOTSUPPORTED';
} else {
print to_json($json, { pretty => $pretty });
}
print "\n";

View File

@ -1,182 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use File::Which;
use Getopt::Long;
my $json = {};
my $pool = undef;
my $dataset = undef;
my $sanoidmon = undef;
my $stats = undef;
my $pretty = 0;
GetOptions(
"zpool|pool=s" => \$pool,
"dataset=s" => \$dataset,
"sanoid=s" => \$sanoidmon,
"stats=s" => \$stats,
"pretty" => \$pretty
);
my $zpool = which('zpool');
my $zfs = which('zfs');
my $sanoid = which('sanoid');
if (not $zpool or not $zfs){
print 'ZBX_NOTSUPPORTED';
exit 0;
}
if (defined $sanoidmon and not $sanoid){
die 'ZBX_NOTSUPPORTED';
}
if (defined $sanoidmon and not grep { $_ eq $sanoidmon } qw(snapshot capacity health)){
die 'ZBX_NOTSUPPORTED';
}
if (not $pool and not $dataset and not $sanoidmon and not $stats){
print <<_EOF;
Usage:
$0 [--zpool=<name>|--dataset=<fs zvol or snap>|--sanoid=<snapshot|capacity|health>]
_EOF
exit 1;
}
# Value map. For Zabbix, we want 0 instead of none
# We also prefer on/off represented as 1/0 as it's more efficient
my $map = {
18446744073709551615 => 0, # See https://github.com/zfsonlinux/zfs/issues/9306
none => 0,
on => 1,
off => 0
};
if ($pool){
foreach (qx($zpool get all $pool -p -H)){
chomp;
my @parse = split /\t+/, $_;
$json->{$parse[1]} = (defined $map->{$parse[2]}) ? $map->{$parse[2]} : $parse[2];
}
$json->{errors} = get_zpool_errors($pool);
$json->{stats} = get_zpool_stats($pool);
} elsif ($dataset){
# Convert %40 back to @ (we send them as %40 in the discovery because @ is not allowed in item keys)
$dataset =~ s/%40/\@/g;
foreach (qx($zfs get all $dataset -p -H)){
chomp;
my @parse = split /\t+/, $_;
$json->{$parse[1]} = (defined $map->{$parse[2]}) ? $map->{$parse[2]} : $parse[2];
if ($parse[1] =~ m/compressratio$/){
# Remove trailing x for compressratio and refcompressratio as before 0.8.0 it can be like 1.23x
$json->{$parse[1]} =~ s/x$//;
}
}
} elsif ($sanoidmon){
print qx($sanoid --monitor-$sanoidmon);
exit $?;
} elsif ($stats){
if (not -e '/proc/spl/kstat/zfs/' . $stats){
print 'ZBX_NOTSUPPORTED';
exit 0;
}
open STATS, '</proc/spl/kstat/zfs/' . $stats;
while (<STATS>){
next unless (m/^(\w+)\s+4\s+(\d+)$/);
$json->{$1} = $2;
}
}
print to_json($json, { pretty => $pretty }) . "\n";
exit 0;
sub get_zpool_errors {
my $pool = shift;
my $errors = {
read_errors => 0,
write_errors => 0,
cksum_errors => 0
};
my $i = 0;
my $index = {};
foreach my $line (qx($zpool status $pool 2>/dev/null)){
# Output looks like
# pool: rpool
# state: ONLINE
# status: One or more devices has experienced an unrecoverable error. An
# attempt was made to correct the error. Applications are unaffected.
# action: Determine if the device needs to be replaced, and clear the errors
# using 'zpool clear' or replace the device with 'zpool replace'.
# see: http://zfsonlinux.org/msg/ZFS-8000-9P
# scan: scrub repaired 0B in 0h5m with 0 errors on Tue May 29 10:04:31 2018
# config:
#
# NAME STATE READ WRITE CKSUM
# rpool ONLINE 0 0 0
# mirror-0 ONLINE 0 0 0
# sda2 ONLINE 0 0 0
# sdb2 ONLINE 0 0 474
#
# errors: No known data errors
# We want to save status, action, scan and errors
if ($line =~ m/^\s*(scan|action|status|errors):\s+(\w+.*)/){
$errors->{$1} = $2;
$index->{$i} = $1;
} elsif ($line !~ /:/ and defined $index->{$i-1}){
# Here, we reconstitute multiline values (like status and action)
chomp($line);
$line =~ s/\s+/ /g;
$errors->{$index->{$i-1}} .= $line;
} elsif ($line =~ m/\s+[a-zA-Z0-9_\-]+\s+[A-Z]+\s+(?<read>\d+(\.\d+)?)(?<read_suffix>[KMT])?\s+(?<write>\d+(\.\d+)?)(?<write_suffix>[KMT])?\s+(?<cksum>\d+(\.\d+)?)(?<cksum_suffix>[KMT])?/){
# And here, we count the number of read, write and checksum errors
# Note that on ZoL 0.8.0 we could use zpool status -p to get rid of the suffixes
# But -p is not supported on 0.7 and earlier, so, we just convert them manually
$errors->{read_errors} += convert_suffix($+{'read'},$+{'read_suffix'});
$errors->{write_errors} += convert_suffix($+{'write'},$+{'write_suffix'});
$errors->{cksum_errors} += convert_suffix($+{'cksum'},$+{'cksum_suffix'});
}
$i++;
}
# Ensure every item returns something
$errors->{$_} ||= '' foreach (qw(scan action status errors));
return $errors;
}
# Error counters can be suffixed. Expand the suffix to get raw error numbers
sub convert_suffix {
my $val = shift;
my $suf = shift;
if (!$suf){
return $val;
} elsif ($suf eq 'K'){
$val *= 1000;
} elsif ($suf eq 'M') {
$val *= 1000000;
} elsif ($suf eq 'T') {
$val *= 1000000000;
}
return $val;
}
sub get_zpool_stats {
my $pool = shift;
my $stats = {};
open UPTIME, "</proc/uptime";
$_ = <UPTIME>;
chomp;
my ($uptime , undef) = split;
$uptime = int $uptime;
close UPTIME;
foreach my $line (qx($zpool iostat $pool -pH)){
if ($line =~ m/^$pool\s+\d+\s+\d+\s+(?<reads>\d+)\s+(?<writes>\d+)\s+(?<nread>\d+)\s+(?<nwritten>\d+)/){
# zpool iostat shows average IO since boot, so just multiply it
# by the uptime in seconds to get cumulative IO since boot
# Zabbix server will then be able to calculate the delta between two values
$stats->{$_} = $+{$_} * $uptime foreach (keys %+);
last;
}
}
return $stats;
}
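The suffix expansion in isolation, mirroring the mapping above (note the script maps T to 1e9; values are illustrative):
#!/usr/bin/perl
use strict;
use warnings;

sub convert_suffix {
    my ($val, $suf) = @_;
    return $val unless $suf;
    my %mult = (K => 1_000, M => 1_000_000, T => 1_000_000_000);
    return $val * ($mult{$suf} // 1);
}
print convert_suffix(474, undef), "\n"; # 474
print convert_suffix(1.2, 'K'), "\n";   # 1200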

View File

@ -1,59 +0,0 @@
#!/usr/bin/perl -w
use JSON;
use POSIX;
use Getopt::Long;
use Net::Domain qw(hostfqdn);
use Data::Dumper;
my $pretty = 0;
my $status = 'all';
GetOptions(
"pretty" => \$pretty,
"status=s" => \$status
);
if (defined $status and $status !~ m/^\w+$/){
die "Invalid status name\n";
}
my $zmprov = '/opt/zimbra/bin/zmprov';
my $zmcontrol = '/opt/zimbra/bin/zmcontrol';
my $hostname = hostfqdn();
# We need to switch to zimbra
my $uid = getuid();
my $gid = getgid();
my (undef,undef,$zimuid,$zimgid) = getpwnam('zimbra');
if (not defined $zimuid or not defined $zimgid or not -e $zmprov){
print 'ZBX_NOTSUPPORTED';
exit;
}
setgid($zimgid) if ($gid ne $zimgid);
setuid($zimuid) if ($uid ne $zimuid);
# If there's no zimbra user or no zmcontrol, return unsupported item
if (not defined $zimuid or not defined $zimgid or not -e $zmprov){
print 'ZBX_NOTSUPPORTED';
exit 0;
}
my $output = {};
if (defined $status){
foreach my $line (qx($zmcontrol status)){
if ($line =~ m/^\s+(\w+)(\swebapp)?\s+(Running|Stopped)/){
$output->{$1} = ($3 eq 'Running') ? 1 : 0;
}
}
if ($status eq 'all'){
print to_json($output, { pretty => $pretty });
} elsif (defined $output->{$status}){
print $output->{$status}
} else {
print 'ZBX_NOTSUPPORTED';
}
}
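A sketch of the privilege switch used above: resolve the zimbra account, then drop to it before driving its CLI tools. The group is changed first, while the process still has the privilege to do so:
#!/usr/bin/perl
use strict;
use warnings;
use POSIX;

my (undef, undef, $zimuid, $zimgid) = getpwnam('zimbra');
die "no zimbra user on this host\n" unless defined $zimuid;
setgid($zimgid) if (getgid() != $zimgid); # group first
setuid($zimuid) if (getuid() != $zimuid);
print 'now running as uid ', getuid(), "\n";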

View File

@ -1,24 +1,24 @@
#!/usr/bin/perl
use lib "/usr/share/BackupPC/lib";
use lib "/usr/share/backuppc/lib";
use lib "/usr/local/BackupPC/lib";
use BackupPC::Lib;
use BackupPC::CGI::Lib;
use POSIX;
use JSON;
use Getopt::Long;
use MIME::Base64 qw( decode_base64 );
my $hosts = 1;
my $entities = 0;
my $pretty = 0;
my $regex = '.*';
my $base64 = 0;
GetOptions(
"hosts" => \$hosts,
"entities" => \$entities,
"pretty" => \$pretty
"regex=s" => \$regex,
"base64" => \$base64
);
$regex = decode_base64($regex) if ($base64);
$regex = qr($regex);
# We need to switch to backuppc UID/GID
my $uid = getuid();
my $gid = getgid();
@ -26,41 +26,26 @@ my (undef,undef,$bkpuid,$bkpgid) = getpwnam('backuppc');
setuid($bkpuid) if ($uid ne $bkpuid);
setgid($bkpgid) if ($gid ne $bkpgid);
my $bpc = BackupPC::Lib->new();
my $hosts = $bpc->HostInfoRead();
my $bpc = BackupPC::Lib->new();
my $hosts = $bpc->HostInfoRead();
my $mainConf = $bpc->ConfigDataRead();
my $json;
@{$json->{data}} = ();
if ($entities) {
my %entities = ();
foreach my $host ( keys %$hosts ){
if ( $host =~ m/^(?:vm_)?([^_]+)_.*/ and $1 ne 'vm' ) {
$entities{$1}= 1;
}
}
push @{$json->{data}}, { '{#BPC_ENTITY}' => $_ } foreach ( keys %entities );
} elsif ($hosts){
foreach my $host ( keys %$hosts ){
my $hostConf = $bpc->ConfigDataRead($host);
my $conf = { %$mainConf, %$hostConf };
my $warning = $conf->{EMailNotifyOldBackupDays};
my $errors = ( defined $conf->{MaxXferError} ) ? $conf->{MaxXferError} : '0';
my $monitoring = $conf->{ZabbixMonitoring} || 1;
my $sizeTooBigFactor = $conf->{ZabbixSizeTooBigFactor} || 6;
my $sizeTooSmallFactor = $conf->{ZabbixSizeTooSmallFactor} || 3;
my $status = ( $conf->{BackupsDisable} gt 0 or $monitoring eq '0' ) ? '0' : '1';
push @{$json->{data}},
{
"{#BPCHOST}" => $host,
"{#BPCNOBACKUPWARNING}" => $warning,
"{#BPCMAXERROR}" => $errors,
"{#BPCSTATUS}" => $status,
"{#BPC_TOO_BIG_FACTOR}" => $sizeTooBigFactor,
"{#BPC_TOO_SMALL_FACTOR}" => $sizeTooSmallFactor,
};
}
foreach my $host (keys %$hosts){
next unless ($host =~ m!$regex!);
my $hostConf = $bpc->ConfigDataRead($host);
my $conf = { %$mainConf, %$hostConf };
my $warning = $conf->{EMailNotifyOldBackupDays};
my $errors = (defined $conf->{MaxXferError}) ? $conf->{MaxXferError}: '0';
my $status = ($conf->{BackupsDisable} eq '1') ? 'disabled':(($conf->{ZabbixMonitoring} eq '0') ? 'disabled':'enabled');
push @{$json->{data}},
{
"{#BPCHOST}" => $host,
"{#BPCNOBACKUPWARNING}" => $warning,
"{#BPCMAXERROR}" => $errors,
"{#BPCSTATUS}" => $status,
};
}
print to_json( $json, { pretty => $pretty } );
print to_json($json);
exit(0);
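Why the --regex/--base64 pair exists: Zabbix item key parameters cope badly with regex metacharacters, so the pattern can travel encoded. A sketch (the pattern is invented):
#!/usr/bin/perl
use strict;
use warnings;
use MIME::Base64 qw(encode_base64 decode_base64);

my $encoded = encode_base64('^(?:vm_)?web\d+', ''); # what the caller would pass
my $regex   = decode_base64($encoded);
print "matches\n" if 'vm_web01' =~ qr($regex);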

View File

@ -1,39 +1,22 @@
#!/usr/bin/perl -w
use warnings;
use strict;
use Zabbix::Agent::Addons::Disks;
use JSON;
use File::Which;
use Getopt::Long;
my $pretty = 0;
GetOptions(
'pretty' => \$pretty
);
my $lsblk = which('lsblk');
opendir(my $dh, "/sys/block") or die "Couldn't open /sys/block: $!";
my @blocks = grep { $_ !~ m/^\./ } readdir($dh);
closedir($dh);
my $json;
@{$json->{data}} = ();
if (defined $lsblk){
foreach my $line (qx($lsblk -o KNAME,TYPE,SIZE -r -n -b)){
my ($block,$type,$size) = split(/\s+/, $line);
push @{$json->{data}}, {
"{#BLOCKDEVICE}" => $block, # Compat with previous zabbix-agent-addons
"{#DEVNAME}" => $block, # New macro name for the native vfs.dev.discovery key in 4.4
"{#DEVTYPE}" => $type,
};
}
} else {
# Fallback if lsblk is not available
foreach my $block (Zabbix::Agent::Addons::Disks::list_block_dev()){
push @{$json->{data}}, {
"{#BLOCKDEVICE}" => $block,
"{#DEVNAME}" => $block,
"{#DEVTYPE}" => 'disk'
};
}
foreach my $block (@blocks){
my $size = 1;
if ( -e "/sys/block/$block/size"){
open SIZE, "/sys/block/$block/size";
$size = join "", <SIZE>;
close SIZE;
chomp($size);
next if ($size eq '0');
}
push @{$json->{data}}, { "{#BLOCKDEVICE}" => $block, "{#BLOCKSIZE}" => $size };
}
print to_json($json, { pretty => $pretty });
print to_json($json);
exit(0);

View File

@ -1,68 +0,0 @@
#!/usr/bin/perl
use warnings;
use strict;
use JSON;
use Getopt::Long;
use File::Which;
use Data::Dumper;
my $what = 'containers';
my $pretty = 0;
GetOptions(
'what=s' => \$what,
'pretty' => \$pretty
);
my $json = [];
my $docker = which('docker');
# If the docker cli is not available, terminate now
if (not defined $docker){
print to_json($json) . "\n";
exit(0);
}
my $format;
my $cmd;
if ($what =~ m/^containers?/){
$format = '{' .
'"{#DOCKER_CONTAINER_ID}":"{{ .ID }}",' .
'"{#DOCKER_CONTAINER_IMAGE}": "{{ .Image }}",' .
'"{#DOCKER_CONTAINER_NAME}":"{{ .Names }}",' .
'"{#DOCKER_CONTAINER_STATUS}":"{{ .Status }}"' .
'}';
$cmd = "$docker container list --all --format '$format'";
} elsif ($what =~ m/^networks?/){
$format = '{' .
'"{#DOCKER_NET_ID}":"{{ .ID }}",' .
'"{#DOCKER_NET_NAME}":"{{ .Name }}",' .
'"{#DOCKER_NET_DRIVER}":"{{ .Driver }}",' .
'"{#DOCKER_NET_SCOPE}":"{{ .Scope }}"' .
'}';
$cmd = "$docker network list --format '$format'";
} elsif ($what =~ m/^volumes?/){
$format = '{' .
'"{#DOCKER_VOL_NAME}":"{{ .Name }}",' .
'"{#DOCKER_VOL_DRIVER}":"{{ .Driver }}",' .
'"{#DOCKER_VOL_SCOPE}":"{{ .Scope }}"' .
'}';
$cmd = "$docker volume list --format '$format'";
} else {
print <<_EOF;
Usage: $0 --what=<item to discover> [--pretty]
with available item being
* containers : list containers, including stopped ones
* networks : list networks
* volumes : list volumes
_EOF
exit 1;
}
foreach my $line (qx($cmd)){
chomp $line;
push @{$json}, from_json($line);
}
print to_json($json, { pretty => $pretty }) . "\n";
exit(0);
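The discovery relies on docker's Go templates emitting one JSON object per line, which keeps the Perl side trivial. A sketch of the parsing half with an illustrative line:
#!/usr/bin/perl
use strict;
use warnings;
use JSON;

# One line as the --format template above would print it (values invented).
my $line = '{"{#DOCKER_CONTAINER_ID}":"abc123","{#DOCKER_CONTAINER_NAME}":"web"}';
my $obj  = from_json($line);
print $obj->{'{#DOCKER_CONTAINER_NAME}'}, "\n"; # web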

View File

@ -1,26 +0,0 @@
#!/usr/bin/perl -w
use warnings;
use strict;
use File::Which;
use JSON;
my $json;
@{$json->{data}} = ();
my $drbdoverview = which('drbd-overview');
if ($drbdoverview){
open RES, "$drbdoverview |" or die "Couldn't execute $drbdoverview: $!";
foreach my $l (<RES>){
if ($l =~ m{(\d+):(\w+)/\d+}){
push @{$json->{data}}, {
"{#DRBD_RES_ID}" => $1,
"{#DRBD_RES_NAME}" => $2
};
}
}
close RES;
}
print to_json($json);
exit(0);

View File

@ -1,95 +0,0 @@
#!/usr/bin/perl
use warnings;
use strict;
use JSON;
use Getopt::Long;
use LWP::UserAgent;
use HTTP::Request::Common;
use URI;
use Data::Dumper;
my $user = undef;
my $pass = undef;
my $url = 'http://localhost:9200';
my $certcheck = 1;
my $nodes = 0;
my $indices = 0;
my $pretty = 0;
my $json = [];
GetOptions (
'user:s' => \$user,
'password:s' => \$pass,
'url=s' => \$url,
'cert-check!' => \$certcheck,
'nodes' => \$nodes,
'indices' => \$indices,
'pretty' => \$pretty
);
if ($nodes and $indices){
die "--nodes and --indices are mutually exclusive\n";
}
my $uri = URI->new($url);
if (not defined $uri){
die "$url is not a valid URL\n";
}
# If connecting over http or the host is localhost
# there's no need to check certificate
if ($uri->scheme eq 'http' or $uri->host =~ m/^localhost|127\.0\.0/){
$certcheck = 0;
}
my $sslopts = {};
if (not $certcheck){
$sslopts = {
verify_hostname => 0,
SSL_verify_mode => 0
}
}
my $ua = LWP::UserAgent->new(
ssl_opts => $sslopts
);
$ua->env_proxy;
if ($nodes){
foreach (@{make_request('/_cat/nodes?format=json&full_id&h=ip,role,master,name,id,version')}){
push @{$json}, {
'{#ES_NODE_NAME}' => $_->{name},
'{#ES_NODE_ROLE}' => $_->{role},
'{#ES_NODE_ID}' => $_->{id},
'{#ES_NODE_VERSION}' => $_->{version},
'{#ES_NODE_MASTER}' => $_->{master}
};
}
} elsif ($indices){
foreach (@{make_request('/_cat/indices?format=json')}){
push @{$json}, {
'{#ES_INDEX_NAME}' => $_->{index},
'{#ES_INDEX_STATUS}' => $_->{status},
'{#ES_INDEX_UUID}' => $_->{uuid}
};
}
}
print to_json($json, { pretty => $pretty });
sub make_request {
my $path = shift;
my $req_url = $url . $path;
my $req = GET $req_url;
if (defined $user and $user ne '' and defined $pass and $pass ne ''){
$req->authorization_basic($user, $pass);
}
my $resp = $ua->request($req);
die "Request to $req_url failed : " . $resp->message . "\n" if $resp->is_error;
return from_json($resp->decoded_content);
}

View File

@ -1,8 +1,9 @@
#!/usr/bin/perl
use JSON;
my $json;
@{$json->{data}} = ();
$first = 1;
print "{\n";
print "\t\"data\":[\n\n";
my $cmd;
my $re;
@ -35,15 +36,19 @@ for (`$cmd`){
chomp($t);
$critical = $t if ($t =~ m/^\d+$/);
}
push @{$json->{data}}, {
"{#FSNAME}" => $fsname,
"{#FSTYPE}" => $fstype,
"{#FSDEVICE}" => $block,
"{#FSWARNTHRES}" => $warning,
"{#FSCRITTHRES}" => $critical
};
$fsname =~ s!/!\\/!g;
print "\t,\n" if not $first;
$first = 0;
print "\t{\n";
print "\t\t\"{#FSNAME}\":\"$fsname\",\n";
print "\t\t\"{#FSTYPE}\":\"$fstype\"\n";
print "\t\t\"{#FSDEVICE}\":\"$block\"\n";
print "\t\t\"{#FSWARNTHRES}\":\"$warning\"\n";
print "\t\t\"{#FSCRITTHRES}\":\"$critical\"\n";
print "\t}\n";
}
print to_json($json);
exit(0);
print "\n\t]\n";
print "}\n";

View File

@ -9,7 +9,6 @@ my $json;
@{$json->{data}} = ();
my $gluster = which('gluster');
my $lock = '/var/lock/gluster-zabbix.lock';
unless($gluster){
# Gluster is not installed, just return an empty JSON object
@ -17,10 +16,6 @@ unless($gluster){
exit(0);
}
# Get an exclusive lock
open(LOCK, ">$lock") || die "Can't open $lock";
flock(LOCK, 2);
my $what = 'volumes';
GetOptions(
"what=s" => \$what,
@ -115,7 +110,6 @@ else{
exit(1);
}
close(LOCK);
print to_json($json);
exit(0);
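The lock above serialises concurrent gluster CLI invocations. A standalone sketch of the same flock pattern with symbolic constants (the lock path is illustrative):
#!/usr/bin/perl
use strict;
use warnings;
use Fcntl qw(:flock);

my $lock = '/var/lock/example-zabbix.lock';
open(my $fh, '>', $lock) or die "Can't open $lock: $!\n";
flock($fh, LOCK_EX) or die "Can't lock $lock: $!\n"; # blocks until we own it
# ... run the serialised command here ...
close($fh); # closing the handle releases the lock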

View File

@ -1,18 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use LWP::Simple;
use JSON;
my $json;
@{$json->{data}} = ();
my $status = get('http://127.0.0.1/server-status?auto');
if ($status){
push @{$json->{data}}, {"{#HTTPD_STATUS_URI}" => 'http://127.0.0.1/server-status'};
}
print to_json($json);
exit(0);

View File

@ -1,6 +1,6 @@
#!/usr/bin/perl -w
use Zabbix::Agent::Addons::LVM;
use Linux::LVM;
use JSON;
my $what = $ARGV[0];
@ -10,52 +10,49 @@ open STDERR, '>/dev/null';
my $json;
@{$json->{data}} = ();
my @vg = eval { get_volume_group_list() };
unless ($@){
if ($what eq "volumes"){
foreach my $group (@vg){
my %lvs = get_logical_volume_information($group);
foreach my $lv (keys %lvs){
$lv = ($lv =~ m!^/dev/$group!) ? $lv : "/dev/$group/$lv";
push @{$json->{data}}, { "{#LVMVOL}" => "$lv" };
}
}
}
elsif ($what eq "snapshots"){
foreach my $group (@vg){
my %lvs = get_logical_volume_information($group);
foreach my $lv (keys %lvs){
if (defined $lvs{$lv}->{allocated_to_snapshot}){
$lv = ($lv =~ m!^/dev/$group!) ? $lv : "/dev/$group/$lv";
push @{$json->{data}}, { "{#LVMSNAP}" => "$lv" };
if ($what eq "volumes"){
foreach my $group (get_volume_group_list()){
my %lvs = get_logical_volume_information($group);
foreach my $lv (keys %lvs){
$lv = ($lv =~ m!^/dev/$group!) ? $lv : "/dev/$group/$lv";
push @{$json->{data}}, { "{#LVMVOL}" => "$lv" };
}
}
}
}
elsif ($what eq "thin_pools"){
foreach my $group (@vg){
my %lvs = get_logical_volume_information($group);
foreach my $lv (keys %lvs){
if (defined $lvs{$lv}->{allocated_pool_data}){
$lv = ($lv =~ m!^/dev/$group!) ? $lv : "/dev/$group/$lv";
push @{$json->{data}}, { "{#LVMTHINP}" => "$lv" };
}
elsif ($what eq "snapshots"){
foreach my $group (get_volume_group_list()){
my %lvs = get_logical_volume_information($group);
foreach my $lv (keys %lvs){
if (defined $lvs{$lv}->{allocated_to_snapshot}){
$lv = ($lv =~ m!^/dev/$group!) ? $lv : "/dev/$group/$lv";
push @{$json->{data}}, { "{#LVMSNAP}" => "$lv" };
}
}
}
}
}
elsif ($what eq "groups"){
foreach my $group (@vg){
push @{$json->{data}}, { "{#LVMGRP}" => $group }; }
}
else{
}
elsif ($what eq "thin_pools"){
foreach my $group (get_volume_group_list()){
my %lvs = get_logical_volume_information($group);
foreach my $lv (keys %lvs){
if (defined $lvs{$lv}->{allocated_pool_data}){
$lv = ($lv =~ m!^/dev/$group!) ? $lv : "/dev/$group/$lv";
push @{$json->{data}}, { "{#LVMTHINP}" => "$lv" };
}
}
}
}
elsif ($what eq "groups"){
foreach my $group (get_volume_group_list()){
push @{$json->{data}}, { "{#LVMGRP}" => $group };
}
}
else{
print <<"EOF";
Usage: $0 [volumes|snapshots|thin_pools|groups]
Usage: $0 [volumes|snapshots|groups]
EOF
exit 1;
}
}
print to_json($json);
exit(0);
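The eval around get_volume_group_list turns a die (for instance when LVM tools aren't installed) into an empty discovery instead of a broken item. A sketch with a stand-in function:
#!/usr/bin/perl
use strict;
use warnings;

sub get_volume_group_list { die "lvm binary not found\n" } # illustrative stub
my @vg = eval { get_volume_group_list() };
if ($@) {
    print "{\"data\":[]}\n"; # empty discovery, the agent item stays supported
} else {
    print join(',', @vg), "\n";
}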

View File

@ -1,37 +0,0 @@
#!/usr/bin/perl
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
my $mpath = 1;
my $pretty = 0;
my $json = [];
GetOptions(
"mpath" => \$mpath,
"pretty" => \$pretty
);
my $multipath = which('multipath');
if (not defined $multipath){
print to_json($json, { pretty => $pretty });
exit 0;
}
my @dev = qx($multipath -l -v1);
# If command failed (eg no /etc/multipath.conf), then return an empty result
if ($? ne 0){
print to_json($json, { pretty => $pretty });
exit 1;
}
foreach (@dev){
chomp;
push @{$json}, { '{#MPATH_DEV}' => $_ };
}
print to_json($json, { pretty => $pretty });
exit 0;

View File

@ -1,18 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use LWP::Simple;
use JSON;
my $json;
@{$json->{data}} = ();
my $status = get('http://127.0.0.1/nginx-status');
if ($status){
push @{$json->{data}}, {"{#NGINX_STATUS_URI}" => 'http://127.0.0.1/nginx-status'};
}
print to_json($json);
exit(0);

View File

@ -1,13 +1,15 @@
#!/usr/bin/perl -w
use JSON;
use Zabbix::Agent::Addons::UPS;
my $json;
@{$json->{data}} = ();
foreach my $ups (Zabbix::Agent::Addons::UPS::list_ups()){
push @{$json->{data}}, {"{#UPSNAME}" => $ups};
if (system("upsc -l >/dev/null 2>&1") == 0){
foreach my $ups (`upsc -l`){
chomp($ups);
push @{$json->{data}}, {"{#UPSNAME}" => $ups};
}
}
print to_json($json);
exit(0);

View File

@ -1,35 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
my $what = 'nodes';
my $pretty = 0;
GetOptions(
'what=s' => \$what,
'pretty' => \$pretty
);
my $pmgsh = which('pmgsh');
my $json = {};
@{$json->{data}} = ();
unless($pmgsh){
print to_json($json) . "\n";
exit 0;
}
if ($what eq 'domains'){
my $domains = from_json(qx($pmgsh get /config/domains 2>/dev/null));
foreach my $item (@{$domains}){
push @{$json->{data}}, {
'{#PMG_RELAY_DOMAIN}' => $item->{domain},
};
}
}
print to_json($json, { pretty => $pretty }) . "\n";

View File

@ -1,73 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
use Sys::Hostname;
my $what = 'nodes';
my $pretty = 0;
GetOptions(
'what=s' => \$what,
'pretty' => \$pretty
);
my $pvesh = which('pvesh');
my $json = {};
@{$json->{data}} = ();
unless($pvesh){
print to_json($json) . "\n";
exit 0;
}
# Are we using the new pvesh for which we have to specify the output format ?
my $pvesh_opt = (system("$pvesh get /version --output-format=json >/dev/null 2>&1") == 0) ? '--output-format=json' : '';
if ($what eq 'nodes'){
my $cluster_status = from_json(qx($pvesh get /cluster/status $pvesh_opt 2>/dev/null));
foreach my $item (@{$cluster_status}){
next if ($item->{type} ne 'node');
push @{$json->{data}}, {
'{#PVE_NODE_NAME}' => $item->{name},
'{#PVE_NODE_IP}' => $item->{ip},
'{#PVE_NODE_ID}' => $item->{nodeid},
'{#PVE_NODE_LOCAL}' => $item->{local}
};
}
} elsif ($what eq 'guests'){
my $guests = from_json(qx($pvesh get /cluster/resources --type=vm $pvesh_opt 2>/dev/null));
foreach my $guest (@{$guests}){
push @{$json->{data}}, {
'{#PVE_GUEST_ID}' => $guest->{vmid},
'{#PVE_GUEST_NODE}' => $guest->{node},
'{#PVE_GUEST_TYPE}' => $guest->{type},
'{#PVE_GUEST_NAME}' => $guest->{name},
'{#PVE_GUEST_TEMPLATE}' => $guest->{template}
};
}
} elsif ($what eq 'storage'){
my $stores = from_json(qx($pvesh get /storage $pvesh_opt 2>/dev/null));
foreach my $store (@{$stores}){
push @{$json->{data}}, {
'{#PVE_STOR_ID}' => $store->{storage},
'{#PVE_STOR_TYPE}' => $store->{type},
'{#PVE_STOR_STATUS}' => (($store->{disable}) ? 0 : 1),
'{#PVE_STOR_SHARED}' => ($store->{shared} || 0),
'{#PVE_STOR_CONTENT}' => $store->{content}
};
}
} elsif ($what eq 'pools'){
my $pools = from_json(qx($pvesh get /pools $pvesh_opt 2>/dev/null));
foreach my $pool (@{$pools}){
push @{$json->{data}}, {
'{#PVE_POOL_ID}' => $pool->{poolid},
'{#PVE_POOL_DESC}' => $pool->{comment}
};
}
}
print to_json($json, { pretty => $pretty }) . "\n";

View File

@ -1,31 +1,30 @@
#!/usr/bin/perl -w
use strict;
use File::Which;
use JSON;
my $json;
@{$json->{data}} = ();
my $cli = which('hpacucli') || which('ssacli');
my $hpacucli = '/usr/sbin/hpacucli';
# hpacucli or ssacli utility is needed
if (not defined $cli){
print to_json($json);
exit(0);
# the hpacucli utility is needed
unless (-x $hpacucli){
print to_json($json);
exit(0);
}
open( CLI, "$cli controller all show status|" )
or die "An error occured while running $cli: $!";
open( HPACUCLI, "$hpacucli controller all show status|" )
or die "An error occured while running $hpacucli: $!";
foreach my $line (<CLI>){
if ( $line =~ m/Another instance of hpacucli is running! Stop it first\./i ){
die "Another instance of hpacucli is running\n";
}
elsif ( $line =~ m/(.*) in Slot (\d+)/i ) {
push @{$json->{data}}, {"{#MODEL}" => $1, "{#SLOT}" => $2};
}
foreach my $line (<HPACUCLI>){
if ( $line =~ m/Another instance of hpacucli is running! Stop it first\./i ){
die "Another instance of hpacucli is running\n";
}
elsif ( $line =~ m/(.*) in Slot (\d+)/i ) {
push @{$json->{data}}, {"{#MODEL}" => $1, "{#SLOT}" => $2};
}
}
close CLI;
close HPACUCLI;
print to_json($json);
exit(0);

View File

@ -6,13 +6,11 @@ use JSON;
my $json;
@{$json->{data}} = ();
if (!-x '/usr/bin/systemd-detect-virt' || system('/usr/bin/systemd-detect-virt', '-qc') != 0){
open FILE, "< /proc/mdstat" or die "Can't open /proc/mdstat : $!";
foreach my $line (<FILE>) {
next unless ($line =~ m/^(md\d+)+\s*:/);
my ($md,undef,$status,$level) = split(/\ /, $line);
push @{$json->{data}}, {"{#DEVICE}" => $md, "{#STATUS}" => $status, "{#LEVEL}" => $level};
}
open FILE, "< /proc/mdstat" or die "Can't open /proc/mdstat : $!";
foreach my $line (<FILE>) {
next unless ($line =~ m/^(md\d+)+\s*:/);
my ($md,undef,$status,$level) = split(/\ /, $line);
push @{$json->{data}}, {"{#DEVICE}" => $md, "{#STATUS}" => $status, "{#LEVEL}" => $level};
}
print to_json($json);
exit(0);

View File

@ -1,34 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
my $what = 'ou';
my $pretty = 0;
my $json = [];
my $samba_tool = which('samba-tool');
if (not defined $samba_tool){
print to_json($json);
exit 0;
}
GetOptions(
'what=s' => \$what,
'pretty' => \$pretty
);
if ($what eq 'ou'){
foreach (qx($samba_tool ou list)){
chomp;
push @{$json}, {
'{#SAMBA_OU}' => $_
}
}
}
print to_json($json, { pretty => $pretty });

View File

@ -1,41 +1,22 @@
#!/usr/bin/perl -w
use Config::Simple;
use Getopt::Long;
use JSON;
my $type = 'temp';
GetOptions(
"type:s" => \$type
);
# empty means temp
$type = ($type eq '') ? 'temp' : $type;
my $json;
@{$json->{data}} = ();
my $cfg = new Config::Simple;
$cfg->read('/etc/zabbix/sensors.ini');
$cfg->syntax('ini');
my %sensors = ();
foreach my $k (keys %{$cfg->vars}){
$k =~ s/\..*$//;
$sensors{$k} = 1 unless $sensors{$k};
}
open SENSORS, ('</etc/zabbix/sensors.conf') ||
die "Couldn't open /etc/zabbix/sensors.conf: $!\n";
foreach my $k (keys %sensors){
my $sensor = $cfg->get_block($k);
next if ($type ne 'all' && $type ne $sensor->{type});
push @{$json->{data}}, {
"{#SENSORNAME}" => $k,
"{#SENSORDESC}" => $sensor->{description},
"{#SENSORTHRESHIGH}" => $sensor->{threshold_high},
"{#SENSORTHRESLOW}" => $sensor->{threshold_low},
"{#SENSORTYPE}" => $sensor->{type},
"{#SENSORUNIT}" => $sensor->{unit}
};
}
foreach (<SENSORS>){
next unless (/^(\w+)(\s+)?=(\s+)?(.*)!(\-?\d+)!(\-?\d+)$/);
my ($sensor,$threshigh,$threslow) = ($1,$5,$6);
push @{$json->{data}}, {
"{#SENSORNAME}" => $sensor,
"{#SENSORTHRESHIGH}" => $threshigh,
"{#SENSORTHRESLOW}" => $threslow
};
}
close SENSORS;
print to_json($json);
exit(0);

View File

@ -2,14 +2,35 @@
use warnings;
use strict;
use Zabbix::Agent::Addons::Disks;
use JSON;
my $json;
@{$json->{data}} = ();
foreach my $block (Zabbix::Agent::Addons::Disks::list_smart_hdd({ skip_remouvable => 1 })){
push @{$json->{data}}, { "{#SMARTDRIVE}" => "/dev/$block" };
opendir(my $dh, "/sys/block") or die "Couldn't open /sys/block: $!";
my @blocks = grep { $_ !~ m/^\./ } readdir($dh);
closedir($dh);
foreach my $block (@blocks){
my $removable = 0;
my $size = 1;
# Skip blocks we already know won't support SMART
next if ($block =~ m/^(ram|loop|md|dm\-)\d+/);
if ( -e "/sys/block/$block/removable"){
open REMOVABLE, "/sys/block/$block/removable";
$removable = join "", <REMOVABLE>;
close REMOVABLE;
chomp($removable);
next if ($removable eq '1');
}
if ( -e "/sys/block/$block/size"){
open SIZE, "/sys/block/$block/size";
$size = join "", <SIZE>;
close SIZE;
chomp($size);
next if ($size eq '0');
}
next unless (system("/usr/sbin/smartctl -A /dev/$block >/dev/null 2>&1") == 0);
push @{$json->{data}}, { "{#SMARTDRIVE}" => "/dev/$block" };
}
print to_json($json);
exit(0);

View File

@ -1,18 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use LWP::Simple;
use JSON;
my $json;
@{$json->{data}} = ();
my $status = get('http://127.0.0.1:3128/squid-internal-mgr/info');
if ($status){
push @{$json->{data}}, {"{#SQUID_STATUS_URI}" => 'http://127.0.0.1:3128/squid-internal-mgr/info'};
}
print to_json($json);
exit(0);
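# The emitted LLD data when the cache manager answers (the URI is hard-coded above):
# {"data":[{"{#SQUID_STATUS_URI}":"http://127.0.0.1:3128/squid-internal-mgr/info"}]}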

View File

@ -1,61 +0,0 @@
#!/usr/bin/perl
use strict;
use JSON;
use Getopt::Long;
use Data::Dumper;
use File::Which;
my $pretty = 0;
GetOptions(
'pretty' => \$pretty
);
my $smartctl = which('smartctl');
my $json = [];
sub print_out {
print to_json($json, { pretty => $pretty });
}
if (not defined $smartctl){
print_out();
exit 0;
}
my $smart_scan = from_json(qx($smartctl --scan-open --json=c));
if (not defined $smart_scan){
print_out();
exit 0;
}
foreach my $device (@{$smart_scan->{devices}}){
my ($model, $sn, $has_smart) = ('', '', 0);
my $smart_info = from_json(qx($smartctl -i $device->{name} -d $device->{type} --json=c));
if (defined $smart_info){
$model = $smart_info->{model_name};
$sn = $smart_info->{serial_number};
$has_smart = (
$smart_info->{in_smartctl_database} or (
defined $smart_info->{smart_support} and
$smart_info->{smart_support}->{available} and
$smart_info->{smart_support}->{enabled}
)
) ? 1 : 0;
}
push @{$json}, {
'{#STOR_DEV_NAME}' => $device->{name},
'{#STOR_DEV_DESC}' => $device->{info_name},
'{#STOR_DEV_TYPE}' => $device->{type},
'{#STOR_DEV_PROTO}' => $device->{protocol},
'{#STOR_DEV_MODEL}' => $model,
'{#STOR_DEV_SN}' => $sn,
'{#STOR_DEV_SMART}' => int $has_smart
};
}
print_out();
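# One object is emitted per device found by smartctl --scan-open; for a SATA
# disk it could look like this (model and serial are illustrative):
# {"{#STOR_DEV_NAME}":"/dev/sda","{#STOR_DEV_DESC}":"/dev/sda [SAT]","{#STOR_DEV_TYPE}":"sat",
#  "{#STOR_DEV_PROTO}":"ATA","{#STOR_DEV_MODEL}":"ExampleVendor SSD","{#STOR_DEV_SN}":"S3Z8NB0K","{#STOR_DEV_SMART}":1}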

View File

@ -1,141 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use Getopt::Long;
use LWP::UserAgent;
use HTTP::Cookies;
use URI;
use Data::Dumper;
umask 077;
my $user = 'zabbix';
my $pass = 'secret';
my $site = 'default';
my $url = 'https://localhost:8443';
my $certcheck = 1;
my $what = 'devices';
my $type = 'all';
my $pretty = 0;
my $json = {};
@{$json->{data}} = ();
GetOptions (
'user=s' => \$user,
'password|p=s' => \$pass,
'site=s' => \$site,
'url=s' => \$url,
'cert-check!' => \$certcheck,
'what=s' => \$what,
'type:s' => \$type,
'pretty' => \$pretty
);
# An empty type is the same as all
$type = 'all' if ($type eq '');
# If connecting to localhost, no need to check certificate
my $uri = URI->new($url);
if ($uri->host =~ m/^localhost|127\.0\.0/){
$certcheck = 0;
}
my $site_id;
my $resp;
my $username = $ENV{LOGNAME} || $ENV{USER} || getpwuid($<);
my $cj = HTTP::Cookies->new(
file => "/tmp/.unifi_$username.txt",
autosave => 1,
ignore_discard => 1
);
my $sslopts = {};
if (not $certcheck){
$sslopts = { verify_hostname => 0, SSL_verify_mode => 0 }
}
my $ua = LWP::UserAgent->new(
ssl_opts => $sslopts,
cookie_jar => $cj
);
# Check if we need to login
$resp = $ua->get($url . '/api/self/sites');
if ($resp->is_error){
# Login on the API
$resp = $ua->post(
$url . '/api/login',
Content => to_json({ username => $user, password => $pass }),
Content_Type => 'application/json;charset=UTF-8'
);
die "Login failed: " . $resp->message . "\n" if $resp->is_error;
$resp = $ua->get($url . '/api/self/sites');
die $resp->message . "\n" if $resp->is_error;
}
foreach (@{from_json($resp->decoded_content)->{data}}){
if ($_->{name} eq $site || $_->{desc} eq $site){
$site_id = $_->{_id};
# If site is referenced by description, translate it to name
$site = $_->{name} if ($_->{name} ne $site);
last;
}
}
die "Site $site not found\n" unless ($site_id);
if ($what eq 'devices'){
$resp = $ua->get($url . '/api/s/' . $site . '/stat/device');
die $resp->message . "\n" if $resp->is_error;
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
next if ($type ne 'all' && $entry->{type} ne $type);
push @{$json->{data}}, {
'{#UNIFI_DEV_ID}' => $entry->{device_id},
'{#UNIFI_DEV_ADOPTED}' => $entry->{adopted},
'{#UNIFI_DEV_MODEL}' => $entry->{model},
'{#UNIFI_DEV_NAME}' => $entry->{name} || $entry->{mac},
'{#UNIFI_DEV_MAC}' => $entry->{mac},
'{#UNIFI_DEV_TYPE}' => $entry->{type}
};
}
} elsif ($what eq 'stations'){
$resp = $ua->get($url . '/api/s/' . $site . '/stat/sta');
die $resp->message . "\n" if $resp->is_error;
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
# Ignore other sites
next if ($entry->{site_id} ne $site_id);
next if ($type eq 'wireless' and $entry->{is_wired} == JSON::true);
next if ($type eq 'wired' and $entry->{is_wired} == JSON::false);
push @{$json->{data}}, {
'{#UNIFI_STA_ID}' => $entry->{_id},
'{#UNIFI_STA_NAME}' => (defined $entry->{hostname}) ? $entry->{hostname} : $entry->{mac},
'{#UNIFI_STA_MAC}' => $entry->{mac}
};
}
} elsif ($what eq 'networks'){
$resp = $ua->get($url . '/api/s/' . $site . '/rest/networkconf');
die $resp->message . "\n" if $resp->is_error;
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
# Ignore other sites
next if ($entry->{site_id} ne $site_id);
next if ($type ne 'all' and $entry->{purpose} ne $type);
push @{$json->{data}}, {
'{#UNIFI_NET_ID}' => $entry->{_id},
'{#UNIFI_NET_NAME}' => $entry->{name}
};
}
} elsif ($what eq 'wlan') {
$resp = $ua->get($url . '/api/s/' . $site . '/rest/wlanconf');
die $resp->message . "\n" if $resp->is_error;
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
push @{$json->{data}}, {
'{#UNIFI_WLAN_ID}' => $entry->{_id},
'{#UNIFI_WLAN_NAME}' => $entry->{name}
};
}
}
print to_json($json, { pretty => $pretty });
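# Example invocations (the installed script name is an assumption; "uap" is
# the controller's type string for access points):
#   disco_unifi --url=https://unifi.example.com:8443 --user=zabbix -p secret --what=devices --type=uap
#   disco_unifi --what=stations --type=wireless --pretty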

View File

@ -1,31 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use File::Which;
use Getopt::Long;
my $json;
@{$json->{data}} = ();
my $what = 'volumes';
my $pretty = 0;
GetOptions(
'what=s' => \$what,
'pretty' => \$pretty
);
my $vdostats = which('vdostats');
if (defined $vdostats) {
foreach my $line (qx($vdostats)) {
if ($line =~ m|^/dev/mapper/([^\s]+)|) {
push @{$json->{data}}, {
'{#VDO_VOL}' => $1
};
}
}
}
print to_json($json, { pretty => $pretty }) . "\n";
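# vdostats prints one line per VDO volume, e.g. (sizes are illustrative):
#   /dev/mapper/vdo0  104857600  52428800  52428800  50%  63%
# which the regex above turns into {"data":[{"{#VDO_VOL}":"vdo0"}]}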

View File

@ -1,77 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use JSON;
use File::Which;
use Getopt::Long;
my $json;
@{$json->{data}} = ();
my $zpool = which('zpool');
my $zfs = which('zfs');
my $sanoid = which('sanoid');
if (not $zpool or not $zfs){
print 'ZBX_NOTSUPPORTED';
exit(0);
}
my $pools = 1;
my $fs = 0;
my $zvol = 0;
my $snap = 0;
my $sanoidmon = 0;
my $arcstats = 0;
my $pretty = 0;
GetOptions(
"pools" => \$pools,
"fs|filesystems" => \$fs,
"zvols|volumes" => \$zvol,
"snapshots" => \$snap,
"sanoid" => \$sanoidmon,
"arcstats" => \$arcstats,
"pretty" => \$pretty
);
if ($fs or $zvol or $snap or $sanoidmon or $arcstats){
$pools = 0;
}
if ($pools + $fs + $zvol + $snap + $sanoidmon + $arcstats != 1){
die "One and only one type of discovery should be provided\n";
}
if ($sanoidmon and not $sanoid){
print to_json($json);
exit 0;
}
if ($pools){
foreach (qx($zpool list -H -o name)){
chomp;
push @{$json->{data}}, { '{#ZPOOL}' => $_ };
}
} elsif ($fs){
foreach (qx($zfs list -H -o name -t filesystem)){
chomp;
push @{$json->{data}}, { '{#ZFS_FS}' => $_ };
}
} elsif ($zvol){
foreach (qx($zfs list -H -o name -t volume)){
chomp;
push @{$json->{data}}, { '{#ZFS_ZVOL}' => $_ };
}
} elsif ($snap){
foreach (qx($zfs list -H -o name -t snap)){
chomp;
# Replace @ with %40, as @ isn't allowed in item key names
# It will be converted back to @ by the check_zfs script
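# e.g. tank/home@zfs-auto-snap_daily-2022-01-01 is reported as tank/home%40zfs-auto-snap_daily-2022-01-01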
$_ =~ s/\@/%40/g;
push @{$json->{data}}, { '{#ZFS_SNAP}' => $_ };
}
} elsif ($sanoidmon){
push @{$json->{data}}, { '{#ZFS_SANOID}' => $_ } foreach (qw(snapshot));
} elsif ($arcstats){
push @{$json->{data}}, { '{#ZFS_STATS}' => 'arcstats' };
}
print to_json($json, { pretty => $pretty });

View File

@ -1,63 +0,0 @@
#!/usr/bin/perl -w
use JSON;
use POSIX;
use Getopt::Long;
use Net::Domain qw(hostfqdn);
use Data::Dumper;
my $json = [];
my $pretty = 0;
my $services = 1;
my $servers = 0;
GetOptions(
"pretty" => \$pretty,
"services" => \$services,
"servers" => \$servers
);
if ($servers) {
$services = 0;
}
my $uid = getuid();
my $gid = getgid();
my (undef,undef,$zimuid,$zimgid) = getpwnam('zimbra');
my $zmprov = '/opt/zimbra/bin/zmprov';
my $hostname = hostfqdn();
# If there's no zimbra user or no zmprov, just return an empty list
if (not defined $zimuid or not defined $zimgid or not -e $zmprov){
print to_json($json);
exit;
}
# Switch to the Zimbra user (set the group first: setgid is no longer permitted once root privileges are dropped by setuid)
setgid($zimgid) if ($gid ne $zimgid);
setuid($zimuid) if ($uid ne $zimuid);
if ($services){
# zmconfigd is always enabled and should be running
push @{$json}, {
'{#ZM_SERVICE}' => 'zmconfigd'
};
foreach my $service (qx($zmprov getServer $hostname zimbraServiceEnabled)){
if ($service =~ m/^zimbraServiceEnabled:\s+(\w+)/){
push @{$json}, {
'{#ZM_SERVICE}' => $1
};
}
}
} elsif ($servers){
foreach my $server (qx($zmprov getAllServers)){
chomp $server;
push @{$json}, {
'{#ZM_SERVER}' => $server
};
}
}
print to_json($json, { pretty => $pretty });

View File

@ -1,47 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use Config::Simple '-strict';
use JSON;
my $old = shift;
$old ||= '/etc/zabbix/sensors.conf';
my $new = '/etc/zabbix/sensors.ini';
my $sensors = {};
my $units = {
temp => '°C',
fan => 'rpm',
power => 'W'
};
open(OLDSENSORS, "<$old")
or die "Couldn't open $old: $!\n";
foreach (<OLDSENSORS>){
next unless (/^(\w+)(\s+)?=(\s+)?(.*)!(\-?\d+)!(\-?\d+)(!(\w+))?$/);
my ($sensor,$cmd,$threshigh,$threslow,$type) = ($1,$4,$5,$6,$8);
$type ||= 'temp';
$sensors->{$sensor} = {
description => $sensor,
cmd => $cmd,
threshold_high => $threshigh,
threshold_low => $threslow,
type => $type,
unit => $units->{$type}
};
}
my $cfg = new Config::Simple(syntax => 'ini');
foreach my $k (keys %$sensors){
$cfg->set_block($k, $sensors->{$k});
}
$cfg->write($new);
rename $old, $old . '.bak';
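# Example: a legacy sensors.conf line such as (illustrative command)
#   ambient=/usr/bin/sensors | grep temp2 | awk '{print $2}'!45!40!temp
# would be rewritten in /etc/zabbix/sensors.ini as:
#   [ambient]
#   description=ambient
#   cmd=/usr/bin/sensors | grep temp2 | awk '{print $2}'
#   threshold_high=45
#   threshold_low=40
#   type=temp
#   unit=°C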

View File

@ -1,403 +0,0 @@
#!/usr/bin/perl -w
use strict;
use warnings;
use Config::Simple '-strict';
use Getopt::Long;
use File::Which;
use File::Basename;
use Zabbix::Agent::Addons::Disks;
use Zabbix::Agent::Addons::UPS;
# Output file
my $output = undef;
# When a threshold can be automatically detected,
# you may want to be notified before it's reached, so you can
# set a margin which will be subtracted from the real threshold
my $temp_margin = '20';
my $temp_hd_margin = '10';
my $pwr_margin = '200';
my $pwr_rel_margin = '20';
# This value will be subtracted from the higher threshold to define the low one,
# giving some hysteresis to prevent flip-flopping
my $temp_hyst = '10';
my $temp_hd_hyst = '5';
my $temp_ups_hyst = '5';
my $pwr_hyst = '200';
my $pwr_rel_hyst = '10';
# Default threshold if not detected
my $def_temp_thres_high = '50';
my $def_hd_temp_thres_high = '50';
my $def_ups_temp_thres_high = '40';
my $def_fan_thres_high = '1000';
my $def_fan_thres_low = '700';
my $def_pwr_thres_high = '1000';
my $def_pwr_rel_thres_high = '80';
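# Worked example with the defaults above (the reported value is hypothetical):
# an IPMI temperature sensor exposing an upper critical threshold of 85 gets
# threshold_high = 85 - 20 = 65; if no non-critical threshold is exposed,
# threshold_low falls back to 65 - 10 = 55, so the alert raises above 65°C
# and only clears below 55°C.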
GetOptions(
"output=s" => \$output,
"temp-margin=i" => \$temp_margin,
"temp-hd-margin=i" => \$temp_hd_margin,
"pwr-margin=i" => \$pwr_margin,
"pwr-rel-margin=i" => \$pwr_rel_margin,
"temp-hyst=i" => \$temp_hyst,
"temp-hd-hyst=i" => \$temp_hd_hyst,
"temp-ups-hyst=i" => \$temp_ups_hyst,
"pwr-hyst=i" => \$pwr_hyst,
"pwr-rel-hys=i" => \$pwr_rel_hyst
);
sub usage(){
print<<"_EOF";
Usage: $0 --output=/etc/zabbix/sensors.ini
_EOF
}
unless ($output){
usage();
exit 1;
}
# Path
my $ipmitool = which('ipmitool');
my $smartctl = which('smartctl');
my $lmsensor = which('sensors');
my $upsc = which('upsc');
my $cfg = new Config::Simple(syntax => 'ini');
my $sensors = {};
# Try to detect IPMI sensors
if ($ipmitool && -x $ipmitool){
# First check for temperature sensors
my @lines = qx($ipmitool sdr type Temperature 2>/dev/null);
if ($? == 0){
SENSOR: foreach my $l (@lines){
chomp $l;
# Looks like
# Inlet Temp | 04h | ok | 7.1 | 25 degrees C
if ($l !~ m/^(\w+[\s\w\/\-]+?\w+)\s*\|.*\|\s*([\w\.\s]+)\s*\|.*\|\s*([\-\w\.\s]+)$/){
next SENSOR;
}
my $name = $1;
my $sensor = {};
my @details = qx($ipmitool sdr get '$name' 2>/dev/null);
if ($? != 0){
print "Couldn't get detail for sensor $name\n";
next SENSOR;
}
my $val = undef;
foreach my $d (@details){
chomp $d;
if ($d =~ m/^\s*Sensor\sReading\s*:\s*(\-?\w+)/){
$val = $1;
print "Sensor $name has value: $val\n";
if ($val !~ m/^\-?\d+$/){
print "Skipping sensor $name, couldn't parse its value: $val\n";
next SENSOR;
}
}
elsif ($d =~ m/^\s*Upper\scritical\s*:\s*(\-?\d+(\.\d+))/){
$sensor->{threshold_high} = $1-$temp_margin;
}
elsif ($d =~ m/^\s*Upper\snon\-critical\s*:\s*(\-?\d+(\.\d+))/){
$sensor->{threshold_low} = $1-$temp_margin;
}
}
# Another loop to check for Normal max if Upper critical wasn't found
if (!$sensor->{threshold_high}){
foreach my $d (@details){
chomp $d;
if ($d =~ m/^\s*Normal\sMaximum\s*:\s*(\-?\d+(\.\d+))/){
$sensor->{threshold_high} = $1-$temp_margin;
}
}
}
next SENSOR unless $val;
$sensor->{threshold_low} ||= ($sensor->{threshold_high}) ? $sensor->{threshold_high}-$temp_hyst : $def_temp_thres_high-$temp_hyst;
$sensor->{threshold_high} ||= $def_temp_thres_high;
$sensor->{threshold_high} =~ s/\.0+$//;
$sensor->{threshold_low} =~ s/\.0+$//;
$sensor->{description} = $name;
$sensor->{type} = 'temp';
$sensor->{unit} = '°C';
$sensor->{cmd} = "$ipmitool sdr get '$name' 2>/dev/null | perl -ne 'if (/Sensor Reading\\s*:\\s*([^\\s]+)/) { print \"\$1\\n\"; last }'";
my $id = sensor_name($name);
$sensors->{$id} = $sensor;
print "Found a temperature sensor using IPMI: $name\n";
}
}
# Now check for Fan, nearly the same as Temp, but
# * We try to detect the unit
# * threshold handling is not the same
@lines = qx($ipmitool sdr type Fan 2>/dev/null);
if ($? == 0){
SENSOR: foreach my $l (@lines){
chomp $l;
if ($l !~ m/^(\w+[\s\w]+?\w+)\s*\|.*\|\s*([\w\.\s]+)\s*\|.*\|\s*([\-\w\.\s]+)$/){
next SENSOR;
}
my $name = $1;
my $value = $3;
my $sensor = {};
my @details = qx($ipmitool sdr get '$name' 2>/dev/null);
if ($? != 0){
print "Couldn't get detail for sensor $name\n";
next SENSOR;
}
my $val = undef;
foreach my $d (@details){
chomp $d;
if ($d =~ m/^\s*Sensor\sReading\s*:\s*(\d+(\.\d+)?)/){
$val = $1;
if ($val !~ m/^\d+(\.\d+)?$/){
print "Skipping sensor $name, couldn't parse its value: $val\n";
next SENSOR;
}
}
elsif ($d =~ m/^\s*Lower\scritical\s*:\s*(\d+(\.\d+))/){
$sensor->{threshold_low} = $1-$temp_margin;
}
elsif ($d =~ m/^\s*Lower\snon\-critical\s*:\s*(\d+(\.\d+))/){
$sensor->{threshold_high} = $1-$temp_margin;
}
}
next SENSOR unless $val;
$sensor->{threshold_high} ||= $def_fan_thres_high;
$sensor->{threshold_low} ||= $def_fan_thres_high-$temp_hyst;
$sensor->{threshold_high} =~ s/\.0+$//;
$sensor->{threshold_low} =~ s/\.0+$//;
$sensor->{description} = $name;
$sensor->{type} = 'fan';
$sensor->{unit} = ($value =~ m/percent|%/ || $val < 100) ? '%' : 'rpm';
$sensor->{cmd} = "$ipmitool sdr get '$name' 2>/dev/null | perl -ne 'if (/Sensor Reading\\s*:\\s*([^\\s]+)/) { print \"\$1\\n\"; last }'";
my $id = sensor_name($name);
$sensors->{$id} = $sensor;
print "Found a fan sensor using IPMI: $name\n";
}
}
# Now look for power information
@lines = qx($ipmitool sdr type 'Current' 2>/dev/null);
if ($? == 0){
SENSOR: foreach my $l (@lines){
chomp $l;
if ($l !~ m/^(\w+[\s\w]+?\w+(\s%)?)\s*\|.*\|\s*([\w\.\s]+)\s*\|.*\|\s*([\-\w\.\s]+)$/){
print "Skiping line $l\n";
next SENSOR;
}
my $name = $1;
my $value = $4;
my $sensor = {};
if ($name =~ m/(Power)|(Pwr)|(Consumption)|(PS\d+\sCurr\sOut)/i || $value =~ m/W(att)?/i){
my @details = qx($ipmitool sdr get '$name' 2>/dev/null);
if ($? != 0){
print "Couldn't get detail for sensor $name\n";
next SENSOR;
}
my $val = undef;
my $unit = ($name =~ m/%/) ? '%' : 'Watt';
foreach my $d (@details){
chomp $d;
if ($d =~ m/^\s*Sensor\sReading\s*:\s*(\w+)/){
$val = $1;
if ($val !~ m/^\d+$/){
print "Skipping sensor $name, couldn't parse its value: $val\n";
next SENSOR;
}
}
elsif ($d =~ m/^\s*Upper\scritical\s*:\s*(\d+(\.\d+)?)/){
$sensor->{threshold_high} = ($unit eq '%') ? $1-$pwr_rel_margin : $1-$pwr_margin;
}
elsif ($d =~ m/^\s*Upper\snon\-critical\s*:\s*(\d+(\.\d+)?)/){
$sensor->{threshold_low} = ($unit eq '%') ? $1-$pwr_rel_margin : $1-$pwr_margin;
}
}
next SENSOR unless $val;
$sensor->{threshold_high} ||= ($unit eq '%') ? $def_pwr_rel_thres_high : $def_pwr_thres_high;
$sensor->{threshold_low} ||= ($unit eq '%') ? $def_pwr_rel_thres_high-$pwr_rel_hyst : $def_pwr_thres_high-$pwr_hyst;
$sensor->{threshold_high} =~ s/\.0+$//;
$sensor->{threshold_low} =~ s/\.0+$//;
$sensor->{description} = $name;
$sensor->{type} = 'power';
$sensor->{unit} = ($name =~ m/%/) ? '%' : 'Watt';
$sensor->{cmd} = "$ipmitool sdr get '$name' 2>/dev/null | perl -ne 'if (/Sensor Reading\\s*:\\s*([^\\s]+)/) { print \"\$1\\n\"; last }'";
my $id = sensor_name($name);
$sensors->{$id} = $sensor;
print "Found a power sensor using IPMI: $name\n";
}
}
}
}
# Try to detect lm_sensors, using the sensors command
if ($lmsensor && -x $lmsensor){
my @lines = qx($lmsensor);
if ($? == 0){
SENSOR: foreach my $l (@lines){
chomp $l;
# Looks like
# temp1: +27.8°C (crit = +119.0°C)
# or
# Core 0: +36.0°C (high = +80.0°C, crit = +100.0°C)
if ($l !~ m/^(\w+[\s\w]+?):\s*\+?(\d+)(\.\d+)?°C\s*(.*)$/){
next SENSOR;
}
my $name = $1;
my $val = $2;
my $thr = $4;
my $sensor = {};
if ($val !~ m/^\-?\d+$/){
print "Skipping sensor $name, couldn't parse its value: $val\n";
next SENSOR;
}
if ($name =~ m/^Core\s+\d+/){
print "Skipping individual core sensor $name\n";
next SENSOR;
}
if ($thr =~ m/high\s+=\s+\+(\d+(\.\d+)?)/){
$sensor->{threshold_high} = $1;
}
elsif ($thr =~ m/crit\s+=\s+\+(\d+(\.\d+)?)/){
$sensor->{threshold_high} = $1 - $temp_margin;
}
next SENSOR unless $val;
$sensor->{threshold_low} ||= ($sensor->{threshold_high}) ? $sensor->{threshold_high}-$temp_hyst : $def_temp_thres_high-$temp_hyst;
$sensor->{threshold_high} ||= $def_temp_thres_high;
$sensor->{threshold_high} =~ s/\.0+$//;
$sensor->{threshold_low} =~ s/\.0+$//;
$sensor->{description} = $name;
$sensor->{type} = 'temp';
$sensor->{unit} = '°C';
$sensor->{cmd} = "$lmsensor | perl -ne 'if (/^$name:\\s*\\+(\\d+)/) { print \"\$1\\n\"; last }'";
my $id = sensor_name($name);
$sensors->{$id} = $sensor;
print "Found a temperature sensor using lm_sensors: $name\n";
}
}
}
# Now, try to detect smart capable HDD
if ($smartctl && -x $smartctl){
foreach my $block (Zabbix::Agent::Addons::Disks::list_smart_hdd({ skip_remouvable => 1 })){
my @lines = qx($smartctl -A /dev/$block);
next if ($? != 0);
foreach my $l (@lines){
if ($l =~ /(Temperature_Celsius|Airflow_Temperature_Cel)/){
$sensors->{$block} = {
description => "$block temperature",
threshold_low => $def_hd_temp_thres_high-$temp_hd_hyst,
threshold_high => $def_hd_temp_thres_high,
type => 'temp',
unit => '°C',
cmd => "$smartctl -A /dev/$block | perl -ne 'if (/Temperature_Celsius(\\s+[^\\s]+){7}\\s+(\\d+(\\.\\d+)?)/) { print \"\$2\\n\"; last }'"
};
print "Found a temperature sensor using smartctl: $block\n";
last;
}
# Format found on some NVMe SSD
elsif ($l =~ /Temperature:\s+(\d+(\.\d+)?)\sCelsius/){
$sensors->{$block} = {
description => "$block temperature",
threshold_low => $def_hd_temp_thres_high-$temp_hd_hyst,
threshold_high => $def_hd_temp_thres_high,
type => 'temp',
unit => '°C',
cmd => "$smartctl -A /dev/$block | perl -ne 'if (/Temperature:\\s+(\\d+(\\.\\d+)?)/) { print \"\$1\\n\"; last }'"
};
print "Found a temperature sensor using smartctl: $block\n";
last;
}
}
}
# Some LSI based hardware RAID controller can report HDD temp
if (-e '/dev/megaraid_sas_ioctl_node'){
# Only check for the firsts 26 drives
foreach my $i (0..25){
my @res = qx($smartctl -d megaraid,$i -A /dev/sda);
next if ($? != 0);
foreach my $l (@res){
if ($l =~ m/Drive\sTrip\sTemperature:\s+(\d+)\s/){
$sensors->{'sda-' . $i} = {
description => "Temperature for disk No $i on sda",
type => 'temp',
threshold_high => $1-$temp_hd_margin,
threshold_low => $1-$temp_hd_margin-$temp_hd_hyst,
unit => '°C',
cmd => "$smartctl -A -d megaraid,$i /dev/sda | grep 'Current Drive Temperature' | awk '{print \$4}'"
};
print "Found a temperature sensor using smartctl (megaraid): sda-$i\n";
last;
}
elsif ($l =~ /(Temperature_Celsius|Airflow_Temperature_Cel)/){
$sensors->{'sda-' . $i} = {
description => "Temperature for disk No $i on sda",
threshold_low => $def_hd_temp_thres_high-$temp_hd_hyst,
threshold_high => $def_hd_temp_thres_high,
type => 'temp',
unit => '°C',
cmd => "$smartctl -A -d megaraid,$i /dev/sda | perl -ne 'if (/(Temperature_Celsius|Airflow_Temperature_Cel)(\\s+[^\\s]+){7}\\s+(\\d+)/) { print \"\$3\\n\"; last }'"
};
print "Found a temperature sensor using smartctl (megaraid): sda-$i\n";
last;
}
}
}
}
}
# Now check UPS
if ($upsc && -x $upsc){
foreach my $ups (Zabbix::Agent::Addons::UPS::list_ups()){
my @lines = qx($upsc $ups);
next if ($? != 0);
foreach my $l (@lines){
if ($l =~ m/^ups\.temperature:\s+(\d+(\.\d+)?)/){
$sensors->{'ups_' . lc $ups . '_temp'} = {
description => "ups temperature for $ups",
type => 'temp',
threshold_high => $def_ups_temp_thres_high,
threshold_low => $def_ups_temp_thres_high-$temp_ups_hyst,
unit => '°C',
cmd => "$upsc $ups ups.temperature 2>/dev/null"
};
print "Found a temperature sensor for ups $ups\n";
last;
}
elsif ($l =~ m/^ups\.load:\s+(\d+(\.\d+)?)/){
$sensors->{'ups_' . lc $ups . '_load'} = {
description => "ups load for $ups",
type => 'power',
threshold_high => $def_pwr_rel_thres_high,
threshold_low => $def_pwr_rel_thres_high-$pwr_rel_hyst,
unit => '%',
cmd => "$upsc $ups ups.load 2>/dev/null"
};
}
}
}
}
# TODO: add support for lm sensors, but its output is harder to parse
foreach my $s (sort keys %$sensors){
$cfg->set_block($s, $sensors->{$s});
}
$cfg->write($output);
# Take a sensor description and return a suitable string as sensor ID
sub sensor_name{
my $desc = shift;
my $id = lc $desc;
$id =~ s/[^\w]/_/g;
$id =~ s/%/percent/g;
$id =~ s/_rpm$//;
return $id;
}

View File

@ -1,27 +0,0 @@
#!/usr/bin/perl -w
use JSON;
# Ensure data is fresh in the cache so Zabbix can pick it up quickly
# You can run this script from a cron job (every 5 min for example), so the cache will always be fresh
# and the Zabbix Agent can run check_pve_sudo quickly
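# Example crontab entry (the installed path and name of this script are assumptions):
# */5 * * * * root /var/lib/zabbix/bin/update_pve_cache >/dev/null 2>&1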
qx(/var/lib/zabbix/bin/check_pve_sudo --cluster --cache=120);
my $nodes = from_json(qx(/var/lib/zabbix/bin/disco_pve_sudo --what=nodes));
foreach my $node (@{$nodes->{data}}){
qx(/var/lib/zabbix/bin/check_pve_sudo --node=$node->{'{#PVE_NODE_NAME}'} --cache=120);
}
my $pools = from_json(qx(/var/lib/zabbix/bin/disco_pve_sudo --what=pools));
foreach my $pool (@{$pools->{data}}){
qx(/var/lib/zabbix/bin/check_pve_sudo --pool=$pool->{'{#PVE_POOL_ID}'} --cache=120);
}
my $storages = from_json(qx(/var/lib/zabbix/bin/disco_pve_sudo --what=storage));
foreach my $stor (@{$storages->{data}}){
qx(/var/lib/zabbix/bin/check_pve_sudo --storage=$stor->{'{#PVE_STOR_ID}'} --cache=120);
}
my $guests = from_json(qx(/var/lib/zabbix/bin/disco_pve_sudo --what=guests));
foreach my $guest (@{$guests->{data}}){
qx(/var/lib/zabbix/bin/check_pve_sudo --guest=$guest->{'{#PVE_GUEST_ID}'} --cache=120);
}

View File

@ -1,923 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>5.0</version>
<date>2021-09-22T16:02:43Z</date>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<templates>
<template>
<template>Template_App_BackupPC</template>
<name>Template_App_BackupPC</name>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<items>
<item>
<name>BackupPC: General info</name>
<key>backuppc.general</key>
<delay>15m</delay>
<history>0</history>
<trends>0</trends>
<value_type>TEXT</value_type>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
</item>
<item>
<name>BackupPC: Total number of backups</name>
<type>DEPENDENT</type>
<key>backuppc.general[bkp]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>!backup(s)</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.bkp</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.general</key>
</master_item>
</item>
<item>
<name>BackupPC: Total full size</name>
<type>DEPENDENT</type>
<key>backuppc.general[full_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.full_size</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.general</key>
</master_item>
</item>
<item>
<name>BackupPC: Total history size</name>
<type>DEPENDENT</type>
<key>backuppc.general[history_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.history_size</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.general</key>
</master_item>
</item>
<item>
<name>BackupPC: Total number of hosts</name>
<type>DEPENDENT</type>
<key>backuppc.general[hosts]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>!host(s)</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.hosts</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.general</key>
</master_item>
</item>
<item>
<name>BackupPC: Global perf score</name>
<type>DEPENDENT</type>
<key>backuppc.general[perf]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.perf</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.general</key>
</master_item>
</item>
<item>
<name>BackupPC: Global compression ratio</name>
<type>DEPENDENT</type>
<key>backuppc.general[ratio]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.ratio</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.general</key>
</master_item>
</item>
<item>
<name>BackupPC: Total size</name>
<type>DEPENDENT</type>
<key>backuppc.general[total_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.total_size</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.general</key>
</master_item>
</item>
<item>
<name>Number of BackupPC processes</name>
<key>proc.num[,backuppc,,BackupPC]</key>
<delay>30m</delay>
<history>60d</history>
<trends>0</trends>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<request_method>POST</request_method>
</item>
<item>
<name>Number of raidsync processes</name>
<key>proc.num[,root,,BackupPC_raidsync]</key>
<delay>10m</delay>
<history>60d</history>
<trends>0</trends>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<request_method>POST</request_method>
<triggers>
<trigger>
<expression>{sum({$EXT_BACKUPS})}&lt;1 and {$EXT_BACKUPS} &gt; 0</expression>
<name>No recent copy on external support</name>
<priority>WARNING</priority>
</trigger>
</triggers>
</item>
</items>
<discovery_rules>
<discovery_rule>
<name>BackupPC: Entity discovery</name>
<key>backuppc.entity.discovery[]</key>
<delay>2h</delay>
<item_prototypes>
<item_prototype>
<name>BackupPC: Number of backups for {#BPC_ENTITY}</name>
<type>DEPENDENT</type>
<key>backuppc.entity[{#BPC_ENTITY},bkp]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.bkp</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.entity[{#BPC_ENTITY}]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: Sum of last full sizes for {#BPC_ENTITY}</name>
<type>DEPENDENT</type>
<key>backuppc.entity[{#BPC_ENTITY},full_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>o</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.full_size</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.entity[{#BPC_ENTITY}]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: Sum of history sizes for {#BPC_ENTITY}</name>
<type>DEPENDENT</type>
<key>backuppc.entity[{#BPC_ENTITY},history_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>o</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.history_size</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.entity[{#BPC_ENTITY}]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: Number of backed up hosts for {#BPC_ENTITY}</name>
<type>DEPENDENT</type>
<key>backuppc.entity[{#BPC_ENTITY},hosts]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.hosts</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.entity[{#BPC_ENTITY}]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: Performance indicator for {#BPC_ENTITY}</name>
<type>DEPENDENT</type>
<key>backuppc.entity[{#BPC_ENTITY},perf]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!h/j</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.perf</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.entity[{#BPC_ENTITY}]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: Compression ratio for {#BPC_ENTITY}</name>
<type>DEPENDENT</type>
<key>backuppc.entity[{#BPC_ENTITY},ratio]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.ratio</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.entity[{#BPC_ENTITY}]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: Total backups size for {#BPC_ENTITY}</name>
<type>DEPENDENT</type>
<key>backuppc.entity[{#BPC_ENTITY},size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>o</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.size</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.entity[{#BPC_ENTITY}]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: Info for entity {#BPC_ENTITY}</name>
<key>backuppc.entity[{#BPC_ENTITY}]</key>
<delay>1h</delay>
<history>0</history>
<trends>0</trends>
<value_type>TEXT</value_type>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
</item_prototype>
</item_prototypes>
<graph_prototypes>
<graph_prototype>
<name>BackupPC: Entity {#BPC_ENTITY}: Compression ratio and perf</name>
<ymin_type_1>FIXED</ymin_type_1>
<graph_items>
<graph_item>
<drawtype>GRADIENT_LINE</drawtype>
<color>43A047</color>
<item>
<host>Template_App_BackupPC</host>
<key>backuppc.entity[{#BPC_ENTITY},size]</key>
</item>
</graph_item>
<graph_item>
<sortorder>1</sortorder>
<drawtype>BOLD_LINE</drawtype>
<color>FF0000</color>
<yaxisside>RIGHT</yaxisside>
<item>
<host>Template_App_BackupPC</host>
<key>backuppc.entity[{#BPC_ENTITY},ratio]</key>
</item>
</graph_item>
<graph_item>
<sortorder>2</sortorder>
<drawtype>DASHED_LINE</drawtype>
<color>0040FF</color>
<yaxisside>RIGHT</yaxisside>
<item>
<host>Template_App_BackupPC</host>
<key>backuppc.entity[{#BPC_ENTITY},perf]</key>
</item>
</graph_item>
</graph_items>
</graph_prototype>
<graph_prototype>
<name>BackupPC: Entity {#BPC_ENTITY}: Full / History sizes</name>
<type>STACKED</type>
<graph_items>
<graph_item>
<color>4000FF</color>
<item>
<host>Template_App_BackupPC</host>
<key>backuppc.entity[{#BPC_ENTITY},full_size]</key>
</item>
</graph_item>
<graph_item>
<sortorder>1</sortorder>
<color>4DD0E1</color>
<item>
<host>Template_App_BackupPC</host>
<key>backuppc.entity[{#BPC_ENTITY},history_size]</key>
</item>
</graph_item>
</graph_items>
</graph_prototype>
</graph_prototypes>
</discovery_rule>
<discovery_rule>
<name>BackupPC: Hosts discovery</name>
<key>backuppc.host.discovery[]</key>
<delay>2h;50s/1-7,00:00-24:00</delay>
<filter>
<evaltype>OR</evaltype>
</filter>
<lifetime>15d</lifetime>
<item_prototypes>
<item_prototype>
<name>BackupPC: {#BPCHOST}: Last backup age</name>
<type>DEPENDENT</type>
<key>backuppc.host[{#BPCHOST},age]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>s</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.age</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.host[{#BPCHOST},all]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: Info de {#BPCHOST}</name>
<key>backuppc.host[{#BPCHOST},all]</key>
<delay>30m</delay>
<history>0</history>
<trends>0</trends>
<value_type>TEXT</value_type>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
</item_prototype>
<item_prototype>
<name>BackupPC: {#BPCHOST}: Number of backups</name>
<type>DEPENDENT</type>
<key>backuppc.host[{#BPCHOST},bkp]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>!backups</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.bkp</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.host[{#BPCHOST},all]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackuPPC: {#BPCHOST}: Compression ratio of last backup</name>
<type>DEPENDENT</type>
<key>backuppc.host[{#BPCHOST},comp_ratio]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.comp_ratio</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.host[{#BPCHOST},all]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: {#BPCHOST}: Last backup duration</name>
<type>DEPENDENT</type>
<key>backuppc.host[{#BPCHOST},duration]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>s</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.duration</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.host[{#BPCHOST},all]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: {#BPCHOST}: Backups status</name>
<type>DEPENDENT</type>
<key>backuppc.host[{#BPCHOST},enabled]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.enabled</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.host[{#BPCHOST},all]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: {#BPCHOST}: Number of errors on last backup</name>
<type>DEPENDENT</type>
<key>backuppc.host[{#BPCHOST},errors]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>!errors</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.errors</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.host[{#BPCHOST},all]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: {#BPCHOST}: Last full backup size</name>
<type>DEPENDENT</type>
<key>backuppc.host[{#BPCHOST},full_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.full_size</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.host[{#BPCHOST},all]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: {#BPCHOST}: History size</name>
<type>DEPENDENT</type>
<key>backuppc.host[{#BPCHOST},history_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.history_size</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.host[{#BPCHOST},all]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: {#BPCHOST}: New file size in last backup</name>
<type>DEPENDENT</type>
<key>backuppc.host[{#BPCHOST},new_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.new_size</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.host[{#BPCHOST},all]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: {#BPCHOST}: Average new sizes</name>
<type>DEPENDENT</type>
<key>backuppc.host[{#BPCHOST},new_size_avg]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.new_size_avg</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.host[{#BPCHOST},all]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: {#BPCHOST}: Median for new files sizes</name>
<type>DEPENDENT</type>
<key>backuppc.host[{#BPCHOST},new_size_median]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.new_size_median</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.host[{#BPCHOST},all]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: {#BPCHOST}: First quartile of new sizes</name>
<type>DEPENDENT</type>
<key>backuppc.host[{#BPCHOST},new_size_q1]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.new_size_q1</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.host[{#BPCHOST},all]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: {#BPCHOST}: Third quartile of new sizes</name>
<type>DEPENDENT</type>
<key>backuppc.host[{#BPCHOST},new_size_q3]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.new_size_q3</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.host[{#BPCHOST},all]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>BackupPC: {#BPCHOST}: Total backups size</name>
<type>DEPENDENT</type>
<key>backuppc.host[{#BPCHOST},total_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
<name>BackupPC</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.total_size</params>
</step>
</preprocessing>
<master_item>
<key>backuppc.host[{#BPCHOST},all]</key>
</master_item>
</item_prototype>
</item_prototypes>
<trigger_prototypes>
<trigger_prototype>
<expression>{Template_App_BackupPC:backuppc.host[{#BPCHOST},age].last(0)}&gt;{#BPCNOBACKUPWARNING}*24*3600 and {Template_App_BackupPC:backuppc.host[{#BPCHOST},enabled].last()}=1</expression>
<name>No backup for {#BPCHOST} since {ITEM.LASTVALUE1}</name>
<priority>WARNING</priority>
</trigger_prototype>
<trigger_prototype>
<expression>{Template_App_BackupPC:backuppc.host[{#BPCHOST},enabled].last()}=1 and {Template_App_BackupPC:backuppc.host[{#BPCHOST},bkp].last()}&gt;4 and {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].last()} &gt; ({Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_q3].last()} + ({Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_q3].last()} - {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_q1].last()}) * 1.5) and {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].last()} &gt; {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_avg].last()}*{#BPC_TOO_BIG_FACTOR} and {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].dayofweek()} &lt;&gt; 7 and {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].dayofweek()} &lt;&gt; 1 and ({Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].dayofweek()} &lt;&gt; 2 or {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].time()} &gt; {$BPC_SIZE_WARN_FROM_HOUR})</expression>
<recovery_mode>RECOVERY_EXPRESSION</recovery_mode>
<recovery_expression>{Template_App_BackupPC:backuppc.host[{#BPCHOST},enabled].last()}=0 or {Template_App_BackupPC:backuppc.host[{#BPCHOST},bkp].last()}&lt;=4 or {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].last()} &lt;= ({Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_q3].last()} + ({Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_q3].last()} - {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_q1].last()}) * 1.5) or {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].last()} &lt;= {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_avg].last()}*{#BPC_TOO_BIG_FACTOR}</recovery_expression>
<name>Suspiciously big ({ITEM.VALUE3}) backup for {#BPCHOST}</name>
<priority>INFO</priority>
</trigger_prototype>
<trigger_prototype>
<expression>{#BPC_TOO_SMALL_FACTOR}&gt;0 and {Template_App_BackupPC:backuppc.host[{#BPCHOST},enabled].last()}=1 and {Template_App_BackupPC:backuppc.host[{#BPCHOST},bkp].last()}&gt;4 and {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].last()} &lt; ({Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_q1].last()} - ({Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_q3].last()} - {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_q1].last()}) * 1.5) and {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].last()} &lt; {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_avg].last()}/{#BPC_TOO_SMALL_FACTOR} and {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].dayofweek()} &lt;&gt; 6 and {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].dayofweek()} &lt;&gt; 7 and {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].dayofweek()} &lt;&gt; 1 and ({Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].dayofweek()} &lt;&gt; 2 or {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].time()} &gt; {$BPC_SIZE_WARN_FROM_HOUR})</expression>
<recovery_mode>RECOVERY_EXPRESSION</recovery_mode>
<recovery_expression>{#BPC_TOO_SMALL_FACTOR}&lt;0 or {Template_App_BackupPC:backuppc.host[{#BPCHOST},enabled].last()}=0 or {Template_App_BackupPC:backuppc.host[{#BPCHOST},bkp].last()}&lt;=4 or {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].last()} &gt;= ({Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_q1].last()} - ({Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_q3].last()} - {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_q1].last()}) * 1.5) or {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size].last()} &gt;= {Template_App_BackupPC:backuppc.host[{#BPCHOST},new_size_avg].last()}/{#BPC_TOO_SMALL_FACTOR}</recovery_expression>
<name>Suspiciously small ({ITEM.VALUE3}) backups for {#BPCHOST}</name>
<priority>WARNING</priority>
</trigger_prototype>
<trigger_prototype>
<expression>{Template_App_BackupPC:backuppc.host[{#BPCHOST},errors].last(0)}&gt;{#BPCMAXERROR} and {Template_App_BackupPC:backuppc.host[{#BPCHOST},enabled].last()}=1</expression>
<name>{ITEM.LASTVALUE1} (xfer) in last backup of {#BPCHOST}</name>
<priority>WARNING</priority>
</trigger_prototype>
</trigger_prototypes>
<graph_prototypes>
<graph_prototype>
<name>BackupPC: Host {#BPCHOST}: Compression ratio and perf</name>
<ymin_type_1>FIXED</ymin_type_1>
<graph_items>
<graph_item>
<drawtype>GRADIENT_LINE</drawtype>
<color>43A047</color>
<item>
<host>Template_App_BackupPC</host>
<key>backuppc.host[{#BPCHOST},total_size]</key>
</item>
</graph_item>
<graph_item>
<sortorder>1</sortorder>
<drawtype>BOLD_LINE</drawtype>
<color>FF0000</color>
<yaxisside>RIGHT</yaxisside>
<item>
<host>Template_App_BackupPC</host>
<key>backuppc.host[{#BPCHOST},comp_ratio]</key>
</item>
</graph_item>
<graph_item>
<sortorder>2</sortorder>
<color>7E57C2</color>
<yaxisside>RIGHT</yaxisside>
<item>
<host>Template_App_BackupPC</host>
<key>backuppc.host[{#BPCHOST},duration]</key>
</item>
</graph_item>
</graph_items>
</graph_prototype>
<graph_prototype>
<name>BackupPC: Host {#BPCHOST}: Full / History sizes</name>
<type>STACKED</type>
<graph_items>
<graph_item>
<color>4000FF</color>
<item>
<host>Template_App_BackupPC</host>
<key>backuppc.host[{#BPCHOST},full_size]</key>
</item>
</graph_item>
<graph_item>
<sortorder>1</sortorder>
<color>4DD0E1</color>
<item>
<host>Template_App_BackupPC</host>
<key>backuppc.host[{#BPCHOST},history_size]</key>
</item>
</graph_item>
</graph_items>
</graph_prototype>
</graph_prototypes>
<request_method>POST</request_method>
</discovery_rule>
</discovery_rules>
<macros>
<macro>
<macro>{$BPC_SIZE_WARN_FROM_HOUR}</macro>
<value>080000</value>
<description>Size alerts will only start after this hour</description>
</macro>
<macro>
<macro>{$EXT_BACKUPS}</macro>
<value>0</value>
<description>If set to a positive duration, can alert if no raidsync backups has been made recently</description>
</macro>
</macros>
</template>
</templates>
<triggers>
<trigger>
<expression>{Template_App_BackupPC:proc.num[,backuppc,,BackupPC].sum(#2)}=0 and {Template_App_BackupPC:proc.num[,root,,BackupPC_raidsync].sum(#2)}=0</expression>
<name>BackupPC isn't running</name>
<priority>AVERAGE</priority>
</trigger>
</triggers>
</zabbix_export>

View File

@ -1,113 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>5.0</version>
<date>2021-06-19T12:07:47Z</date>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<templates>
<template>
<template>Template_App_DRBD</template>
<name>Template_App_DRBD</name>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<applications>
<application>
<name>DRBD</name>
</application>
<application>
<name>Storage</name>
</application>
</applications>
<discovery_rules>
<discovery_rule>
<name>DRBD resources</name>
<key>drbd.resource.discovery[]</key>
<delay>30m</delay>
<lifetime>2d</lifetime>
<item_prototypes>
<item_prototype>
<name>DRBD: {#DRBD_RES_NAME} connexion status</name>
<key>drbd.resource.status[{#DRBD_RES_NAME},cstate]</key>
<delay>5m</delay>
<history>180d</history>
<trends>0</trends>
<value_type>CHAR</value_type>
<applications>
<application>
<name>DRBD</name>
</application>
<application>
<name>Storage</name>
</application>
</applications>
<request_method>POST</request_method>
<trigger_prototypes>
<trigger_prototype>
<expression>{str(Connected)}=0 and {str(SyncSource)}=0 and&#13;
{str(SyncTarget)}=0</expression>
<name>DRBD: ({#DRBD_RES_NAME}): connexion is not established</name>
<priority>AVERAGE</priority>
</trigger_prototype>
</trigger_prototypes>
</item_prototype>
<item_prototype>
<name>DRBD: {#DRBD_RES_NAME} data status</name>
<key>drbd.resource.status[{#DRBD_RES_NAME},dstate]</key>
<delay>5m</delay>
<history>180d</history>
<trends>0</trends>
<value_type>CHAR</value_type>
<applications>
<application>
<name>DRBD</name>
</application>
<application>
<name>Storage</name>
</application>
</applications>
<request_method>POST</request_method>
<trigger_prototypes>
<trigger_prototype>
<expression>{str(UpToDate)}=0</expression>
<name>DRBD ({#DRBD_RES_NAME}): data is not up to date</name>
<priority>AVERAGE</priority>
</trigger_prototype>
</trigger_prototypes>
</item_prototype>
<item_prototype>
<name>DRBD: {#DRBD_RES_NAME} role</name>
<key>drbd.resource.status[{#DRBD_RES_NAME},role]</key>
<delay>5m</delay>
<history>180d</history>
<trends>0</trends>
<value_type>CHAR</value_type>
<applications>
<application>
<name>DRBD</name>
</application>
<application>
<name>Storage</name>
</application>
</applications>
<request_method>POST</request_method>
<trigger_prototypes>
<trigger_prototype>
<expression>{diff()}=1</expression>
<name>DRBD ({#DRBD_RES_NAME}) node role has changed</name>
<priority>AVERAGE</priority>
</trigger_prototype>
</trigger_prototypes>
</item_prototype>
</item_prototypes>
<request_method>POST</request_method>
</discovery_rule>
</discovery_rules>
</template>
</templates>
</zabbix_export>

View File

@ -1,610 +0,0 @@
zabbix_export:
version: '5.4'
date: '2022-01-20T13:55:05Z'
groups:
-
uuid: 7df96b18c230490a9a0a9e2307226338
name: Templates
templates:
-
uuid: 722c34dae28f471b992685f75b217e84
template: Template_App_Docker
name: Template_App_Docker
groups:
-
name: Templates
items:
-
uuid: b35116a368e94b0698951010ddb496b5
name: 'Docker: Number of paused container'
type: DEPENDENT
key: 'container.docker.check[global,ContainersPaused]'
delay: '0'
history: 30d
trends: 1095d
units: '!container(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.info.ContainersPaused
master_item:
key: 'container.docker.check[global]'
-
uuid: ff406b09a49f42ef80e004f7b8737c9b
name: 'Docker: Number of running container'
type: DEPENDENT
key: 'container.docker.check[global,ContainersRunning]'
delay: '0'
history: 30d
trends: 1095d
units: '!container(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.info.ContainersRunning
master_item:
key: 'container.docker.check[global]'
triggers:
-
uuid: 85e456243c714349b34ff6504d122004
expression: 'change(/Template_App_Docker/container.docker.check[global,ContainersRunning])=1'
name: 'Number of running containers changed to {ITEM.LASTVALUE1}'
priority: INFO
-
uuid: 69c12d97c6604c608fba4d2c0d4875bb
name: 'Docker: Number of stopped container'
type: DEPENDENT
key: 'container.docker.check[global,ContainersStopped]'
delay: '0'
history: 30d
trends: 1095d
units: '!container(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.info.ContainersStopped
master_item:
key: 'container.docker.check[global]'
-
uuid: c6831879964a4a3a90fc82530e6a0d43
name: 'Docker: Number of containers'
type: DEPENDENT
key: 'container.docker.check[global,Containers]'
delay: '0'
history: 30d
trends: 1095d
units: '!container(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.info.Containers
master_item:
key: 'container.docker.check[global]'
-
uuid: 1da51785c3d34faf8ba7ab17d0b46b38
name: 'Docker: Live restore status'
type: DEPENDENT
key: 'container.docker.check[global,LiveRestoreEnabled]'
delay: '0'
history: 7d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.info.LiveRestoreEnabled
master_item:
key: 'container.docker.check[global]'
triggers:
-
uuid: 85a2fc38a6bb42e183de3d4687f466e8
expression: 'last(/Template_App_Docker/container.docker.check[global,LiveRestoreEnabled])<>"true" and {$DOCKER_WARN_NO_LIVE_RESTORE}=1'
name: 'Docker live restore isn''t enabled'
priority: WARNING
-
uuid: ab2d3bd636964a21b764b2f33f2e109e
name: 'Docker: server version'
type: DEPENDENT
key: 'container.docker.check[global,ServerVersion]'
delay: '0'
history: 15d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.info.ServerVersion
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[global]'
triggers:
-
uuid: 8c535ff0d73240c395226ee91a8096c4
expression: '(last(/Template_App_Docker/container.docker.check[global,ServerVersion])<>last(/Template_App_Docker/container.docker.check[global,ServerVersion],#2))=1'
name: 'Docker version changed ({ITEM.LASTVALUE1})'
priority: WARNING
-
uuid: d5c424dc767c4a5ea1ee441e76770411
name: 'Docker global info'
key: 'container.docker.check[global]'
delay: 5m
history: '0'
trends: '0'
value_type: TEXT
discovery_rules:
-
uuid: e459f638e15f495db3e4a9060e60f7c8
name: 'Docker: container discovery'
key: 'container.docker.discovery[containers]'
delay: 15m
lifetime: 7d
item_prototypes:
-
uuid: 7f3fddee7aed48c8acc26e9aa98df298
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Disk IO Read'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},BlockIORead]'
delay: '0'
history: 30d
trends: 1095d
units: B/s
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.BlockIORead
-
type: CHANGE_PER_SECOND
parameters:
- ''
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: 27f4e3e3a33c412ead03200de17bc9b5
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Disk IO Write'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},BlockIOWrite]'
delay: '0'
history: 30d
trends: 1095d
units: B/s
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.BlockIOWrite
-
type: CHANGE_PER_SECOND
parameters:
- ''
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: f59ee387b12740fc90ff6d990422e92d
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: CPU'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},CPUPerc]'
delay: '0'
history: 30d
trends: 1095d
value_type: FLOAT
units: '%'
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.CPUPerc
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
trigger_prototypes:
-
uuid: d2bdccc7bf8a49c6a2c584a5394a0c27
expression: 'avg(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},CPUPerc],1h)>{$DOCKER_CPU_PCT_WARN:"{#DOCKER_CONTAINER_NAME}"}'
recovery_mode: RECOVERY_EXPRESSION
recovery_expression: 'avg(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},CPUPerc],1h)<({$DOCKER_CPU_PCT_WARN:"{#DOCKER_CONTAINER_NAME}"}-2) or avg(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},CPUPerc],5m)<({$DOCKER_CPU_PCT_WARN:"{#DOCKER_CONTAINER_NAME}"}/3)'
name: 'Container {#DOCKER_CONTAINER_NAME} CPU usage is {ITEM.VALUE1}'
priority: INFO
-
uuid: f56ddbaef3af4fa898ae2403a812db3c
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Dead'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},Dead]'
delay: '0'
history: 7d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.inspect.State.Dead
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: e09f311feb1749cc88e8c51c8425c324
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Error'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},Error]'
delay: '0'
history: 7d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.inspect.State.Error
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: b75ac0b193d2451e8ee3ce0950b7b07a
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Image'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},Image]'
delay: '0'
history: 7d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.inspect.Config.Image
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: ba301501bbec40e6add5a47d2fd0ee15
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Memory usage'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},MemCurrent]'
delay: '0'
history: 30d
trends: 1095d
units: B
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.MemCurrent
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: 5895cddf57fd4b5e8917593cba941491
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Memory limit'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},MemLimit]'
delay: '0'
history: 30d
trends: 1095d
units: B
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.MemLimit
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: 640bce04f3fc483fa495ac72208effa7
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Memory used (%)'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},MemPerc]'
delay: '0'
history: 30d
trends: 1095d
value_type: FLOAT
units: '%'
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.MemPerc
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
trigger_prototypes:
-
uuid: 9fdd469b0866409ca7889288eb5f229a
expression: 'last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},MemPerc])>{$DOCKER_MEM_PCT_WARN:"{#DOCKER_CONTAINER_NAME}"}'
recovery_mode: RECOVERY_EXPRESSION
recovery_expression: 'last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},MemPerc])<({$DOCKER_MEM_PCT_WARN:"{#DOCKER_CONTAINER_NAME}"}-2)'
name: 'Container {#DOCKER_CONTAINER_NAME} memory usage is at {ITEM.LASTVALUE1}'
priority: AVERAGE
manual_close: 'YES'
-
uuid: eb58fb55bc0442319650e32a8ba1faf4
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Network in'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},NetIOIn]'
delay: '0'
history: 30d
trends: 1095d
units: b/s
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.NetIOIn
-
type: CHANGE_PER_SECOND
parameters:
- ''
-
type: MULTIPLIER
parameters:
- '8'
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: 6a97e94be7d1432fb11a92072ea9e55f
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Network out'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},NetIOOut]'
delay: '0'
history: 30d
trends: 1095d
units: b/s
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.NetIOOut
-
type: CHANGE_PER_SECOND
parameters:
- ''
-
type: MULTIPLIER
parameters:
- '8'
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: 7be56a29b2464c5e96c2f4f2b3e02fba
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: OOMKilled'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},OOMKilled]'
delay: '0'
history: 7d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.inspect.State.OOMKilled
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: a5ed78ba8acf43a488b3833d14e39381
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Number of processes'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},PIDs]'
delay: '0'
history: 30d
trends: 1095d
units: '!process(es)'
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.PIDs
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: 1213e0b7fc824442b50ab4ff4cd1047b
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Restarting'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},Restarting]'
delay: '0'
history: 7d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.inspect.State.Restarting
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: 418b35e93f114be986ecdd5a9d259283
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Status'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status]'
delay: '0'
history: 7d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.inspect.State.Status
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
trigger_prototypes:
-
uuid: 45e35825ed184b0290d1bc1ed27e48e4
expression: 'find(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status],,"regexp","^(running|exited)$")<>1'
name: 'Container {#DOCKER_CONTAINER_NAME} is {ITEM.LASTVALUE1}'
priority: WARNING
manual_close: 'YES'
-
uuid: ab2c34de197f4ea197e607604c936f34
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Uptime'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},Uptime]'
delay: '0'
history: 30d
trends: 1095d
units: s
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.Uptime
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: b232a5b3aedd4072a56460dd041579b1
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Info'
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
delay: 5m
history: '0'
trends: '0'
value_type: TEXT
trigger_prototypes:
-
uuid: 57d9a11b38b246539a82bf4ba1a79a98
expression: 'last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Uptime])<{$DOCKER_UPTIME_WARN:"{#DOCKER_CONTAINER_NAME}"} and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status],#2)="running" and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status])="running"'
name: 'Container {#DOCKER_CONTAINER_NAME} has just restarted'
priority: INFO
-
uuid: ce3cc3148fa24c6097cbf38b47e9e655
expression: 'last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Uptime])<{$DOCKER_UPTIME_WARN:"{#DOCKER_CONTAINER_NAME}"} and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status],#2)="exited" and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status])="running"'
name: 'Container {#DOCKER_CONTAINER_NAME} has just started'
priority: INFO
-
uuid: ebdea3b26c624714811124fe4681ccec
expression: 'max(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Uptime],20m)<{$DOCKER_UPTIME_WARN:"{#DOCKER_CONTAINER_NAME}"} and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Uptime])>0 and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status])="running" and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status],#2)="running" and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status],#3)="running"'
name: 'Container {#DOCKER_CONTAINER_NAME} seems to be restarting in a loop'
priority: AVERAGE
manual_close: 'YES'
graph_prototypes:
-
uuid: 4a9aa378970045399303dcead39c1e53
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: CPU'
ymin_type_1: FIXED
graph_items:
-
sortorder: '1'
drawtype: GRADIENT_LINE
color: F63100
item:
host: Template_App_Docker
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},CPUPerc]'
-
uuid: 7cd7cd8fd24d492ab7995a882b7bf9af
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Disk IO'
graph_items:
-
sortorder: '1'
drawtype: FILLED_REGION
color: FFAB91
item:
host: Template_App_Docker
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},BlockIORead]'
-
sortorder: '2'
drawtype: FILLED_REGION
color: A5D6A7
item:
host: Template_App_Docker
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},BlockIOWrite]'
-
uuid: 0fec757ff2f34e4fbab3be8ea455bc5f
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Memory'
ymin_type_1: FIXED
graph_items:
-
sortorder: '1'
color: 2774A4
item:
host: Template_App_Docker
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},MemCurrent]'
-
uuid: 948dccce10c54510810a64612933ca89
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Network'
graph_items:
-
sortorder: '1'
drawtype: FILLED_REGION
color: B39DDB
item:
host: Template_App_Docker
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},NetIOIn]'
-
sortorder: '2'
drawtype: FILLED_REGION
color: 80DEEA
item:
host: Template_App_Docker
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},NetIOOut]'
macros:
-
macro: '{$DOCKER_CPU_PCT_WARN}'
value: '90'
-
macro: '{$DOCKER_MEM_PCT_WARN}'
value: '85'
-
macro: '{$DOCKER_UPTIME_WARN}'
value: '300'
-
macro: '{$DOCKER_WARN_NO_LIVE_RESTORE}'
value: '1'
graphs:
-
uuid: 9c9aba9c46e145c7a8acd208edb1d147
name: 'Docker: Number of containers'
type: STACKED
graph_items:
-
sortorder: '1'
color: 1A7C11
item:
host: Template_App_Docker
key: 'container.docker.check[global,ContainersPaused]'
-
sortorder: '2'
color: F63100
item:
host: Template_App_Docker
key: 'container.docker.check[global,ContainersRunning]'
-
sortorder: '3'
color: 2774A4
item:
host: Template_App_Docker
key: 'container.docker.check[global,ContainersStopped]'

View File

@ -1,759 +0,0 @@
zabbix_export:
version: '5.4'
date: '2021-10-19T12:29:52Z'
groups:
-
uuid: 7df96b18c230490a9a0a9e2307226338
name: Templates
templates:
-
uuid: 3a658cb77f26469a8a114b1bcd4734e9
template: Template_App_Elasticsearch
name: Template_App_Elasticsearch
groups:
-
name: Templates
items:
-
uuid: 1957d2e7393348f9ae73bfc07c1122a3
name: 'ES Cluster information'
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
delay: 3m
history: '0'
trends: '0'
value_type: TEXT
tags:
-
tag: Application
value: Elasticsearch
-
uuid: fec408ef6d3249a885a3f9842016ccd4
name: 'ES: Cluster name'
type: DEPENDENT
key: 'elasticsearch.cluster[cluster_name]'
delay: '0'
history: 30d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.cluster_name
-
type: DISCARD_UNCHANGED
parameters:
- ''
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 286f9eb4d1504704999abd6a98b09a5c
name: 'ES: Number of indices'
type: DEPENDENT
key: 'elasticsearch.cluster[indices.count]'
delay: '0'
history: 60d
trends: 1095d
units: '!index(es)'
preprocessing:
-
type: JSONPATH
parameters:
- $.indices.count
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
triggers:
-
uuid: dd7bc1621b994f24917d5beb4c07e17f
expression: 'abs(change(/Template_App_Elasticsearch/elasticsearch.cluster[indices.count]))>0'
recovery_mode: NONE
name: 'Number of indices has changed to {ITEM.LASTVALUE1}'
priority: INFO
manual_close: 'YES'
-
uuid: 8b990ab728304cd98d264fdbbe919795
name: 'ES: Number of documents'
type: DEPENDENT
key: 'elasticsearch.cluster[indices.docs.count]'
delay: '0'
history: 60d
trends: 1095d
units: '!document(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.indices.docs.count
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 4d1b8e4e358749739e3439bf9489b074
name: 'ES: Number of deleted documents'
type: DEPENDENT
key: 'elasticsearch.cluster[indices.docs.deleted]'
delay: '0'
history: 60d
trends: 1095d
units: '!document(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.indices.docs.deleted
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 0f472265bad44e849866d87de2c50474
name: 'ES: Number of primary shards'
type: DEPENDENT
key: 'elasticsearch.cluster[indices.shards.primaries]'
delay: '0'
history: 60d
trends: 1095d
units: '!shard(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.indices.shards.primaries
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 7118fffe2a864915947ce19570885993
name: 'ES: Number of replication shards'
type: DEPENDENT
key: 'elasticsearch.cluster[indices.shards.replication]'
delay: '0'
history: 60d
trends: 1095d
units: '!shard(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.indices.shards.replication
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 019eb7caa2ff45aa8aad4da89b340a54
name: 'ES: Number of shards'
type: DEPENDENT
key: 'elasticsearch.cluster[indices.shards.total]'
delay: '0'
history: 60d
trends: 1095d
units: '!shard(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.indices.shards.total
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 8f0660a8b80041d987a91986608a43d2
name: 'ES: Store size'
type: DEPENDENT
key: 'elasticsearch.cluster[indices.store.size]'
delay: '0'
history: 60d
trends: 1095d
units: B
preprocessing:
-
type: JSONPATH
parameters:
- $.indices.store.size_in_bytes
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 854e270d75ef4c6fad22ebd553f6438e
name: 'ES: Number of data nodes'
type: DEPENDENT
key: 'elasticsearch.cluster[nodes.data]'
delay: '0'
history: 60d
trends: 1095d
units: '!node(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.nodes.count.data
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 52f72a68d734465b8422fb4e3ad084a7
name: 'ES: Number of failed nodes'
type: DEPENDENT
key: 'elasticsearch.cluster[nodes.failed]'
delay: '0'
history: 60d
trends: 1095d
units: '!node(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $._nodes.failed
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
triggers:
-
uuid: c188a92cd4444d7f8fe4c635cf35e656
expression: 'last(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.failed])>0'
name: 'ES cluster has {ITEM.LASTVALUE1} failed node(s)'
priority: AVERAGE
-
uuid: 94de965d25ed41cd8d5ef367e9a22497
name: 'ES: Available space'
type: DEPENDENT
key: 'elasticsearch.cluster[nodes.fs.available_in_bytes]'
delay: '0'
history: 60d
trends: 1095d
units: B
preprocessing:
-
type: JSONPATH
parameters:
- $.nodes.fs.available_in_bytes
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 376c2cd6bcfa4a21867411af6d84f5d1
name: 'ES: Total space'
type: DEPENDENT
key: 'elasticsearch.cluster[nodes.fs.total_in_bytes]'
delay: '0'
history: 60d
trends: 1095d
units: B
preprocessing:
-
type: JSONPATH
parameters:
- $.nodes.fs.total_in_bytes
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 20a0526ae5054c808a95c4cc9b3356eb
name: 'ES: Number of master nodes'
type: DEPENDENT
key: 'elasticsearch.cluster[nodes.master]'
delay: '0'
history: 60d
trends: 1095d
units: '!node(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.nodes.count.master
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 0acd45bc9952407ab582689b0b483149
name: 'ES: Number of working nodes'
type: DEPENDENT
key: 'elasticsearch.cluster[nodes.successfull]'
delay: '0'
history: 60d
trends: 1095d
units: '!node(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $._nodes.successful
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 8ef89bc0bcde44ed89a361a9541f7b85
name: 'ES: Number of nodes'
type: DEPENDENT
key: 'elasticsearch.cluster[nodes.total]'
delay: '0'
history: 60d
trends: 1095d
units: '!node(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $._nodes.total
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
triggers:
-
uuid: 82cad93d736248e8b520d25e09cc0fd9
expression: 'abs(change(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.total]))>0'
recovery_mode: NONE
name: 'Number of nodes has changed to {ITEM.LASTVALUE1}'
priority: INFO
manual_close: 'YES'
-
uuid: 4782a3f38fb14c16ba2066c3504727da
name: 'ES: Cluster status'
type: DEPENDENT
key: 'elasticsearch.cluster[status]'
delay: '0'
history: 30d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.status
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 10m
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
triggers:
-
uuid: b86396f5fcca4dfe825f8ae022ef8edf
expression: 'find(/Template_App_Elasticsearch/elasticsearch.cluster[status],,"like","red")=1'
name: 'ES cluster in red status'
priority: HIGH
-
uuid: 275c27ab25874666a1cfd8f36df549d0
expression: 'find(/Template_App_Elasticsearch/elasticsearch.cluster[status],,"like","green")=0'
name: 'ES cluster is in {ITEM.LASTVALUE1} status'
priority: WARNING
manual_close: 'YES'
dependencies:
-
name: 'ES cluster in red status'
expression: 'find(/Template_App_Elasticsearch/elasticsearch.cluster[status],,"like","red")=1'
-
uuid: c54941e310124d4881bcbb76912a72c9
expression: 'nodata(/Template_App_Elasticsearch/elasticsearch.cluster[status],30m)=1'
name: 'No data for Elasticsearch cluster monitoring since 30m'
priority: WARNING
discovery_rules:
-
uuid: bf3408829ed946a99e0c9640698a2eed
name: 'Indices discovery'
key: 'elasticsearch.discovery[{$ES_URL},{$ES_USER},{$ES_PASS},indices]'
delay: 30m
lifetime: 7d
item_prototypes:
-
uuid: 67125ce8f0924ee0a124f06d40715005
name: 'ES: Index: {#ES_INDEX_NAME}: Info'
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},index,{#ES_INDEX_NAME}]'
delay: 3m
history: '0'
trends: '0'
value_type: TEXT
tags:
-
tag: Application
value: Elasticsearch
-
uuid: d457ef885ca442c58fb3c336f2dd8f1d
name: 'ES: Index: {#ES_INDEX_NAME}: Number of active primary shards'
type: DEPENDENT
key: 'elasticsearch.index[{#ES_INDEX_NAME},active_primary_shards]'
delay: '0'
history: 60d
trends: 1095d
units: '!shard(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.active_primary_shards
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},index,{#ES_INDEX_NAME}]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 465dbc25a40948f392287f784aea1bbe
name: 'ES: Index: {#ES_INDEX_NAME}: Number of active shards'
type: DEPENDENT
key: 'elasticsearch.index[{#ES_INDEX_NAME},active_shards]'
delay: '0'
history: 60d
trends: 1095d
units: '!shard(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.active_shards
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},index,{#ES_INDEX_NAME}]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 58c0a433f7494045a1aac59586893415
name: 'ES: Index: {#ES_INDEX_NAME}: Number of replicas'
type: DEPENDENT
key: 'elasticsearch.index[{#ES_INDEX_NAME},number_of_replicas]'
delay: '0'
history: 60d
trends: 1095d
preprocessing:
-
type: JSONPATH
parameters:
- $.number_of_replicas
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},index,{#ES_INDEX_NAME}]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 03c1b057648c4e96937191731206b1ac
name: 'ES: Index: {#ES_INDEX_NAME}: Number of shards'
type: DEPENDENT
key: 'elasticsearch.index[{#ES_INDEX_NAME},number_of_shards]'
delay: '0'
history: 60d
trends: 1095d
units: '!shard(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.number_of_shards
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},index,{#ES_INDEX_NAME}]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: a13865f166344478ae5e3ea02962b8e2
name: 'ES: Index: {#ES_INDEX_NAME}: Number of relocating shards'
type: DEPENDENT
key: 'elasticsearch.index[{#ES_INDEX_NAME},relocating_shards]'
delay: '0'
history: 60d
trends: 1095d
units: '!shard(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.relocating_shards
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},index,{#ES_INDEX_NAME}]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: ba3dda72f4144a989e276b0d13c38536
name: 'ES: Index: {#ES_INDEX_NAME}: Status'
type: DEPENDENT
key: 'elasticsearch.index[{#ES_INDEX_NAME},status]'
delay: '0'
history: 30d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.status
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 10m
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},index,{#ES_INDEX_NAME}]'
tags:
-
tag: Application
value: Elasticsearch
trigger_prototypes:
-
uuid: baeff34841114b31883f0b7e017357a0
expression: 'find(/Template_App_Elasticsearch/elasticsearch.index[{#ES_INDEX_NAME},status],,"like","red")=1'
name: 'ES index {#ES_INDEX_NAME} is in red status'
priority: HIGH
manual_close: 'YES'
-
uuid: 91d45c74b96049158644dc2c4bdc418e
expression: 'find(/Template_App_Elasticsearch/elasticsearch.index[{#ES_INDEX_NAME},status],,"like","green")=0'
name: 'ES index {#ES_INDEX_NAME} is in {ITEM.LASTVALUE1} status'
priority: WARNING
manual_close: 'YES'
dependencies:
-
name: 'ES index {#ES_INDEX_NAME} is in red status'
expression: 'find(/Template_App_Elasticsearch/elasticsearch.index[{#ES_INDEX_NAME},status],,"like","red")=1'
-
uuid: 421078e2f66f4fe787a710c10eb45419
expression: 'nodata(/Template_App_Elasticsearch/elasticsearch.index[{#ES_INDEX_NAME},status],30m)=1'
name: 'No data for Elasticsearch index {#ES_INDEX_NAME} monitoring since 30m'
priority: WARNING
manual_close: 'YES'
dependencies:
-
name: 'No data for Elasticsearch cluster monitoring since 30m'
expression: 'nodata(/Template_App_Elasticsearch/elasticsearch.cluster[status],30m)=1'
-
uuid: 9912e675734d42e7b3810db51a9c13de
name: 'Nodes discovery'
key: 'elasticsearch.discovery[{$ES_URL},{$ES_USER},{$ES_PASS},nodes]'
delay: 2h
lifetime: 7d
item_prototypes:
-
uuid: 70e411ebd3de4f79936ec65ad4b3e41a
name: 'ES: Node {#ES_NODE_NAME} ({#ES_NODE_ID}): Info'
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},node,{#ES_NODE_ID}]'
delay: 5m
history: '0'
trends: '0'
value_type: TEXT
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 0b7eed934ccf4e90b95daced49c7ccde
name: 'ES: Node {#ES_NODE_NAME} ({#ES_NODE_ID}): Uptime'
type: DEPENDENT
key: 'elasticsearch.node[{#ES_NODE_ID},jvm.start_time_in_millis]'
delay: '0'
history: 30d
trends: 1095d
units: ms
preprocessing:
-
type: JSONPATH
parameters:
- $.jvm.start_time_in_millis
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},node,{#ES_NODE_ID}]'
tags:
-
tag: Application
value: Elasticsearch
trigger_prototypes:
-
uuid: e2b5fac004e240e1b381a0f3ce17db4f
expression: 'last(/Template_App_Elasticsearch/elasticsearch.node[{#ES_NODE_ID},jvm.start_time_in_millis])<600000'
name: 'ES node {#ES_NODE_NAME} restarted recently'
priority: INFO
manual_close: 'YES'
-
uuid: c335b6ee20d8497393125f9b1a4f7157
name: 'ES: Node {#ES_NODE_NAME} ({#ES_NODE_ID}): ES version'
type: DEPENDENT
key: 'elasticsearch.node[{#ES_NODE_ID},version]'
delay: '0'
history: 30d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.version
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 6h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},node,{#ES_NODE_ID}]'
tags:
-
tag: Application
value: Elasticsearch
trigger_prototypes:
-
uuid: 15b5d7f22ca2422f9f79f6ee0196d38e
expression: 'last(/Template_App_Elasticsearch/elasticsearch.node[{#ES_NODE_ID},version],#1)<>last(/Template_App_Elasticsearch/elasticsearch.node[{#ES_NODE_ID},version],#2)'
recovery_mode: NONE
name: 'ES version changed to {ITEM.LASTVALUE1} on node {#ES_NODE_NAME} ({#ES_NODE_ID})'
priority: INFO
manual_close: 'YES'
macros:
-
macro: '{$ES_PASS}'
-
macro: '{$ES_SPACE_PCT_CRIT}'
value: '88'
-
macro: '{$ES_SPACE_PCT_WARN}'
value: '78'
-
macro: '{$ES_URL}'
value: 'http://localhost:9200'
-
macro: '{$ES_USER}'
triggers:
-
uuid: e5ff617704b047e2814469075a1a9e38
expression: '(100*last(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.fs.available_in_bytes])/last(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.fs.total_in_bytes]))<(100-{$ES_SPACE_PCT_WARN})'
name: 'Low free disk space'
priority: WARNING
dependencies:
-
name: 'Very low free disk space'
expression: '(100*last(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.fs.available_in_bytes])/last(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.fs.total_in_bytes]))<(100-{$ES_SPACE_PCT_CRIT})'
-
uuid: c6dbde1d2a844e338bb05dee6f59b114
expression: '(100*last(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.fs.available_in_bytes])/last(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.fs.total_in_bytes]))<(100-{$ES_SPACE_PCT_CRIT})'
name: 'Very low free disk space'
priority: HIGH
graphs:
-
uuid: 83738984fe6f4ab1b5a71e150c248ae4
name: 'ES: Storage size'
graph_items:
-
drawtype: GRADIENT_LINE
color: 1A7C11
item:
host: Template_App_Elasticsearch
key: 'elasticsearch.cluster[indices.store.size]'

View File

@ -1,138 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>5.0</version>
<date>2021-06-19T12:08:07Z</date>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<templates>
<template>
<template>Template_App_GlusterFS</template>
<name>Template_App_GlusterFS</name>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<applications>
<application>
<name>Gluster</name>
</application>
<application>
<name>Storage</name>
</application>
</applications>
<items>
<item>
<name>GlusterFS: memory used by gluster processes</name>
<key>proc.mem[glusterfs]</key>
<delay>5m</delay>
<trends>730d</trends>
<units>B</units>
<applications>
<application>
<name>Gluster</name>
</application>
<application>
<name>Storage</name>
</application>
</applications>
<request_method>POST</request_method>
</item>
<item>
<name>GlusterFS: number of processes</name>
<key>proc.num[glusterfs]</key>
<delay>5m</delay>
<trends>730d</trends>
<applications>
<application>
<name>Gluster</name>
</application>
<application>
<name>Storage</name>
</application>
</applications>
<request_method>POST</request_method>
<triggers>
<trigger>
<expression>{last()}&lt;1</expression>
<name>GlusterFS: No gluster process running</name>
<priority>HIGH</priority>
</trigger>
</triggers>
</item>
</items>
<discovery_rules>
<discovery_rule>
<name>GlusterFS: peer discovery</name>
<key>gluster.discovery[peers]</key>
<delay>2h</delay>
<item_prototypes>
<item_prototype>
<name>GlusterFS peer: {#GLUSTER_PEER_UUID} ({#GLUSTER_PEER_HOST}) status</name>
<key>gluster.peer.status[{#GLUSTER_PEER_UUID}]</key>
<delay>633s</delay>
<trends>0</trends>
<value_type>CHAR</value_type>
<applications>
<application>
<name>Gluster</name>
</application>
<application>
<name>Storage</name>
</application>
</applications>
<request_method>POST</request_method>
<trigger_prototypes>
<trigger_prototype>
<expression>{str(Connected)}=0</expression>
<name>GlusterFS peer {#GLUSTER_PEER_HOST} disconnected</name>
<priority>HIGH</priority>
</trigger_prototype>
</trigger_prototypes>
</item_prototype>
</item_prototypes>
<request_method>POST</request_method>
</discovery_rule>
<discovery_rule>
<name>GlusterFS: Volume discovery</name>
<key>gluster.discovery[volumes]</key>
<delay>2h</delay>
<item_prototypes>
<item_prototype>
<name>GlusterFS {#GLUSTER_VOL_NAME} status</name>
<key>gluster.volume.status[{#GLUSTER_VOL_NAME},{#GLUSTER_VOL_BRICKS}]</key>
<delay>614s</delay>
<trends>0</trends>
<value_type>CHAR</value_type>
<applications>
<application>
<name>Gluster</name>
</application>
<application>
<name>Storage</name>
</application>
</applications>
<request_method>POST</request_method>
<trigger_prototypes>
<trigger_prototype>
<expression>{str(OK)}=0</expression>
<name>GlusterFS: {#GLUSTER_VOL_NAME} failure detected</name>
<priority>HIGH</priority>
</trigger_prototype>
<trigger_prototype>
<expression>{nodata(2460)}=1</expression>
<name>GlusterFS: {#GLUSTER_VOL_NAME} not sending health info</name>
<priority>AVERAGE</priority>
</trigger_prototype>
</trigger_prototypes>
</item_prototype>
</item_prototypes>
<request_method>POST</request_method>
</discovery_rule>
</discovery_rules>
</template>
</templates>
</zabbix_export>

View File

@ -1,97 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>5.0</version>
<date>2021-06-19T12:08:22Z</date>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<templates>
<template>
<template>Template_App_HSQL</template>
<name>Template_App_HSQL</name>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<applications>
<application>
<name>HSQL</name>
</application>
</applications>
<items>
<item>
<name>HSQL TCP Service</name>
<key>net.tcp.listen[{$HSQL_PORT}]</key>
<delay>6m</delay>
<history>30d</history>
<trends>0</trends>
<applications>
<application>
<name>HSQL</name>
</application>
</applications>
<triggers>
<trigger>
<expression>{last()}&lt;&gt;1</expression>
<name>HSQL port {$HSQL_PORT} is not listening</name>
<priority>WARNING</priority>
<dependencies>
<dependency>
<name>No HSQL Service running</name>
<expression>{Template_App_HSQL:proc.num[manta,hyperfile,,HFCS-Serveur].last()}&lt;1</expression>
</dependency>
</dependencies>
</trigger>
</triggers>
</item>
<item>
<name>HSQL Server Processes</name>
<key>proc.num[manta,hyperfile,,HFCS-Serveur]</key>
<delay>5m</delay>
<history>30d</history>
<units>!process</units>
<applications>
<application>
<name>HSQL</name>
</application>
</applications>
<triggers>
<trigger>
<expression>{last()}&lt;1</expression>
<name>No HSQL Service running</name>
<priority>WARNING</priority>
</trigger>
</triggers>
</item>
<item>
<name>HSQL MantaManager Processes</name>
<key>proc.num[mantamanager,root,,]</key>
<delay>5m</delay>
<history>30d</history>
<units>!process</units>
<applications>
<application>
<name>HSQL</name>
</application>
</applications>
<triggers>
<trigger>
<expression>{last()}&lt;1</expression>
<name>No HSQL Manager Service running</name>
<priority>WARNING</priority>
</trigger>
</triggers>
</item>
</items>
<macros>
<macro>
<macro>{$HSQL_PORT}</macro>
<value>4900</value>
</macro>
</macros>
</template>
</templates>
</zabbix_export>

View File

@ -1,363 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>5.0</version>
<date>2021-06-19T12:08:45Z</date>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<templates>
<template>
<template>Template_App_Multipath</template>
<name>Template_App_Multipath</name>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<applications>
<application>
<name>mpath</name>
</application>
</applications>
<discovery_rules>
<discovery_rule>
<name>Multipath devices discovery</name>
<key>vfs.mpath.discovery[mpath]</key>
<delay>20m</delay>
<lifetime>2d</lifetime>
<item_prototypes>
<item_prototype>
<name>Multipath info for {#MPATH_DEV}</name>
<key>vfs.mpath.info[{#MPATH_DEV}]</key>
<delay>5m</delay>
<history>0</history>
<trends>0</trends>
<value_type>TEXT</value_type>
<applications>
<application>
<name>mpath</name>
</application>
</applications>
</item_prototype>
<item_prototype>
<name>Multipath: Status for {#MPATH_DEV}</name>
<type>DEPENDENT</type>
<key>vfs.mpath[{#MPATH_DEV},dm_st]</key>
<delay>0</delay>
<history>30d</history>
<trends>0</trends>
<value_type>TEXT</value_type>
<applications>
<application>
<name>mpath</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.dm_st</params>
</step>
</preprocessing>
<master_item>
<key>vfs.mpath.info[{#MPATH_DEV}]</key>
</master_item>
<trigger_prototypes>
<trigger_prototype>
<expression>{str(active)}&lt;&gt;1</expression>
<name>mpath device {#MPATH_DEV} is in {ITEM.LASTVALUE1} state</name>
<priority>AVERAGE</priority>
</trigger_prototype>
<trigger_prototype>
<expression>{nodata(1800)}=1</expression>
<name>No more monitoring data for {#MPATH_DEV}</name>
<priority>AVERAGE</priority>
<manual_close>YES</manual_close>
</trigger_prototype>
</trigger_prototypes>
</item_prototype>
<item_prototype>
<name>Multipath: Errors for {#MPATH_DEV}</name>
<type>DEPENDENT</type>
<key>vfs.mpath[{#MPATH_DEV},errors]</key>
<delay>0</delay>
<history>30d</history>
<trends>0</trends>
<value_type>TEXT</value_type>
<applications>
<application>
<name>mpath</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.errors</params>
</step>
</preprocessing>
<master_item>
<key>vfs.mpath.info[{#MPATH_DEV}]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>Multipath: Number of device failures for {#MPATH_DEV}</name>
<type>DEPENDENT</type>
<key>vfs.mpath[{#MPATH_DEV},failures]</key>
<delay>0</delay>
<history>60d</history>
<units>!errors</units>
<applications>
<application>
<name>mpath</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.failures</params>
</step>
</preprocessing>
<master_item>
<key>vfs.mpath.info[{#MPATH_DEV}]</key>
</master_item>
<trigger_prototypes>
<trigger_prototype>
<expression>{last()}&gt;0</expression>
<name>Failures on mpath device {#MPATH_DEV}</name>
<priority>AVERAGE</priority>
<manual_close>YES</manual_close>
</trigger_prototype>
</trigger_prototypes>
</item_prototype>
<item_prototype>
<name>Multipath: Features for {#MPATH_DEV}</name>
<type>DEPENDENT</type>
<key>vfs.mpath[{#MPATH_DEV},features]</key>
<delay>0</delay>
<history>30d</history>
<trends>0</trends>
<value_type>TEXT</value_type>
<applications>
<application>
<name>mpath</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.features</params>
</step>
</preprocessing>
<master_item>
<key>vfs.mpath.info[{#MPATH_DEV}]</key>
</master_item>
<trigger_prototypes>
<trigger_prototype>
<expression>{change()}&lt;&gt;0</expression>
<recovery_mode>NONE</recovery_mode>
<name>Features for mpath {#MPATH_DEV} changed</name>
<priority>INFO</priority>
<manual_close>YES</manual_close>
</trigger_prototype>
</trigger_prototypes>
</item_prototype>
<item_prototype>
<name>Multipath: Number of active paths for {#MPATH_DEV}</name>
<type>DEPENDENT</type>
<key>vfs.mpath[{#MPATH_DEV},paths_num_active]</key>
<delay>0</delay>
<history>60d</history>
<units>!path(s)</units>
<applications>
<application>
<name>mpath</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.paths_num_active</params>
</step>
</preprocessing>
<master_item>
<key>vfs.mpath.info[{#MPATH_DEV}]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>Multipath: Number of inactive paths for {#MPATH_DEV}</name>
<type>DEPENDENT</type>
<key>vfs.mpath[{#MPATH_DEV},paths_num_inactive]</key>
<delay>0</delay>
<history>60d</history>
<units>!path(s)</units>
<applications>
<application>
<name>mpath</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.paths_num_inactive</params>
</step>
</preprocessing>
<master_item>
<key>vfs.mpath.info[{#MPATH_DEV}]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>Multipath: Number of paths having issues for {#MPATH_DEV}</name>
<type>DEPENDENT</type>
<key>vfs.mpath[{#MPATH_DEV},paths_num_ko]</key>
<delay>0</delay>
<history>60d</history>
<units>!path(s)</units>
<applications>
<application>
<name>mpath</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.paths_num_ko</params>
</step>
</preprocessing>
<master_item>
<key>vfs.mpath.info[{#MPATH_DEV}]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>Multipath: Number of operational paths for {#MPATH_DEV}</name>
<type>DEPENDENT</type>
<key>vfs.mpath[{#MPATH_DEV},paths_num_ok]</key>
<delay>0</delay>
<history>60d</history>
<units>!path(s)</units>
<applications>
<application>
<name>mpath</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.paths_num_ok</params>
</step>
</preprocessing>
<master_item>
<key>vfs.mpath.info[{#MPATH_DEV}]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>Multipath: Total number of paths for {#MPATH_DEV}</name>
<type>DEPENDENT</type>
<key>vfs.mpath[{#MPATH_DEV},paths_num_total]</key>
<delay>0</delay>
<history>60d</history>
<units>!path(s)</units>
<applications>
<application>
<name>mpath</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.paths_num_total</params>
</step>
</preprocessing>
<master_item>
<key>vfs.mpath.info[{#MPATH_DEV}]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>Multipath: Paths with issues for {#MPATH_DEV}</name>
<type>DEPENDENT</type>
<key>vfs.mpath[{#MPATH_DEV},paths_with_issue]</key>
<delay>0</delay>
<history>30d</history>
<trends>0</trends>
<value_type>TEXT</value_type>
<applications>
<application>
<name>mpath</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.paths_with_issue</params>
</step>
</preprocessing>
<master_item>
<key>vfs.mpath.info[{#MPATH_DEV}]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>Multipath: Number of path failures for {#MPATH_DEV}</name>
<type>DEPENDENT</type>
<key>vfs.mpath[{#MPATH_DEV},path_failures]</key>
<delay>0</delay>
<history>60d</history>
<units>!path(s)</units>
<applications>
<application>
<name>mpath</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.path_failures</params>
</step>
</preprocessing>
<master_item>
<key>vfs.mpath.info[{#MPATH_DEV}]</key>
</master_item>
</item_prototype>
<item_prototype>
<name>Multipath: Size of the device {#MPATH_DEV}</name>
<type>DEPENDENT</type>
<key>vfs.mpath[{#MPATH_DEV},size]</key>
<delay>0</delay>
<history>60d</history>
<units>B</units>
<applications>
<application>
<name>mpath</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.size</params>
</step>
</preprocessing>
<master_item>
<key>vfs.mpath.info[{#MPATH_DEV}]</key>
</master_item>
<trigger_prototypes>
<trigger_prototype>
<expression>{change()}&lt;&gt;0</expression>
<recovery_mode>NONE</recovery_mode>
<name>Size of mpath {#MPATH_DEV} changed</name>
<priority>INFO</priority>
<manual_close>YES</manual_close>
</trigger_prototype>
</trigger_prototypes>
</item_prototype>
</item_prototypes>
<trigger_prototypes>
<trigger_prototype>
<expression>{Template_App_Multipath:vfs.mpath[{#MPATH_DEV},path_failures].last()}&gt;0 or {Template_App_Multipath:vfs.mpath[{#MPATH_DEV},paths_num_ko].last()}&gt;0 or {Template_App_Multipath:vfs.mpath[{#MPATH_DEV},failures].last()}&gt;0 or {Template_App_Multipath:vfs.mpath[{#MPATH_DEV},paths_num_ok].last()}&lt;&gt;{Template_App_Multipath:vfs.mpath[{#MPATH_DEV},paths_num_active].last()} or {Template_App_Multipath:vfs.mpath[{#MPATH_DEV},paths_with_issue].strlen()}&lt;&gt;0</expression>
<name>Issues detected on {#MPATH_DEV} ({ITEM.VALUE6})</name>
<priority>WARNING</priority>
</trigger_prototype>
</trigger_prototypes>
</discovery_rule>
</discovery_rules>
</template>
</templates>
</zabbix_export>

File diff suppressed because it is too large

View File

@ -1,78 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>5.0</version>
<date>2021-06-19T12:09:19Z</date>
<groups>
<group>
<name>Modèles</name>
</group>
<group>
<name>Templates</name>
</group>
</groups>
<templates>
<template>
<template>Template_App_MySQL_legacy</template>
<name>Template_App_MySQL_legacy</name>
<groups>
<group>
<name>Modèles</name>
</group>
<group>
<name>Templates</name>
</group>
</groups>
<items>
<item>
<name>MySQL queries per second</name>
<key>mysql.qps</key>
<delay>5m</delay>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>qps</units>
<request_method>POST</request_method>
</item>
<item>
<name>MySQL number of slow queries</name>
<key>mysql.slowqueries</key>
<delay>5m</delay>
<trends>1095d</trends>
<request_method>POST</request_method>
</item>
<item>
<name>MySQL number of threads</name>
<key>mysql.threads</key>
<delay>5m</delay>
<trends>1095d</trends>
<request_method>POST</request_method>
</item>
<item>
<name>MySQL uptime</name>
<key>mysql.uptime</key>
<delay>5m</delay>
<trends>1095d</trends>
<status>DISABLED</status>
<units>uptime</units>
<request_method>POST</request_method>
</item>
<item>
<name>MySQL is alive</name>
<key>mysql[ping]</key>
<delay>5m</delay>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<request_method>POST</request_method>
</item>
<item>
<name>MySQL version</name>
<key>version[mysql]</key>
<delay>2h</delay>
<trends>0</trends>
<status>DISABLED</status>
<value_type>CHAR</value_type>
<request_method>POST</request_method>
</item>
</items>
</template>
</templates>
</zabbix_export>

View File

@ -1,991 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>5.0</version>
<date>2021-06-19T12:09:38Z</date>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<templates>
<template>
<template>Template_App_PMG</template>
<name>Template_App_PMG</name>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<applications>
<application>
<name>Email</name>
</application>
<application>
<name>pmg</name>
</application>
</applications>
<items>
<item>
<name>PMG: Global stats</name>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
<delay>15m</delay>
<history>1h</history>
<trends>0</trends>
<value_type>TEXT</value_type>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<request_method>POST</request_method>
</item>
<item>
<name>PMG: Inbound email traffic</name>
<type>DEPENDENT</type>
<key>pmg.check[global,bytes_in]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>B</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.bytes_in</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
</master_item>
<request_method>POST</request_method>
</item>
<item>
<name>PMG: Outbound email traffic</name>
<type>DEPENDENT</type>
<key>pmg.check[global,bytes_out]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>B</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.bytes_out</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
</master_item>
<request_method>POST</request_method>
</item>
<item>
<name>PMG: Inbound emails</name>
<type>DEPENDENT</type>
<key>pmg.check[global,count_in]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails/h</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.count_in</params>
</step>
<step>
<type>MULTIPLIER</type>
<params>4</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
</master_item>
<request_method>POST</request_method>
</item>
<item>
<name>PMG: Outbound emails</name>
<type>DEPENDENT</type>
<key>pmg.check[global,count_out]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails/h</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.count_out</params>
</step>
<step>
<type>MULTIPLIER</type>
<params>4</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
</master_item>
<request_method>POST</request_method>
</item>
<item>
<name>PMG: Pregreet rejections</name>
<type>DEPENDENT</type>
<key>pmg.check[global,pregreet]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails/h</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.pregreet</params>
</step>
<step>
<type>MULTIPLIER</type>
<params>4</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
</master_item>
<request_method>POST</request_method>
</item>
<item>
<name>PMG: Inbound average processing time</name>
<type>DEPENDENT</type>
<key>pmg.check[global,ptime_in]</key>
<delay>0</delay>
<trends>1825d</trends>
<value_type>FLOAT</value_type>
<units>s</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.ptime_in</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
</master_item>
<request_method>POST</request_method>
</item>
<item>
<name>PMG: Outbound average processing time</name>
<type>DEPENDENT</type>
<key>pmg.check[global,ptime_out]</key>
<delay>0</delay>
<trends>1825d</trends>
<value_type>FLOAT</value_type>
<units>s</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.ptime_out</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
</master_item>
<request_method>POST</request_method>
</item>
<item>
<name>PMG: Emails in the active queue</name>
<type>DEPENDENT</type>
<key>pmg.check[global,queue_active]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.queue_active</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
</master_item>
</item>
<item>
<name>PMG: Emails in the deferred queue</name>
<type>DEPENDENT</type>
<key>pmg.check[global,queue_deferred]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.queue_deferred</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
</master_item>
</item>
<item>
<name>PMG: Emails in the hold queue</name>
<type>DEPENDENT</type>
<key>pmg.check[global,queue_hold]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.queue_hold</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
</master_item>
<triggers>
<trigger>
<expression>{last()}&gt;0</expression>
<name>Quarantined emails</name>
<priority>WARNING</priority>
<description>Check the hold queue with the mailq command, then either release with postsuper -H &lt;ID&gt; or drop with postsuper -d &lt;ID&gt;</description>
</trigger>
</triggers>
</item>
<item>
<name>PMG: RBL rejections</name>
<type>DEPENDENT</type>
<key>pmg.check[global,rbl]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails/h</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.rbl</params>
</step>
<step>
<type>MULTIPLIER</type>
<params>4</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
</master_item>
<request_method>POST</request_method>
</item>
<item>
<name>PMG: Inbound spam</name>
<type>DEPENDENT</type>
<key>pmg.check[global,spam_in]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails/h</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.spam_in</params>
</step>
<step>
<type>MULTIPLIER</type>
<params>4</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
</master_item>
<request_method>POST</request_method>
</item>
<item>
<name>PMG: Outbound spam</name>
<type>DEPENDENT</type>
<key>pmg.check[global,spam_out]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails/h</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.spam_out</params>
</step>
<step>
<type>MULTIPLIER</type>
<params>4</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
</master_item>
<request_method>POST</request_method>
<triggers>
<trigger>
<expression>{max(1h)}&gt;0</expression>
<name>PMG: Outbound spam detected</name>
<priority>WARNING</priority>
<manual_close>YES</manual_close>
</trigger>
</triggers>
</item>
<item>
<name>PMG: Inbound viruses</name>
<type>DEPENDENT</type>
<key>pmg.check[global,virus_in]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails/h</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.virus_in</params>
</step>
<step>
<type>MULTIPLIER</type>
<params>4</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
</master_item>
<request_method>POST</request_method>
</item>
<item>
<name>PMG: Outbound viruses</name>
<type>DEPENDENT</type>
<key>pmg.check[global,virus_out]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails/h</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.virus_out</params>
</step>
<step>
<type>MULTIPLIER</type>
<params>4</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES}]</key>
</master_item>
<request_method>POST</request_method>
<triggers>
<trigger>
<expression>{max(1h)}&gt;0</expression>
<name>PMG: Outbound viruses detected</name>
<priority>AVERAGE</priority>
<manual_close>YES</manual_close>
</trigger>
</triggers>
</item>
</items>
<discovery_rules>
<discovery_rule>
<name>Domains discovery</name>
<key>pmg.discovery[domains]</key>
<delay>7200</delay>
<item_prototypes>
<item_prototype>
<name>PMG Domain: {#PMG_RELAY_DOMAIN}: Info</name>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES},domain,{#PMG_RELAY_DOMAIN}]</key>
<delay>15m</delay>
<history>0</history>
<trends>0</trends>
<value_type>TEXT</value_type>
<applications>
<application>
<name>Email</name>
</application>
<application>
<name>pmg</name>
</application>
</applications>
<request_method>POST</request_method>
</item_prototype>
<item_prototype>
<name>PMG Domain: {#PMG_RELAY_DOMAIN}: Inbound traffic</name>
<type>DEPENDENT</type>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},bytes_in]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>B</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.bytes_in</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES},domain,{#PMG_RELAY_DOMAIN}]</key>
</master_item>
<request_method>POST</request_method>
</item_prototype>
<item_prototype>
<name>PMG Domain: {#PMG_RELAY_DOMAIN}: Outbound traffic</name>
<type>DEPENDENT</type>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},bytes_out]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>B</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.bytes_out</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES},domain,{#PMG_RELAY_DOMAIN}]</key>
</master_item>
<request_method>POST</request_method>
</item_prototype>
<item_prototype>
<name>PMG Domain: {#PMG_RELAY_DOMAIN}: Inbound emails</name>
<type>DEPENDENT</type>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},count_in]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails/h</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.count_in</params>
</step>
<step>
<type>MULTIPLIER</type>
<params>4</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES},domain,{#PMG_RELAY_DOMAIN}]</key>
</master_item>
<request_method>POST</request_method>
</item_prototype>
<item_prototype>
<name>PMG Domain: {#PMG_RELAY_DOMAIN}: Outbound emails</name>
<type>DEPENDENT</type>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},count_out]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails/h</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.count_out</params>
</step>
<step>
<type>MULTIPLIER</type>
<params>4</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES},domain,{#PMG_RELAY_DOMAIN}]</key>
</master_item>
<request_method>POST</request_method>
</item_prototype>
<item_prototype>
<name>PMG Domain: {#PMG_RELAY_DOMAIN}: Inbound average processing time</name>
<type>DEPENDENT</type>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},ptime_in]</key>
<delay>0</delay>
<trends>1825d</trends>
<value_type>FLOAT</value_type>
<units>s</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.ptime_in</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES},domain,{#PMG_RELAY_DOMAIN}]</key>
</master_item>
<request_method>POST</request_method>
</item_prototype>
<item_prototype>
<name>PMG Domain: {#PMG_RELAY_DOMAIN}: Outbound average processing time</name>
<type>DEPENDENT</type>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},ptime_out]</key>
<delay>0</delay>
<trends>1825d</trends>
<value_type>FLOAT</value_type>
<units>s</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.ptime_out</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES},domain,{#PMG_RELAY_DOMAIN}]</key>
</master_item>
<request_method>POST</request_method>
</item_prototype>
<item_prototype>
<name>PMG Domain: {#PMG_RELAY_DOMAIN}: Inbound spam</name>
<type>DEPENDENT</type>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},spam_in]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails/h</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.spam_in</params>
</step>
<step>
<type>MULTIPLIER</type>
<params>4</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES},domain,{#PMG_RELAY_DOMAIN}]</key>
</master_item>
<request_method>POST</request_method>
</item_prototype>
<item_prototype>
<name>PMG Domain: {#PMG_RELAY_DOMAIN}: Outbound spam</name>
<type>DEPENDENT</type>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},spam_out]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails/h</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.spam_out</params>
</step>
<step>
<type>MULTIPLIER</type>
<params>4</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES},domain,{#PMG_RELAY_DOMAIN}]</key>
</master_item>
<request_method>POST</request_method>
<trigger_prototypes>
<trigger_prototype>
<expression>{max(1800)}&gt;1</expression>
<name>PMG: {#PMG_RELAY_DOMAIN}: Outbound spam detected</name>
<priority>AVERAGE</priority>
</trigger_prototype>
</trigger_prototypes>
</item_prototype>
<item_prototype>
<name>PMG Domain: {#PMG_RELAY_DOMAIN}: Inbound viruses</name>
<type>DEPENDENT</type>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},virus_in]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails/h</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.virus_in</params>
</step>
<step>
<type>MULTIPLIER</type>
<params>4</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES},domain,{#PMG_RELAY_DOMAIN}]</key>
</master_item>
<request_method>POST</request_method>
</item_prototype>
<item_prototype>
<name>PMG Domain: {#PMG_RELAY_DOMAIN}: Outbound viruses</name>
<type>DEPENDENT</type>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},virus_out]</key>
<delay>0</delay>
<trends>1825d</trends>
<units>mails/h</units>
<applications>
<application>
<name>Email</name>
</application>
</applications>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.virus_out</params>
</step>
<step>
<type>MULTIPLIER</type>
<params>4</params>
</step>
</preprocessing>
<master_item>
<key>pmg.check.all[{$PMG_FREQ},{$PMG_SPAM_THRES},domain,{#PMG_RELAY_DOMAIN}]</key>
</master_item>
<request_method>POST</request_method>
<trigger_prototypes>
<trigger_prototype>
<expression>{max(1800)}&gt;1</expression>
<name>PMG: {#PMG_RELAY_DOMAIN}: Outbound viruses detected</name>
<priority>HIGH</priority>
</trigger_prototype>
</trigger_prototypes>
</item_prototype>
</item_prototypes>
<graph_prototypes>
<graph_prototype>
<name>PMG: Domain {#PMG_RELAY_DOMAIN}: Email flows</name>
<type>STACKED</type>
<graph_items>
<graph_item>
<color>DD0000</color>
<item>
<host>Template_App_PMG</host>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},virus_in]</key>
</item>
</graph_item>
<graph_item>
<sortorder>1</sortorder>
<color>FF33FF</color>
<item>
<host>Template_App_PMG</host>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},spam_in]</key>
</item>
</graph_item>
<graph_item>
<sortorder>2</sortorder>
<color>00DD00</color>
<item>
<host>Template_App_PMG</host>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},count_in]</key>
</item>
</graph_item>
<graph_item>
<sortorder>3</sortorder>
<color>0000DD</color>
<item>
<host>Template_App_PMG</host>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},spam_out]</key>
</item>
</graph_item>
<graph_item>
<sortorder>4</sortorder>
<color>000000</color>
<item>
<host>Template_App_PMG</host>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},virus_out]</key>
</item>
</graph_item>
<graph_item>
<sortorder>5</sortorder>
<color>00EEEE</color>
<item>
<host>Template_App_PMG</host>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},count_out]</key>
</item>
</graph_item>
</graph_items>
</graph_prototype>
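<!--
The second graph combines traffic volume (bytes_in/bytes_out, left axis) with
processing time (ptime_in/ptime_out, right axis) so throughput and latency can
be correlated on a single panel.
-->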
<graph_prototype>
<name>PMG: Domain {#PMG_RELAY_DOMAIN}: Traffic and processing time</name>
<graph_items>
<graph_item>
<drawtype>GRADIENT_LINE</drawtype>
<color>00DD00</color>
<item>
<host>Template_App_PMG</host>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},bytes_in]</key>
</item>
</graph_item>
<graph_item>
<sortorder>1</sortorder>
<drawtype>GRADIENT_LINE</drawtype>
<color>FF9999</color>
<item>
<host>Template_App_PMG</host>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},bytes_out]</key>
</item>
</graph_item>
<graph_item>
<sortorder>2</sortorder>
<drawtype>BOLD_LINE</drawtype>
<color>3333FF</color>
<yaxisside>RIGHT</yaxisside>
<item>
<host>Template_App_PMG</host>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},ptime_in]</key>
</item>
</graph_item>
<graph_item>
<sortorder>3</sortorder>
<drawtype>BOLD_LINE</drawtype>
<color>DD0000</color>
<yaxisside>RIGHT</yaxisside>
<item>
<host>Template_App_PMG</host>
<key>pve.check[domain,{#PMG_RELAY_DOMAIN},ptime_out]</key>
</item>
</graph_item>
</graph_items>
</graph_prototype>
</graph_prototypes>
<request_method>POST</request_method>
</discovery_rule>
</discovery_rules>
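<!--
Template-level defaults, overridable per host: {$PMG_FREQ} is the aggregation
window in seconds passed to pmg.check.all (900 = 15 min), and {$PMG_SPAM_THRES}
is presumably the PMG spam score above which a message is counted as spam.
-->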
<macros>
<macro>
<macro>{$PMG_FREQ}</macro>
<value>900</value>
</macro>
<macro>
<macro>{$PMG_SPAM_THRES}</macro>
<value>5</value>
</macro>
</macros>
</template>
</templates>
<graphs>
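<!--
Global graphs aggregate over all domains via the pmg.check[global,*] items. In
addition to the per-domain counters they chart rbl and pregreet rejection rates
(presumably RBL hits and postscreen pre-greeting rejections at the SMTP level).
-->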
<graph>
<name>PMG: Global email flows</name>
<type>STACKED</type>
<graph_items>
<graph_item>
<color>FF6F00</color>
<item>
<host>Template_App_PMG</host>
<key>pmg.check[global,virus_out]</key>
</item>
</graph_item>
<graph_item>
<sortorder>1</sortorder>
<color>CE93D8</color>
<item>
<host>Template_App_PMG</host>
<key>pmg.check[global,spam_out]</key>
</item>
</graph_item>
<graph_item>
<sortorder>2</sortorder>
<color>6A1B9A</color>
<item>
<host>Template_App_PMG</host>
<key>pmg.check[global,count_out]</key>
</item>
</graph_item>
<graph_item>
<sortorder>3</sortorder>
<color>1A7C11</color>
<item>
<host>Template_App_PMG</host>
<key>pmg.check[global,count_in]</key>
</item>
</graph_item>
<graph_item>
<sortorder>4</sortorder>
<color>80FF00</color>
<item>
<host>Template_App_PMG</host>
<key>pmg.check[global,spam_in]</key>
</item>
</graph_item>
<graph_item>
<sortorder>5</sortorder>
<color>B71C1C</color>
<item>
<host>Template_App_PMG</host>
<key>pmg.check[global,virus_in]</key>
</item>
</graph_item>
<graph_item>
<sortorder>6</sortorder>
<color>0040FF</color>
<item>
<host>Template_App_PMG</host>
<key>pmg.check[global,rbl]</key>
</item>
</graph_item>
<graph_item>
<sortorder>7</sortorder>
<color>90A4AE</color>
<item>
<host>Template_App_PMG</host>
<key>pmg.check[global,pregreet]</key>
</item>
</graph_item>
</graph_items>
</graph>
<graph>
<name>PMG: Global email traffic and processing time</name>
<graph_items>
<graph_item>
<drawtype>GRADIENT_LINE</drawtype>
<color>00DD00</color>
<item>
<host>Template_App_PMG</host>
<key>pmg.check[global,bytes_in]</key>
</item>
</graph_item>
<graph_item>
<sortorder>1</sortorder>
<drawtype>GRADIENT_LINE</drawtype>
<color>FF9999</color>
<item>
<host>Template_App_PMG</host>
<key>pmg.check[global,bytes_out]</key>
</item>
</graph_item>
<graph_item>
<sortorder>2</sortorder>
<drawtype>BOLD_LINE</drawtype>
<color>0000DD</color>
<yaxisside>RIGHT</yaxisside>
<item>
<host>Template_App_PMG</host>
<key>pmg.check[global,ptime_in]</key>
</item>
</graph_item>
<graph_item>
<sortorder>3</sortorder>
<drawtype>BOLD_LINE</drawtype>
<color>DD0000</color>
<yaxisside>RIGHT</yaxisside>
<item>
<host>Template_App_PMG</host>
<key>pmg.check[global,ptime_out]</key>
</item>
</graph_item>
</graph_items>
</graph>
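<!--
Queue lengths correspond to the Postfix queues on the gateway: active (being
delivered right now), hold (frozen by an administrator or a rule) and deferred
(delivery failed, waiting for retry). A steadily growing deferred queue usually
points at a downstream delivery problem.
-->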
<graph>
<name>PMG: Global queues</name>
<type>STACKED</type>
<graph_items>
<graph_item>
<color>81C784</color>
<item>
<host>Template_App_PMG</host>
<key>pmg.check[global,queue_active]</key>
</item>
</graph_item>
<graph_item>
<sortorder>1</sortorder>
<color>8E24AA</color>
<item>
<host>Template_App_PMG</host>
<key>pmg.check[global,queue_hold]</key>
</item>
</graph_item>
<graph_item>
<sortorder>2</sortorder>
<color>EC407A</color>
<item>
<host>Template_App_PMG</host>
<key>pmg.check[global,queue_deferred]</key>
</item>
</graph_item>
</graph_items>
</graph>
</graphs>
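<!--
Quick sanity check from the Zabbix server, assuming the pmg.check.all
UserParameter shipped with this repository is deployed on the gateway
(host and domain names below are placeholders):
zabbix_get -s pmg.example.com -k 'pmg.check.all[900,5,domain,example.com]'
This should print the JSON document that the dependent items above consume.
-->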
</zabbix_export>

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff