Compare commits


56 Commits

Author SHA1 Message Date
164cd8cb3b Automatic commit of package [zabbix-agent-addons] release [0.2.172-1].
Created by command:

/usr/bin/tito tag
2023-12-21 15:58:47 +01:00
ff4a05ae59 Add Zabbix template for storageDevices 2023-12-21 15:58:31 +01:00
c1e46207da Read raw value for SSD_Life_Left 2023-12-21 15:00:07 +01:00
f72910cc6b Read SSD_Life_Left if available 2023-12-21 14:47:28 +01:00
62b8de1b05 /dev/bus/0 might not exist but can be queried 2023-12-21 14:23:58 +01:00
066f622888 Report more info from some NVMe 2023-12-21 14:16:31 +01:00
12c8396be3 Adjust default values for stor dev 2023-12-21 14:09:42 +01:00
f7835e1b90 Fix UserParam 2023-12-20 16:22:15 +01:00
a5ad4081f0 Add new script for smart monitoring 2023-12-20 16:19:21 +01:00
00790af9bf Automatic commit of package [zabbix-agent-addons] release [0.2.171-1].
Created by command:

/usr/bin/tito tag
2023-09-19 12:03:17 +02:00
55c878cf24 Ignore samba NT_STATUS_PROTOCOL_UNREACHABLE errors 2023-09-19 12:03:04 +02:00
2cec18b4a5 Automatic commit of package [zabbix-agent-addons] release [0.2.170-1].
Created by command:

/usr/bin/tito tag
2023-06-29 14:06:00 +02:00
1ddf903568 Typo 2023-06-29 14:04:38 +02:00
e5047e7b92 Fix + discover NVMe 2023-06-29 14:02:30 +01:00
34d19c8622 Automatic commit of package [zabbix-agent-addons] release [0.2.169-1].
Created by command:

/usr/bin/tito tag
2023-06-29 12:12:23 +02:00
2c29e4ecaa Better sensor output parsing 2023-06-29 12:12:07 +02:00
995bd50151 Automatic commit of package [zabbix-agent-addons] release [0.2.168-1].
Created by command:

/usr/bin/tito tag
2023-06-29 11:15:27 +02:00
7b42d3f2a9 Drop ipmitool stderr and simplify output parsing 2023-06-29 11:15:10 +02:00
313e022894 Automatic commit of package [zabbix-agent-addons] release [0.2.167-1].
Created by command:

/usr/bin/tito tag
2023-06-29 09:31:14 +02:00
3159f43ced Fix fan detection on some BMC boards 2023-06-29 09:29:15 +02:00
b0958e6fba Update ZFS template 2022-05-06 17:05:06 +02:00
ed5c8b745f Automatic commit of package [zabbix-agent-addons] release [0.2.166-1].
Created by command:

/usr/bin/tito tag
2022-03-26 18:30:53 +01:00
5acc49a55c Fix counting samba computers auth tries 2022-03-26 18:30:16 +01:00
7cfbd64eb4 Automatic commit of package [zabbix-agent-addons] release [0.2.165-1].
Created by command:

/usr/bin/tito tag
2022-03-21 11:30:00 +01:00
221a0afe5d last_seen might not be defined in check_unifi 2022-03-21 11:29:41 +01:00
e0be2c506f Automatic commit of package [zabbix-agent-addons] release [0.2.164-1].
Created by command:

/usr/bin/tito tag
2022-03-21 10:49:24 +01:00
4e50e2b2d4 Use JSON bool in unifi scripts 2022-03-21 10:48:50 +01:00
f46a580f90 Automatic commit of package [zabbix-agent-addons] release [0.2.163-1].
Created by command:

/usr/bin/tito tag
2022-01-24 11:01:15 +01:00
e0d2825f21 Fix check_zimbra_sudo 2022-01-24 11:01:03 +01:00
56fcaf7f6c Automatic commit of package [zabbix-agent-addons] release [0.2.162-1].
Created by command:

/usr/bin/tito tag
2022-01-21 13:46:24 +01:00
5afabbfaad Add alloc_ct for LVM VG when missing 2022-01-21 13:46:06 +01:00
849170c10b Automatic commit of package [zabbix-agent-addons] release [0.2.161-1].
Created by command:

/usr/bin/tito tag
2022-01-21 12:19:45 +01:00
31ccccd2e0 Fix Zimbra discovery and check scripts 2022-01-21 12:19:29 +01:00
1950abbc0f Automatic commit of package [zabbix-agent-addons] release [0.2.160-1].
Created by command:

/usr/bin/tito tag
2022-01-20 17:28:52 +01:00
51fa9f602a Add a {#DOCKER_CONTAINER_STATUS} LLD macro 2022-01-20 17:28:37 +01:00
607204a150 Automatic commit of package [zabbix-agent-addons] release [0.2.159-1].
Created by command:

/usr/bin/tito tag
2022-01-20 14:55:55 +01:00
5e7e22d311 Update Docker template 2022-01-20 14:55:34 +01:00
80bfaee714 Don't query state in docker discovery
As it's not supported on some older docker, and not used anyway
2022-01-20 14:54:19 +01:00
8140dcb7cf Automatic commit of package [zabbix-agent-addons] release [0.2.158-1].
Created by command:

/usr/bin/tito tag
2022-01-13 13:59:21 +01:00
820d12a682 Small fixes for Docker check script and template 2022-01-13 13:58:58 +01:00
2d88e6fe34 Automatic commit of package [zabbix-agent-addons] release [0.2.157-1].
Created by command:

/usr/bin/tito tag
2022-01-13 09:44:00 +01:00
cd48caa24c Enhancements in the Docker template 2022-01-13 09:43:45 +01:00
7c42540a66 Automatic commit of package [zabbix-agent-addons] release [0.2.156-1].
Created by command:

/usr/bin/tito tag
2022-01-12 16:24:07 +01:00
a24f4adb81 Add Docker scripts 2022-01-12 16:23:50 +01:00
a73021a2d5 Automatic commit of package [zabbix-agent-addons] release [0.2.155-1].
Created by command:

/usr/bin/tito tag
2022-01-11 16:29:51 +01:00
8df27d15d7 Automatic commit of package [zabbix-agent-addons] release [0.2.154-1].
Created by command:

/usr/bin/tito tag
2021-12-16 16:47:35 +01:00
cfdd92b9c6 Fix zpool iostat as /proc/spl/kstat/zfs/pool/io doesn't exist anymore 2021-12-16 16:46:43 +01:00
Daniel Berteaud
72682f9bad Add nodata triggers for Elasticsearch 2021-10-19 14:31:56 +02:00
Daniel Berteaud
1272a06771 Include Zabbix template for Elasticsearch 2021-10-19 10:27:16 +02:00
Daniel Berteaud
c17260c519 Automatic commit of package [zabbix-agent-addons] release [0.2.153-1].
Created by command:

/usr/bin/tito tag
2021-10-19 10:19:04 +02:00
Daniel Berteaud
5387ae53b8 Tweak elasticsearch monitoring scripts 2021-10-19 10:18:44 +02:00
Daniel Berteaud
415c608252 Automatic commit of package [zabbix-agent-addons] release [0.2.152-1].
Created by command:

/usr/bin/tito tag
2021-10-18 12:18:55 +02:00
Daniel Berteaud
9ad6d8b1b3 Small fixes in elasticsearch scripts 2021-10-18 12:18:37 +02:00
Daniel Berteaud
6dc46b819f Automatic commit of package [zabbix-agent-addons] release [0.2.151-1].
Created by command:

/usr/bin/tito tag
2021-10-18 11:35:14 +02:00
Daniel Berteaud
74b3ba5928 Add Elasticsearch monitoring scripts 2021-10-18 11:34:53 +02:00
Daniel Berteaud
ad9b9b569a Updates and fixes in Zabbix templates 2021-09-22 18:06:08 +02:00
30 changed files with 4638 additions and 2603 deletions

View File

@@ -1 +1 @@
0.2.150-1 ./
0.2.172-1 ./

View File

@@ -36,6 +36,10 @@ sub list_smart_hdd{
foreach my $line (@smart_info){
if ($line =~ m/^SMART support is:\s+Enabled/i){
$smart_enabled = 1;
last;
} elsif ($line =~ m/NVMe/i){
$smart_enabled = 1;
last;
} elsif ($line =~ m/^Transport protocol:\s+iSCSI/i){
# Skip iSCSI block
next BLOCK;

View File

@@ -4,7 +4,7 @@
Summary: Scripts for Zabbix monitoring
Name: zabbix-agent-addons
Version: 0.2.150
Version: 0.2.172
Release: 1%{?dist}
Source0: %{name}-%{version}.tar.gz
BuildArch: noarch
@@ -106,6 +106,85 @@ fi
%endif
%changelog
* Thu Dec 21 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.172-1
- Add Zabbix template for storageDevices (dbd@ehtrace.com)
- Read raw value for SSD_Life_Left (dbd@ehtrace.com)
- Read SSD_Life_Left if available (dbd@ehtrace.com)
- /dev/bus/0 might not exist but can be queried (dbd@ehtrace.com)
- Report more info from some NVMe (dbd@ehtrace.com)
- Adjust default values for stor dev (dbd@ehtrace.com)
- Fix UserParam (dbd@ehtrace.com)
- Add new script for smart monitoring (dbd@ehtrace.com)
* Tue Sep 19 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.171-1
- Ignore samba NT_STATUS_PROTOCOL_UNREACHABLE errors (dbd@ehtrace.com)
* Thu Jun 29 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.170-1
- Fix + discover NVMe (dbd@ehtrace.com)
* Thu Jun 29 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.169-1
- Better sensor output parsing (dbd@ehtrace.com)
* Thu Jun 29 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.168-1
- Drop ipmitool stderr and simplify output parsing (dbd@ehtrace.com)
* Thu Jun 29 2023 Daniel Berteaud <dbd@ehtrace.com> 0.2.167-1
- Fix fan detection on some BMC boards (dbd@ehtrace.com)
- Update ZFS template (dbd@ehtrace.com)
* Sat Mar 26 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.166-1
- Fix counting samba computers auth tries (dbd@ehtrace.com)
* Mon Mar 21 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.165-1
- last_seen might not be defined in check_unifi (dbd@ehtrace.com)
* Mon Mar 21 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.164-1
- Use JSON bool in unifi scripts (dbd@ehtrace.com)
* Mon Jan 24 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.163-1
- Fix check_zimbra_sudo (dbd@ehtrace.com)
* Fri Jan 21 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.162-1
- Add alloc_ct for LVM VG when missing (dbd@ehtrace.com)
* Fri Jan 21 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.161-1
- Fix Zimbra discovery and check scripts (dbd@ehtrace.com)
* Thu Jan 20 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.160-1
- Add a {#DOCKER_CONTAINER_STATUS} LLD macro (dbd@ehtrace.com)
* Thu Jan 20 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.159-1
- Update Docker template (dbd@ehtrace.com)
- Don't query state in docker discovery (dbd@ehtrace.com)
* Thu Jan 13 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.158-1
- Small fixes for Docker check script and template (dbd@ehtrace.com)
* Thu Jan 13 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.157-1
- Enhancements in the Docker template (dbd@ehtrace.com)
* Wed Jan 12 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.156-1
- Add Docker scripts (dbd@ehtrace.com)
* Tue Jan 11 2022 Daniel Berteaud <dbd@ehtrace.com> 0.2.155-1
- Release bump
* Thu Dec 16 2021 Daniel Berteaud <dani@lapiole.org> 0.2.154-1
- Fix zpool iostat as /proc/spl/kstat/zfs/pool/io doesn't exist anymore
(dani@lapiole.org)
- Add nodata triggers for Elasticsearch (daniel@firewall-services.com)
- Include Zabbix template for Elasticsearch (daniel@firewall-services.com)
* Tue Oct 19 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.153-1
- Tweak elasticsearch monitoring scripts (daniel@firewall-services.com)
* Mon Oct 18 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.152-1
- Small fixes in elasticsearch scripts (daniel@firewall-services.com)
* Mon Oct 18 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.151-1
- Add Elasticsearch monitoring scripts (daniel@firewall-services.com)
- Updates and fixes in Zabbix templates (daniel@firewall-services.com)
* Fri Jul 16 2021 Daniel Berteaud <daniel@firewall-services.com> 0.2.150-1
- Do not count Unconfigured(good) drives as an error (daniel@firewall-
services.com)

zabbix_conf/docker.conf (new file, +9)
View File

@@ -0,0 +1,9 @@
# Discover Docker items
# $1 can be containers, networks, volumes
UserParameter=container.docker.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_docker_sudo --what $1
# Type: Agent or Agent (active)
# container.docker.check.all[type,id]
# Where type is what to monitor (global, container, network, volume)
# id is the id of the item to monitor. Can be a name or an ID. For the global check, there's no ID
UserParameter=container.docker.check[*],/usr/bin/sudo /var/lib/zabbix/bin/check_docker_sudo --$1 $2
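Once this file is deployed on an agent, the keys can be exercised end to end from the server side; a quick sanity check could look like this (docker-host and myapp are placeholder names, not taken from the repo):
zabbix_get -s docker-host -k 'container.docker.discovery[containers]'
zabbix_get -s docker-host -k 'container.docker.check[global]'
zabbix_get -s docker-host -k 'container.docker.check[container,myapp]'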

View File

@@ -0,0 +1,2 @@
UserParameter=elasticsearch.discovery[*],/var/lib/zabbix/bin/disco_elasticsearch --url=$1 --user=$2 --pass=$3 --$4
UserParameter=elasticsearch.check[*],/var/lib/zabbix/bin/check_elasticsearch --url=$1 --user=$2 --pass=$3 --$4 $5
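For example, the cluster-level check of a local, unauthenticated node could be queried with (es-host is a placeholder):
zabbix_get -s es-host -k 'elasticsearch.check[http://localhost:9200,,,cluster]'
The empty second and third positions map to --user and --pass, which the scripts declare as optional ('user:s' / 'password:s').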

View File

@@ -5,3 +5,7 @@ UserParameter=hardware.disk.smart.discovery,/usr/bin/sudo /var/lib/zabbix/bin/di
# Takes two args: the drives to check, and the value to get
# eg: hardware.disk.smart[/dev/sda,Reallocated_Sector_Ct]
UserParameter=hardware.disk.smart[*],/usr/bin/sudo /var/lib/zabbix/bin/check_smart_sudo $1 $2
# New smart disk discovery/monitoring
UserParameter=stor.dev.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_stor_dev_sudo
UserParameter=stor.dev.info[*],/usr/bin/sudo /var/lib/zabbix/bin/check_stor_dev_sudo --dev "$1" --type "$2"
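The corresponding item keys would then look like (device paths and type are examples):
stor.dev.discovery[]
stor.dev.info[/dev/sda,auto]
stor.dev.info[/dev/nvme0,nvme]
The second argument is handed to smartctl as its -d/--device type, so any type smartctl itself accepts (auto, sat, nvme, megaraid,N, ...) is valid here.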

View File

@@ -0,0 +1,90 @@
#!/usr/bin/perl
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
use Date::Parse;
my $docker = which('docker');
my $json = {};
my $pretty = 0;
my ($global, $container, $network, $volume);
GetOptions(
'global' => \$global,
'container=s' => \$container,
'network=s' => \$network,
'volume=s' => \$volume,
'pretty' => \$pretty
);
# Sanitize args: anchor both ends, as these values are interpolated into qx() commands
if (defined $container and $container !~ m/^[a-zA-Z0-9\-_]+$/){
die "Invalid container ID $container\n";
} elsif (defined $network and $network !~ m/^[a-zA-Z0-9\-_]+$/){
die "Invalid network ID\n";
} elsif (defined $volume and $volume !~ m/^[a-zA-Z0-9\-_]+$/){
die "Invalid volume name\n";
}
# Default formatting
my $format = '{{ json . }}';
my $cmd;
if ($global){
$json->{info} = from_json(qx($docker info --format '$format'));
} elsif (defined $container) {
$json->{inspect} = from_json(qx($docker container inspect $container --format '$format'));
$json->{stats} = from_json(qx($docker container stats $container --format '$format' --no-stream));
# Remove percent sign so Zabbix can get raw value
foreach my $stat (qw(MemPerc CPUPerc)){
$json->{stats}->{$stat} =~ s/%$//;
}
# Extract mem usage vs mem limit, net in vs net out and blk read vs blk write
($json->{stats}->{MemCurrent}, $json->{stats}->{MemLimit}) = split(/\s*\/\s*/, $json->{stats}->{MemUsage});
($json->{stats}->{NetIOIn}, $json->{stats}->{NetIOOut}) = split(/\s*\/\s*/, $json->{stats}->{NetIO});
($json->{stats}->{BlockIORead}, $json->{stats}->{BlockIOWrite}) = split(/\s*\/\s*/, $json->{stats}->{BlockIO});
# Convert into Bytes
foreach my $stat (qw(MemCurrent MemLimit NetIOIn NetIOOut BlockIORead BlockIOWrite)){
$json->{stats}->{$stat} = convert_unit($json->{stats}->{$stat});
}
# Compute a useful Uptime from the StartedAt value
if ($json->{inspect}->{State}->{Running}){
$json->{stats}->{Uptime} = int(time() - str2time($json->{inspect}->{State}->{StartedAt}));
} else {
$json->{stats}->{Uptime} = 0;
}
} elsif (defined $network){
$json->{inspect} = from_json(qx($docker network inspect $network --format '$format'));
} elsif (defined $volume){
$json->{inspect} = from_json(qx($docker volume inspect $volume --format '$format'));
}
print to_json($json, { pretty => $pretty }) . "\n";
sub convert_unit {
my $val = shift;
my $suffix_multiplier = {
ki => 1024,
Ki => 1024,
Mi => 1024 * 1024,
Gi => 1024 * 1024 * 1024,
Ti => 1024 * 1024 * 1024 * 1024,
Pi => 1024 * 1024 * 1024 * 1024 * 1024,
k => 1000,
K => 1000,
M => 1000 * 1000,
G => 1000 * 1000 * 1000,
T => 1000 * 1000 * 1000 * 1000,
P => 1000 * 1000 * 1000 * 1000 * 1000
};
if ($val =~ m/^(\d+(\.\d+)?)(ki|Ki|Mi|Gi|Ti|Pi|k|K|M|G|T|P)?B/){
$val = int($1 * $suffix_multiplier->{$3}) if (defined $3 and defined $suffix_multiplier->{$3});
# Remove the Bytes suffix if remaining
$val =~ s/B$//;
}
return $val;
}
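To make the unit handling concrete, here is a condensed, self-contained sketch of the convert_unit logic above, runnable on its own (the sample value is invented, and the multiplier table is abbreviated):

#!/usr/bin/perl
use strict;
use warnings;
# Example: "1.5GiB" as printed by docker stats -> 1.5 * 1024^3 bytes
my %mult = (Ki => 1024, Mi => 1024**2, Gi => 1024**3, k => 1000, M => 1000**2, G => 1000**3);
my $val = '1.5GiB';
if ($val =~ m/^(\d+(\.\d+)?)(Ki|Mi|Gi|k|M|G)?B/){
$val = int($1 * $mult{$3}) if (defined $3 and defined $mult{$3});
# Remove the Bytes suffix if remaining
$val =~ s/B$//;
}
print "$val\n"; # prints 1610612736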

View File

@@ -0,0 +1,94 @@
#!/usr/bin/perl
use warnings;
use strict;
use JSON;
use Getopt::Long;
use LWP::UserAgent;
use HTTP::Request::Common;
use URI;
use Data::Dumper;
my $user = undef;
my $pass = undef;
my $url = 'http://localhost:9200';
my $certcheck = 1;
my $cluster = 0;
my $node = undef;
my $index = undef;
my $pretty = 0;
my $json = {};
GetOptions (
'user:s' => \$user,
'password:s' => \$pass,
'url=s' => \$url,
'cert-check!' => \$certcheck,
'cluster' => \$cluster,
'node=s' => \$node,
'index=s' => \$index,
'pretty' => \$pretty
);
# If no mode is given, default to fetching the cluster status
# ($cluster is initialized to 0, so test its value, not its defined-ness)
if (not $cluster and not defined $node and not defined $index){
$cluster = 1;
}
my $uri = URI->new($url);
if (not defined $uri){
die "COuldn't parse $url as a valid url\n";
}
# If connecting over plain http, or if the host is localhost,
# there's no need to check the certificate
if ($uri->scheme eq 'http' or $uri->host =~ m/^(localhost|127\.0\.0)/){
$certcheck = 0;
}
my $resp;
my $sslopts = {};
if (not $certcheck){
$sslopts = {
verify_hostname => 0,
SSL_verify_mode => 0
}
}
my $ua = LWP::UserAgent->new(
ssl_opts => $sslopts
);
$ua->env_proxy;
if ($cluster){
$json = make_request('/_cluster/stats');
} elsif (defined $node){
my $resp = make_request('/_nodes/' . $node)->{'nodes'};
# We can specify node by ID, name or IP
if (defined $resp->{$node}){
$json = $resp->{$node};
} else {
my $node_id = (keys %{$resp})[0];
$json = $resp->{$node_id};
}
} elsif (defined $index){
$json = make_request('/_cluster/health/' . $index . '?level=indices')->{'indices'}->{$index};
}
print to_json($json, { pretty => $pretty });
sub make_request {
my $path = shift;
my $req_url = $url . $path;
my $req = GET $req_url;
if (defined $user and $user ne '' and defined $pass and $pass ne ''){
$req->authorization_basic($user, $pass);
}
my $resp = $ua->request($req);
die "Request to $req_url failed : " . $resp->message . "\n" if $resp->is_error;
return from_json($resp->decoded_content);
}
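Typical invocations, mirroring the UserParameter wiring above (host, user and node names are placeholders):
./check_elasticsearch --url=http://localhost:9200 --cluster --pretty
./check_elasticsearch --url=https://es.example.org:9200 --user=zabbix --pass=secret --node es-data-01
For --node, the response of /_nodes/<value> is first indexed by the value itself (a node ID); otherwise the first returned node is used, which covers lookups by name or IP.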

View File

@@ -43,6 +43,10 @@ EOF
my $json;
if (defined $vg){
%{$json} = get_volume_group_information($vg);
# Depending on LVM version, alloc_ct might not be present
if (not defined $json->{alloc_ct}){
$json->{alloc_ct} = sprintf("%.1f", 100 * $json->{alloc_pe_size} / $json->{vg_size});
}
} elsif (defined $lv) {
%{$json} = get_lv_info($lv);
} else{
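As a worked example of the fallback (numbers invented): on a 100 GiB volume group with 25 GiB of allocated extents, it computes sprintf("%.1f", 100 * 25 / 100) = 25.0, i.e. the same percentage that LVM versions exposing alloc_ct report directly.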

View File

@@ -166,8 +166,12 @@ if (defined $ou){
my $subject;
if ($type eq 'Authentication'){
if ($event->{Authentication}->{status} eq 'NT_STATUS_PROTOCOL_UNREACHABLE'){
# Ignore NT_STATUS_PROTOCOL_UNREACHABLE as they are harmless
next;
}
# Accounts ending with $ are for computers
$subject = (($event->{$type}->{mappedAccount} || $event->{$type}->{clientAccount} || '')=~ m/\$$/) ? 'computers' : 'users';
$subject = (($event->{$type}->{mappedAccount} || $event->{$type}->{clientAccount} || '')=~ m/\$(\@.+)?$/) ? 'computers' : 'users';
if ($event->{Authentication}->{status} eq 'NT_STATUS_OK'){
$json->{activity}->{authentications}->{$subject}->{success}++;
} else {
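The widened regex covers both plain and Kerberos-style computer accounts (names here are illustrative): 'WS01$' matched before, while 'WS01$@EXAMPLE.ORG' only matches with the optional (\@.+)? suffix; a plain user account like 'jdoe' still falls through to 'users'.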

View File

@@ -0,0 +1,120 @@
#!/usr/bin/perl
use strict;
use warnings;
use JSON;
use Getopt::Long;
use File::Which;
my $dev = undef;
my $type = 'auto';
my $what = 'json';
my $pretty = 0;
GetOptions(
'device=s' => \$dev,
'type=s' => \$type,
'what=s' => \$what,
'pretty' => \$pretty
);
if (not defined $dev or $dev !~ m|^/dev/\w+(/\w+)*$|){
print "Invalid --device\n";
exit 1;
} elsif ($what !~ m/^\w+$/){
print "Invalid --what\n";
exit 1;
} elsif ($type !~ m/^\w+\+*\w+(,\w+)*$/){
print "Invalid --type\n";
exit 1;
}
my $json = {
temperature_celsius => 25,
power_on_hours => 0,
power_cycle_count => 0,
reallocated_sector_count => 0,
current_pending_sector => 0,
offline_uncorrectable => 0,
percent_lifetime_remain => 100,
firmware_version => 0
};
my $smartctl = which('smartctl');
sub print_out {
if ($what eq 'json'){
print to_json($json, { pretty => $pretty });
exit 0;
} elsif (defined $json->{$what}){
print $json->{$what} . "\n";
exit 0;
} else {
print "ZBX_NOTSUPPORTED\n";
exit 1;
}
}
sub get_smart_attr {
my $smart = shift;
my $attr = shift;
if (defined $smart->{ata_smart_attributes}->{table}){
foreach (@{$smart->{ata_smart_attributes}->{table}}){
if ($_->{name} eq $attr){
return $_;
}
}
}
return undef;
}
if (not defined $smartctl){
$what = 'error';
print_out();
}
my $data = from_json(qx($smartctl -a $dev -d $type --json=c));
if (defined $data->{temperature}->{current}){
$json->{temperature_celsius} = $data->{temperature}->{current};
}
if (defined $data->{power_on_time}->{hours}){
$json->{power_on_hours} = $data->{power_on_time}->{hours};
}
if (defined $data->{power_cycle_count}){
$json->{power_cycle_count} = $data->{power_cycle_count};
}
if (defined $data->{firmware_version}){
$json->{firmware_version} = $data->{firmware_version};
}
my ($pending, $realloc, $offline, $remain);
if ($pending = get_smart_attr($data, 'Current_Pending_Sector')){
$json->{current_pending_sector} = $pending->{raw}->{value};
}
if ($realloc = get_smart_attr($data, 'Reallocated_Sector_Ct') || get_smart_attr($data, 'Reallocated_Event_Count')){
$json->{reallocated_sector_count} = $realloc->{raw}->{value};
} elsif (defined $data->{nvme_smart_health_information_log}->{media_errors}){
# NVMe can report media errors, so expose them as reallocated sectors
$json->{reallocated_sector_count} = $data->{nvme_smart_health_information_log}->{media_errors};
}
if ($offline = get_smart_attr($data, 'Offline_Uncorrectable')){
$json->{offline_uncorrectable} = $offline->{raw}->{value};
}
if ($remain = get_smart_attr($data, 'Percent_Lifetime_Remain')){
$json->{percent_lifetime_remain} = $remain->{value};
} elsif ($remain = get_smart_attr($data, 'SSD_Life_Left')){
$json->{percent_lifetime_remain} = $remain->{raw}->{value};
} elsif ($remain = get_smart_attr($data, 'Wear_Leveling_Count')){
$json->{percent_lifetime_remain} = $remain->{value};
} elsif (defined $data->{nvme_smart_health_information_log}->{percentage_used}){
# NVMe sometimes reports the estimated life used instead of the life remaining
$json->{percent_lifetime_remain} = 100 - $data->{nvme_smart_health_information_log}->{percentage_used};
}
print_out();
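Example runs, matching the --dev/--type wiring in zabbix_conf above (device paths are illustrative):
./check_stor_dev_sudo --device /dev/sda --type auto --what temperature_celsius
./check_stor_dev_sudo --device /dev/nvme0 --type nvme --what json --pretty
A --what naming no known key prints ZBX_NOTSUPPORTED and exits 1, so Zabbix marks the item unsupported instead of storing a bogus value.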

View File

@@ -136,7 +136,7 @@ if ($unifi){
$json->{$_} = $obj->{$_} if (defined $obj->{$_});
}
# Convert last seen into a relative time
$json->{last_seen} = time - $obj->{last_seen};
$json->{last_seen} = (defined $obj->{last_seen}) ? time - $obj->{last_seen} : time;
# Add some more info in sys_stats
$json->{sys_stats}->{$_} = $obj->{'system-stats'}->{$_} foreach (qw/cpu mem uptime/);
@@ -162,7 +162,7 @@ if ($unifi){
}
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
next if (not $entry->{ap_mac} or $entry->{ap_mac} ne $dev or $entry->{is_wired} == JSON::PP::true);
next if (not $entry->{ap_mac} or $entry->{ap_mac} ne $dev or $entry->{is_wired} == JSON::true);
foreach (@radio_proto){
if ($entry->{radio_proto} eq $_){
$json->{'num_sta_' . $_}++;
@@ -199,10 +199,10 @@ if ($unifi){
$json->{$_} = $obj->{$_} || 0;
}
# Convert last_seen to relative
$json->{last_seen} = time - $json->{last_seen};
$json->{last_seen} = (defined $obj->{last_seen}) ? time - $obj->{last_seen} : time;
# For wireless stations, we gather some more info
if ($obj->{is_wired} == JSON::PP::false){
if ($obj->{is_wired} == JSON::false){
my @client_wireless = qw/rx_rate tx_rate essid ap_mac tx_power radio_proto signal noise satisfaction/;
foreach (@client_wireless){
$json->{$_} = $obj->{$_} || 0;

View File

@@ -163,13 +163,20 @@ sub convert_suffix {
sub get_zpool_stats {
my $pool = shift;
my $stats = {};
return $stats unless (-e "/proc/spl/kstat/zfs/$pool/io");
open STAT, "</proc/spl/kstat/zfs/$pool/io";
while (<STAT>){
if (m/^(?<nread>\d+)\s+(?<nwritten>\d+)\s+(?<reads>\d+)\s+(?<writes>\d+)\s+(?<wtime>\d+)\s+(?<wlentime>\d+)\s+(?<wupdate>\d+)\s+(?<rtime>\d+)\s+(?<rlentime>\d+)\s+(?<rupdate>\d+)\s+(?<wcnt>\d+)\s+(?<rcnt>\d+)/){
$stats->{$_} = $+{$_} foreach (keys %+);
open UPTIME, "</proc/uptime";
$_ = <UPTIME>;
chomp;
my ($uptime , undef) = split;
$uptime = int $uptime;
close UPTIME;
foreach my $line (qx($zpool iostat $pool -pH)){
if ($line =~ m/^$pool\s+\d+\s+\d+\s+(?<reads>\d+)\s+(?<writes>\d+)\s+(?<nread>\d+)\s+(?<nwritten>\d+)/){
# zpool iostat shows average IO per second since boot, so multiply it
# by the uptime in seconds to get cumulative IO since boot.
# Zabbix server will then be able to calculate the delta between two values
$stats->{$_} = $+{$_} * $uptime foreach (keys %+);
last;
}
}
close STAT;
return $stats;
}
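A worked example (numbers invented): with 86400 s of uptime and zpool iostat reporting an average of 120 reads/s, the script stores reads = 120 * 86400 = 10368000; from two such samples the server's delta step (e.g. a CHANGE_PER_SECOND preprocessing) recovers the actual rate over the sampling interval.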

View File

@@ -45,8 +45,8 @@ my $output = {};
if (defined $status){
foreach my $line (qx($zmcontrol status)){
if ($line =~ m/^\s+(\w+)\s+(Running|Stopped)/){
$output->{$1} = ($2 eq 'Running') ? 1 : 0;
if ($line =~ m/^\s+(\w+)(\swebapp)?\s+(Running|Stopped)/){
$output->{$1} = ($3 eq 'Running') ? 1 : 0;
}
}
if ($status eq 'all'){
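For reference, the two line shapes the new regex must handle look like this (service names from a stock Zimbra install; exact spacing assumed from the regex):
mailbox Running
zimbra webapp Running
With the optional (\swebapp)? group, 'zimbra webapp' lines now set $output->{zimbra} from the third capture; the old two-capture regex simply never matched them, so webapp services went unreported.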

View File

@@ -0,0 +1,68 @@
#!/usr/bin/perl
use warnings;
use strict;
use JSON;
use Getopt::Long;
use File::Which;
use Data::Dumper;
my $what = 'containers';
my $pretty = 0;
GetOptions(
'what=s' => \$what,
'pretty' => \$pretty
);
my $json = [];
my $docker = which('docker');
# If the docker cli is not available, print an empty discovery list and terminate
# (printing the raw arrayref would emit something like ARRAY(0x...), not valid JSON)
if (not defined $docker){
print to_json($json) . "\n";
exit(0);
}
my $format;
my $cmd;
if ($what =~ m/^containers?/){
$format = '{' .
'"{#DOCKER_CONTAINER_ID}":"{{ .ID }}",' .
'"{#DOCKER_CONTAINER_IMAGE}": "{{ .Image }}",' .
'"{#DOCKER_CONTAINER_NAME}":"{{ .Names }}",' .
'"{#DOCKER_CONTAINER_STATUS}":"{{ .Status }}"' .
'}';
$cmd = "$docker container list --all --format '$format'";
} elsif ($what =~ m/^networks?/){
$format = '{' .
'"{#DOCKER_NET_ID}":"{{ .ID }}",' .
'"{#DOCKER_NET_NAME}":"{{ .Name }}",' .
'"{#DOCKER_NET_DRIVER}":"{{ .Driver }}",' .
'"{#DOCKER_NET_SCOPE}":"{{ .Scope }}"' .
'}';
$cmd = "$docker network list --format '$format'";
} elsif ($what =~ m/^volumes?/){
$format = '{' .
'"{#DOCKER_VOL_NAME}":"{{ .Name }}",' .
'"{#DOCKER_VOL_DRIVER}":"{{ .Driver }}",' .
'"{#DOCKER_VOL_SCOPE}":"{{ .Scope }}"' .
'}';
$cmd = "$docker volume list --format '$format'";
} else {
print <<_EOF;
Usage: $0 --what=<item to discover> [--pretty]
with available items being
* containers : list containers, including stopped ones
* networks : list networks
* volumes : list volumes
_EOF
exit 1;
}
foreach my $line (qx($cmd)){
chomp $line;
push @{$json}, from_json($line);
}
print to_json($json, { pretty => $pretty }) . "\n";
exit(0);
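A containers run emits one LLD object per container, e.g. (values invented):
[{"{#DOCKER_CONTAINER_ID}":"1a2b3c4d5e6f","{#DOCKER_CONTAINER_IMAGE}":"nginx:1.21","{#DOCKER_CONTAINER_NAME}":"web","{#DOCKER_CONTAINER_STATUS}":"Up 3 days"}]
Zabbix 4.2 and later accepts this bare-array form without the legacy {"data": [...]} wrapper, which is also why disco_zimbra below drops $json->{data} in favour of a plain arrayref.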

View File

@@ -0,0 +1,95 @@
#!/usr/bin/perl
use warnings;
use strict;
use JSON;
use Getopt::Long;
use LWP::UserAgent;
use HTTP::Request::Common;
use URI;
use Data::Dumper;
my $user = undef;
my $pass = undef;
my $url = 'http://localhost:9200';
my $certcheck = 1;
my $nodes = 0;
my $indices = 0;
my $pretty = 0;
my $json = [];
GetOptions (
'user:s' => \$user,
'password:s' => \$pass,
'url=s' => \$url,
'cert-check!' => \$certcheck,
'nodes' => \$nodes,
'indices' => \$indices,
'pretty' => \$pretty
);
if ($nodes and $indices){
die "--nodes and --indices are mutually exclusive\n";
}
my $uri = URI->new($url);
if (not defined $uri){
die "$url is not a valid URL\n";
}
# If connecting over plain http, or if the host is localhost,
# there's no need to check the certificate
if ($uri->scheme eq 'http' or $uri->host =~ m/^(localhost|127\.0\.0)/){
$certcheck = 0;
}
my $sslopts = {};
if (not $certcheck){
$sslopts = {
verify_hostname => 0,
SSL_verify_mode => 0
}
}
my $ua = LWP::UserAgent->new(
ssl_opts => $sslopts
);
$ua->env_proxy;
if ($nodes){
foreach (@{make_request('/_cat/nodes?format=json&full_id&h=ip,role,master,name,id,version')}){
push @{$json}, {
'{#ES_NODE_NAME}' => $_->{name},
'{#ES_NODE_ROLE}' => $_->{role},
'{#ES_NODE_ID}' => $_->{id},
'{#ES_NODE_VERSION}' => $_->{version},
'{#ES_NODE_MASTER}' => $_->{master}
};
}
} elsif ($indices){
foreach (@{make_request('/_cat/indices?format=json')}){
push @{$json}, {
'{#ES_INDEX_NAME}' => $_->{index},
'{#ES_INDEX_STATUS}' => $_->{status},
'{#ES_INDEX_UUID}' => $_->{uuid}
};
}
}
print to_json($json, { pretty => $pretty });
sub make_request {
my $path = shift;
my $req_url = $url . $path;
my $req = GET $req_url;
if (defined $user and $user ne '' and defined $pass and $pass ne ''){
$req->authorization_basic($user, $pass);
}
my $resp = $ua->request($req);
die "Request to $req_url failed : " . $resp->message . "\n" if $resp->is_error;
return from_json($resp->decoded_content);
}
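Example invocations (URL and credentials are placeholders):
./disco_elasticsearch --nodes
./disco_elasticsearch --url=https://es.example.org:9200 --user=zabbix --pass=secret --indices --pretty
Each prints the LLD array that the elasticsearch.discovery[*] UserParameter above feeds to the server.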

View File

@@ -0,0 +1,61 @@
#!/usr/bin/perl
use strict;
use warnings;
use JSON;
use Getopt::Long;
use Data::Dumper;
use File::Which;
my $pretty = 0;
GetOptions(
'pretty' => \$pretty
);
my $smartctl = which('smartctl');
my $json = [];
sub print_out {
print to_json($json, { pretty => $pretty });
}
if (not defined $smartctl){
print_out();
exit 0;
}
my $smart_scan = from_json(qx($smartctl --scan-open --json=c));
if (not defined $smart_scan){
print_out();
exit 0;
}
foreach my $device (@{$smart_scan->{devices}}){
# Assign a list so every field gets initialized (a lone "" would only set $model)
my ($model, $sn, $has_smart) = ('', '', 0);
my $smart_info = from_json(qx($smartctl -i $device->{name} -d $device->{type} --json=c));
if (defined $smart_info){
$model = $smart_info->{model_name};
$sn = $smart_info->{serial_number};
$has_smart = (
$smart_info->{in_smartctl_database} or (
defined $smart_info->{smart_support} and
$smart_info->{smart_support}->{available} and
$smart_info->{smart_support}->{enabled}
)
) ? 1 : 0;
}
push @{$json}, {
'{#STOR_DEV_NAME}' => $device->{name},
'{#STOR_DEV_DESC}' => $device->{info_name},
'{#STOR_DEV_TYPE}' => $device->{type},
'{#STOR_DEV_PROTO}' => $device->{protocol},
'{#STOR_DEV_MODEL}' => $model,
'{#STOR_DEV_SN}' => $sn,
'{#STOR_DEV_SMART}' => int $has_smart
};
}
print_out();
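Since smartctl --scan-open probes each device and reports the transport it actually opened (sat, nvme, megaraid,N, ...), the discovered {#STOR_DEV_TYPE} macro can be passed straight back as the --type argument of the check script above.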

View File

@@ -107,8 +107,8 @@ if ($what eq 'devices'){
foreach my $entry (@{from_json($resp->decoded_content)->{data}}){
# Ignore other sites
next if ($entry->{site_id} ne $site_id);
next if ($type eq 'wireless' and $entry->{is_wired} eq 'true');
next if ($type eq 'wired' and $entry->{is_wired} eq 'false');
next if ($type eq 'wireless' and $entry->{is_wired} == JSON::true);
next if ($type eq 'wired' and $entry->{is_wired} == JSON::false);
push @{$json->{data}}, {
'{#UNIFI_STA_ID}' => $entry->{_id},
'{#UNIFI_STA_NAME}' => (defined $entry->{hostname}) ? $entry->{hostname} : $entry->{mac},

View File

@@ -6,8 +6,7 @@ use Getopt::Long;
use Net::Domain qw(hostfqdn);
use Data::Dumper;
my $json;
@{$json->{data}} = ();
my $json = [];
my $pretty = 0;
my $services = 1;
@@ -47,7 +46,7 @@ if ($services){
};
foreach my $service (qx($zmprov getServer $hostname zimbraServiceEnabled)){
if ($service =~ m/^zimbraServiceEnabled:\s+(\w+)/){
push @{$json->{data}}, {
push @{$json}, {
'{#ZM_SERVICE}' => $1
};
}
@@ -55,7 +54,7 @@
} elsif ($servers){
foreach my $server (qx($zmprov getAllServers)){
chomp $server;
push @{$json->{data}}, {
push @{$json}, {
'{#ZM_SERVER}' => $server
};
}

View File

@@ -72,7 +72,7 @@ my $sensors = {};
# Try to detect IPMI sensors
if ($ipmitool && -x $ipmitool){
# First check for temperature sensors
my @lines = qx($ipmitool sdr type Temperature);
my @lines = qx($ipmitool sdr type Temperature 2>/dev/null);
if ($? == 0){
SENSOR: foreach my $l (@lines){
chomp $l;
@@ -84,7 +84,7 @@ if ($ipmitool && -x $ipmitool){
my $name = $1;
my $sensor = {};
my @details = qx($ipmitool sdr get '$name');
my @details = qx($ipmitool sdr get '$name' 2>/dev/null);
if ($? != 0){
print "Couldn't get detail for sensor $name\n";
next SENSOR;
@@ -124,7 +124,7 @@ if ($ipmitool && -x $ipmitool){
$sensor->{description} = $name;
$sensor->{type} = 'temp';
$sensor->{unit} = '°C';
$sensor->{cmd} = "$ipmitool sdr get '$name' | grep 'Sensor Reading' | awk '{print \$4}' | head -1";
$sensor->{cmd} = "$ipmitool sdr get '$name' 2>/dev/null | perl -ne 'if (/Sensor Reading\\s*:\\s*([^\\s]+)/) { print \"\$1\\n\"; last }'";
my $id = sensor_name($name);
$sensors->{$id} = $sensor;
print "Found a temperature sensor using IPMI: $name\n";
@@ -133,7 +133,7 @@ if ($ipmitool && -x $ipmitool){
# Now check for Fan, nearly the same as Temp, but
# * We try to detect the unit
# * threshold handling is not the same
@lines = qx($ipmitool sdr type Fan);
@lines = qx($ipmitool sdr type Fan 2>/dev/null);
if ($? == 0){
SENSOR: foreach my $l (@lines){
chomp $l;
@@ -144,7 +144,7 @@ if ($ipmitool && -x $ipmitool){
my $value = $3;
my $sensor = {};
my @details = qx($ipmitool sdr get '$name');
my @details = qx($ipmitool sdr get '$name' 2>/dev/null);
if ($? != 0){
print "Couldn't get detail for sensor $name\n";
next SENSOR;
@@ -152,9 +152,9 @@ if ($ipmitool && -x $ipmitool){
my $val = undef;
foreach my $d (@details){
chomp $d;
if ($d =~ m/^\s*Sensor\sReading\s*:\s*(\w+)/){
if ($d =~ m/^\s*Sensor\sReading\s*:\s*(\d+(\.\d+)?)/){
$val = $1;
if ($val !~ m/^\d+$/){
if ($val !~ m/^\d+(\.\d+)?$/){
print "Skipping sensor $name, couldn't parse its value: $val\n";
next SENSOR;
}
@@ -174,14 +174,14 @@ if ($ipmitool && -x $ipmitool){
$sensor->{description} = $name;
$sensor->{type} = 'fan';
$sensor->{unit} = ($value =~ m/percent|%/ || $val < 100) ? '%' : 'rpm';
$sensor->{cmd} = "$ipmitool sdr get '$name' | grep 'Sensor Reading' | awk '{print \$4}' | head -1";
$sensor->{cmd} = "$ipmitool sdr get '$name' 2>/dev/null | perl -ne 'if (/Sensor Reading\\s*:\\s*([^\\s]+)/) { print \"\$1\\n\"; last }'";
my $id = sensor_name($name);
$sensors->{$id} = $sensor;
print "Found a fan sensor using IPMI: $name\n";
}
}
# Now look for power information
@lines = qx($ipmitool sdr type 'Current');
@lines = qx($ipmitool sdr type 'Current' 2>/dev/null);
if ($? == 0){
SENSOR: foreach my $l (@lines){
chomp $l;
@@ -193,7 +193,7 @@ if ($ipmitool && -x $ipmitool){
my $value = $4;
my $sensor = {};
if ($name =~ m/(Power)|(Pwr)|(Consumption)|(PS\d+\sCurr\sOut)/i || $value =~ m/W(att)?/i){
my @details = qx($ipmitool sdr get '$name');
my @details = qx($ipmitool sdr get '$name' 2>/dev/null);
if ($? != 0){
print "Couldn't get detail for sensor $name\n";
next SENSOR;
@@ -224,7 +224,7 @@ if ($ipmitool && -x $ipmitool){
$sensor->{description} = $name;
$sensor->{type} = 'power';
$sensor->{unit} = ($name =~ m/%/) ? '%' : 'Watt';
$sensor->{cmd} = "$ipmitool sdr get '$name' | grep 'Sensor Reading' | awk '{print \$4}' | head -1";
$sensor->{cmd} = "$ipmitool sdr get '$name' 2>/dev/null | perl -ne 'if (/Sensor Reading\\s*:\\s*([^\\s]+)/) { print \"\$1\\n\"; last }'";
my $id = sensor_name($name);
$sensors->{$id} = $sensor;
print "Found a power sensor using IPMI: $name\n";
@@ -276,7 +276,7 @@ if ($lmsensor && -x $lmsensor){
$sensor->{description} = $name;
$sensor->{type} = 'temp';
$sensor->{unit} = '°C';
$sensor->{cmd} = "$lmsensor | grep '$name:' | cut -d+ -f2 | cut -d. -f1 | head -1";
$sensor->{cmd} = "$lmsensor | perl -ne 'if (/^$name:\\s*\\+(\\d+)/) { print \"\$1\\n\"; last }'";
my $id = sensor_name($name);
$sensors->{$id} = $sensor;
print "Found a temperature sensor using lm_sensors: $name\n";
@@ -297,7 +297,7 @@ if ($smartctl && -x $smartctl){
threshold_high => $def_hd_temp_thres_high,
type => 'temp',
unit => '°C',
cmd => "$smartctl -A /dev/$block | grep $1 | awk '{print \$10}'"
cmd => "$smartctl -A /dev/$block | perl -ne 'if (/Temperature_Celsius(\\s+[^\\s]+){7}\\s+(\\d+(\\.\\d+)?)/) { print \"\$2\\n\"; last }'"
};
print "Found a temperature sensor using smartctl: $block\n";
last;
@@ -310,7 +310,7 @@ if ($smartctl && -x $smartctl){
threshold_high => $def_hd_temp_thres_high,
type => 'temp',
unit => '°C',
cmd => "$smartctl -A /dev/$block | grep Temperature: | awk '{ print \$2 }'"
cmd => "$smartctl -A /dev/$block | perl -ne 'if (/Temperature:\\s+(\\d+(\\.\\d+)?)/) { print \"\$1\\n\"; last }'"
};
print "Found a temperature sensor using smartctl: $block\n";
last;
@@ -343,7 +343,7 @@ if ($smartctl && -x $smartctl){
threshold_high => $def_hd_temp_thres_high,
type => 'temp',
unit => '°C',
cmd => "$smartctl -A -d megaraid,$i /dev/sda | grep $1 | awk '{print \$10}'"
cmd => "$smartctl -A -d megaraid,$i /dev/sda | perl -ne 'if (/(Temperature_Celsius|Airflow_Temperature_Cel)(\\s+[^\\s]+){7}\\s+(\\d+)/) { print \"\$3\\n\"; last }'"
};
print "Found a temperature sensor using smartctl (megaraid): sda-$i\n";
last;

View File

@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>5.0</version>
<date>2021-06-19T12:07:27Z</date>
<date>2021-09-22T16:02:43Z</date>
<groups>
<group>
<name>Templates</name>
@@ -41,7 +41,7 @@
<key>backuppc.general[bkp]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>!backup(s)</units>
<applications>
<application>
@@ -64,7 +64,7 @@
<key>backuppc.general[full_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@@ -87,7 +87,7 @@
<key>backuppc.general[history_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@@ -110,7 +110,7 @@
<key>backuppc.general[hosts]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>!host(s)</units>
<applications>
<application>
@@ -133,7 +133,7 @@
<key>backuppc.general[perf]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<applications>
<application>
@@ -156,7 +156,7 @@
<key>backuppc.general[ratio]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<applications>
<application>
@@ -179,7 +179,7 @@
<key>backuppc.general[total_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@@ -199,9 +199,9 @@
<item>
<name>Number of BackupPC processes</name>
<key>proc.num[,backuppc,,BackupPC]</key>
<delay>10m</delay>
<delay>30m</delay>
<history>60d</history>
<trends>730d</trends>
<trends>0</trends>
<applications>
<application>
<name>BackupPC</name>
@@ -214,7 +214,7 @@
<key>proc.num[,root,,BackupPC_raidsync]</key>
<delay>10m</delay>
<history>60d</history>
<trends>730d</trends>
<trends>0</trends>
<applications>
<application>
<name>BackupPC</name>
@@ -242,7 +242,7 @@
<key>backuppc.entity[{#BPC_ENTITY},bkp]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<applications>
<application>
<name>BackupPC</name>
@@ -264,7 +264,7 @@
<key>backuppc.entity[{#BPC_ENTITY},full_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>o</units>
<applications>
<application>
@@ -287,7 +287,7 @@
<key>backuppc.entity[{#BPC_ENTITY},history_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>o</units>
<applications>
<application>
@@ -310,7 +310,7 @@
<key>backuppc.entity[{#BPC_ENTITY},hosts]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<applications>
<application>
<name>BackupPC</name>
@@ -332,7 +332,7 @@
<key>backuppc.entity[{#BPC_ENTITY},perf]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!h/j</units>
<applications>
@@ -356,7 +356,7 @@
<key>backuppc.entity[{#BPC_ENTITY},ratio]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@@ -380,7 +380,7 @@
<key>backuppc.entity[{#BPC_ENTITY},size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>o</units>
<applications>
<application>
@@ -400,7 +400,7 @@
<item_prototype>
<name>BackupPC: Info for entity {#BPC_ENTITY}</name>
<key>backuppc.entity[{#BPC_ENTITY}]</key>
<delay>1800</delay>
<delay>1h</delay>
<history>0</history>
<trends>0</trends>
<value_type>TEXT</value_type>
@@ -484,7 +484,7 @@
<key>backuppc.host[{#BPCHOST},age]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>s</units>
<applications>
<application>
@@ -504,7 +504,7 @@
<item_prototype>
<name>BackupPC: Info de {#BPCHOST}</name>
<key>backuppc.host[{#BPCHOST},all]</key>
<delay>15m</delay>
<delay>30m</delay>
<history>0</history>
<trends>0</trends>
<value_type>TEXT</value_type>
@@ -520,7 +520,7 @@
<key>backuppc.host[{#BPCHOST},bkp]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>!backups</units>
<applications>
<application>
@@ -543,7 +543,7 @@
<key>backuppc.host[{#BPCHOST},comp_ratio]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@@ -567,7 +567,7 @@
<key>backuppc.host[{#BPCHOST},duration]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>s</units>
<applications>
<application>
@@ -590,7 +590,7 @@
<key>backuppc.host[{#BPCHOST},enabled]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<applications>
<application>
<name>BackupPC</name>
@@ -612,7 +612,7 @@
<key>backuppc.host[{#BPCHOST},errors]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>!errors</units>
<applications>
<application>
@@ -635,7 +635,7 @@
<key>backuppc.host[{#BPCHOST},full_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@@ -658,7 +658,7 @@
<key>backuppc.host[{#BPCHOST},history_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@@ -681,7 +681,7 @@
<key>backuppc.host[{#BPCHOST},new_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@@ -704,7 +704,7 @@
<key>backuppc.host[{#BPCHOST},new_size_avg]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@@ -727,7 +727,7 @@
<key>backuppc.host[{#BPCHOST},new_size_median]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@@ -750,7 +750,7 @@
<key>backuppc.host[{#BPCHOST},new_size_q1]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@@ -773,7 +773,7 @@
<key>backuppc.host[{#BPCHOST},new_size_q3]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@@ -796,7 +796,7 @@
<key>backuppc.host[{#BPCHOST},total_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>

View File

@@ -0,0 +1,610 @@
zabbix_export:
version: '5.4'
date: '2022-01-20T13:55:05Z'
groups:
-
uuid: 7df96b18c230490a9a0a9e2307226338
name: Templates
templates:
-
uuid: 722c34dae28f471b992685f75b217e84
template: Template_App_Docker
name: Template_App_Docker
groups:
-
name: Templates
items:
-
uuid: b35116a368e94b0698951010ddb496b5
name: 'Docker: Number of paused container'
type: DEPENDENT
key: 'container.docker.check[global,ContainersPaused]'
delay: '0'
history: 30d
trends: 1095d
units: '!container(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.info.ContainersPaused
master_item:
key: 'container.docker.check[global]'
-
uuid: ff406b09a49f42ef80e004f7b8737c9b
name: 'Docker: Number of running container'
type: DEPENDENT
key: 'container.docker.check[global,ContainersRunning]'
delay: '0'
history: 30d
trends: 1095d
units: '!container(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.info.ContainersRunning
master_item:
key: 'container.docker.check[global]'
triggers:
-
uuid: 85e456243c714349b34ff6504d122004
expression: 'change(/Template_App_Docker/container.docker.check[global,ContainersRunning])=1'
name: 'Number of running containers changed to {ITEM.LASTVALUE1}'
priority: INFO
-
uuid: 69c12d97c6604c608fba4d2c0d4875bb
name: 'Docker: Number of stopped container'
type: DEPENDENT
key: 'container.docker.check[global,ContainersStopped]'
delay: '0'
history: 30d
trends: 1095d
units: '!container(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.info.ContainersStopped
master_item:
key: 'container.docker.check[global]'
-
uuid: c6831879964a4a3a90fc82530e6a0d43
name: 'Docker: Number of containers'
type: DEPENDENT
key: 'container.docker.check[global,Containers]'
delay: '0'
history: 30d
trends: 1095d
units: '!container(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.info.Containers
master_item:
key: 'container.docker.check[global]'
-
uuid: 1da51785c3d34faf8ba7ab17d0b46b38
name: 'Docker: Live restore status'
type: DEPENDENT
key: 'container.docker.check[global,LiveRestoreEnabled]'
delay: '0'
history: 7d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.info.LiveRestoreEnabled
master_item:
key: 'container.docker.check[global]'
triggers:
-
uuid: 85a2fc38a6bb42e183de3d4687f466e8
expression: 'last(/Template_App_Docker/container.docker.check[global,LiveRestoreEnabled])<>"true" and {$DOCKER_WARN_NO_LIVE_RESTORE}=1'
name: 'Docker live restore isn''t enabled'
priority: WARNING
-
uuid: ab2d3bd636964a21b764b2f33f2e109e
name: 'Docker: server version'
type: DEPENDENT
key: 'container.docker.check[global,ServerVersion]'
delay: '0'
history: 15d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.info.ServerVersion
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[global]'
triggers:
-
uuid: 8c535ff0d73240c395226ee91a8096c4
expression: '(last(/Template_App_Docker/container.docker.check[global,ServerVersion])<>last(/Template_App_Docker/container.docker.check[global,ServerVersion],#2))=1'
name: 'Docker version changed ({ITEM.LASTVALUE1})'
priority: WARNING
-
uuid: d5c424dc767c4a5ea1ee441e76770411
name: 'Docker global info'
key: 'container.docker.check[global]'
delay: 5m
history: '0'
trends: '0'
value_type: TEXT
discovery_rules:
-
uuid: e459f638e15f495db3e4a9060e60f7c8
name: 'Docker: container discovery'
key: 'container.docker.discovery[containers]'
delay: 15m
lifetime: 7d
item_prototypes:
-
uuid: 7f3fddee7aed48c8acc26e9aa98df298
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Disk IO Read'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},BlockIORead]'
delay: '0'
history: 30d
trends: 1095d
units: B/s
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.BlockIORead
-
type: CHANGE_PER_SECOND
parameters:
- ''
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: 27f4e3e3a33c412ead03200de17bc9b5
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Disk IO Write'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},BlockIOWrite]'
delay: '0'
history: 30d
trends: 1095d
units: B/s
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.BlockIOWrite
-
type: CHANGE_PER_SECOND
parameters:
- ''
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: f59ee387b12740fc90ff6d990422e92d
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: CPU'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},CPUPerc]'
delay: '0'
history: 30d
trends: 1095d
value_type: FLOAT
units: '%'
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.CPUPerc
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
trigger_prototypes:
-
uuid: d2bdccc7bf8a49c6a2c584a5394a0c27
expression: 'avg(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},CPUPerc],1h)>{$DOCKER_CPU_PCT_WARN:"{#DOCKER_CONTAINER_NAME}"}'
recovery_mode: RECOVERY_EXPRESSION
recovery_expression: 'avg(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},CPUPerc],1h)<({$DOCKER_CPU_PCT_WARN:"{#DOCKER_CONTAINER_NAME}"}-2) or avg(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},CPUPerc],5m)<({$DOCKER_CPU_PCT_WARN:"{#DOCKER_CONTAINER_NAME}"}/3)'
name: 'Container {#DOCKER_CONTAINER_NAME} CPU usage is {ITEM.VALUE1}'
priority: INFO
-
uuid: f56ddbaef3af4fa898ae2403a812db3c
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Dead'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},Dead]'
delay: '0'
history: 7d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.inspect.State.Dead
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: e09f311feb1749cc88e8c51c8425c324
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Error'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},Error]'
delay: '0'
history: 7d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.inspect.State.Error
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: b75ac0b193d2451e8ee3ce0950b7b07a
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Image'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},Image]'
delay: '0'
history: 7d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.inspect.Config.Image
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: ba301501bbec40e6add5a47d2fd0ee15
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Memory usage'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},MemCurrent]'
delay: '0'
history: 30d
trends: 1095d
units: B
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.MemCurrent
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: 5895cddf57fd4b5e8917593cba941491
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Memory limit'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},MemLimit]'
delay: '0'
history: 30d
trends: 1095d
units: B
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.MemLimit
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: 640bce04f3fc483fa495ac72208effa7
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Memory used (%)'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},MemPerc]'
delay: '0'
history: 30d
trends: 1095d
value_type: FLOAT
units: '%'
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.MemPerc
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
trigger_prototypes:
-
uuid: 9fdd469b0866409ca7889288eb5f229a
expression: 'last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},MemPerc])>{$DOCKER_MEM_PCT_WARN:"{#DOCKER_CONTAINER_NAME}"}'
recovery_mode: RECOVERY_EXPRESSION
recovery_expression: 'last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},MemPerc])<({$DOCKER_MEM_PCT_WARN:"{#DOCKER_CONTAINER_NAME}"}-2)'
name: 'Container {#DOCKER_CONTAINER_NAME} memory usage is at {ITEM.LASTVALUE1}'
priority: AVERAGE
manual_close: 'YES'
-
uuid: eb58fb55bc0442319650e32a8ba1faf4
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Network in'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},NetIOIn]'
delay: '0'
history: 30d
trends: 1095d
units: b/s
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.NetIOIn
-
type: CHANGE_PER_SECOND
parameters:
- ''
-
type: MULTIPLIER
parameters:
- '8'
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: 6a97e94be7d1432fb11a92072ea9e55f
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Network out'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},NetIOOut]'
delay: '0'
history: 30d
trends: 1095d
units: b/s
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.NetIOOut
-
type: CHANGE_PER_SECOND
parameters:
- ''
-
type: MULTIPLIER
parameters:
- '8'
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: 7be56a29b2464c5e96c2f4f2b3e02fba
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: OOMKilled'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},OOMKilled]'
delay: '0'
history: 7d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.inspect.State.OOMKilled
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: a5ed78ba8acf43a488b3833d14e39381
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Number of processes'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},PIDs]'
delay: '0'
history: 30d
trends: 1095d
units: '!process(es)'
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.PIDs
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: 1213e0b7fc824442b50ab4ff4cd1047b
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Restarting'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},Restarting]'
delay: '0'
history: 7d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.inspect.State.Restarting
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: 418b35e93f114be986ecdd5a9d259283
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Status'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status]'
delay: '0'
history: 7d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.inspect.State.Status
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
trigger_prototypes:
-
uuid: 45e35825ed184b0290d1bc1ed27e48e4
expression: 'find(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status],,"regexp","^(running|exited)$")<>1'
name: 'Container {#DOCKER_CONTAINER_NAME} is {ITEM.LASTVALUE1}'
priority: WARNING
manual_close: 'YES'
-
uuid: ab2c34de197f4ea197e607604c936f34
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Uptime'
type: DEPENDENT
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},Uptime]'
delay: '0'
history: 30d
trends: 1095d
units: s
preprocessing:
-
type: JSONPATH
parameters:
- $.stats.Uptime
master_item:
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
-
uuid: b232a5b3aedd4072a56460dd041579b1
name: 'Docker: container {#DOCKER_CONTAINER_NAME}: Info'
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME}]'
delay: 5m
history: '0'
trends: '0'
value_type: TEXT
trigger_prototypes:
-
uuid: 57d9a11b38b246539a82bf4ba1a79a98
expression: 'last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Uptime])<{$DOCKER_UPTIME_WARN:"{#DOCKER_CONTAINER_NAME}"} and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status],#2)="running" and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status])="running"'
name: 'Container {#DOCKER_CONTAINER_NAME} has just restarted'
priority: INFO
-
uuid: ce3cc3148fa24c6097cbf38b47e9e655
expression: 'last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Uptime])<{$DOCKER_UPTIME_WARN:"{#DOCKER_CONTAINER_NAME}"} and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status],#2)="exited" and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status])="running"'
name: 'Container {#DOCKER_CONTAINER_NAME} has just started'
priority: INFO
-
uuid: ebdea3b26c624714811124fe4681ccec
expression: 'max(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Uptime],20m)<{$DOCKER_UPTIME_WARN:"{#DOCKER_CONTAINER_NAME}"} and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Uptime])>0 and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status])="running" and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status],#2)="running" and last(/Template_App_Docker/container.docker.check[container,{#DOCKER_CONTAINER_NAME},Status],#3)="running"'
name: 'Container {#DOCKER_CONTAINER_NAME} seems to restart in a loop'
priority: AVERAGE
manual_close: 'YES'
graph_prototypes:
-
uuid: 4a9aa378970045399303dcead39c1e53
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: CPU'
ymin_type_1: FIXED
graph_items:
-
sortorder: '1'
drawtype: GRADIENT_LINE
color: F63100
item:
host: Template_App_Docker
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},CPUPerc]'
-
uuid: 7cd7cd8fd24d492ab7995a882b7bf9af
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Disk IO'
graph_items:
-
sortorder: '1'
drawtype: FILLED_REGION
color: FFAB91
item:
host: Template_App_Docker
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},BlockIORead]'
-
sortorder: '2'
drawtype: FILLED_REGION
color: A5D6A7
item:
host: Template_App_Docker
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},BlockIOWrite]'
-
uuid: 0fec757ff2f34e4fbab3be8ea455bc5f
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Memory'
ymin_type_1: FIXED
graph_items:
-
sortorder: '1'
color: 2774A4
item:
host: Template_App_Docker
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},MemCurrent]'
-
uuid: 948dccce10c54510810a64612933ca89
name: 'Docker: Container {#DOCKER_CONTAINER_NAME}: Network'
graph_items:
-
sortorder: '1'
drawtype: FILLED_REGION
color: B39DDB
item:
host: Template_App_Docker
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},NetIOIn]'
-
sortorder: '2'
drawtype: FILLED_REGION
color: 80DEEA
item:
host: Template_App_Docker
key: 'container.docker.check[container,{#DOCKER_CONTAINER_NAME},NetIOOut]'
macros:
-
macro: '{$DOCKER_CPU_PCT_WARN}'
value: '90'
-
macro: '{$DOCKER_MEM_PCT_WARN}'
value: '85'
-
macro: '{$DOCKER_UPTIME_WARN}'
value: '300'
-
macro: '{$DOCKER_WARN_NO_LIVE_RESTORE}'
value: '1'
graphs:
-
uuid: 9c9aba9c46e145c7a8acd208edb1d147
name: 'Docker: Number of containers'
type: STACKED
graph_items:
-
sortorder: '1'
color: 1A7C11
item:
host: Template_App_Docker
key: 'container.docker.check[global,ContainersPaused]'
-
sortorder: '2'
color: F63100
item:
host: Template_App_Docker
key: 'container.docker.check[global,ContainersRunning]'
-
sortorder: '3'
color: 2774A4
item:
host: Template_App_Docker
key: 'container.docker.check[global,ContainersStopped]'

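All container metrics in the template above hang off the single container.docker.check[...] item key, so on the agent side one UserParameter is enough. A minimal sketch of that wiring, assuming the bundled check script is installed as /var/lib/zabbix/bin/check_docker_sudo (path, script name, and argument order are illustrative, not shown here):

# zabbix_agentd.conf: hypothetical wiring for the container.docker.check[*] key
# $1 = scope (global|container), $2 = container name, $3 = metric (Status, Uptime, CPUPerc, ...)
UserParameter=container.docker.check[*],/var/lib/zabbix/bin/check_docker_sudo "$1" "$2" "$3"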
View File

@ -0,0 +1,759 @@
zabbix_export:
version: '5.4'
date: '2021-10-19T12:29:52Z'
groups:
-
uuid: 7df96b18c230490a9a0a9e2307226338
name: Templates
templates:
-
uuid: 3a658cb77f26469a8a114b1bcd4734e9
template: Template_App_Elasticsearch
name: Template_App_Elasticsearch
groups:
-
name: Templates
items:
-
uuid: 1957d2e7393348f9ae73bfc07c1122a3
name: 'ES Cluster information'
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
delay: 3m
history: '0'
trends: '0'
value_type: TEXT
tags:
-
tag: Application
value: Elasticsearch
-
uuid: fec408ef6d3249a885a3f9842016ccd4
name: 'ES: Cluster name'
type: DEPENDENT
key: 'elasticsearch.cluster[cluster_name]'
delay: '0'
history: 30d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.cluster_name
-
type: DISCARD_UNCHANGED
parameters:
- ''
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 286f9eb4d1504704999abd6a98b09a5c
name: 'ES: Number of indices'
type: DEPENDENT
key: 'elasticsearch.cluster[indices.count]'
delay: '0'
history: 60d
trends: 1095d
units: '!index(es)'
preprocessing:
-
type: JSONPATH
parameters:
- $.indices.count
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
triggers:
-
uuid: dd7bc1621b994f24917d5beb4c07e17f
expression: 'abs(change(/Template_App_Elasticsearch/elasticsearch.cluster[indices.count]))>0'
recovery_mode: NONE
name: 'Number of indices has changed to {ITEM.LASTVALUE1}'
priority: INFO
manual_close: 'YES'
-
uuid: 8b990ab728304cd98d264fdbbe919795
name: 'ES: Number of documents'
type: DEPENDENT
key: 'elasticsearch.cluster[indices.docs.count]'
delay: '0'
history: 60d
trends: 1095d
units: '!document(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.indices.docs.count
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 4d1b8e4e358749739e3439bf9489b074
name: 'ES: Number of deleted documents'
type: DEPENDENT
key: 'elasticsearch.cluster[indices.docs.deleted]'
delay: '0'
history: 60d
trends: 1095d
units: '!document(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.indices.docs.deleted
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 0f472265bad44e849866d87de2c50474
name: 'ES: Number of primary shards'
type: DEPENDENT
key: 'elasticsearch.cluster[indices.shards.primaries]'
delay: '0'
history: 60d
trends: 1095d
units: '!shard(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.indices.shards.primaries
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 7118fffe2a864915947ce19570885993
name: 'ES: Number of replication shards'
type: DEPENDENT
key: 'elasticsearch.cluster[indices.shards.replication]'
delay: '0'
history: 60d
trends: 1095d
units: '!shard(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.indices.shards.replication
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 019eb7caa2ff45aa8aad4da89b340a54
name: 'ES: Number of shards'
type: DEPENDENT
key: 'elasticsearch.cluster[indices.shards.total]'
delay: '0'
history: 60d
trends: 1095d
units: '!shard(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.indices.shards.total
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 8f0660a8b80041d987a91986608a43d2
name: 'ES: Store size'
type: DEPENDENT
key: 'elasticsearch.cluster[indices.store.size]'
delay: '0'
history: 60d
trends: 1095d
units: B
preprocessing:
-
type: JSONPATH
parameters:
- $.indices.store.size_in_bytes
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 854e270d75ef4c6fad22ebd553f6438e
name: 'ES: Number of data nodes'
type: DEPENDENT
key: 'elasticsearch.cluster[nodes.data]'
delay: '0'
history: 60d
trends: 1095d
units: '!node(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.nodes.count.data
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 52f72a68d734465b8422fb4e3ad084a7
name: 'ES: Number of failed nodes'
type: DEPENDENT
key: 'elasticsearch.cluster[nodes.failed]'
delay: '0'
history: 60d
trends: 1095d
units: '!node(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $._nodes.failed
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
triggers:
-
uuid: c188a92cd4444d7f8fe4c635cf35e656
expression: 'last(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.failed])>0'
name: 'ES cluster has {ITEM.LASTVALUE1} failed node(s)'
priority: AVERAGE
-
uuid: 94de965d25ed41cd8d5ef367e9a22497
name: 'ES: Available space'
type: DEPENDENT
key: 'elasticsearch.cluster[nodes.fs.available_in_bytes]'
delay: '0'
history: 60d
trends: 1095d
units: B
preprocessing:
-
type: JSONPATH
parameters:
- $.nodes.fs.available_in_bytes
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 376c2cd6bcfa4a21867411af6d84f5d1
name: 'ES: Total space'
type: DEPENDENT
key: 'elasticsearch.cluster[nodes.fs.total_in_bytes]'
delay: '0'
history: 60d
trends: 1095d
units: B
preprocessing:
-
type: JSONPATH
parameters:
- $.nodes.fs.total_in_bytes
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 20a0526ae5054c808a95c4cc9b3356eb
name: 'ES: Number of master nodes'
type: DEPENDENT
key: 'elasticsearch.cluster[nodes.master]'
delay: '0'
history: 60d
trends: 1095d
units: '!node(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.nodes.count.master
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 0acd45bc9952407ab582689b0b483149
name: 'ES: Number of working nodes'
type: DEPENDENT
key: 'elasticsearch.cluster[nodes.successfull]'
delay: '0'
history: 60d
trends: 1095d
units: '!node(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $._nodes.successful
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 8ef89bc0bcde44ed89a361a9541f7b85
name: 'ES: Number of nodes'
type: DEPENDENT
key: 'elasticsearch.cluster[nodes.total]'
delay: '0'
history: 60d
trends: 1095d
units: '!node(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $._nodes.total
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
triggers:
-
uuid: 82cad93d736248e8b520d25e09cc0fd9
expression: 'abs(change(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.total]))>0'
recovery_mode: NONE
name: 'Number of nodes has changed to {ITEM.LASTVALUE1}'
priority: INFO
manual_close: 'YES'
-
uuid: 4782a3f38fb14c16ba2066c3504727da
name: 'ES: Cluster status'
type: DEPENDENT
key: 'elasticsearch.cluster[status]'
delay: '0'
history: 30d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.status
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 10m
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},cluster]'
tags:
-
tag: Application
value: Elasticsearch
triggers:
-
uuid: b86396f5fcca4dfe825f8ae022ef8edf
expression: 'find(/Template_App_Elasticsearch/elasticsearch.cluster[status],,"like","red")=1'
name: 'ES cluster is in red status'
priority: HIGH
-
uuid: 275c27ab25874666a1cfd8f36df549d0
expression: 'find(/Template_App_Elasticsearch/elasticsearch.cluster[status],,"like","green")=0'
name: 'ES cluster is in {ITEM.LASTVALUE1} status'
priority: WARNING
manual_close: 'YES'
dependencies:
-
name: 'ES cluster is in red status'
expression: 'find(/Template_App_Elasticsearch/elasticsearch.cluster[status],,"like","red")=1'
-
uuid: c54941e310124d4881bcbb76912a72c9
expression: 'nodata(/Template_App_Elasticsearch/elasticsearch.cluster[status],30m)=1'
name: 'No data for Elasticsearch cluster monitoring since 30m'
priority: WARNING
discovery_rules:
-
uuid: bf3408829ed946a99e0c9640698a2eed
name: 'Indices discovery'
key: 'elasticsearch.discovery[{$ES_URL},{$ES_USER},{$ES_PASS},indices]'
delay: 30m
lifetime: 7d
item_prototypes:
-
uuid: 67125ce8f0924ee0a124f06d40715005
name: 'ES: Index: {#ES_INDEX_NAME}: Info'
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},index,{#ES_INDEX_NAME}]'
delay: 3m
history: '0'
trends: '0'
value_type: TEXT
tags:
-
tag: Application
value: Elasticsearch
-
uuid: d457ef885ca442c58fb3c336f2dd8f1d
name: 'ES: Index: {#ES_INDEX_NAME}: Number of active primary shards'
type: DEPENDENT
key: 'elasticsearch.index[{#ES_INDEX_NAME},active_primary_shards]'
delay: '0'
history: 60d
trends: 1095d
units: '!shard(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.active_primary_shards
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},index,{#ES_INDEX_NAME}]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 465dbc25a40948f392287f784aea1bbe
name: 'ES: Index: {#ES_INDEX_NAME}: Number of active shards'
type: DEPENDENT
key: 'elasticsearch.index[{#ES_INDEX_NAME},active_shards]'
delay: '0'
history: 60d
trends: 1095d
units: '!shard(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.active_shards
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},index,{#ES_INDEX_NAME}]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 58c0a433f7494045a1aac59586893415
name: 'ES: Index: {#ES_INDEX_NAME}: Number of replicas'
type: DEPENDENT
key: 'elasticsearch.index[{#ES_INDEX_NAME},number_of_replicas]'
delay: '0'
history: 60d
trends: 1095d
preprocessing:
-
type: JSONPATH
parameters:
- $.number_of_replicas
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},index,{#ES_INDEX_NAME}]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 03c1b057648c4e96937191731206b1ac
name: 'ES: Index: {#ES_INDEX_NAME}: Number of shards'
type: DEPENDENT
key: 'elasticsearch.index[{#ES_INDEX_NAME},number_of_shards]'
delay: '0'
history: 60d
trends: 1095d
units: '!shard(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.number_of_shards
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},index,{#ES_INDEX_NAME}]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: a13865f166344478ae5e3ea02962b8e2
name: 'ES: Index: {#ES_INDEX_NAME}: Number of relocating shards'
type: DEPENDENT
key: 'elasticsearch.index[{#ES_INDEX_NAME},relocating_shards]'
delay: '0'
history: 60d
trends: 1095d
units: '!shard(s)'
preprocessing:
-
type: JSONPATH
parameters:
- $.relocating_shards
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 1h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},index,{#ES_INDEX_NAME}]'
tags:
-
tag: Application
value: Elasticsearch
-
uuid: ba3dda72f4144a989e276b0d13c38536
name: 'ES: Index: {#ES_INDEX_NAME}: Status'
type: DEPENDENT
key: 'elasticsearch.index[{#ES_INDEX_NAME},status]'
delay: '0'
history: 30d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.status
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 10m
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},index,{#ES_INDEX_NAME}]'
tags:
-
tag: Application
value: Elasticsearch
trigger_prototypes:
-
uuid: baeff34841114b31883f0b7e017357a0
expression: 'find(/Template_App_Elasticsearch/elasticsearch.index[{#ES_INDEX_NAME},status],,"like","red")=1'
name: 'ES index {#ES_INDEX_NAME} is in red status'
priority: HIGH
manual_close: 'YES'
-
uuid: 91d45c74b96049158644dc2c4bdc418e
expression: 'find(/Template_App_Elasticsearch/elasticsearch.index[{#ES_INDEX_NAME},status],,"like","green")=0'
name: 'ES index {#ES_INDEX_NAME} is in {ITEM.LASTVALUE1} status'
priority: WARNING
manual_close: 'YES'
dependencies:
-
name: 'ES index {#ES_INDEX_NAME} is in red status'
expression: 'find(/Template_App_Elasticsearch/elasticsearch.index[{#ES_INDEX_NAME},status],,"like","red")=1'
-
uuid: 421078e2f66f4fe787a710c10eb45419
expression: 'nodata(/Template_App_Elasticsearch/elasticsearch.index[{#ES_INDEX_NAME},status],30m)=1'
name: 'No data for Elasticsearch index {#ES_INDEX_NAME} monitoring since 30m'
priority: WARNING
manual_close: 'YES'
dependencies:
-
name: 'No data for Elasticsearch cluster monitoring since 30m'
expression: 'nodata(/Template_App_Elasticsearch/elasticsearch.cluster[status],30m)=1'
-
uuid: 9912e675734d42e7b3810db51a9c13de
name: 'Nodes discovery'
key: 'elasticsearch.discovery[{$ES_URL},{$ES_USER},{$ES_PASS},nodes]'
delay: 2h
lifetime: 7d
item_prototypes:
-
uuid: 70e411ebd3de4f79936ec65ad4b3e41a
name: 'ES: Node {#ES_NODE_NAME} ({#ES_NODE_ID}): Info'
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},node,{#ES_NODE_ID}]'
delay: 5m
history: '0'
trends: '0'
value_type: TEXT
tags:
-
tag: Application
value: Elasticsearch
-
uuid: 0b7eed934ccf4e90b95daced49c7ccde
name: 'ES: Node {#ES_NODE_NAME} ({#ES_NODE_ID}): Uptime'
type: DEPENDENT
key: 'elasticsearch.node[{#ES_NODE_ID},jvm.start_time_in_millis]'
delay: '0'
history: 30d
trends: 1095d
units: ms
preprocessing:
-
type: JSONPATH
parameters:
- $.jvm.start_time_in_millis
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},node,{#ES_NODE_ID}]'
tags:
-
tag: Application
value: Elasticsearch
trigger_prototypes:
-
uuid: e2b5fac004e240e1b381a0f3ce17db4f
expression: 'last(/Template_App_Elasticsearch/elasticsearch.node[{#ES_NODE_ID},jvm.start_time_in_millis])<600000'
name: 'ES restarted recently on node {#ES_NODE_NAME} ({#ES_NODE_ID})'
priority: INFO
manual_close: 'YES'
-
uuid: c335b6ee20d8497393125f9b1a4f7157
name: 'ES: Node {#ES_NODE_NAME} ({#ES_NODE_ID}): ES version'
type: DEPENDENT
key: 'elasticsearch.node[{#ES_NODE_ID},version]'
delay: '0'
history: 30d
trends: '0'
value_type: CHAR
preprocessing:
-
type: JSONPATH
parameters:
- $.version
-
type: DISCARD_UNCHANGED_HEARTBEAT
parameters:
- 6h
master_item:
key: 'elasticsearch.check[{$ES_URL},{$ES_USER},{$ES_PASS},node,{#ES_NODE_ID}]'
tags:
-
tag: Application
value: Elasticsearch
trigger_prototypes:
-
uuid: 15b5d7f22ca2422f9f79f6ee0196d38e
expression: 'last(/Template_App_Elasticsearch/elasticsearch.node[{#ES_NODE_ID},version],#1)<>last(/Template_App_Elasticsearch/elasticsearch.node[{#ES_NODE_ID},version],#2)'
recovery_mode: NONE
name: 'ES version changed to {ITEM.LASTVALUE1} on node {#ES_NODE_NAME} ({#ES_NODE_ID})'
priority: INFO
manual_close: 'YES'
macros:
-
macro: '{$ES_PASS}'
-
macro: '{$ES_SPACE_PCT_CRIT}'
value: '88'
-
macro: '{$ES_SPACE_PCT_WARN}'
value: '78'
-
macro: '{$ES_URL}'
value: 'http://localhost:9200'
-
macro: '{$ES_USER}'
triggers:
-
uuid: e5ff617704b047e2814469075a1a9e38
expression: '(100*last(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.fs.available_in_bytes])/last(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.fs.total_in_bytes]))<(100-{$ES_SPACE_PCT_WARN})'
name: 'Low free disk space'
priority: WARNING
dependencies:
-
name: 'Very low free disk space'
expression: '(100*last(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.fs.available_in_bytes])/last(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.fs.total_in_bytes]))<(100-{$ES_SPACE_PCT_CRIT})'
-
uuid: c6dbde1d2a844e338bb05dee6f59b114
expression: '(100*last(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.fs.available_in_bytes])/last(/Template_App_Elasticsearch/elasticsearch.cluster[nodes.fs.total_in_bytes]))<(100-{$ES_SPACE_PCT_CRIT})'
name: 'Very low free disk space'
priority: HIGH
graphs:
-
uuid: 83738984fe6f4ab1b5a71e150c248ae4
name: 'ES: Storage size'
graph_items:
-
drawtype: GRADIENT_LINE
color: 1A7C11
item:
host: Template_App_Elasticsearch
key: 'elasticsearch.cluster[indices.store.size]'

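Every dependent item in the template above is carved out of the single elasticsearch.check[...,cluster] master item by a JSONPATH step, so the master value is one JSON document per poll. A trimmed sketch of the shape those paths assume, using the field names of the Elasticsearch GET _cluster/stats API (values are invented, and it is assumed the check script relays that response unchanged):

{
  "_nodes": { "total": 3, "successful": 3, "failed": 0 },
  "cluster_name": "es-prod",
  "status": "green",
  "indices": {
    "count": 42,
    "shards": { "total": 96, "primaries": 48, "replication": 1.0 },
    "docs": { "count": 1200000, "deleted": 3400 },
    "store": { "size_in_bytes": 32212254720 }
  },
  "nodes": {
    "count": { "data": 3, "master": 3 },
    "fs": { "total_in_bytes": 644245094400, "available_in_bytes": 322122547200 }
  }
}

Each dependent item keeps only its own field; the DISCARD_UNCHANGED_HEARTBEAT step then stores slow-moving values at most once per heartbeat interval.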
View File

@ -1,7 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>5.0</version>
<date>2021-06-19T12:09:04Z</date>
<date>2021-09-22T16:01:48Z</date>
<groups>
<group>
<name>Templates</name>
@ -28,7 +28,7 @@
<key>db.mysql[Aborted_clients]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>!connections</units>
<applications>
<application>
@ -55,7 +55,7 @@
<key>db.mysql[Aborted_connects]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>!conn</units>
<applications>
<application>
@ -82,7 +82,7 @@
<key>db.mysql[Access_denied_errors]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>!connections</units>
<applications>
<application>
@ -110,7 +110,7 @@ return json.Access_denied_errors || 0;</params>
<key>db.mysql[Acl_users]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>!users</units>
<applications>
<application>
@ -134,7 +134,7 @@ return json.Acl_users || 0;</params>
<key>db.mysql[Bytes_received]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B/s</units>
<applications>
<application>
@ -161,7 +161,7 @@ return json.Acl_users || 0;</params>
<key>db.mysql[Bytes_sent]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B/s</units>
<applications>
<application>
@ -188,7 +188,8 @@ return json.Acl_users || 0;</params>
<key>db.mysql[Com_admin_commands]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!qps</units>
<applications>
<application>
@ -215,7 +216,8 @@ return json.Acl_users || 0;</params>
<key>db.mysql[Com_alter_user]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!qps</units>
<applications>
<application>
@ -243,7 +245,8 @@ return json.Com_alter_user || 0;</params>
<key>db.mysql[Com_begin]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!qps</units>
<applications>
<application>
@ -270,7 +273,8 @@ return json.Com_alter_user || 0;</params>
<key>db.mysql[Com_commit]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!qps</units>
<applications>
<application>
@ -297,7 +301,8 @@ return json.Com_alter_user || 0;</params>
<key>db.mysql[Com_create_user]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!qps</units>
<applications>
<application>
@ -324,7 +329,8 @@ return json.Com_alter_user || 0;</params>
<key>db.mysql[Com_delete]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!qps</units>
<applications>
<application>
@ -351,7 +357,8 @@ return json.Com_alter_user || 0;</params>
<key>db.mysql[Com_insert]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!qps</units>
<applications>
<application>
@ -378,7 +385,8 @@ return json.Com_alter_user || 0;</params>
<key>db.mysql[Com_prepare_sql]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!qps</units>
<applications>
<application>
@ -405,7 +413,8 @@ return json.Com_alter_user || 0;</params>
<key>db.mysql[Com_rollback]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!qps</units>
<applications>
<application>
@ -432,7 +441,8 @@ return json.Com_alter_user || 0;</params>
<key>db.mysql[Com_select]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!qps</units>
<applications>
<application>
@ -459,7 +469,8 @@ return json.Com_alter_user || 0;</params>
<key>db.mysql[Com_update]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!qps</units>
<applications>
<application>
@ -486,7 +497,7 @@ return json.Com_alter_user || 0;</params>
<key>db.mysql[Connections]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>!conn</units>
<applications>
<application>
@ -513,7 +524,7 @@ return json.Com_alter_user || 0;</params>
<key>db.mysql[Innodb_buffer_pool_bytes_data]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@ -536,7 +547,7 @@ return json.Com_alter_user || 0;</params>
<key>db.mysql[Innodb_buffer_pool_bytes_dirty]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@ -594,7 +605,8 @@ return json.Innodb_buffer_pool_load_status || 'Buffer pool(s) load completed at'
<key>db.mysql[Innodb_buffer_pool_reads]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!reads</units>
<applications>
<application>
@ -621,7 +633,8 @@ return json.Innodb_buffer_pool_load_status || 'Buffer pool(s) load completed at'
<key>db.mysql[Innodb_data_fsyncs]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!fsync/s</units>
<applications>
<application>
@ -648,7 +661,8 @@ return json.Innodb_buffer_pool_load_status || 'Buffer pool(s) load completed at'
<key>db.mysql[Innodb_os_log_fsyncs]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!fsync/s</units>
<applications>
<application>
@ -675,7 +689,8 @@ return json.Innodb_buffer_pool_load_status || 'Buffer pool(s) load completed at'
<key>db.mysql[Max_statement_time_exceeded]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!qps</units>
<applications>
<application>
@ -703,7 +718,7 @@ return json.Max_statement_time_exceeded || 0;</params>
<key>db.mysql[Memory_used]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@ -727,7 +742,7 @@ return json.Memory_used || 0;</params>
<key>db.mysql[Open_files]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>!files</units>
<applications>
<application>
@ -773,7 +788,8 @@ return json.Memory_used || 0;</params>
<key>db.mysql[Qcache_hits]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!qps</units>
<applications>
<application>
@ -800,7 +816,8 @@ return json.Memory_used || 0;</params>
<key>db.mysql[Queries]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!qps</units>
<applications>
<application>
@ -834,7 +851,8 @@ return json.Memory_used || 0;</params>
<key>db.mysql[Slow_queries]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>!qps</units>
<applications>
<application>
@ -868,7 +886,7 @@ return json.Memory_used || 0;</params>
<key>db.mysql[Threads_running]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>!conn</units>
<applications>
<application>
@ -891,7 +909,7 @@ return json.Memory_used || 0;</params>
<key>db.mysql[Uptime]</key>
<delay>0</delay>
<history>30d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>s</units>
<applications>
<application>

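A pattern worth noting in this diff: each per-second counter (units !qps, !fsync/s, !reads) gains <value_type>FLOAT</value_type> alongside the retention change, since a counter turned into a rate is rarely a whole number. A quick worked example with invented values:

Com_select counter: 1000 at t=0s, 1009 at t=60s
rate = (1009 - 1000) / 60 ≈ 0.15 qps, which needs FLOAT rather than the default unsigned integer type

The <trends> change itself simply shortens trend retention from 1825d (about five years) to 1095d (about three years).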
View File

@ -1,7 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>5.0</version>
<date>2021-06-19T12:12:25Z</date>
<date>2021-09-22T16:05:01Z</date>
<groups>
<group>
<name>Templates</name>
@ -33,8 +33,8 @@
<type>DEPENDENT</type>
<key>unifi.alarm</key>
<delay>0</delay>
<history>30d</history>
<trends>730d</trends>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Network</name>
@ -69,6 +69,7 @@
<type>DEPENDENT</type>
<key>unifi.build</key>
<delay>0</delay>
<history>30d</history>
<trends>0</trends>
<value_type>TEXT</value_type>
<applications>
@ -96,8 +97,8 @@
<item>
<name>Unifi: Controller info</name>
<key>unifi.check.all[{$UNIFI_URL},{$UNIFI_USER},{$UNIFI_PASS},{$UNIFI_SITE},unifi]</key>
<delay>5m</delay>
<history>1h</history>
<delay>10m</delay>
<history>0</history>
<trends>0</trends>
<value_type>TEXT</value_type>
<applications>
@ -115,8 +116,8 @@
<type>DEPENDENT</type>
<key>unifi.dev_adopted</key>
<delay>0</delay>
<history>30d</history>
<trends>730d</trends>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Unifi</name>
@ -145,8 +146,8 @@
<type>DEPENDENT</type>
<key>unifi.dev_pending</key>
<delay>0</delay>
<history>30d</history>
<trends>730d</trends>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Unifi</name>
@ -175,6 +176,7 @@
<type>DEPENDENT</type>
<key>unifi.version</key>
<delay>0</delay>
<history>30d</history>
<trends>0</trends>
<value_type>TEXT</value_type>
<applications>
@ -205,8 +207,8 @@
<type>DEPENDENT</type>
<key>unifi.wired_clients</key>
<delay>0</delay>
<history>30d</history>
<trends>730d</trends>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Unifi</name>
@ -228,8 +230,8 @@
<type>DEPENDENT</type>
<key>unifi.wired_guests</key>
<delay>0</delay>
<history>30d</history>
<trends>730d</trends>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Unifi</name>
@ -251,8 +253,8 @@
<type>DEPENDENT</type>
<key>unifi.wireless_clients</key>
<delay>0</delay>
<history>30d</history>
<trends>730d</trends>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Unifi</name>
@ -274,8 +276,8 @@
<type>DEPENDENT</type>
<key>unifi.wireless_guests</key>
<delay>0</delay>
<history>30d</history>
<trends>730d</trends>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Unifi</name>
@ -302,7 +304,7 @@
<item_prototype>
<name>Unifi: AP: {#UNIFI_DEV_NAME}: Info</name>
<key>unifi.check.all[{$UNIFI_URL},{$UNIFI_USER},{$UNIFI_PASS},{$UNIFI_SITE},dev,{#UNIFI_DEV_MAC},uap]</key>
<delay>5m</delay>
<delay>8m</delay>
<history>0</history>
<trends>0</trends>
<value_type>TEXT</value_type>
@ -2366,7 +2368,7 @@
<item_prototype>
<name>Unifi: Switch: {#UNIFI_DEV_NAME}: Info</name>
<key>unifi.check.all[{$UNIFI_URL},{$UNIFI_USER},{$UNIFI_PASS},{$UNIFI_SITE},dev,{#UNIFI_DEV_MAC},usw]</key>
<delay>5m</delay>
<delay>8m</delay>
<history>0</history>
<trends>0</trends>
<value_type>TEXT</value_type>
@ -3142,7 +3144,7 @@
<item_prototype>
<name>Unifi: Station {#UNIFI_STA_NAME}: Info</name>
<key>unifi.check.all[{$UNIFI_URL},{$UNIFI_USER},{$UNIFI_PASS},{$UNIFI_SITE},station,{#UNIFI_STA_MAC},wireless]</key>
<delay>5m</delay>
<delay>8m</delay>
<history>0</history>
<trends>0</trends>
<value_type>TEXT</value_type>
@ -3811,7 +3813,7 @@
<item_prototype>
<name>Unifi: WLAN {#UNIFI_WLAN_NAME}: Info</name>
<key>unifi.check.all[{$UNIFI_URL},{$UNIFI_USER},{$UNIFI_PASS},{$UNIFI_SITE},wlan,{#UNIFI_WLAN_ID},]</key>
<delay>5m</delay>
<delay>8m</delay>
<history>0</history>
<trends>0</trends>
<value_type>TEXT</value_type>

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,195 @@
zabbix_export:
version: '6.0'
date: '2023-12-21T14:57:39Z'
groups:
- uuid: 7df96b18c230490a9a0a9e2307226338
name: Templates
templates:
- uuid: 98cb8260bbeb4a94a8b07b54608521c8
template: Template_HW_storageDevices
name: Template_HW_storageDevices
groups:
- name: Templates
discovery_rules:
- uuid: 12b9a456943f49aca440958063a5bfe2
name: 'SMART capable Storage devices discovery'
key: 'stor.dev.discovery[smart]'
delay: 1h
filter:
conditions:
- macro: '{#STOR_DEV_SMART}'
value: ^1$
formulaid: A
lifetime: 7d
item_prototypes:
- uuid: 0743506e1c1b49f5b47bd5cef5462c55
name: 'Stor dev {#STOR_DEV_SN} ({#STOR_DEV_DESC}): Pending sectors'
type: DEPENDENT
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},current_pending_sector]'
delay: '0'
history: 30d
value_type: FLOAT
units: '!sector(s)'
preprocessing:
- type: JSONPATH
parameters:
- $.current_pending_sector
master_item:
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE}]'
trigger_prototypes:
- uuid: 9ffa5efc061542e8a5f227936396e5ab
expression: 'change(/Template_HW_storageDevices/stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},current_pending_sector])>0'
recovery_mode: NONE
name: '{ITEM.LASTVALUE1} pending sector(s) on {#STOR_DEV_SN} ({#STOR_DEV_DESC})'
priority: AVERAGE
manual_close: 'YES'
- uuid: 3da3d8491a024c7ab4f0bb3fc3887a64
name: 'Stor dev {#STOR_DEV_SN} ({#STOR_DEV_DESC}): Firmware version'
type: DEPENDENT
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},firmware_version]'
delay: '0'
history: 30d
trends: '0'
value_type: CHAR
preprocessing:
- type: JSONPATH
parameters:
- $.firmware_version
master_item:
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE}]'
- uuid: 6d4967ca28f549048a507209737fa7bd
name: 'Stor dev {#STOR_DEV_SN} ({#STOR_DEV_DESC}): Offline uncorrectable'
type: DEPENDENT
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},offline_uncorrectable]'
delay: '0'
history: 30d
value_type: FLOAT
units: '!sector(s)'
preprocessing:
- type: JSONPATH
parameters:
- $.offline_uncorrectable
master_item:
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE}]'
trigger_prototypes:
- uuid: 66c5fc3a96354eb3aae8d5a08eea4544
expression: 'change(/Template_HW_storageDevices/stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},offline_uncorrectable])>0'
recovery_mode: NONE
name: '{ITEM.LASTVALUE1} offline uncorrectable sector(s) on {#STOR_DEV_SN} ({#STOR_DEV_DESC})'
priority: HIGH
manual_close: 'YES'
- uuid: df1ef0de1bc54200895a5d173a583905
name: 'Stor dev {#STOR_DEV_SN} ({#STOR_DEV_DESC}): Percent Lifetime Remain'
type: DEPENDENT
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},percent_lifetime_remain]'
delay: '0'
history: 30d
value_type: FLOAT
units: '%'
preprocessing:
- type: JSONPATH
parameters:
- $.percent_lifetime_remain
master_item:
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE}]'
trigger_prototypes:
- uuid: caaa6496f8e941a08f37c8746bb34bc2
expression: 'last(/Template_HW_storageDevices/stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},percent_lifetime_remain])<{$STOR_LIFETIME_REMAINING_CRITICAL:"{#STOR_DEV_SN}"}'
name: '{ITEM.LASTVALUE1} lifetime remaining for {#STOR_DEV_SN} ({#STOR_DEV_DESC})'
priority: HIGH
manual_close: 'YES'
- uuid: 1df675ca6c4649a081ea315599ce4284
expression: 'last(/Template_HW_storageDevices/stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},percent_lifetime_remain])<{$STOR_LIFETIME_REMAINING_WARN:"{#STOR_DEV_SN}"}'
name: '{ITEM.LASTVALUE1} lifetime remaining for {#STOR_DEV_SN} ({#STOR_DEV_DESC})'
priority: WARNING
manual_close: 'YES'
dependencies:
- name: '{ITEM.LASTVALUE1} lifetime remaining for {#STOR_DEV_SN} ({#STOR_DEV_DESC})'
expression: 'last(/Template_HW_storageDevices/stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},percent_lifetime_remain])<{$STOR_LIFETIME_REMAINING_CRITICAL:"{#STOR_DEV_SN}"}'
- uuid: 70dd4bcd3994423594f7330bbb448038
name: 'Stor dev {#STOR_DEV_SN} ({#STOR_DEV_DESC}): Power Cycle Count'
type: DEPENDENT
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},power_cycle_count]'
delay: '0'
history: 30d
value_type: FLOAT
preprocessing:
- type: JSONPATH
parameters:
- $.power_cycle_count
master_item:
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE}]'
- uuid: f68f569400954636a7e4231c97c57c81
name: 'Stor dev {#STOR_DEV_SN} ({#STOR_DEV_DESC}): Power on hours'
type: DEPENDENT
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},power_on_hours]'
delay: '0'
history: 30d
value_type: FLOAT
units: s
preprocessing:
- type: JSONPATH
parameters:
- $.power_on_hours
- type: MULTIPLIER
parameters:
- '3600'
master_item:
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE}]'
- uuid: 76e724749c304d7da9c38b81bfb9b711
name: 'Stor dev {#STOR_DEV_SN} ({#STOR_DEV_DESC}): Reallocated Sectors'
type: DEPENDENT
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},reallocated_sector_count]'
delay: '0'
history: 30d
value_type: FLOAT
units: '!sector(s)'
preprocessing:
- type: JSONPATH
parameters:
- $.reallocated_sector_count
master_item:
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE}]'
trigger_prototypes:
- uuid: f7b3fa0f6a8347e3b67387da531a2d1a
expression: 'change(/Template_HW_storageDevices/stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},reallocated_sector_count])>0'
recovery_mode: NONE
name: '{ITEM.LASTVALUE1} reallocated on {#STOR_DEV_SN} ({#STOR_DEV_DESC})'
priority: WARNING
manual_close: 'YES'
dependencies:
- name: '{ITEM.LASTVALUE1} reallocated on {#STOR_DEV_SN} ({#STOR_DEV_DESC})'
expression: 'last(/Template_HW_storageDevices/stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},reallocated_sector_count])>{$STOR_REALLOC_SECTOR_WARN:"{#STOR_DEV_SN}"}'
- uuid: 85d6eb29ca7b4175bf8ad15414a44936
expression: 'last(/Template_HW_storageDevices/stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},reallocated_sector_count])>{$STOR_REALLOC_SECTOR_WARN:"{#STOR_DEV_SN}"}'
name: '{ITEM.LASTVALUE1} reallocated on {#STOR_DEV_SN} ({#STOR_DEV_DESC})'
priority: AVERAGE
manual_close: 'YES'
- uuid: 28117d0d998f467ea542821e4a6507c9
name: 'Stor dev {#STOR_DEV_SN} ({#STOR_DEV_DESC}): Temperature'
type: DEPENDENT
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE},temperature_celsius]'
delay: '0'
history: 30d
value_type: FLOAT
units: °C
preprocessing:
- type: JSONPATH
parameters:
- $.temperature_celsius
master_item:
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE}]'
- uuid: 6d3da698590e43f4be62d5daab7f72f0
name: 'Stor dev {#STOR_DEV_SN} ({#STOR_DEV_DESC}): Info'
key: 'stor.dev.info[{#STOR_DEV_NAME},{#STOR_DEV_TYPE}]'
delay: 8m
history: '0'
trends: '0'
value_type: TEXT
macros:
- macro: '{$STOR_LIFETIME_REMAINING_CRITICAL}'
value: '6'
- macro: '{$STOR_LIFETIME_REMAINING_WARN}'
value: '10'
- macro: '{$STOR_REALLOC_SECTOR_WARN}'
value: '40'

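The same master/dependent layout applies here: the SMART monitoring script emits one JSON blob per device and every item above extracts a single field from it. A hedged sketch of that blob, inferred from the JSONPATH steps (field names come from the template; values are invented):

{
  "firmware_version": "4B6Q",
  "temperature_celsius": 34,
  "power_on_hours": 17520,
  "power_cycle_count": 87,
  "current_pending_sector": 0,
  "offline_uncorrectable": 0,
  "reallocated_sector_count": 0,
  "percent_lifetime_remain": 93
}

Note the MULTIPLIER step on power_on_hours: the raw hour count is multiplied by 3600 so the stored value is in seconds, matching the item's units of s.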
View File

@ -1,7 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>5.0</version>
<date>2021-06-19T12:16:52Z</date>
<date>2021-09-22T16:03:32Z</date>
<groups>
<group>
<name>Templates</name>
@ -51,7 +51,7 @@
<key>agent.ping</key>
<delay>5m</delay>
<history>30d</history>
<trends>730d</trends>
<trends>180d</trends>
<valuemap>
<name>Service state</name>
</valuemap>
@ -75,7 +75,8 @@
<name>Opened files limit</name>
<key>kernel.maxfiles</key>
<delay>1h</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<units>Files</units>
<preprocessing>
<step>
@ -89,7 +90,8 @@
<name>Max number of processes</name>
<key>kernel.maxproc</key>
<delay>1h</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<units>!process</units>
<preprocessing>
<step>
@ -103,7 +105,8 @@
<name>Opened files</name>
<key>kernel.openedfiles</key>
<delay>5m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<units>Files</units>
<request_method>POST</request_method>
</item>
@ -124,8 +127,9 @@
<item>
<name>Number of active processes</name>
<key>proc.num[,,run]</key>
<delay>2m</delay>
<trends>1825d</trends>
<delay>3m</delay>
<history>60d</history>
<trends>1095d</trends>
<request_method>POST</request_method>
<triggers>
<trigger>
@ -139,7 +143,8 @@
<name>Number of processes</name>
<key>proc.num[]</key>
<delay>5m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<request_method>POST</request_method>
<triggers>
<trigger>
@ -154,7 +159,8 @@
<item>
<name>CPU load</name>
<key>system.cpu.load[,avg1]</key>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<applications>
<application>
@ -167,7 +173,8 @@
<name>CPU load (5min)</name>
<key>system.cpu.load[,avg5]</key>
<delay>5m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<applications>
<application>
@ -180,7 +187,8 @@
<name>CPU load (15min)</name>
<key>system.cpu.load[,avg15]</key>
<delay>15m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<applications>
<application>
@ -193,7 +201,8 @@
<name>Number of CPU</name>
<key>system.cpu.num[]</key>
<delay>1d</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<units>cpu</units>
<applications>
<application>
@ -205,8 +214,9 @@
<item>
<name>CPU context switch</name>
<key>system.cpu.switches</key>
<delay>30s</delay>
<trends>1825d</trends>
<delay>2m</delay>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Processor</name>
@ -223,8 +233,8 @@
<item>
<name>CPU usage: $2</name>
<key>system.cpu.util[,interrupt,avg1]</key>
<delay>45s</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@ -237,8 +247,8 @@
<item>
<name>CPU usage: $2</name>
<key>system.cpu.util[,iowait,avg1]</key>
<delay>45s</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@ -251,8 +261,8 @@
<item>
<name>CPU usage: $2</name>
<key>system.cpu.util[,nice,avg1]</key>
<delay>45s</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@ -265,8 +275,8 @@
<item>
<name>CPU usage: $2</name>
<key>system.cpu.util[,softirq,avg1]</key>
<delay>45s</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@ -279,8 +289,8 @@
<item>
<name>CPU usage: $2</name>
<key>system.cpu.util[,system,avg1]</key>
<delay>45s</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@ -293,8 +303,8 @@
<item>
<name>CPU usage: $2</name>
<key>system.cpu.util[,user,avg1]</key>
<delay>45s</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@ -308,7 +318,7 @@
<name>Hostname</name>
<key>system.hostname</key>
<delay>2h</delay>
<history>30d</history>
<history>15d</history>
<trends>0</trends>
<value_type>TEXT</value_type>
<applications>
@ -321,7 +331,9 @@
<item>
<name>Swapped in pages</name>
<key>system.swap.in[,pages]</key>
<trends>1825d</trends>
<delay>3m</delay>
<history>60d</history>
<trends>1095d</trends>
<units>pages/s</units>
<applications>
<application>
@ -339,7 +351,9 @@
<item>
<name>Swapped out pages</name>
<key>system.swap.out[,pages]</key>
<trends>1825d</trends>
<delay>3m</delay>
<history>60d</history>
<trends>1095d</trends>
<units>pages/s</units>
<applications>
<application>
@ -358,7 +372,8 @@
<name>Swap: Free</name>
<key>system.swap.size[,free]</key>
<delay>3m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@ -371,7 +386,8 @@
<name>Swap: Free %</name>
<key>system.swap.size[,pfree]</key>
<delay>3m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@ -385,7 +401,8 @@
<name>Swap: Used %</name>
<key>system.swap.size[,pused]</key>
<delay>3m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@ -399,7 +416,8 @@
<name>Swap: Total</name>
<key>system.swap.size[,total]</key>
<delay>1h</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@ -418,7 +436,8 @@
<name>Swap: Used</name>
<key>system.swap.size[,used]</key>
<delay>3m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@ -430,8 +449,9 @@
<item>
<name>Uptime</name>
<key>system.uptime</key>
<delay>5m</delay>
<trends>1825d</trends>
<delay>10m</delay>
<history>60d</history>
<trends>1095d</trends>
<units>s</units>
<request_method>POST</request_method>
</item>
@ -439,7 +459,8 @@
<name>Connected users</name>
<key>system.users.num</key>
<delay>5m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<request_method>POST</request_method>
<triggers>
<trigger>
@ -719,7 +740,9 @@
<name>Memory: Used %</name>
<type>CALCULATED</type>
<key>vm.memory.size.pused</key>
<trends>1825d</trends>
<delay>3m</delay>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<params>100-last(&quot;vm.memory.size[pavailable]&quot;)</params>
@ -734,7 +757,9 @@
<name>Memory: Used</name>
<type>CALCULATED</type>
<key>vm.memory.size.used</key>
<trends>1825d</trends>
<delay>3m</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<params>last(&quot;vm.memory.size[total]&quot;)-last(&quot;vm.memory.size[available]&quot;)</params>
<applications>
@ -747,7 +772,9 @@
<item>
<name>Memory: Available</name>
<key>vm.memory.size[available]</key>
<trends>1825d</trends>
<delay>3m</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@ -758,7 +785,9 @@
<item>
<name>Memory: Buffers</name>
<key>vm.memory.size[buffers]</key>
<trends>1825d</trends>
<delay>3m</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@ -770,7 +799,9 @@
<item>
<name>Memory: Cache</name>
<key>vm.memory.size[cached]</key>
<trends>1825d</trends>
<delay>3m</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@ -782,7 +813,9 @@
<item>
<name>Memory: Free</name>
<key>vm.memory.size[free]</key>
<trends>1825d</trends>
<delay>3m</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@ -794,7 +827,9 @@
<item>
<name>Memory: Available %</name>
<key>vm.memory.size[pavailable]</key>
<trends>1825d</trends>
<delay>3m</delay>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@ -806,8 +841,9 @@
<item>
<name>Memory: Total</name>
<key>vm.memory.size[total]</key>
<delay>15m</delay>
<trends>1825d</trends>
<delay>3m</delay>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@ -842,8 +878,9 @@
<item_prototype>
<name>SMART: {#SMARTDRIVE}: $2</name>
<key>hardware.disk.smart[{#SMARTDRIVE},Current_Pending_Sector]</key>
<delay>15m</delay>
<trends>1825d</trends>
<delay>30m</delay>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Disks</name>
@ -854,8 +891,9 @@
<item_prototype>
<name>SMART: {#SMARTDRIVE}: $2</name>
<key>hardware.disk.smart[{#SMARTDRIVE},Offline_Uncorrectable]</key>
<delay>15m</delay>
<trends>1825d</trends>
<delay>30m</delay>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Disks</name>
@ -867,7 +905,8 @@
<name>SMART: {#SMARTDRIVE}: $2</name>
<key>hardware.disk.smart[{#SMARTDRIVE},Power_Cycle_Count]</key>
<delay>1h</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Disks</name>
@ -879,7 +918,8 @@
<name>SMART: {#SMARTDRIVE}: $2</name>
<key>hardware.disk.smart[{#SMARTDRIVE},Power_On_Hours]</key>
<delay>1h</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<units>s</units>
<applications>
<application>
@ -897,8 +937,9 @@
<item_prototype>
<name>SMART: {#SMARTDRIVE}: $2</name>
<key>hardware.disk.smart[{#SMARTDRIVE},Reallocated_Sector_Ct]</key>
<delay>15m</delay>
<trends>1825d</trends>
<delay>30m</delay>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Disks</name>
@ -938,7 +979,8 @@
<name>Temperature $1</name>
<key>hardware.sensor[{#SENSORNAME}]</key>
<delay>5m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>°C</units>
<applications>
@ -971,7 +1013,8 @@
<name>Rotation speed for {#SENSORNAME}</name>
<key>hardware.sensor[{#SENSORNAME},fan]</key>
<delay>10m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>rpm</units>
<applications>
@ -1010,8 +1053,9 @@
<item_prototype>
<name>Power consumption ({#SENSORNAME})</name>
<key>hardware.sensor[{#SENSORNAME},power]</key>
<delay>5m</delay>
<trends>1825d</trends>
<delay>10m</delay>
<history>60d</history>
<trends>1095d</trends>
<units>{#SENSORUNIT}</units>
<applications>
<application>
@ -1041,8 +1085,8 @@
<item_prototype>
<name>Inbound traffic for {#IFNAME}</name>
<key>net.if.in[{#IFNAME},bytes]</key>
<delay>45s</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>bps</units>
<applications>
@ -1066,7 +1110,8 @@
<name>Errors on inbound traffic for {#IFNAME}</name>
<key>net.if.in[{#IFNAME},errors]</key>
<delay>15m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>errors</units>
<applications>
@ -1085,8 +1130,8 @@
<item_prototype>
<name>Outbound traffic for {#IFNAME}</name>
<key>net.if.out[{#IFNAME},bytes]</key>
<delay>45s</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>bps</units>
<applications>
@ -1110,7 +1155,8 @@
<name>Errors on outbound traffic for {#IFNAME}</name>
<key>net.if.out[{#IFNAME},errors]</key>
<delay>15m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>errors</units>
<applications>
@ -1196,7 +1242,8 @@
<name>Read operations on {#DEVNAME}</name>
<key>vfs.dev.read[{#DEVNAME},ops]</key>
<delay>2m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>iops</units>
<applications>
@ -1210,7 +1257,8 @@
<name>Reads on {#DEVNAME}</name>
<key>vfs.dev.read[{#DEVNAME},sectors]</key>
<delay>2m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>B/s</units>
<applications>
@ -1234,7 +1282,8 @@
<name>Write operations on {#DEVNAME}</name>
<key>vfs.dev.write[{#DEVNAME},ops]</key>
<delay>2m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>iops</units>
<applications>
@ -1254,7 +1303,8 @@
<name>Writes on {#DEVNAME}</name>
<key>vfs.dev.write[{#DEVNAME},sectors]</key>
<delay>2m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>B/s</units>
<applications>
@ -1302,7 +1352,8 @@
<name>Free inodes on {#FSNAME}</name>
<key>vfs.fs.inode[{#FSNAME},free]</key>
<delay>10m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<units>inodes</units>
<applications>
<application>
@ -1315,7 +1366,8 @@
<name>Used inodes on {#FSNAME} (%)</name>
<key>vfs.fs.inode[{#FSNAME},pused]</key>
<delay>15m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@ -1345,7 +1397,8 @@
<name>Used inodes on {#FSNAME}</name>
<key>vfs.fs.inode[{#FSNAME},used]</key>
<delay>15m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<units>inodes</units>
<applications>
<application>
@ -1358,7 +1411,8 @@
<name>Free space on {#FSNAME}</name>
<key>vfs.fs.size[{#FSNAME},free]</key>
<delay>10m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@ -1371,7 +1425,8 @@
<name>Used space on {#FSNAME} (%)</name>
<key>vfs.fs.size[{#FSNAME},pused]</key>
<delay>10m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@ -1410,7 +1465,8 @@
<name>Used space on {#FSNAME}</name>
<key>vfs.fs.size[{#FSNAME},used]</key>
<delay>10m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@ -1434,7 +1490,7 @@
<key>vfs.lvm.vg[{#LVMGRP},alloc_ct]</key>
<delay>15m</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<params>100*last(&quot;vfs.lvm.vg[{#LVMGRP},alloc_pe_size]&quot;)/last(&quot;vfs.lvm.vg[{#LVMGRP},vg_size]&quot;)</params>
@ -1453,7 +1509,7 @@
<key>vfs.lvm.vg[{#LVMGRP},alloc_pe_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@ -1479,7 +1535,7 @@
<key>vfs.lvm.vg[{#LVMGRP},cur_lv]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>!lv</units>
<applications>
<application>
@ -1505,7 +1561,7 @@
<key>vfs.lvm.vg[{#LVMGRP},vg_size]</key>
<delay>0</delay>
<history>60d</history>
<trends>1825d</trends>
<trends>1095d</trends>
<units>B</units>
<applications>
<application>
@ -1561,8 +1617,9 @@
<item_prototype>
<name>LVM: {#LVMSNAP} allocation</name>
<key>vfs.lvm.lv[{#LVMSNAP},allocation]</key>
<delay>5m</delay>
<trends>1825d</trends>
<delay>10m</delay>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@ -1602,7 +1659,8 @@
<name>Metadata allocation for pool {#LVMTHINP}</name>
<key>vfs.lvm.lv[{#LVMTHINP},allocation_metadata]</key>
<delay>10m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@ -1623,7 +1681,8 @@
<name>Data allocation for pool {#LVMTHINP}</name>
<key>vfs.lvm.lv[{#LVMTHINP},allocation_pool_data]</key>
<delay>10m</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>

View File

@ -1,7 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>5.0</version>
<date>2021-06-19T12:17:09Z</date>
<date>2021-09-22T16:04:27Z</date>
<groups>
<group>
<name>Templates</name>
@ -112,7 +112,7 @@
</items>
<discovery_rules>
<discovery_rule>
<name>Découverte onduleurs</name>
<name>UPS discovery</name>
<key>hardware.ups.discovery[]</key>
<delay>1800</delay>
<lifetime>1d</lifetime>
@ -120,7 +120,9 @@
<item_prototype>
<name>UPS: Battery charge level for {#UPSNAME}</name>
<key>hardware.ups[{#UPSNAME},battery.charge]</key>
<trends>1825d</trends>
<delay>2m</delay>
<history>60d</history>
<trends>1095d</trends>
<units>%</units>
<applications>
<application>
@ -132,7 +134,9 @@
<item_prototype>
<name>UPS: Load on {#UPSNAME}</name>
<key>hardware.ups[{#UPSNAME},ups.load]</key>
<trends>1825d</trends>
<delay>2m</delay>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@ -152,6 +156,8 @@
<item_prototype>
<name>UPS: Status of {#UPSNAME}</name>
<key>hardware.ups[{#UPSNAME},ups.status]</key>
<delay>2m</delay>
<history>30d</history>
<trends>0</trends>
<value_type>CHAR</value_type>
<applications>
@ -231,7 +237,8 @@
<type>DEPENDENT</type>
<key>httpd[{#HTTPD_STATUS_URI},busyworkers]</key>
<delay>0</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Applications</name>
@ -253,7 +260,8 @@
<type>DEPENDENT</type>
<key>httpd[{#HTTPD_STATUS_URI},bytesperreq]</key>
<delay>0</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>B</units>
<applications>
@ -277,7 +285,8 @@
<type>DEPENDENT</type>
<key>httpd[{#HTTPD_STATUS_URI},bytespersec]</key>
<delay>0</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>b/s</units>
<applications>
@ -312,7 +321,8 @@
<type>DEPENDENT</type>
<key>httpd[{#HTTPD_STATUS_URI},idleworkers]</key>
<delay>0</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Applications</name>
@ -334,7 +344,8 @@
<type>DEPENDENT</type>
<key>httpd[{#HTTPD_STATUS_URI},reqpersec]</key>
<delay>0</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>r/s</units>
<applications>
@ -425,8 +436,8 @@
<type>DEPENDENT</type>
<key>nginx.status.dep[{#NGINX_STATUS_URI},active_conn]</key>
<delay>0</delay>
<history>180d</history>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Applications</name>
@ -448,8 +459,8 @@
<type>DEPENDENT</type>
<key>nginx.status.dep[{#NGINX_STATUS_URI},conn_per_sec]</key>
<delay>0</delay>
<history>180d</history>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>c/s</units>
<applications>
@ -477,8 +488,8 @@
<type>DEPENDENT</type>
<key>nginx.status.dep[{#NGINX_STATUS_URI},keep_alive_conn]</key>
<delay>0</delay>
<history>180d</history>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Applications</name>
@ -500,8 +511,8 @@
<type>DEPENDENT</type>
<key>nginx.status.dep[{#NGINX_STATUS_URI},req_per_sec]</key>
<delay>0</delay>
<history>180d</history>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>r/s</units>
<applications>
@ -585,11 +596,12 @@
<lifetime>1h</lifetime>
<item_prototypes>
<item_prototype>
<name>Latence du node {#PVE_NODE_NAME}</name>
<name>Latency for node {#PVE_NODE_NAME}</name>
<type>DEPENDENT</type>
<key>net.icmp.info[{#PVE_NODE_IP},latency]</key>
<delay>0</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>s</units>
<applications>
@ -618,11 +630,12 @@
</trigger_prototypes>
</item_prototype>
<item_prototype>
<name>Taux de perte de ping pour le node {#PVE_NODE_NAME}</name>
<name>Ping loss for node {#PVE_NODE_NAME}</name>
<type>DEPENDENT</type>
<key>net.icmp.info[{#PVE_NODE_IP},loss]</key>
<delay>0</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<value_type>FLOAT</value_type>
<units>%</units>
<applications>
@ -651,11 +664,12 @@
</trigger_prototypes>
</item_prototype>
<item_prototype>
<name>Ping du node {#PVE_NODE_NAME}</name>
<name>Ping response for node {#PVE_NODE_NAME}</name>
<type>DEPENDENT</type>
<key>net.icmp.info[{#PVE_NODE_IP},respond]</key>
<delay>0</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<applications>
<application>
<name>Latency</name>
@ -675,9 +689,9 @@
</master_item>
</item_prototype>
<item_prototype>
<name>Stat ICMP du node {#PVE_NODE_NAME}</name>
<name>ICMP stats for node {#PVE_NODE_NAME}</name>
<key>net.icmp[{#PVE_NODE_IP},all]</key>
<delay>5s</delay>
<delay>30s</delay>
<history>0</history>
<trends>0</trends>
<value_type>TEXT</value_type>
@ -768,6 +782,7 @@
<name>MegaRAID status ({#CONTROLLERNO})</name>
<key>raid.mega.status[{$MEGARAID_SPARES},{$MEGARAID_IGNORE_OTHER},{$MEGARAID_IGNORE_MEDIA},{#CONTROLLERNO}]</key>
<delay>15m</delay>
<history>30d</history>
<trends>0</trends>
<value_type>CHAR</value_type>
<applications>
@ -836,7 +851,7 @@
<item_prototype>
<name>{#DEVICE} status</name>
<key>raid.sw.status[{#DEVICE}]</key>
<delay>5m</delay>
<delay>10m</delay>
<history>30d</history>
<trends>0</trends>
<value_type>CHAR</value_type>
@ -892,7 +907,7 @@
<item_prototype>
<name>VDO: Info for {#VDO_VOL}</name>
<key>vfs.vdo.vol.all[{#VDO_VOL}]</key>
<delay>10m</delay>
<delay>15m</delay>
<history>0</history>
<trends>0</trends>
<value_type>TEXT</value_type>
@ -907,6 +922,7 @@
<type>DEPENDENT</type>
<key>vfs.vdo.vol.info[{#VDO_VOL},operating_mode]</key>
<delay>0</delay>
<history>30d</history>
<trends>0</trends>
<value_type>CHAR</value_type>
<applications>
@ -939,7 +955,8 @@
<type>DEPENDENT</type>
<key>vfs.vdo.vol.info[{#VDO_VOL},saving_percent]</key>
<delay>0</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<units>%</units>
<applications>
<application>
@ -964,7 +981,8 @@
<type>DEPENDENT</type>
<key>vfs.vdo.vol.info[{#VDO_VOL},used_percent]</key>
<delay>0</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<units>%</units>
<applications>
<application>
@ -996,7 +1014,8 @@
<type>DEPENDENT</type>
<key>vfs.vdo.vol.info[{#VDO_VOL},version]</key>
<delay>0</delay>
<trends>1825d</trends>
<history>60d</history>
<trends>1095d</trends>
<preprocessing>
<step>
<type>JSONPATH</type>
@ -1019,6 +1038,7 @@
<type>DEPENDENT</type>
<key>vfs.vdo.vol.info[{#VDO_VOL},write_policy]</key>
<delay>0</delay>
<history>30d</history>
<trends>0</trends>
<value_type>CHAR</value_type>
<applications>