diff --git a/zabbix_conf/gluster.conf b/zabbix_conf/gluster.conf
new file mode 100644
index 0000000..e0cf06a
--- /dev/null
+++ b/zabbix_conf/gluster.conf
@@ -0,0 +1,8 @@
+
+# Discover GlusterFS volumes or peers, based on the argument
+UserParameter=gluster.discovery[*],/usr/bin/sudo /var/lib/zabbix/bin/disco_gluster_sudo --what=$1
+
+# Check GlusterFS volume or peer status
+UserParameter=gluster.volume.status[*],/usr/bin/sudo /var/lib/zabbix/bin/check_gluster_sudo --what=volume --volume=$1 --bricks=$2
+UserParameter=gluster.peer.status[*],/usr/bin/sudo /var/lib/zabbix/bin/check_gluster_sudo --what=peer --peer=$1
+
diff --git a/zabbix_scripts/check_gluster_sudo b/zabbix_scripts/check_gluster_sudo
new file mode 100644
index 0000000..d992bf4
--- /dev/null
+++ b/zabbix_scripts/check_gluster_sudo
@@ -0,0 +1,109 @@
+#!/usr/bin/perl -w
+
+use strict;
+use File::Which;
+use Getopt::Long;
+
+my $what = 'volume';
+my $volume = undef;
+my $peer = undef;
+my $bricks = undef;
+
+my $gluster = which('gluster');
+
+unless($gluster){
+    # Gluster is not installed, exit with an error
+    die "gluster command not found";
+}
+
+GetOptions(
+    "what=s"   => \$what,
+    "volume=s" => \$volume,
+    "bricks=i" => \$bricks,
+    "peer=s"   => \$peer
+);
+
+sub usage(){
+    print <<"EOF";
+
+usage: $0 --what=[peer|volume]
+
+If --what=volume you need to pass --volume=<volume name>. The optional --bricks arg can be used to pass the expected number of bricks.
+If --what=peer you need to pass --peer=<peer hostname>.
+
+EOF
+}
+
+if (($what eq 'volume' && !$volume) ||
+    ($what eq 'peer' && !$peer) ||
+    ($what ne 'volume' && $what ne 'peer')){
+    usage();
+    exit(1);
+}
+
+if ($what eq 'volume'){
+    open (VOLUMEINFO, "$gluster vol status $volume |")
+        || die "error: Could not execute gluster vol status $volume";
+    my $bricksfound = 0;
+    my $status = 1;
+    foreach my $line (<VOLUMEINFO>){
+        # Check that all bricks are online
+        if ($line =~ m/^Brick\ [\w\.]+:\/[\w\.\/]+\s+\d+\s+(Y|N)/){
+            $bricksfound++;
+            $status = 0 if ($1 ne 'Y');
+        }
+        # Check the Self-Heal daemons are up and running
+        elsif ($line =~ m/^Self-heal\ Daemon\ on\ [\w\.]+\s+N\/A\s+(Y|N)/){
+            $status = 0 if ($1 ne 'Y');
+        }
+    }
+    # Check the number of bricks is the one we expect
+    if ($bricks && $bricks != $bricksfound){
+        $status = 0;
+    }
+    close VOLUMEINFO;
+    open (VOLUMEINFO, "$gluster vol heal $volume info heal-failed |")
+        || die "error: Could not execute gluster vol heal $volume info heal-failed";
+    foreach my $line (<VOLUMEINFO>){
+        # Now, check we don't have any file which the Self-Heal daemon couldn't sync
+        if ($line =~ m/^Number\ of\ entries:\s+(\d+)$/){
+            $status = 0 if ($1 > 0);
+        }
+    }
+    close VOLUMEINFO;
+    open (VOLUMEINFO, "$gluster vol heal $volume info split-brain |")
+        || die "error: Could not execute gluster vol heal $volume info split-brain";
+    foreach my $line (<VOLUMEINFO>){
+        # Now, check we don't have any file in a split-brain situation
+        if ($line =~ m/^Number\ of\ entries:\s+(\d+)$/){
+            $status = 0 if ($1 > 0);
+        }
+    }
+    close VOLUMEINFO;
+    open (VOLUMEINFO, "$gluster vol info $volume |")
+        || die "error: Could not execute gluster vol info $volume";
+    foreach my $line (<VOLUMEINFO>){
+        # Check the volume is started
+        if ($line =~ m/^Status:\s+(\w+)$/){
+            $status = 0 unless ($1 eq 'Started');
+        }
+    }
+    close VOLUMEINFO;
+    print $status;
+}
+elsif ($what eq 'peer'){
+    open (PEERLIST, "$gluster pool list |")
+        || die "error: Could not execute gluster pool list";
+
+    my $status = 0;
+    foreach my $line (<PEERLIST>){
+        if (($line =~ m/^$peer\s+/) ||
+            ($line =~ m/^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}\s+$peer\s+/)){
+            my (undef,undef,$state) = split(/\s+/, $line);
+            $status = 1 if ($state eq 'Connected');
+        }
+    }
+    close PEERLIST;
+    print $status;
+}
+
diff --git a/zabbix_scripts/disco_gluster_sudo b/zabbix_scripts/disco_gluster_sudo
new file mode 100644
index 0000000..b6ded03
--- /dev/null
+++ b/zabbix_scripts/disco_gluster_sudo
@@ -0,0 +1,106 @@
+#!/usr/bin/perl -w
+
+use strict;
+use File::Which;
+use Getopt::Long;
+use JSON;
+
+my $json;
+@{$json->{data}} = ();
+
+my $gluster = which('gluster');
+
+unless($gluster){
+    # Gluster is not installed, just return an empty JSON object
+    print to_json($json);
+    exit(0);
+}
+
+my $what = 'volumes';
+GetOptions(
+    "what=s" => \$what,
+);
+
+sub usage (){
+    print <<"EOF";
+
+Usage: $0 --what=[volumes|peers]
+
+EOF
+}
+
+if ($what eq 'volumes'){
+    open (VOLUMES, "$gluster vol info all |")
+        || die "error: Could not execute gluster vol info all";
+
+    foreach my $line (<VOLUMES>){
+        if ($line =~ m/^Volume\ Name:\ (\w+)$/){
+            my $vol = $1;
+            my ($type,$bricks,$uuid,$status,$transport) = ('unknown') x 5;
+            open (VOLUMEINFO, "$gluster vol info $vol |")
+                || die "error: Could not execute gluster vol info $vol";
+            foreach my $info (<VOLUMEINFO>){
+                if ($info =~ m/^Type:\ (.*)$/){
+                    $type = $1;
+                }
+                elsif ($info =~ m/^Volume\ ID:\ ([0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12})$/){
+                    $uuid = $1;
+                }
+                elsif ($info =~ m/^Status:\ (\w+)$/){
+                    $status = $1;
+                }
+                elsif ($info =~ m/^Transport-type:\ (\w+)$/){
+                    $transport = $1;
+                }
+                elsif ($info =~ m/^Number\ of\ Bricks:\ \d+\ x\ \d+\ =\ (\d+)$/){
+                    $bricks = $1;
+                }
+            }
+            close VOLUMEINFO;
+            push @{$json->{data}}, {
+                "{#GLUSTER_VOL_NAME}" => $vol,
+                "{#GLUSTER_VOL_TYPE}" => $type,
+                "{#GLUSTER_VOL_UUID}" => $uuid,
+                "{#GLUSTER_VOL_STATUS}" => $status,
+                "{#GLUSTER_VOL_TRANSPORT}" => $transport,
+                "{#GLUSTER_VOL_BRICKS}" => $bricks
+            };
+        }
+    }
+    close VOLUMES;
+}
+elsif ($what eq 'peers'){
+    open (PEERS, "$gluster peer status |")
+        || die "error: Could not execute gluster peer status";
+
+    my $peerno = 0;
+    my ($host,$uuid,$status) = ('unknown') x 3;
+    foreach my $line (<PEERS>){
+        if ($line =~ m/^Number of Peers:\ (\d+)$/){
+            $peerno = $1;
+        }
+        elsif ($line =~ m/^Hostname:\ ([\w\.]+)$/){
+            $host = $1;
+        }
+        elsif ($line =~ m/Uuid:\ ([0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12})$/){
+            $uuid = $1;
+        }
+        elsif ($line =~ m/State:\ [\w\s]+\((\w+)\)$/){
+            $status = $1;
+            push @{$json->{data}}, {
+                "{#GLUSTER_PEER_HOST}" => $host,
+                "{#GLUSTER_PEER_UUID}" => $uuid,
+                "{#GLUSTER_PEER_STATUS}" => $status
+            };
+        }
+    }
+    close PEERS;
+}
+else{
+    usage();
+    exit(1);
+}
+
+print to_json($json);
+exit(0);
+
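Deployment note: both helper scripts are run through /usr/bin/sudo by the Zabbix agent, so the agent user needs a matching sudoers rule. A minimal sketch, assuming the agent runs as the zabbix user and the scripts are installed under /var/lib/zabbix/bin as referenced in gluster.conf (the sudoers file name below is illustrative, not part of this patch):

    # /etc/sudoers.d/zabbix_gluster -- hypothetical file name
    # Let the zabbix user run the two GlusterFS helper scripts as root without a password
    Defaults:zabbix !requiretty
    zabbix ALL=(root) NOPASSWD: /var/lib/zabbix/bin/disco_gluster_sudo, /var/lib/zabbix/bin/check_gluster_sudo

On the Zabbix side, gluster.discovery[volumes] and gluster.discovery[peers] would typically back low-level discovery rules, with item prototypes such as gluster.volume.status[{#GLUSTER_VOL_NAME},{#GLUSTER_VOL_BRICKS}] and gluster.peer.status[{#GLUSTER_PEER_HOST}]; both status checks print 1 when everything looks healthy and 0 otherwise.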